pax_global_header00006660000000000000000000000064150562061640014517gustar00rootroot0000000000000052 comment=70c3ebf496f7c402dacc931b9484f626d4db065c xarray-2025.09.0/000077500000000000000000000000001505620616400133445ustar00rootroot00000000000000xarray-2025.09.0/.binder/000077500000000000000000000000001505620616400146655ustar00rootroot00000000000000xarray-2025.09.0/.binder/environment.yml000066400000000000000000000007721505620616400177620ustar00rootroot00000000000000name: xarray-examples channels: - conda-forge dependencies: - python=3.11 - boto3 - bottleneck - cartopy - cfgrib - cftime - coveralls - dask - distributed - dask_labextension - h5netcdf - h5py - hdf5 - iris - lxml # Optional dep of pydap - matplotlib - nc-time-axis - netcdf4 - numba - numpy - packaging - pandas - pint>=0.22 - pip - pooch - pydap - rasterio - scipy - seaborn - setuptools - sparse - toolz - xarray - zarr - numbagg xarray-2025.09.0/.codecov.yml000066400000000000000000000011641505620616400155710ustar00rootroot00000000000000codecov: require_ci_to_pass: true coverage: status: project: default: # Require 1% coverage, i.e., always succeed target: 1% flags: - unittests paths: - "!xarray/tests/" unittests: target: 90% flags: - unittests paths: - "!xarray/tests/" mypy: target: 20% flags: - mypy patch: false changes: false comment: false flags: unittests: paths: - "xarray" - "!xarray/tests" carryforward: false mypy: paths: - "xarray" carryforward: false xarray-2025.09.0/.git-blame-ignore-revs000066400000000000000000000001631505620616400174440ustar00rootroot00000000000000# black PR 3142 d089df385e737f71067309ff7abae15994d581ec # isort PR 1924 0e73e240107caee3ffd1a1149f0150c390d43251 xarray-2025.09.0/.git_archival.txt000066400000000000000000000002111505620616400166110ustar00rootroot00000000000000node: 70c3ebf496f7c402dacc931b9484f626d4db065c node-date: 2025-09-03T20:50:12-07:00 describe-name: v2025.09.0 ref-names: tag: v2025.09.0 xarray-2025.09.0/.gitattributes000066400000000000000000000002121505620616400162320ustar00rootroot00000000000000# reduce the number of merge conflicts doc/whats-new.rst merge=union # allow installing from git archives .git_archival.txt export-subst xarray-2025.09.0/.github/000077500000000000000000000000001505620616400147045ustar00rootroot00000000000000xarray-2025.09.0/.github/FUNDING.yml000066400000000000000000000000771505620616400165250ustar00rootroot00000000000000github: numfocus custom: https://numfocus.org/donate-to-xarray xarray-2025.09.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001505620616400170675ustar00rootroot00000000000000xarray-2025.09.0/.github/ISSUE_TEMPLATE/bugreport.yml000066400000000000000000000054171505620616400216320ustar00rootroot00000000000000name: πŸ› Bug Report description: File a bug report to help us improve labels: [bug, "needs triage"] body: - type: textarea id: what-happened attributes: label: What happened? description: | Thanks for reporting a bug! Please describe what you were trying to get done. Tell us what happened, what went wrong. validations: required: true - type: textarea id: what-did-you-expect-to-happen attributes: label: What did you expect to happen? description: | Describe what you expected to happen. validations: required: false - type: textarea id: sample-code attributes: label: Minimal Complete Verifiable Example description: | Minimal, self-contained copy-pastable example that demonstrates the issue. This will be automatically formatted into code, so no need for markdown backticks. 
render: Python - type: checkboxes id: mvce-checkboxes attributes: label: MVCE confirmation description: | Please confirm that the bug report is in an excellent state, so we can understand & fix it quickly & efficiently. For more details, check out: - [Minimal Complete Verifiable Examples](https://stackoverflow.com/help/mcve) - [Craft Minimal Bug Reports](https://matthewrocklin.com/minimal-bug-reports) options: - label: Minimal example β€” the example is as focused as reasonably possible to demonstrate the underlying issue in xarray. - label: Complete example β€” the example is self-contained, including all data and the text of any traceback. - label: Verifiable example β€” the example copy & pastes into an IPython prompt or [Binder notebook](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/blank_template.ipynb), returning the result. - label: New issue β€” a search of GitHub Issues suggests this is not a duplicate. - label: Recent environment β€” the issue occurs with the latest version of xarray and its dependencies. - type: textarea id: log-output attributes: label: Relevant log output description: Please copy and paste any relevant output. This will be automatically formatted into code, so no need for markdown backticks. render: Python - type: textarea id: extra attributes: label: Anything else we need to know? description: | Please describe any other information you want to share. - type: textarea id: show-versions attributes: label: Environment description: | Paste the output of `xr.show_versions()` between the `
<details>` tags, leaving an empty line following the opening tag. value: | <details> </details>
validations: required: true xarray-2025.09.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000012761505620616400210650ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: ❓ Usage question url: https://github.com/pydata/xarray/discussions about: | Ask questions and discuss with other community members here. If you have a question like "How do I concatenate a list of datasets?" then please include a self-contained reproducible example if possible. - name: πŸ—ΊοΈ Raster analysis usage question url: https://github.com/corteva/rioxarray/discussions about: | If you are using the rioxarray extension (engine='rasterio'), or have questions about raster analysis such as geospatial formats, coordinate reprojection, etc., please use the rioxarray discussion forum. xarray-2025.09.0/.github/ISSUE_TEMPLATE/misc.yml000066400000000000000000000007521505620616400205510ustar00rootroot00000000000000name: πŸ“ Issue description: General issue, that's not a bug report. labels: ["needs triage"] body: - type: markdown attributes: value: | Please describe your issue here. - type: textarea id: issue-description attributes: label: What is your issue? description: | Thank you for filing an issue! Please give us further information on how we can help you. placeholder: Please describe your issue. validations: required: true xarray-2025.09.0/.github/ISSUE_TEMPLATE/newfeature.yml000066400000000000000000000022061505620616400217570ustar00rootroot00000000000000name: πŸ’‘ Feature Request description: Suggest an idea for xarray labels: [enhancement] body: - type: textarea id: description attributes: label: Is your feature request related to a problem? description: | Please do a quick search of existing issues to make sure that this has not been asked before. Please provide a clear and concise description of what the problem is. Ex. I'm always frustrated when [...] validations: required: true - type: textarea id: solution attributes: label: Describe the solution you'd like description: | A clear and concise description of what you want to happen. - type: textarea id: alternatives attributes: label: Describe alternatives you've considered description: | A clear and concise description of any alternative solutions or features you've considered. validations: required: false - type: textarea id: additional-context attributes: label: Additional context description: | Add any other context about the feature request here. validations: required: false xarray-2025.09.0/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000004021505620616400205010ustar00rootroot00000000000000 - [ ] Closes #xxxx - [ ] Tests added - [ ] User visible changes (including notable bug fixes) are documented in `whats-new.rst` - [ ] New functions/methods are listed in `api.rst` xarray-2025.09.0/.github/config.yml000066400000000000000000000022201505620616400166700ustar00rootroot00000000000000# Comment to be posted to on first time issues newIssueWelcomeComment: > Thanks for opening your first issue here at xarray! Be sure to follow the issue template! If you have an idea for a solution, we would really welcome a Pull Request with proposed changes. See the [Contributing Guide](https://docs.xarray.dev/en/latest/contributing.html) for more. It may take us a while to respond here, but we really value your contribution. Contributors like you help make xarray better. Thank you! # Comment to be posted to on PRs from first time contributors in your repository newPRWelcomeComment: > Thank you for opening this pull request! 
It may take us a few days to respond here, so thank you for being patient. If you have questions, some answers may be found in our [contributing guidelines](https://docs.xarray.dev/en/stable/contributing.html). # Comment to be posted to on pull requests merged by a first time user firstPRMergeComment: > Congratulations on completing your first pull request! Welcome to Xarray! We are proud of you, and hope to see you again! ![celebration gif](https://media.giphy.com/media/umYMU8G2ixG5mJBDo5/giphy.gif) xarray-2025.09.0/.github/dependabot.yml000066400000000000000000000003311505620616400175310ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: # Check for updates once a week interval: "weekly" groups: actions: patterns: - "*" xarray-2025.09.0/.github/labeler.yml000066400000000000000000000045411505620616400170410ustar00rootroot00000000000000Automation: - changed-files: - any-glob-to-any-file: - .github/** CI: - changed-files: - any-glob-to-any-file: - ci/** dependencies: - changed-files: - any-glob-to-any-file: - ci/requirements/* topic-arrays: - changed-files: - any-glob-to-any-file: - xarray/core/duck_array_ops.py topic-backends: - changed-files: - any-glob-to-any-file: - xarray/backends/** topic-cftime: - changed-files: - any-glob-to-any-file: - xarray/coding/*time* topic-CF conventions: - changed-files: - any-glob-to-any-file: - xarray/conventions.py topic-dask: - changed-files: - any-glob-to-any-file: - xarray/compat/dask* - xarray/core/parallel.py topic-DataTree: - changed-files: - any-glob-to-any-file: - xarray/core/datatree* topic-documentation: - all: - changed-files: - any-glob-to-any-file: "doc/**/*" - all-globs-to-all-files: "!doc/whats-new.rst" topic-groupby: - changed-files: - any-glob-to-any-file: - xarray/core/groupby.py topic-html-repr: - changed-files: - any-glob-to-any-file: - xarray/core/formatting_html.py topic-hypothesis: - changed-files: - any-glob-to-any-file: - properties/** - xarray/testing/strategies.py topic-indexing: - changed-files: - any-glob-to-any-file: - xarray/core/indexes.py - xarray/core/indexing.py topic-NamedArray: - changed-files: - any-glob-to-any-file: - xarray/namedarray/* topic-performance: - changed-files: - any-glob-to-any-file: - asv_bench/benchmarks/** topic-plotting: - changed-files: - any-glob-to-any-file: - xarray/plot/* - xarray/plot/**/* topic-rolling: - changed-files: - any-glob-to-any-file: - xarray/computation/rolling.py - xarray/computation/rolling_exp.py topic-testing: - changed-files: - any-glob-to-any-file: - conftest.py - xarray/testing/* topic-typing: - changed-files: - any-glob-to-any-file: - xarray/core/types.py topic-zarr: - changed-files: - any-glob-to-any-file: - xarray/backends/zarr.py io: - changed-files: - any-glob-to-any-file: - xarray/backends/** xarray-2025.09.0/.github/release.yml000066400000000000000000000001261505620616400170460ustar00rootroot00000000000000changelog: exclude: authors: - dependabot[bot] - pre-commit-ci[bot] xarray-2025.09.0/.github/stale.yml000066400000000000000000000041251505620616400165410ustar00rootroot00000000000000# Configuration for probot-stale - https://github.com/probot/stale # Number of days of inactivity before an Issue or Pull Request becomes stale daysUntilStale: 600 # start with a large number and reduce shortly # Number of days of inactivity before an Issue or Pull Request with the stale label is closed. # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. 
daysUntilClose: 30 # Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable exemptLabels: - pinned - security - "[Status] Maybe Later" # Set to true to ignore issues in a project (defaults to false) exemptProjects: true # Set to true to ignore issues in a milestone (defaults to false) exemptMilestones: true # Set to true to ignore issues with an assignee (defaults to false) exemptAssignees: true # Label to use when marking as stale staleLabel: stale # Comment to post when marking as stale. Set to `false` to disable markComment: | In order to maintain a list of currently relevant issues, we mark issues as stale after a period of inactivity If this issue remains relevant, please comment here or remove the `stale` label; otherwise it will be marked as closed automatically closeComment: | The stalebot didn't hear anything for a while, so it closed this. Please reopen if this is still an issue. # Comment to post when removing the stale label. # unmarkComment: > # Your comment here. # Comment to post when closing a stale Issue or Pull Request. # closeComment: > # Your comment here. # Limit the number of actions per hour, from 1-30. Default is 30 limitPerRun: 2 # start with a small number # Limit to only `issues` or `pulls` # only: issues # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': # pulls: # daysUntilStale: 30 # markComment: > # This pull request has been automatically marked as stale because it has not had # recent activity. It will be closed if no further activity occurs. Thank you # for your contributions. # issues: # exemptLabels: # - confirmed xarray-2025.09.0/.github/workflows/000077500000000000000000000000001505620616400167415ustar00rootroot00000000000000xarray-2025.09.0/.github/workflows/benchmarks-last-release.yml000066400000000000000000000051071505620616400241630ustar00rootroot00000000000000name: Benchmark compare last release on: push: branches: - main workflow_dispatch: jobs: benchmark: name: Linux runs-on: ubuntu-latest env: ASV_DIR: "./asv_bench" CONDA_ENV_FILE: ci/requirements/environment.yml steps: # We need the full repo to avoid this issue # https://github.com/actions/checkout/issues/23 - uses: actions/checkout@v5 with: fetch-depth: 0 - name: Set up conda environment uses: mamba-org/setup-micromamba@v2 with: micromamba-version: "1.5.10-0" environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark" create-args: >- asv - name: "Get Previous tag" id: previoustag uses: "WyriHaximus/github-action-get-previous-tag@v1" # with: # fallback: 1.0.0 # Optional fallback tag to use when no tag can be found - name: Run benchmarks shell: bash -l {0} id: benchmark env: OPENBLAS_NUM_THREADS: 1 MKL_NUM_THREADS: 1 OMP_NUM_THREADS: 1 ASV_FACTOR: 1.5 ASV_SKIP_SLOW: 1 run: | set -x # ID this runner asv machine --yes echo "Baseline: ${{ steps.previoustag.outputs.tag }} " echo "Contender: ${{ github.sha }}" # Use mamba for env creation # export CONDA_EXE=$(which mamba) export CONDA_EXE=$(which conda) # Run benchmarks for current commit against base ASV_OPTIONS="--split --show-stderr --factor $ASV_FACTOR" asv continuous $ASV_OPTIONS ${{ steps.previoustag.outputs.tag }} ${{ github.sha }} \ | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \ | tee benchmarks.log # Report and export results for subsequent steps if grep 
"Traceback \|failed\|PERFORMANCE DECREASED" benchmarks.log > /dev/null ; then exit 1 fi working-directory: ${{ env.ASV_DIR }} - name: Add instructions to artifact if: always() run: | cp benchmarks/README_CI.md benchmarks.log .asv/results/ working-directory: ${{ env.ASV_DIR }} - uses: actions/upload-artifact@v4 if: always() with: name: asv-benchmark-results-${{ runner.os }} path: ${{ env.ASV_DIR }}/.asv/results xarray-2025.09.0/.github/workflows/benchmarks.yml000066400000000000000000000054561505620616400216130ustar00rootroot00000000000000name: Benchmark on: pull_request: types: [opened, reopened, synchronize, labeled] workflow_dispatch: env: PR_HEAD_LABEL: ${{ github.event.pull_request.head.label }} jobs: benchmark: if: ${{ contains( github.event.pull_request.labels.*.name, 'run-benchmark') && github.event_name == 'pull_request' || contains( github.event.pull_request.labels.*.name, 'topic-performance') && github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }} name: Linux runs-on: ubuntu-latest env: ASV_DIR: "./asv_bench" CONDA_ENV_FILE: ci/requirements/environment-benchmark.yml steps: # We need the full repo to avoid this issue # https://github.com/actions/checkout/issues/23 - uses: actions/checkout@v5 with: fetch-depth: 0 - name: Set up conda environment uses: mamba-org/setup-micromamba@v2 with: micromamba-version: "1.5.10-0" environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-benchmark cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark" # add "build" because of https://github.com/airspeed-velocity/asv/issues/1385 create-args: >- asv python-build mamba<=1.5.10 - name: Run benchmarks shell: bash -l {0} id: benchmark env: OPENBLAS_NUM_THREADS: 1 MKL_NUM_THREADS: 1 OMP_NUM_THREADS: 1 ASV_FACTOR: 1.5 ASV_SKIP_SLOW: 1 run: | set -x # ID this runner asv machine --yes echo "Baseline: ${{ github.event.pull_request.base.sha }} (${{ github.event.pull_request.base.label }})" echo "Contender: ${GITHUB_SHA} ($PR_HEAD_LABEL)" # Run benchmarks for current commit against base ASV_OPTIONS="--split --show-stderr --factor $ASV_FACTOR" asv continuous $ASV_OPTIONS ${{ github.event.pull_request.base.sha }} ${GITHUB_SHA} \ | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \ | tee benchmarks.log # Report and export results for subsequent steps if grep "Traceback \|failed\|PERFORMANCE DECREASED" benchmarks.log > /dev/null ; then exit 1 fi working-directory: ${{ env.ASV_DIR }} - name: Add instructions to artifact if: always() run: | cp benchmarks/README_CI.md benchmarks.log .asv/results/ working-directory: ${{ env.ASV_DIR }} - uses: actions/upload-artifact@v4 if: always() with: name: asv-benchmark-results-${{ runner.os }} path: ${{ env.ASV_DIR }}/.asv/results xarray-2025.09.0/.github/workflows/ci-additional.yaml000066400000000000000000000233311505620616400223300ustar00rootroot00000000000000name: CI Additional on: push: branches: - "main" pull_request: branches: - "main" workflow_dispatch: # allows you to trigger manually concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: 3 jobs: detect-ci-trigger: name: detect ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v5 with: fetch-depth: 2 - uses: 
xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[skip-ci]" doctest: name: Doctests runs-on: "ubuntu-latest" needs: detect-ci-trigger if: needs.detect-ci-trigger.outputs.triggered == 'false' defaults: run: shell: bash -l {0} env: CONDA_ENV_FILE: ci/requirements/environment.yml PYTHON_VERSION: "3.12" steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests create-args: >- python=${{env.PYTHON_VERSION}} cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py - name: Run doctests run: | # Raise an error if there are warnings in the doctests, with `-Werror`. # This is a trial; if it presents an problem, feel free to remove. # See https://github.com/pydata/xarray/issues/7164 for more info. # # If dependencies emit warnings we can't do anything about, add ignores to # `xarray/tests/__init__.py`. python -m pytest --doctest-modules xarray --ignore xarray/tests -Werror mypy: name: Mypy runs-on: "ubuntu-latest" needs: detect-ci-trigger defaults: run: shell: bash -l {0} env: CONDA_ENV_FILE: ci/requirements/environment.yml PYTHON_VERSION: "3.12" steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests create-args: >- python=${{env.PYTHON_VERSION}} cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py - name: Install mypy run: | python -m pip install "mypy==1.15" --force-reinstall - name: Run mypy run: | python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report - name: Upload mypy coverage to Codecov uses: codecov/codecov-action@v5.5.0 with: file: mypy_report/cobertura.xml flags: mypy env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false mypy-min: name: Mypy 3.11 runs-on: "ubuntu-latest" needs: detect-ci-trigger defaults: run: shell: bash -l {0} env: CONDA_ENV_FILE: ci/requirements/environment.yml PYTHON_VERSION: "3.11" steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests create-args: >- python=${{env.PYTHON_VERSION}} cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" - name: Install xarray run: | python -m pip install --no-deps -e . 
- name: Version info run: | python xarray/util/print_versions.py - name: Install mypy run: | python -m pip install "mypy==1.15" --force-reinstall - name: Run mypy run: | python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report - name: Upload mypy coverage to Codecov uses: codecov/codecov-action@v5.5.0 with: file: mypy_report/cobertura.xml flags: mypy-min env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false pyright: name: Pyright runs-on: "ubuntu-latest" needs: detect-ci-trigger if: | always() && ( contains( github.event.pull_request.labels.*.name, 'run-pyright') ) defaults: run: shell: bash -l {0} env: CONDA_ENV_FILE: ci/requirements/environment.yml PYTHON_VERSION: "3.12" steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests create-args: >- python=${{env.PYTHON_VERSION}} cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py - name: Install pyright run: | python -m pip install pyright --force-reinstall - name: Run pyright run: | python -m pyright xarray/ - name: Upload pyright coverage to Codecov uses: codecov/codecov-action@v5.5.0 with: file: pyright_report/cobertura.xml flags: pyright env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false pyright39: name: Pyright 3.11 runs-on: "ubuntu-latest" needs: detect-ci-trigger if: | always() && ( contains( github.event.pull_request.labels.*.name, 'run-pyright') ) defaults: run: shell: bash -l {0} env: CONDA_ENV_FILE: ci/requirements/environment.yml PYTHON_VERSION: "3.11" steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{env.CONDA_ENV_FILE}} environment-name: xarray-tests create-args: >- python=${{env.PYTHON_VERSION}} cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py - name: Install pyright run: | python -m pip install pyright --force-reinstall - name: Run pyright run: | python -m pyright xarray/ - name: Upload pyright coverage to Codecov uses: codecov/codecov-action@v5.5.0 with: file: pyright_report/cobertura.xml flags: pyright39 env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false min-version-policy: name: Minimum Version Policy runs-on: "ubuntu-latest" needs: detect-ci-trigger if: needs.detect-ci-trigger.outputs.triggered == 'false' defaults: run: shell: bash -l {0} env: COLUMNS: 120 steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. 
- uses: actions/setup-python@v5 with: python-version: "3.x" - name: All-deps minimum versions policy uses: xarray-contrib/minimum-dependency-versions@e2ac8ff0a76e8603d8536ef5d64743a375961ce9 # v0.1.1 with: policy: ci/policy.yaml environment-paths: ci/requirements/min-all-deps.yml - name: Bare minimum versions policy uses: xarray-contrib/minimum-dependency-versions@e2ac8ff0a76e8603d8536ef5d64743a375961ce9 # v0.1.1 with: policy: ci/policy.yaml environment-paths: ci/requirements/bare-minimum.yml xarray-2025.09.0/.github/workflows/ci.yaml000066400000000000000000000144421505620616400202250ustar00rootroot00000000000000name: CI on: push: branches: - "main" pull_request: branches: - "main" workflow_dispatch: # allows you to trigger manually concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: 3 jobs: detect-ci-trigger: name: detect ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v5 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[skip-ci]" test: name: ${{ matrix.os }} py${{ matrix.python-version }} ${{ matrix.env }} runs-on: ${{ matrix.os }} needs: detect-ci-trigger if: needs.detect-ci-trigger.outputs.triggered == 'false' defaults: run: shell: bash -l {0} strategy: fail-fast: false matrix: os: ["ubuntu-latest", "macos-latest", "windows-latest"] # Bookend python versions python-version: ["3.11", "3.13"] env: [""] include: # Minimum python version: - env: "bare-minimum" python-version: "3.11" os: ubuntu-latest - env: "bare-min-and-scipy" python-version: "3.11" os: ubuntu-latest - env: "min-all-deps" python-version: "3.11" os: ubuntu-latest # Latest python version: - env: "all-but-numba" python-version: "3.13" os: ubuntu-latest - env: "all-but-dask" python-version: "3.12" os: ubuntu-latest - env: "flaky" python-version: "3.13" os: ubuntu-latest # The mypy tests must be executed using only 1 process in order to guarantee # predictable mypy output messages for comparison to expectations. - env: "mypy" python-version: "3.11" numprocesses: 1 os: ubuntu-latest - env: "mypy" python-version: "3.13" numprocesses: 1 os: ubuntu-latest steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. 
- name: Set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV if [[ ${{ matrix.os }} == windows* ]] ; then if [[ ${{ matrix.python-version }} != "3.14" ]]; then echo "CONDA_ENV_FILE=ci/requirements/environment-windows.yml" >> $GITHUB_ENV else echo "CONDA_ENV_FILE=ci/requirements/environment-windows-3.14.yml" >> $GITHUB_ENV fi elif [[ "${{ matrix.env }}" != "" ]] ; then if [[ "${{ matrix.env }}" == "flaky" ]] ; then echo "CONDA_ENV_FILE=ci/requirements/environment.yml" >> $GITHUB_ENV echo "PYTEST_ADDOPTS=-m 'flaky or network' --run-flaky --run-network-tests -W default" >> $GITHUB_ENV elif [[ "${{ matrix.env }}" == "mypy" ]] ; then echo "CONDA_ENV_FILE=ci/requirements/environment.yml" >> $GITHUB_ENV echo "PYTEST_ADDOPTS=-n 1 -m 'mypy' --run-mypy -W default" >> $GITHUB_ENV else echo "CONDA_ENV_FILE=ci/requirements/${{ matrix.env }}.yml" >> $GITHUB_ENV fi if [[ "${{ matrix.env }}" == "min-all-deps" ]] ; then # Don't raise on warnings echo "PYTEST_ADDOPTS=-W default" >> $GITHUB_ENV fi else if [[ ${{ matrix.python-version }} != "3.14" ]]; then echo "CONDA_ENV_FILE=ci/requirements/environment.yml" >> $GITHUB_ENV else echo "CONDA_ENV_FILE=ci/requirements/environment-3.14.yml" >> $GITHUB_ENV fi fi echo "PYTHON_VERSION=${{ matrix.python-version }}" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{ env.CONDA_ENV_FILE }} environment-name: xarray-tests cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{matrix.python-version}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" create-args: >- python=${{matrix.python-version}} # We only want to install this on one run, because otherwise we'll have # duplicate annotations. - name: Install error reporter if: ${{ matrix.os }} == 'ubuntu-latest' and ${{ matrix.python-version }} == '3.12' run: | python -m pip install pytest-github-actions-annotate-failures - name: Install xarray run: | python -m pip install --no-deps -e . 
- name: Version info run: | python xarray/util/print_versions.py - name: Import xarray run: | python -c "import xarray" - name: Restore cached hypothesis directory uses: actions/cache@v4 with: path: .hypothesis/ key: cache-hypothesis enableCrossOsArchive: true save-always: true - name: Run tests run: python -m pytest -n ${{ matrix.numprocesses || 4 }} --timeout 180 --cov=xarray --cov-report=xml --junitxml=pytest.xml - name: Upload test results if: always() uses: actions/upload-artifact@v4 with: name: Test results for ${{ runner.os }}-${{ matrix.python-version }} ${{ matrix.env }} path: pytest.xml - name: Upload code coverage to Codecov uses: codecov/codecov-action@v5.5.0 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: file: ./coverage.xml flags: unittests env_vars: RUNNER_OS,PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false event_file: name: "Event File" runs-on: ubuntu-latest if: github.repository == 'pydata/xarray' steps: - name: Upload uses: actions/upload-artifact@v4 with: name: Event File path: ${{ github.event_path }} xarray-2025.09.0/.github/workflows/configure-testpypi-version.py000066400000000000000000000022131505620616400246340ustar00rootroot00000000000000import argparse import copy import pathlib import tomli import tomli_w def split_path(path, sep="/"): if isinstance(path, str): return [part for part in path.split(sep) if part] else: return path def extract(mapping, path, sep="/"): parts = split_path(path, sep=sep) cur = mapping for part in parts: cur = cur[part] return cur def update(mapping, path, value, sep="/"): new = copy.deepcopy(mapping) parts = split_path(path, sep=sep) parent = extract(new, parts[:-1]) parent[parts[-1]] = value return new parser = argparse.ArgumentParser() parser.add_argument("path", type=pathlib.Path) args = parser.parse_args() content = args.path.read_text() decoded = tomli.loads(content) with_local_scheme = update( decoded, "tool.setuptools_scm.local_scheme", "no-local-version", sep="." 
) # work around a bug in setuptools / setuptools-scm with_setuptools_pin = copy.deepcopy(with_local_scheme) requires = extract(with_setuptools_pin, "build-system.requires", sep=".") requires[0] = "setuptools>=42,<60" new_content = tomli_w.dumps(with_setuptools_pin) args.path.write_text(new_content) xarray-2025.09.0/.github/workflows/hypothesis.yaml000066400000000000000000000072541505620616400220340ustar00rootroot00000000000000name: Slow Hypothesis CI on: push: branches: - "main" pull_request: branches: - "main" types: [opened, reopened, synchronize, labeled] schedule: - cron: "0 0 * * *" # Daily β€œAt 00:00” UTC workflow_dispatch: # allows you to trigger manually env: FORCE_COLOR: 3 jobs: detect-ci-trigger: name: detect ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request' || github.event_name == 'schedule') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v5 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[skip-ci]" hypothesis: name: Slow Hypothesis Tests runs-on: "ubuntu-latest" needs: detect-ci-trigger if: | always() && ( needs.detect-ci-trigger.outputs.triggered == 'false' && ( (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') || contains( github.event.pull_request.labels.*.name, 'run-slow-hypothesis')) ) defaults: run: shell: bash -l {0} env: CONDA_ENV_FILE: ci/requirements/environment.yml PYTHON_VERSION: "3.12" steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: set environment variables run: | echo "TODAY=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Setup micromamba uses: mamba-org/setup-micromamba@v2 with: environment-file: ci/requirements/environment.yml environment-name: xarray-tests create-args: >- python=${{env.PYTHON_VERSION}} pytest-reportlog cache-environment: true cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}" - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache - name: Restore cached hypothesis directory id: restore-hypothesis-cache uses: actions/cache/restore@v4 with: path: .hypothesis/ key: cache-hypothesis-${{ runner.os }}-${{ github.run_id }} restore-keys: | cache-hypothesis- - name: Run slow Hypothesis tests if: success() id: status run: | python -m pytest --hypothesis-show-statistics --run-slow-hypothesis properties/*.py \ --report-log output-${{ matrix.python-version }}-log.jsonl # explicitly save the cache so it gets updated, also do this even if it fails. 
- name: Save cached hypothesis directory id: save-hypothesis-cache if: always() && steps.status.outcome != 'skipped' uses: actions/cache/save@v4 with: path: .hypothesis/ key: cache-hypothesis-${{ runner.os }}-${{ github.run_id }} - name: Generate and publish the report if: | failure() && steps.status.outcome == 'failure' && github.event_name == 'schedule' && github.repository_owner == 'pydata' uses: scientific-python/issue-from-pytest-log-action@v1 with: log-path: output-${{ matrix.python-version }}-log.jsonl issue-title: "Nightly Hypothesis tests failed" issue-label: "topic-hypothesis" xarray-2025.09.0/.github/workflows/label-prs.yml000066400000000000000000000003451505620616400213470ustar00rootroot00000000000000name: "PR Labeler" on: - pull_request_target jobs: label: runs-on: ubuntu-latest steps: - uses: actions/labeler@v5 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" sync-labels: false xarray-2025.09.0/.github/workflows/nightly-wheels.yml000066400000000000000000000022161505620616400224300ustar00rootroot00000000000000name: Upload nightly wheels on: workflow_dispatch: schedule: - cron: "0 0 * * *" jobs: cron: runs-on: ubuntu-latest if: github.repository == 'pydata/xarray' steps: - uses: actions/checkout@v5 with: fetch-depth: 0 - uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install build twine - name: Build tarball and wheels run: | git clean -xdf git restore -SW . python -m build - name: Check built artifacts run: | python -m twine check --strict dist/* pwd if [ -f dist/xarray-0.0.0.tar.gz ]; then echo "❌ INVALID VERSION NUMBER" exit 1 else echo "βœ… Looks good" fi - name: Upload wheel uses: scientific-python/upload-nightly-action@b36e8c0c10dbcfd2e05bf95f17ef8c14fd708dbf # 0.6.2 with: anaconda_nightly_upload_token: ${{ secrets.ANACONDA_NIGHTLY }} artifacts_path: dist xarray-2025.09.0/.github/workflows/publish-test-results.yaml000066400000000000000000000025201505620616400237460ustar00rootroot00000000000000# Copied from https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.23/README.md#support-fork-repositories-and-dependabot-branches name: Publish test results on: workflow_run: workflows: ["CI"] types: - completed concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: publish-test-results: name: Publish test results runs-on: ubuntu-latest if: github.event.workflow_run.conclusion != 'skipped' steps: - name: Download and extract artifacts env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} run: | mkdir artifacts && cd artifacts artifacts_url=${{ github.event.workflow_run.artifacts_url }} gh api "$artifacts_url" -q '.artifacts[] | [.name, .archive_download_url] | @tsv' | while read artifact do IFS=$'\t' read name url <<< "$artifact" gh api $url > "$name.zip" unzip -d "$name" "$name.zip" done - name: Publish Unit Test Results uses: EnricoMi/publish-unit-test-result-action@v2 with: commit: ${{ github.event.workflow_run.head_sha }} event_file: artifacts/Event File/event.json event_name: ${{ github.event.workflow_run.event }} files: "artifacts/**/*.xml" comment_mode: off xarray-2025.09.0/.github/workflows/pypi-release.yaml000066400000000000000000000060051505620616400222250ustar00rootroot00000000000000name: Build and Upload xarray to PyPI on: release: types: - published push: tags: - "v*" pull_request: types: [opened, reopened, synchronize, labeled] workflow_dispatch: jobs: build-artifacts: runs-on: ubuntu-latest if: ${{ github.repository == 
'pydata/xarray' && ( (contains(github.event.pull_request.labels.*.name, 'Release') && github.event_name == 'pull_request') || github.event_name == 'release' || github.event_name == 'workflow_dispatch' || startsWith(github.ref, 'refs/tags/v') ) }} steps: - uses: actions/checkout@v5 with: fetch-depth: 0 - uses: actions/setup-python@v5 name: Install Python with: python-version: "3.12" - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install build twine - name: Build tarball and wheels run: | git clean -xdf git restore -SW . python -m build - name: Check built artifacts run: | python -m twine check --strict dist/* pwd if [ -f dist/xarray-0.0.0.tar.gz ]; then echo "❌ INVALID VERSION NUMBER" exit 1 else echo "βœ… Looks good" fi - uses: actions/upload-artifact@v4 with: name: releases path: dist test-built-dist: needs: build-artifacts runs-on: ubuntu-latest steps: - uses: actions/setup-python@v5 name: Install Python with: python-version: "3.12" - uses: actions/download-artifact@v5 with: name: releases path: dist - name: List contents of built dist run: | ls -ltrh ls -ltrh dist - name: Verify the built dist/wheel is valid run: | python -m pip install --upgrade pip python -m pip install dist/xarray*.whl python -m xarray.util.print_versions upload-to-test-pypi: needs: test-built-dist if: github.event_name == 'push' runs-on: ubuntu-latest environment: name: pypi url: https://test.pypi.org/p/xarray permissions: id-token: write steps: - uses: actions/download-artifact@v5 with: name: releases path: dist - name: Publish package to TestPyPI if: github.event_name == 'push' uses: pypa/gh-action-pypi-publish@v1.12.4 with: repository_url: https://test.pypi.org/legacy/ verbose: true upload-to-pypi: needs: test-built-dist if: github.event_name == 'release' runs-on: ubuntu-latest environment: name: pypi url: https://pypi.org/p/xarray permissions: id-token: write steps: - uses: actions/download-artifact@v5 with: name: releases path: dist - name: Publish package to PyPI uses: pypa/gh-action-pypi-publish@v1.12.4 with: verbose: true xarray-2025.09.0/.github/workflows/upstream-dev-ci.yaml000066400000000000000000000104031505620616400226300ustar00rootroot00000000000000name: CI Upstream on: push: branches: - main pull_request: branches: - main types: [opened, reopened, synchronize, labeled] schedule: - cron: "0 0 * * *" # Daily β€œAt 00:00” UTC workflow_dispatch: # allows you to trigger the workflow run manually concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: FORCE_COLOR: 3 jobs: detect-ci-trigger: name: detect upstream-dev ci trigger runs-on: ubuntu-latest if: | github.repository == 'pydata/xarray' && (github.event_name == 'push' || github.event_name == 'pull_request') outputs: triggered: ${{ steps.detect-trigger.outputs.trigger-found }} steps: - uses: actions/checkout@v5 with: fetch-depth: 2 - uses: xarray-contrib/ci-trigger@v1 id: detect-trigger with: keyword: "[test-upstream]" upstream-dev: name: upstream-dev runs-on: ubuntu-latest needs: detect-ci-trigger if: | always() && ( (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') || needs.detect-ci-trigger.outputs.triggered == 'true' || contains( github.event.pull_request.labels.*.name, 'run-upstream') ) defaults: run: shell: bash -l {0} strategy: fail-fast: false matrix: python-version: ["3.12"] steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. 
- name: Set up conda environment uses: mamba-org/setup-micromamba@v2 with: environment-file: ci/requirements/environment.yml environment-name: xarray-tests create-args: >- python=${{ matrix.python-version }} pytest-reportlog - name: Install upstream versions run: | bash ci/install-upstream-wheels.sh - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py - name: Import xarray run: | python -c 'import xarray' - name: Run Tests if: success() id: status run: | python -m pytest --timeout=60 -rf -nauto \ --report-log output-${{ matrix.python-version }}-log.jsonl - name: Generate and publish the report if: | failure() && steps.status.outcome == 'failure' && github.event_name == 'schedule' && github.repository_owner == 'pydata' uses: scientific-python/issue-from-pytest-log-action@v1 with: log-path: output-${{ matrix.python-version }}-log.jsonl mypy-upstream-dev: name: mypy-upstream-dev runs-on: ubuntu-latest needs: detect-ci-trigger if: | always() && ( contains( github.event.pull_request.labels.*.name, 'run-upstream') ) defaults: run: shell: bash -l {0} strategy: fail-fast: false matrix: python-version: ["3.11"] steps: - uses: actions/checkout@v5 with: fetch-depth: 0 # Fetch all history for all branches and tags. - name: Set up conda environment uses: mamba-org/setup-micromamba@v2 with: environment-file: ci/requirements/environment.yml environment-name: xarray-tests create-args: >- python=${{ matrix.python-version }} pytest-reportlog - name: Install upstream versions run: | bash ci/install-upstream-wheels.sh - name: Install xarray run: | python -m pip install --no-deps -e . - name: Version info run: | python xarray/util/print_versions.py - name: Install mypy run: | python -m pip install mypy --force-reinstall - name: Run mypy run: | python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report - name: Upload mypy coverage to Codecov uses: codecov/codecov-action@v5.5.0 with: file: mypy_report/cobertura.xml flags: mypy env_vars: PYTHON_VERSION name: codecov-umbrella fail_ci_if_error: false xarray-2025.09.0/.gitignore000066400000000000000000000021651505620616400153400ustar00rootroot00000000000000*.py[cod] __pycache__ .env .venv # example caches from Hypothesis .hypothesis/ # temp files from docs build doc/*.nc doc/auto_gallery doc/rasm.zarr # C extensions *.so # Packages *.egg *.egg-info .eggs dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports .coverage .coverage.* .tox nosetests.xml .cache .prettier_cache .dmypy.json .mypy_cache .ropeproject/ .tags* .testmon* .tmontmp/ .pytest_cache dask-worker-space/ # asv environments asv_bench/.asv asv_bench/pkgs # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject # IDEs .idea *.swp .DS_Store .vscode/ # xarray specific doc/_build doc/generated/ doc/api/generated/ xarray/tests/data/*.grib.*.idx # Claude Code .claude/ # Sync tools Icon* .ipynb_checkpoints doc/team-panel.txt doc/external-examples-gallery.txt doc/notebooks-examples-gallery.txt doc/videos-gallery.txt # Until we support this properly, excluding from gitignore. (adding it to # gitignore to make it _easier_ to work with `uv`, not as an indication that I # think we shouldn't...) 
uv.lock mypy_report/ xarray-2025.09.0/.pre-commit-config.yaml000066400000000000000000000047601505620616400176340ustar00rootroot00000000000000# https://pre-commit.com/ ci: autoupdate_schedule: monthly autoupdate_commit_msg: "Update pre-commit hooks" repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml - id: debug-statements - id: mixed-line-ending - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 hooks: # - id: python-check-blanket-noqa # checked by ruff # - id: python-check-blanket-type-ignore # checked by ruff # - id: python-check-mock-methods # checked by ruff - id: python-no-log-warn # - id: python-use-type-annotations # too many false positives - id: rst-backticks - id: rst-directive-colons - id: rst-inline-touching-normal - id: text-unicode-replacement-char - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.12.11 hooks: - id: ruff-check args: ["--fix", "--show-fixes"] - id: ruff-format - repo: https://github.com/keewis/blackdoc rev: v0.4.1 hooks: - id: blackdoc exclude: "generate_aggregations.py" additional_dependencies: ["black==24.8.0"] - repo: https://github.com/rbubley/mirrors-prettier rev: v3.6.2 hooks: - id: prettier args: [--cache-location=.prettier_cache/cache] - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.17.1 hooks: - id: mypy # Copied from setup.cfg exclude: "properties|asv_bench" # This is slow and so we take it out of the fast-path; requires passing # `--hook-stage manual` to pre-commit stages: [manual] additional_dependencies: [ # Type stubs types-python-dateutil, types-setuptools, types-PyYAML, types-pytz, typing-extensions>=4.1.0, numpy, ] - repo: https://github.com/citation-file-format/cff-converter-python rev: ebf0b5e44d67f8beaa1cd13a0d0393ea04c6058d hooks: - id: validate-cff - repo: https://github.com/ComPWA/taplo-pre-commit rev: v0.9.3 hooks: - id: taplo-format args: ["--option", "array_auto_collapse=false"] - repo: https://github.com/abravalheri/validate-pyproject rev: v0.24.1 hooks: - id: validate-pyproject additional_dependencies: ["validate-pyproject-schema-store[all]"] - repo: https://github.com/adhtruong/mirrors-typos rev: v1.35.6 hooks: - id: typos xarray-2025.09.0/.readthedocs.yaml000066400000000000000000000007111505620616400165720ustar00rootroot00000000000000version: 2 sphinx: configuration: doc/conf.py fail_on_warning: true build: os: ubuntu-lts-latest tools: python: mambaforge-latest jobs: post_checkout: - (git --no-pager log --pretty="tformat:%s" -1 | grep -vqF "[skip-rtd]") || exit 183 - git fetch --unshallow || true pre_install: - git update-index --assume-unchanged doc/conf.py ci/requirements/doc.yml conda: environment: ci/requirements/doc.yml formats: [] xarray-2025.09.0/CITATION.cff000066400000000000000000000071771505620616400152520ustar00rootroot00000000000000cff-version: 1.2.0 message: "If you use this software, please cite it as below." 
authors: - family-names: "Hoyer" given-names: "Stephan" orcid: "https://orcid.org/0000-0002-5207-0380" - family-names: "Roos" given-names: "Maximilian" - family-names: "Joseph" given-names: "Hamman" orcid: "https://orcid.org/0000-0001-7479-8439" - family-names: "Magin" given-names: "Justus" orcid: "https://orcid.org/0000-0002-4254-8002" - family-names: "Cherian" given-names: "Deepak" orcid: "https://orcid.org/0000-0002-6861-8734" - family-names: "Fitzgerald" given-names: "Clark" orcid: "https://orcid.org/0000-0003-3446-6389" - family-names: "Hauser" given-names: "Mathias" orcid: "https://orcid.org/0000-0002-0057-4878" - family-names: "Fujii" given-names: "Keisuke" orcid: "https://orcid.org/0000-0003-0390-9984" - family-names: "Maussion" given-names: "Fabien" orcid: "https://orcid.org/0000-0002-3211-506X" - family-names: "Imperiale" given-names: "Guido" - family-names: "Clark" given-names: "Spencer" orcid: "https://orcid.org/0000-0001-5595-7895" - family-names: "Kleeman" given-names: "Alex" - family-names: "Nicholas" given-names: "Thomas" orcid: "https://orcid.org/0000-0002-2176-0530" - family-names: "Kluyver" given-names: "Thomas" orcid: "https://orcid.org/0000-0003-4020-6364" - family-names: "Westling" given-names: "Jimmy" - family-names: "Munroe" given-names: "James" orcid: "https://orcid.org/0000-0001-9098-6309" - family-names: "Amici" given-names: "Alessandro" orcid: "https://orcid.org/0000-0002-1778-4505" - family-names: "Barghini" given-names: "Aureliana" - family-names: "Banihirwe" given-names: "Anderson" orcid: "https://orcid.org/0000-0001-6583-571X" - family-names: "Bell" given-names: "Ray" orcid: "https://orcid.org/0000-0003-2623-0587" - family-names: "Hatfield-Dodds" given-names: "Zac" orcid: "https://orcid.org/0000-0002-8646-8362" - family-names: "Abernathey" given-names: "Ryan" orcid: "https://orcid.org/0000-0001-5999-4917" - family-names: "Bovy" given-names: "BenoΓt" - family-names: "Omotani" given-names: "John" orcid: "https://orcid.org/0000-0002-3156-8227" - family-names: "MΓΌhlbauer" given-names: "Kai" orcid: "https://orcid.org/0000-0001-6599-1034" - family-names: "Roszko" given-names: "Maximilian K." orcid: "https://orcid.org/0000-0001-9424-2526" - family-names: "Wolfram" given-names: "Phillip J." orcid: "https://orcid.org/0000-0001-5971-4241" - family-names: "Henderson" given-names: "Scott" orcid: "https://orcid.org/0000-0003-0624-4965" - family-names: "Awowale" given-names: "Eniola Olufunke" - family-names: "Scheick" given-names: "Jessica" orcid: "https://orcid.org/0000-0002-3421-4459" - family-names: "Savoie" given-names: "Matthew" orcid: "https://orcid.org/0000-0002-8881-2550" - family-names: "Littlejohns" given-names: "Owen" title: "xarray" abstract: "N-D labeled arrays and datasets in Python." 
license: Apache-2.0 doi: 10.5281/zenodo.598201 url: "https://xarray.dev/" repository-code: "https://github.com/pydata/xarray" preferred-citation: type: article authors: - family-names: "Hoyer" given-names: "Stephan" orcid: "https://orcid.org/0000-0002-5207-0380" - family-names: "Joseph" given-names: "Hamman" orcid: "https://orcid.org/0000-0001-7479-8439" doi: "10.5334/jors.148" journal: "Journal of Open Research Software" month: 4 title: "xarray: N-D labeled Arrays and Datasets in Python" volume: 5 issue: 1 year: 2017 xarray-2025.09.0/CLAUDE.md000066400000000000000000000005331505620616400146240ustar00rootroot00000000000000# xarray development setup ## Setup ```bash uv sync ``` ## Run tests ```bash uv run pytest xarray -n auto # All tests in parallel uv run pytest xarray/tests/test_dataarray.py # Specific file ``` ## Linting & type checking ```bash pre-commit run --all-files # Includes ruff and other checks uv run dmypy run # Type checking with mypy ``` xarray-2025.09.0/CODE_OF_CONDUCT.md000066400000000000000000000027351505620616400161520ustar00rootroot00000000000000# NUMFOCUS CODE OF CONDUCT You can find the full Code of Conduct on the NumFOCUS website: https://numfocus.org/code-of-conduct ## THE SHORT VERSION NumFOCUS is dedicated to providing a harassment-free community for everyone, regardless of gender, sexual orientation, gender identity and expression, disability, physical appearance, body size, race, or religion. We do not tolerate harassment of community members in any form. Be kind to others. Do not insult or put down others. Behave professionally. Remember that harassment and sexist, racist, or exclusionary jokes are not appropriate for NumFOCUS. All communication should be appropriate for a professional audience including people of many different backgrounds. Sexual language and imagery is not appropriate. Thank you for helping make this a welcoming, friendly community for all. ## HOW TO REPORT If you feel that the Code of Conduct has been violated, feel free to submit a report, by using the form: [NumFOCUS Code of Conduct Reporting Form](https://numfocus.typeform.com/to/ynjGdT?typeform-source=numfocus.org) ## WHO WILL RECEIVE YOUR REPORT Your report will be received and handled by NumFOCUS Code of Conduct Working Group; trained, and experienced contributors with diverse backgrounds. The group is making decisions independently from the project, PyData, NumFOCUS or any other organization. You can learn more about the current group members, as well as the reporting procedure here: https://numfocus.org/code-of-conduct xarray-2025.09.0/CONTRIBUTING.md000066400000000000000000000002131505620616400155710ustar00rootroot00000000000000Xarray's contributor guidelines [can be found in our online documentation](https://docs.xarray.dev/en/stable/contribute/contributing.html) xarray-2025.09.0/CORE_TEAM_GUIDE.md000066400000000000000000000474051505620616400161530ustar00rootroot00000000000000> **_Note:_** This Core Team Member Guide was adapted from the [napari project's Core Developer Guide](https://napari.org/stable/developers/core_dev_guide.html) and the [Pandas maintainers guide](https://pandas.pydata.org/docs/development/maintaining.html). # Core Team Member Guide Welcome, new core team member! We appreciate the quality of your work, and enjoy working with you! Thank you for your numerous contributions to the project so far. 
By accepting the invitation to become a core team member you are **not required to commit to doing any more work** - xarray is a volunteer project, and we value the contributions you have made already.

You can see a list of all the current core team members on our [@pydata/xarray](https://github.com/orgs/pydata/teams/xarray) GitHub team. Once accepted, you should now be on that list too.

This document offers guidelines for your new role.

## Tasks

Xarray values a wide range of contributions, only some of which involve writing code. As such, we do not currently make a distinction between a "core team member", "core developer", "maintainer", or "triage team member" as some projects do (e.g. [pandas](https://pandas.pydata.org/docs/development/maintaining.html)). That said, if you prefer to refer to your role as one of the other titles above then that is fine by us!

Xarray is mostly a volunteer project, so these tasks shouldn't be read as "expectations". **There are no strict expectations**, other than to adhere to our [Code of Conduct](https://github.com/pydata/xarray/tree/main/CODE_OF_CONDUCT.md). Rather, the tasks that follow are general descriptions of what it might mean to be a core team member:

- Facilitate a welcoming environment for those who file issues, make pull requests, and open discussion topics,
- Triage newly filed issues,
- Review newly opened pull requests,
- Respond to updates on existing issues and pull requests,
- Drive discussion and decisions on stalled issues and pull requests,
- Provide experience / wisdom on API design questions to ensure consistency and maintainability,
- Project organization (run developer meetings, coordinate with sponsors),
- Project evangelism (advertise xarray to new users),
- Community contact (represent xarray in user communities such as [Pangeo](https://pangeo.io/)),
- Key project contact (represent xarray's perspective within key related projects like NumPy, Zarr or Dask),
- Project fundraising (help write and administrate grants that will support xarray),
- Improve documentation or tutorials (especially on [`tutorial.xarray.dev`](https://tutorial.xarray.dev/)),
- Presenting or running tutorials (such as those we have given at the SciPy conference),
- Help maintain the [`xarray.dev`](https://xarray.dev/) landing page and website, the [code for which is here](https://github.com/xarray-contrib/xarray.dev),
- Write blog posts on the [xarray blog](https://xarray.dev/blog),
- Help maintain xarray's various Continuous Integration Workflows,
- Help maintain a regular release schedule (we aim for one or more releases per month),
- Attend the bi-weekly community meeting ([issue](https://github.com/pydata/xarray/issues/4001)),
- Contribute to the xarray codebase.

(Matt Rocklin's post on [the role of a maintainer](https://matthewrocklin.com/blog/2019/05/18/maintainer) may be interesting background reading, but should not be taken to strictly apply to the Xarray project.)

Obviously you are not expected to contribute in all (or even more than one) of these ways! They are listed so as to indicate the many types of work that go into maintaining xarray. It is natural that your available time and enthusiasm for the project will wax and wane - this is fine and expected! It is also common for core team members to have a "niche" - a particular part of the codebase they have specific expertise with, or certain types of task above which they primarily perform.
If however you feel that is unlikely you will be able to be actively contribute in the foreseeable future (or especially if you won't be available to answer questions about pieces of code that you wrote previously) then you may want to consider letting us know you would rather be listed as an "Emeritus Core Team Member", as this would help us in evaluating the overall health of the project. ## Issue triage One of the main ways you might spend your contribution time is by responding to or triaging new issues. Here’s a typical workflow for triaging a newly opened issue or discussion: 1. **Thank the reporter for opening an issue.** The issue tracker is many people’s first interaction with the xarray project itself, beyond just using the library. It may also be their first open-source contribution of any kind. As such, we want it to be a welcoming, pleasant experience. 2. **Is the necessary information provided?** Ideally reporters would fill out the issue template, but many don’t. If crucial information (like the version of xarray they used), is missing feel free to ask for that and label the issue with β€œneeds info”. The report should follow the [guidelines for xarray discussions](https://github.com/pydata/xarray/discussions/5404). You may want to link to that if they didn’t follow the template. Make sure that the title accurately reflects the issue. Edit it yourself if it’s not clear. Remember also that issues can be converted to discussions and vice versa if appropriate. 3. **Is this a duplicate issue?** We have many open issues. If a new issue is clearly a duplicate, label the new issue as β€œduplicate”, and close the issue with a link to the original issue. Make sure to still thank the reporter, and encourage them to chime in on the original issue, and perhaps try to fix it. If the new issue provides relevant information, such as a better or slightly different example, add it to the original issue as a comment or an edit to the original post. 4. **Is the issue minimal and reproducible?** For bug reports, we ask that the reporter provide a minimal reproducible example. See [minimal-bug-reports](https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) for a good explanation. If the example is not reproducible, or if it’s clearly not minimal, feel free to ask the reporter if they can provide and example or simplify the provided one. Do acknowledge that writing minimal reproducible examples is hard work. If the reporter is struggling, you can try to write one yourself and we’ll edit the original post to include it. If a nice reproducible example has been provided, thank the reporter for that. If a reproducible example can’t be provided, add the β€œneeds mcve” label. If a reproducible example is provided, but you see a simplification, edit the original post with your simpler reproducible example. 5. **Is this a clearly defined feature request?** Generally, xarray prefers to discuss and design new features in issues, before a pull request is made. Encourage the submitter to include a proposed API for the new feature. Having them write a full docstring is a good way to pin down specifics. We may need a discussion from several xarray maintainers before deciding whether the proposal is in scope for xarray. 6. **Is this a usage question?** We prefer that usage questions are asked on StackOverflow with the [`python-xarray` tag](https://stackoverflow.com/questions/tagged/python-xarray) or as a [GitHub discussion topic](https://github.com/pydata/xarray/discussions). 
If it’s easy to answer, feel free to link to the relevant documentation section, let them know that in the future this kind of question should be on StackOverflow, and close the issue. 7. **What labels and milestones should I add?** Apply the relevant labels. This is a bit of an art, and comes with experience. Look at similar issues to get a feel for how things are labeled. Labels used for labelling issues that relate to particular features or parts of the codebase normally have the form `topic-`. If the issue is clearly defined and the fix seems relatively straightforward, label the issue as `contrib-good-first-issue`. You can also remove the `needs triage` label that is automatically applied to all newly-opened issues. 8. **Where should the poster look to fix the issue?** If you can, it is very helpful to point to the approximate location in the codebase where a contributor might begin to fix the issue. This helps ease the way in for new contributors to the repository. ## Code review and contributions As a core team member, you are a representative of the project, and trusted to make decisions that will serve the long term interests of all users. You also gain the responsibility of shepherding other contributors through the review process; here are some guidelines for how to do that. ### All contributors are treated the same You should now have gained the ability to merge or approve other contributors' pull requests. Merging contributions is a shared power: only merge contributions you yourself have carefully reviewed, and that are clear improvements for the project. When in doubt, and especially for more complex changes, wait until at least one other core team member has approved. (See [Reviewing](#reviewing) and especially [Merge Only Changes You Understand](#merge-only-changes-you-understand) below.) It should also be considered best practice to leave a reasonable (24hr) time window after approval before merge to ensure that other core team members have a reasonable chance to weigh in. Adding the `plan-to-merge` label notifies developers of the imminent merge. We are also an international community, with contributors from many different time zones, some of whom will only contribute during their working hours, others who might only be able to contribute during nights and weekends. It is important to be respectful of other peoples schedules and working habits, even if it slows the project down slightly - we are in this for the long run. In the same vein you also shouldn't feel pressured to be constantly available or online, and users or contributors who are overly demanding and unreasonable to the point of harassment will be directed to our [Code of Conduct](https://github.com/pydata/xarray/tree/main/CODE_OF_CONDUCT.md). We value sustainable development practices over mad rushes. When merging, we automatically use GitHub's [Squash and Merge](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/merging-a-pull-request#merging-a-pull-request) to ensure a clean git history. You should also continue to make your own pull requests as before and in accordance with the [general contributing guide](https://docs.xarray.dev/en/stable/contributing.html). These pull requests still require the approval of another core team member before they can be merged. ### How to conduct a good review _Always_ be kind to contributors. Contributors are often doing volunteer work, for which we are tremendously grateful. 
Provide constructive criticism on ideas and implementations, and remind yourself of how it felt when your own work was being evaluated as a novice. `xarray` strongly values mentorship in code review. New users often need more handholding, having little to no git experience. Repeat yourself liberally, and, if you don’t recognize a contributor, point them to our development guide, or other GitHub workflow tutorials around the web. Do not assume that they know how GitHub works (many don't realize that adding a commit automatically updates a pull request, for example). Gentle, polite, kind encouragement can make the difference between a new core team member and an abandoned pull request. When reviewing, focus on the following: 1. **Usability and generality:** `xarray` is a user-facing package that strives to be accessible to both novice and advanced users, and new features should ultimately be accessible to everyone using the package. `xarray` targets the scientific user community broadly, and core features should be domain-agnostic and general purpose. Custom functionality is meant to be provided through our various types of interoperability. 2. **Performance and benchmarks:** As `xarray` targets scientific applications that often involve large multidimensional datasets, high performance is a key value of `xarray`. While every new feature won't scale equally to all sizes of data, keeping in mind performance and our [benchmarks](https://github.com/pydata/xarray/tree/main/asv_bench) during a review may be important, and you may need to ask for benchmarks to be run and reported or new benchmarks to be added. You can run the CI benchmarking suite on any PR by tagging it with the `run-benchmark` label. 3. **APIs and stability:** Coding users and developers will make extensive use of our APIs. The foundation of a healthy ecosystem will be a fully capable and stable set of APIs, so as `xarray` matures it will very important to ensure our APIs are stable. Spending the extra time to consider names of public facing variables and methods, alongside function signatures, could save us considerable trouble in the future. We do our best to provide [deprecation cycles](https://docs.xarray.dev/en/stable/contributing.html#backwards-compatibility) when making backwards-incompatible changes. 4. **Documentation and tutorials:** All new methods should have appropriate doc strings following [PEP257](https://peps.python.org/pep-0257/) and the [NumPy documentation guide](https://numpy.org/devdocs/dev/howto-docs.html#documentation-style). For any major new features, accompanying changes should be made to our [tutorials](https://tutorial.xarray.dev). These should not only illustrates the new feature, but explains it. 5. **Implementations and algorithms:** You should understand the code being modified or added before approving it. (See [Merge Only Changes You Understand](#merge-only-changes-you-understand) below.) Implementations should do what they claim and be simple, readable, and efficient in that order. 6. **Tests:** All contributions _must_ be tested, and each added line of code should be covered by at least one test. Good tests not only execute the code, but explore corner cases. It can be tempting not to review tests, but please do so. Other changes may be _nitpicky_: spelling mistakes, formatting, etc. 
Do not insist contributors make these changes, but instead you should offer to make these changes by [pushing to their branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/committing-changes-to-a-pull-request-branch-created-from-a-fork), or using GitHub’s [suggestion](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/commenting-on-a-pull-request) [feature](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/incorporating-feedback-in-your-pull-request), and be prepared to make them yourself if needed. Using the suggestion feature is preferred because it gives the contributor a choice in whether to accept the changes. Unless you know that a contributor is experienced with git, don’t ask for a rebase when merge conflicts arise. Instead, rebase the branch yourself, force-push to their branch, and advise the contributor to force-pull. If the contributor is no longer active, you may take over their branch by submitting a new pull request and closing the original, including a reference to the original pull request. In doing so, ensure you communicate that you are not throwing the contributor's work away! If appropriate it is a good idea to acknowledge other contributions to the pull request using the `Co-authored-by` [syntax](https://docs.github.com/en/pull-requests/committing-changes-to-your-project/creating-and-editing-commits/creating-a-commit-with-multiple-authors) in the commit message. ### Merge only changes you understand _Long-term maintainability_ is an important concern. Code doesn't merely have to _work_, but should be _understood_ by multiple core developers. Changes will have to be made in the future, and the original contributor may have moved on. Therefore, _do not merge a code change unless you understand it_. Ask for help freely: we can consult community members, or even external developers, for added insight where needed, and see this as a great learning opportunity. While we collectively "own" any patches (and bugs!) that become part of the code base, you are vouching for changes you merge. Please take that responsibility seriously. Feel free to ping other active maintainers with any questions you may have. ## Further resources As a core member, you should be familiar with community and developer resources such as: - Our [contributor guide](https://docs.xarray.dev/en/stable/contributing.html). - Our [code of conduct](https://github.com/pydata/xarray/tree/main/CODE_OF_CONDUCT.md). - Our [philosophy and development roadmap](https://docs.xarray.dev/en/stable/roadmap.html). - [PEP8](https://peps.python.org/pep-0008/) for Python style. - [PEP257](https://peps.python.org/pep-0257/) and the [NumPy documentation guide](https://numpy.org/devdocs/dev/howto-docs.html#documentation-style) for docstring conventions. - [`pre-commit`](https://pre-commit.com) hooks for autoformatting. - [`ruff`](https://github.com/astral-sh/ruff) autoformatting and linting. - [python-xarray](https://stackoverflow.com/questions/tagged/python-xarray) on Stack Overflow. - [@xarray_dev](https://x.com/xarray_dev) on X. - [xarray-dev](https://discord.gg/bsSGdwBn) discord community (normally only used for remote synchronous chat during sprints). You are not required to monitor any of the social resources. 
Where possible we prefer to point people towards asynchronous forms of communication like github issues instead of realtime chat options as they are far easier for a global community to consume and refer back to. We hold a [bi-weekly developers meeting](https://docs.xarray.dev/en/stable/developers-meeting.html) via video call. This is a great place to bring up any questions you have, raise visibility of an issue and/or gather more perspectives. Attendance is absolutely optional, and we keep the meeting to 30 minutes in respect of your valuable time. This meeting is public, so we occasionally have non-core team members join us. We also have a private mailing list for core team members `xarray-core-team@googlegroups.com` which is sparingly used for discussions that are required to be private, such as nominating new core members and discussing financial issues. ## Inviting new core members Any core member may nominate other contributors to join the core team. While there is no hard-and-fast rule about who can be nominated, ideally, they should have: been part of the project for at least two months, contributed significant changes of their own, contributed to the discussion and review of others' work, and collaborated in a way befitting our community values. **We strongly encourage nominating anyone who has made significant non-code contributions to the Xarray community in any way**. After nomination voting will happen on a private mailing list. While it is expected that most votes will be unanimous, a two-thirds majority of the cast votes is enough. Core team members can choose to become emeritus core team members and suspend their approval and voting rights until they become active again. ## Contribute to this guide (!) This guide reflects the experience of the current core team members. We may well have missed things that, by now, have become second natureβ€”things that you, as a new team member, will spot more easily. Please ask the other core team members if you have any questions, and submit a pull request with insights gained. ## Conclusion We are excited to have you on board! We look forward to your contributions to the code base and the community. Thank you in advance! xarray-2025.09.0/DATATREE_MIGRATION_GUIDE.md000066400000000000000000000140011505620616400174010ustar00rootroot00000000000000# Migration guide for users of `xarray-contrib/datatree` _15th October 2024_ This guide is for previous users of the prototype `datatree.DataTree` class in the `xarray-contrib/datatree repository`. That repository has now been archived, and will not be maintained. This guide is intended to help smooth your transition to using the new, updated `xarray.DataTree` class. > [!IMPORTANT] > There are breaking changes! You should not expect that code written with `xarray-contrib/datatree` will work without any modifications. At the absolute minimum you will need to change the top-level import statement, but there are other changes too. We have made various changes compared to the prototype version. These can be split into three categories: data model changes, which affect the hierarchal structure itself; integration with xarray's IO backends; and minor API changes, which mostly consist of renaming methods to be more self-consistent. ### Data model changes The most important changes made are to the data model of `DataTree`. 
Whilst previously data in different nodes was unrelated and therefore unconstrained, now trees have "internal alignment" - meaning that dimensions and indexes in child nodes must exactly align with those in their parents. These alignment checks happen at tree construction time, meaning there are some netCDF4 files and zarr stores that could previously be opened as `datatree.DataTree` objects using `datatree.open_datatree`, but now cannot be opened as `xr.DataTree` objects using `xr.open_datatree`. For these cases we added a new opener function `xr.open_groups`, which returns a `dict[str, Dataset]`. This is intended as a fallback for tricky cases, where the idea is that you can still open the entire contents of the file using `open_groups`, edit the `Dataset` objects, then construct a valid tree from the edited dictionary using `DataTree.from_dict`. The alignment checks allowed us to add "Coordinate Inheritance", a much-requested feature where indexed coordinate variables are now "inherited" down to child nodes. This allows you to define common coordinates in a parent group that are then automatically available on every child node. The distinction between a locally-defined coordinate variables and an inherited coordinate that was defined on a parent node is reflected in the `DataTree.__repr__`. Generally if you prefer not to have these variables be inherited you can get more similar behaviour to the old `datatree` package by removing indexes from coordinates, as this prevents inheritance. Tree structure checks between multiple trees (i.e., `DataTree.isomorophic`) and pairing of nodes in arithmetic has also changed. Nodes are now matched (with `xarray.group_subtrees`) based on their relative paths, without regard to the order in which child nodes are defined. For further documentation see the page in the user guide on Hierarchical Data. ### Integrated backends Previously `datatree.open_datatree` used a different codepath from `xarray.open_dataset`, and was hard-coded to only support opening netCDF files and Zarr stores. Now xarray's backend entrypoint system has been generalized to include `open_datatree` and the new `open_groups`. This means we can now extend other xarray backends to support `open_datatree`! If you are the maintainer of an xarray backend we encourage you to add support for `open_datatree` and `open_groups`! Additionally: - A `group` kwarg has been added to `open_datatree` for choosing which group in the file should become the root group of the created tree. - Various performance improvements have been made, which should help when opening netCDF files and Zarr stores with large numbers of groups. - We anticipate further performance improvements being possible for datatree IO. ### API changes A number of other API changes have been made, which should only require minor modifications to your code: - The top-level import has changed, from `from datatree import DataTree, open_datatree` to `from xarray import DataTree, open_datatree`. Alternatively you can now just use the `import xarray as xr` namespace convention for everything datatree-related. - The `DataTree.ds` property has been changed to `DataTree.dataset`, though `DataTree.ds` remains as an alias for `DataTree.dataset`. - Similarly the `ds` kwarg in the `DataTree.__init__` constructor has been replaced by `dataset`, i.e. use `DataTree(dataset=)` instead of `DataTree(ds=...)`. 
- The method `DataTree.to_dataset()` still exists but now has different options for controlling which variables are present on the resulting `Dataset`, e.g. `inherit=True/False`. - `DataTree.copy()` also has a new `inherit` keyword argument for controlling whether or not coordinates defined on parents are copied (only relevant when copying a non-root node). - The `DataTree.parent` property is now read-only. To assign a ancestral relationships directly you must instead use the `.children` property on the parent node, which remains settable. - Similarly the `parent` kwarg has been removed from the `DataTree.__init__` constructor. - DataTree objects passed to the `children` kwarg in `DataTree.__init__` are now shallow-copied. - `DataTree.map_over_subtree` has been renamed to `DataTree.map_over_datasets`, and changed to no longer work like a decorator. Instead you use it to apply the function and arguments directly, more like how `xarray.apply_ufunc` works. - `DataTree.as_array` has been replaced by `DataTree.to_dataarray`. - A number of methods which were not well tested have been (temporarily) disabled. In general we have tried to only keep things that are known to work, with the plan to increase API surface incrementally after release. ## Thank you! Thank you for trying out `xarray-contrib/datatree`! We welcome contributions of any kind, including good ideas that never quite made it into the original datatree repository. Please also let us know if we have forgotten to mention a change that should have been listed in this guide. Sincerely, the datatree team: Tom Nicholas, Owen Littlejohns, Matt Savoie, Eni Awowale, Alfonso Ladino, Justus Magin, Stephan Hoyer xarray-2025.09.0/HOW_TO_RELEASE.md000066400000000000000000000114031505620616400160640ustar00rootroot00000000000000# How to issue an xarray release in 16 easy steps Time required: about an hour. These instructions assume that `upstream` refers to the main repository: ```sh $ git remote -v {...} upstream https://github.com/pydata/xarray (fetch) upstream https://github.com/pydata/xarray (push) ``` 1. Ensure your main branch is synced to upstream: ```sh git switch main git pull upstream main ``` 2. Add a list of contributors. First fetch all previous release tags so we can see the version number of the last release was: ```sh git fetch upstream --tags ``` Then run ```sh python ci/release_contributors.py ``` (needs `gitpython` and `toolz` / `cytoolz`) and copy the output. 3. Write a release summary: ~50 words describing the high level features. This will be used in the release emails, tweets, GitHub release notes, etc. 4. Look over whats-new.rst and the docs. Make sure "What's New" is complete (check the date!) and add the release summary at the top. Things to watch out for: - Important new features should be highlighted towards the top. - Function/method references should include links to the API docs. - Sometimes notes get added in the wrong section of whats-new, typically due to a bad merge. Check for these before a release by using git diff, e.g., `git diff v{YYYY.MM.X-1} whats-new.rst` where {YYYY.MM.X-1} is the previous release. 5. Open a PR with the release summary and whatsnew changes; in particular the release headline should get feedback from the team on what's important to include. Apply the `Release` label to the PR to trigger a test build action. 6. After merging, again ensure your main branch is synced to upstream: ```sh git switch main git pull upstream main ``` 7. If you have any doubts, run the full test suite one final time! 
```sh pytest ``` 8. Check that the [ReadTheDocs build](https://readthedocs.org/projects/xray/) is passing on the `latest` build version (which is built from the `main` branch). 9. Issue the release on GitHub. Click on "Draft a new release" at . Type in the version number (with a "v") and paste the release summary in the notes. 10. This should automatically trigger an upload of the new build to PyPI via GitHub Actions. Check this has run [here](https://github.com/pydata/xarray/actions/workflows/pypi-release.yaml), and that the version number you expect is displayed [on PyPI](https://pypi.org/project/xarray/) 11. Add a section for the next release {YYYY.MM.X+1} to doc/whats-new.rst (we avoid doing this earlier so that it doesn't show up in the RTD build): ```rst .. _whats-new.YYYY.MM.X+1: vYYYY.MM.X+1 (unreleased) ----------------------- New Features ~~~~~~~~~~~~ Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ Documentation ~~~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ ``` 12. Make a PR with these changes and merge it: ```sh git checkout -b empty-whatsnew-YYYY.MM.X+1 git commit -am "empty whatsnew" git push ``` (Note that repo branch restrictions prevent pushing to `main`, so you have to just-self-merge this.) 13. Update the version available on pyodide: - Open the PyPI page for [Xarray downloads](https://pypi.org/project/xarray/#files) - Edit [`pyodide/packages/xarray/meta.yaml`](https://github.com/pyodide/pyodide/blob/main/packages/xarray/meta.yaml) to update the - version number - link to the wheel (under "Built Distribution" on the PyPI page) - SHA256 hash (Click "Show Hashes" next to the link to the wheel) - Open a pull request to pyodide 14. Issue the release announcement to mailing lists & Twitter (X). For bug fix releases, I usually only email xarray@googlegroups.com. For major/feature releases, I will email a broader list (no more than once every 3-6 months): - pydata@googlegroups.com - xarray@googlegroups.com - numpy-discussion@scipy.org - scipy-user@scipy.org - pyaos@lists.johnny-lin.com Google search will turn up examples of prior release announcements (look for "ANN xarray"). Some of these groups require you to be subscribed in order to email them. ## Note on version numbering As of 2022.03.0, we utilize the [CALVER](https://calver.org/) version system. Specifically, we have adopted the pattern `YYYY.MM.X`, where `YYYY` is a 4-digit year (e.g. `2022`), `0M` is a 2-digit zero-padded month (e.g. `01` for January), and `X` is the release number (starting at zero at the start of each month and incremented once for each additional release). xarray-2025.09.0/LICENSE000066400000000000000000000240341505620616400143540ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2014-2024 xarray Developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
xarray-2025.09.0/README.md000066400000000000000000000217641505620616400146350ustar00rootroot00000000000000# xarray: N-D labeled arrays and datasets [![CI](https://github.com/pydata/xarray/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/pydata/xarray/actions/workflows/ci.yaml?query=branch%3Amain) [![Code coverage](https://codecov.io/gh/pydata/xarray/branch/main/graph/badge.svg?flag=unittests)](https://codecov.io/gh/pydata/xarray) [![Docs](https://readthedocs.org/projects/xray/badge/?version=latest)](https://docs.xarray.dev/) [![Benchmarked with asv](https://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat)](https://asv-runner.github.io/asv-collection/xarray/) [![Formatted with black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black) [![Checked with mypy](http://www.mypy-lang.org/static/mypy_badge.svg)](http://mypy-lang.org/) [![Available on pypi](https://img.shields.io/pypi/v/xarray.svg)](https://pypi.python.org/pypi/xarray/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/xarray)](https://pypistats.org/packages/xarray) [![Conda - Downloads](https://img.shields.io/conda/dn/anaconda/xarray?label=conda%7Cdownloads)](https://anaconda.org/anaconda/xarray) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.11183201.svg)](https://doi.org/10.5281/zenodo.11183201) [![Examples on binder](https://img.shields.io/badge/launch-binder-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/
f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC)](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/weather-data.ipynb) [![Twitter](https://img.shields.io/twitter/follow/xarray_dev?style=social)](https://x.com/xarray_dev) **xarray** (pronounced "ex-array", formerly known as **xray**) is an open source project and Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun! Xarray introduces labels in the form of dimensions, coordinates and attributes on top of raw [NumPy](https://www.numpy.org)-like arrays, which allows for a more intuitive, more concise, and less error-prone developer experience. The package includes a large and growing library of domain-agnostic functions for advanced analytics and visualization with these data structures. Xarray was inspired by and borrows heavily from [pandas](https://pandas.pydata.org), the popular data analysis package focused on labelled tabular data. It is particularly tailored to working with [netCDF](https://www.unidata.ucar.edu/software/netcdf) files, which were the source of xarray\'s data model, and integrates tightly with [dask](https://dask.org) for parallel computing. ## Why xarray? Multi-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called "tensors") are an essential part of computational science. They are encountered in a wide range of fields, including physics, astronomy, geoscience, bioinformatics, engineering, finance, and deep learning. In Python, [NumPy](https://www.numpy.org) provides the fundamental data structure and API for working with raw ND arrays. However, real-world datasets are usually more than just raw numbers; they have labels which encode information about how the array values map to locations in space, time, etc. Xarray doesn\'t just keep track of labels on arrays \-- it uses them to provide a powerful and concise interface. For example: - Apply operations over dimensions by name: `x.sum('time')`. - Select values by label instead of integer location: `x.loc['2014-01-01']` or `x.sel(time='2014-01-01')`. - Mathematical operations (e.g., `x - y`) vectorize across multiple dimensions (array broadcasting) based on dimension names, not shape. - Flexible split-apply-combine operations with groupby: `x.groupby('time.dayofyear').mean()`. - Database like alignment based on coordinate labels that smoothly handles missing values: `x, y = xr.align(x, y, join='outer')`. - Keep track of arbitrary metadata in the form of a Python dictionary: `x.attrs`. ## Documentation Learn more about xarray in its official documentation at . Try out an [interactive Jupyter notebook](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/weather-data.ipynb). ## Contributing You can find information about contributing to xarray at our [Contributing page](https://docs.xarray.dev/en/stable/contributing.html). ## Get in touch - Ask usage questions ("How do I?") on [GitHub Discussions](https://github.com/pydata/xarray/discussions). - Report bugs, suggest features or view the source code [on GitHub](https://github.com/pydata/xarray). - For less well defined questions or ideas, or to announce other projects of interest to xarray users, use the [mailing list](https://groups.google.com/forum/#!forum/xarray). 
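## A quick example

A minimal sketch pulling together the label-based operations listed under "Why xarray?" above. The data is synthetic and the names used here (`temperature`, `time`, `location`) are purely illustrative, not taken from any real dataset:

```python
import numpy as np
import pandas as pd
import xarray as xr

# Wrap a plain NumPy array with named dimensions, coordinate labels and metadata
temperature = xr.DataArray(
    np.random.randn(365, 3),
    dims=("time", "location"),
    coords={
        "time": pd.date_range("2014-01-01", periods=365),
        "location": ["a", "b", "c"],
    },
    attrs={"units": "degC"},  # arbitrary metadata stays attached to the array
)

# Select values by label instead of integer position
temperature.sel(time="2014-01-01")

# Apply a reduction over a dimension by name
temperature.mean("time")

# Split-apply-combine keyed on a datetime component of the coordinate
temperature.groupby("time.month").mean()
```

These are the same operations described above; see the official documentation and the interactive notebook linked earlier for complete, up-to-date examples.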
## NumFOCUS Xarray is a fiscally sponsored project of [NumFOCUS](https://numfocus.org), a nonprofit dedicated to supporting the open source scientific computing community. If you like Xarray and want to support our mission, please consider making a [donation](https://numfocus.org/donate-to-xarray) to support our efforts. ## History Xarray is an evolution of an internal tool developed at [The Climate Corporation](https://climate.com/). It was originally written by Climate Corp researchers Stephan Hoyer, Alex Kleeman and Eugene Brevdo and was released as open source in May 2014. The project was renamed from "xray" in January 2016. Xarray became a fiscally sponsored project of [NumFOCUS](https://numfocus.org) in August 2018. ## Contributors Thanks to our many contributors! [![Contributors](https://contrib.rocks/image?repo=pydata/xarray)](https://github.com/pydata/xarray/graphs/contributors) ## License Copyright 2014-2024, xarray Developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Xarray bundles portions of pandas, NumPy and Seaborn, all of which are available under a "3-clause BSD" license: - pandas: `setup.py`, `xarray/util/print_versions.py` - NumPy: `xarray/core/npcompat.py` - Seaborn: `_determine_cmap_params` in `xarray/core/plot/utils.py` Xarray also bundles portions of CPython, which is available under the "Python Software Foundation License" in `xarray/core/pycompat.py`. Xarray uses icons from the icomoon package (free version), which is available under the "CC BY 4.0" license. The full text of these licenses are included in the licenses directory. xarray-2025.09.0/asv_bench/000077500000000000000000000000001505620616400152745ustar00rootroot00000000000000xarray-2025.09.0/asv_bench/asv.conf.json000066400000000000000000000142471505620616400177140ustar00rootroot00000000000000{ // The version of the config file format. Do not change, unless // you know what you are doing. "version": 1, // The name of the project being benchmarked "project": "xarray", // The project's homepage "project_url": "https://docs.xarray.dev/", // The URL or local path of the source code repository for the // project being benchmarked "repo": "..", // List of branches to benchmark. If not provided, defaults to "master" // (for git) or "default" (for mercurial). "branches": ["main"], // for git // "branches": ["default"], // for mercurial // The DVCS being used. If not set, it will be automatically // determined from "repo" by looking at the protocol in the URL // (if remote), or by looking for special directories, such as // ".git" (if local). "dvcs": "git", // The tool to use to create environments. May be "conda", // "virtualenv" or other value depending on the plugins in use. // If missing or the empty string, the tool will be automatically // determined by looking for tools on the PATH environment // variable. "environment_type": "mamba", "conda_channels": ["conda-forge"], // timeout in seconds for installing any dependencies in environment // defaults to 10 min "install_timeout": 600, // the base URL to show a commit for the project. 
"show_commit_url": "https://github.com/pydata/xarray/commit/", // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. "pythons": ["3.11"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty // list or empty string indicates to just test against the default // (latest) version. null indicates that the package is to not be // installed. If the package to be tested is only available from // PyPi, and the 'environment_type' is conda, then you can preface // the package name by 'pip+', and the package will be installed via // pip (with all the conda available packages installed first, // followed by the pip installed packages). // // "matrix": { // "numpy": ["1.6", "1.7"], // "six": ["", null], // test with and without six installed // "pip+emcee": [""], // emcee is only available for install with pip. // }, "matrix": { "setuptools_scm": [""], // GH6609 "numpy": ["2.2"], "pandas": [""], "netcdf4": [""], "scipy": [""], "bottleneck": [""], "dask": [""], "distributed": [""], "flox": [""], "numpy_groupies": [""], "sparse": [""], "cftime": [""] }, // fix for bad builds // https://github.com/airspeed-velocity/asv/issues/1389#issuecomment-2076131185 "build_command": [ "python -m build", "python -mpip wheel --no-deps --no-build-isolation --no-index -w {build_cache_dir} {build_dir}" ], // Combinations of libraries/python versions can be excluded/included // from the set to test. Each entry is a dictionary containing additional // key-value pairs to include/exclude. // // An exclude entry excludes entries where all values match. The // values are regexps that should match the whole string. // // An include entry adds an environment. Only the packages listed // are installed. The 'python' key is required. The exclude rules // do not apply to includes. // // In addition to package names, the following keys are available: // // - python // Python version, as in the *pythons* variable above. // - environment_type // Environment type, as above. // - sys_platform // Platform, as in sys.platform. Possible values for the common // cases: 'linux2', 'win32', 'cygwin', 'darwin'. // // "exclude": [ // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows // {"environment_type": "conda", "six": null}, // don't run without six on conda // ], // // "include": [ // // additional env for python2.7 // {"python": "2.7", "numpy": "1.8"}, // // additional env if run on windows+conda // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""}, // ], // The directory (relative to the current directory) that benchmarks are // stored in. If not provided, defaults to "benchmarks" "benchmark_dir": "benchmarks", // The directory (relative to the current directory) to cache the Python // environments in. If not provided, defaults to "env" "env_dir": ".asv/env", // The directory (relative to the current directory) that raw benchmark // results are stored in. If not provided, defaults to "results". "results_dir": ".asv/results", // The directory (relative to the current directory) that the html tree // should be written to. If not provided, defaults to "html". "html_dir": ".asv/html" // The number of characters to retain in the commit hashes. // "hash_length": 8, // `asv` will cache wheels of the recent builds in each // environment, making them faster to install next time. This is // number of builds to keep, per environment. 
// "wheel_cache_size": 0 // The commits after which the regression search in `asv publish` // should start looking for regressions. Dictionary whose keys are // regexps matching to benchmark names, and values corresponding to // the commit (exclusive) after which to start looking for // regressions. The default is to start from the first commit // with results. If the commit is `null`, regression detection is // skipped for the matching benchmark. // // "regressions_first_commits": { // "some_benchmark": "352cdf", // Consider regressions only after this commit // "another_benchmark": null, // Skip regression detection altogether // } // The thresholds for relative change in results, after which `asv // publish` starts reporting regressions. Dictionary of the same // form as in ``regressions_first_commits``, with values // indicating the thresholds. If multiple entries match, the // maximum is taken. If no entry matches, the default is 5%. // // "regressions_thresholds": { // "some_benchmark": 0.01, // Threshold of 1% // "another_benchmark": 0.5, // Threshold of 50% // } } xarray-2025.09.0/asv_bench/benchmarks/000077500000000000000000000000001505620616400174115ustar00rootroot00000000000000xarray-2025.09.0/asv_bench/benchmarks/README_CI.md000066400000000000000000000174561505620616400212600ustar00rootroot00000000000000# Benchmark CI ## How it works The `asv` suite can be run for any PR on GitHub Actions (check workflow `.github/workflows/benchmarks.yml`) by adding a `run-benchmark` label to said PR. This will trigger a job that will run the benchmarking suite for the current PR head (merged commit) against the PR base (usually `main`). We use `asv continuous` to run the job, which runs a relative performance measurement. This means that there's no state to be saved and that regressions are only caught in terms of performance ratio (absolute numbers are available but they are not useful since we do not use stable hardware over time). `asv continuous` will: - Compile `scikit-image` for _both_ commits. We use `ccache` to speed up the process, and `mamba` is used to create the build environments. - Run the benchmark suite for both commits, _twice_ (since `processes=2` by default). - Generate a report table with performance ratios: - `ratio=1.0` -> performance didn't change. - `ratio<1.0` -> PR made it slower. - `ratio>1.0` -> PR made it faster. Due to the sensitivity of the test, we cannot guarantee that false positives are not produced. In practice, values between `(0.7, 1.5)` are to be considered part of the measurement noise. When in doubt, running the benchmark suite one more time will provide more information about the test being a false positive or not. ## Running the benchmarks on GitHub Actions 1. On a PR, add the label `run-benchmark`. 2. The CI job will be started. Checks will appear in the usual dashboard panel above the comment box. 3. If more commits are added, the label checks will be grouped with the last commit checks _before_ you added the label. 4. Alternatively, you can always go to the `Actions` tab in the repo and [filter for `workflow:Benchmark`](https://github.com/scikit-image/scikit-image/actions?query=workflow%3ABenchmark). Your username will be assigned to the `actor` field, so you can also filter the results with that if you need it. ## The artifacts The CI job will also generate an artifact. This is the `.asv/results` directory compressed in a zip file. Its contents include: - `fv-xxxxx-xx/`. A directory for the machine that ran the suite. 
It contains three files: - `.json`, `.json`: the benchmark results for each commit, with stats. - `machine.json`: details about the hardware. - `benchmarks.json`: metadata about the current benchmark suite. - `benchmarks.log`: the CI logs for this run. - This README. ## Re-running the analysis Although the CI logs should be enough to get an idea of what happened (check the table at the end), one can use `asv` to run the analysis routines again. 1. Uncompress the artifact contents in the repo, under `.asv/results`. This is, you should see `.asv/results/benchmarks.log`, not `.asv/results/something_else/benchmarks.log`. Write down the machine directory name for later. 2. Run `asv show` to see your available results. You will see something like this: ``` $> asv show Commits with results: Machine : Jaimes-MBP Environment: conda-py3.9-cython-numpy1.20-scipy 00875e67 Machine : fv-az95-499 Environment: conda-py3.7-cython-numpy1.17-pooch-scipy 8db28f02 3a305096 ``` 3. We are interested in the commits for `fv-az95-499` (the CI machine for this run). We can compare them with `asv compare` and some extra options. `--sort ratio` will show largest ratios first, instead of alphabetical order. `--split` will produce three tables: improved, worsened, no changes. `--factor 1.5` tells `asv` to only complain if deviations are above a 1.5 ratio. `-m` is used to indicate the machine ID (use the one you wrote down in step 1). Finally, specify your commit hashes: baseline first, then contender! ``` $> asv compare --sort ratio --split --factor 1.5 -m fv-az95-499 8db28f02 3a305096 Benchmarks that have stayed the same: before after ratio [8db28f02] [3a305096] n/a n/a n/a benchmark_restoration.RollingBall.time_rollingball_ndim 1.23Β±0.04ms 1.37Β±0.1ms 1.12 benchmark_transform_warp.WarpSuite.time_to_float64(, 128, 3) 5.07Β±0.1ΞΌs 5.59Β±0.4ΞΌs 1.10 benchmark_transform_warp.ResizeLocalMeanSuite.time_resize_local_mean(, (192, 192, 192), (192, 192, 192)) 1.23Β±0.02ms 1.33Β±0.1ms 1.08 benchmark_transform_warp.WarpSuite.time_same_type(, 128, 3) 9.45Β±0.2ms 10.1Β±0.5ms 1.07 benchmark_rank.Rank3DSuite.time_3d_filters('majority', (32, 32, 32)) 23.0Β±0.9ms 24.6Β±1ms 1.07 benchmark_interpolation.InterpolationResize.time_resize((80, 80, 80), 0, 'symmetric', , True) 38.7Β±1ms 41.1Β±1ms 1.06 benchmark_transform_warp.ResizeLocalMeanSuite.time_resize_local_mean(, (2048, 2048), (192, 192, 192)) 4.97Β±0.2ΞΌs 5.24Β±0.2ΞΌs 1.05 benchmark_transform_warp.ResizeLocalMeanSuite.time_resize_local_mean(, (2048, 2048), (2048, 2048)) 4.21Β±0.2ms 4.42Β±0.3ms 1.05 benchmark_rank.Rank3DSuite.time_3d_filters('gradient', (32, 32, 32)) ... ``` If you want more details on a specific test, you can use `asv show`. 
Use `-b pattern` to filter which tests to show, and then specify a commit hash to inspect: ``` $> asv show -b time_to_float64 8db28f02 Commit: 8db28f02 benchmark_transform_warp.WarpSuite.time_to_float64 [fv-az95-499/conda-py3.7-cython-numpy1.17-pooch-scipy] ok =============== ============= ========== ============= ========== ============ ========== ============ ========== ============ -- N / order --------------- -------------------------------------------------------------------------------------------------------------- dtype_in 128 / 0 128 / 1 128 / 3 1024 / 0 1024 / 1 1024 / 3 4096 / 0 4096 / 1 4096 / 3 =============== ============= ========== ============= ========== ============ ========== ============ ========== ============ numpy.uint8 2.56Β±0.09ms 523Β±30ΞΌs 1.28Β±0.05ms 130Β±3ms 28.7Β±2ms 81.9Β±3ms 2.42Β±0.01s 659Β±5ms 1.48Β±0.01s numpy.uint16 2.48Β±0.03ms 530Β±10ΞΌs 1.28Β±0.02ms 130Β±1ms 30.4Β±0.7ms 81.1Β±2ms 2.44Β±0s 653Β±3ms 1.47Β±0.02s numpy.float32 2.59Β±0.1ms 518Β±20ΞΌs 1.27Β±0.01ms 127Β±3ms 26.6Β±1ms 74.8Β±2ms 2.50Β±0.01s 546Β±10ms 1.33Β±0.02s numpy.float64 2.48Β±0.04ms 513Β±50ΞΌs 1.23Β±0.04ms 134Β±3ms 30.7Β±2ms 85.4Β±2ms 2.55Β±0.01s 632Β±4ms 1.45Β±0.01s =============== ============= ========== ============= ========== ============ ========== ============ ========== ============ started: 2021-07-06 06:14:36, duration: 1.99m ``` ## Other details ### Skipping slow or demanding tests To minimize the time required to run the full suite, we trimmed the parameter matrix in some cases and, in others, directly skipped tests that ran for too long or require too much memory. Unlike `pytest`, `asv` does not have a notion of marks. However, you can `raise NotImplementedError` in the setup step to skip a test. In that vein, a new private function is defined at `benchmarks.__init__`: `_skip_slow`. This will check if the `ASV_SKIP_SLOW` environment variable has been defined. If set to `1`, it will raise `NotImplementedError` and skip the test. To implement this behavior in other tests, you can add the following attribute: ```python from . import _skip_slow # this function is defined in benchmarks.__init__ def time_something_slow(): pass time_something.setup = _skip_slow ``` xarray-2025.09.0/asv_bench/benchmarks/__init__.py000066400000000000000000000032271505620616400215260ustar00rootroot00000000000000import itertools import os import numpy as np _counter = itertools.count() def parameterized(names, params): def decorator(func): func.param_names = names func.params = params return func return decorator def requires_dask(): try: import dask # noqa: F401 except ImportError as err: raise NotImplementedError() from err def requires_sparse(): try: import sparse # noqa: F401 except ImportError as err: raise NotImplementedError() from err def randn(shape, frac_nan=None, chunks=None, seed=0): rng = np.random.default_rng(seed) if chunks is None: x = rng.standard_normal(shape) else: import dask.array as da rng = da.random.default_rng(seed) x = rng.standard_normal(shape, chunks=chunks) if frac_nan is not None: inds = rng.choice(range(x.size), int(x.size * frac_nan)) x.flat[inds] = np.nan return x def randint(low, high=None, size=None, frac_minus=None, seed=0): rng = np.random.default_rng(seed) x = rng.integers(low, high, size) if frac_minus is not None: inds = rng.choice(range(x.size), int(x.size * frac_minus)) x.flat[inds] = -1 return x def _skip_slow(): """ Use this function to skip slow or highly demanding tests. Use it as a `Class.setup` method or a `function.setup` attribute. 
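    The check is driven by the ``ASV_SKIP_SLOW`` environment variable: when it
    is set to ``"1"`` this helper raises ``NotImplementedError``, which asv
    reports as a skipped benchmark; any other value (or leaving it unset) lets
    the benchmark run normally.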
Examples -------- >>> from . import _skip_slow >>> def time_something_slow(): ... pass ... >>> time_something.setup = _skip_slow """ if os.environ.get("ASV_SKIP_SLOW", "0") == "1": raise NotImplementedError("Skipping this test...") xarray-2025.09.0/asv_bench/benchmarks/accessors.py000066400000000000000000000011721505620616400217510ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized NTIME = 365 * 30 @parameterized(["calendar"], [("standard", "noleap")]) class DateTimeAccessor: def setup(self, calendar): np.random.randn(NTIME) time = xr.date_range("2000", periods=30 * 365, calendar=calendar) data = np.ones((NTIME,)) self.da = xr.DataArray(data, dims="time", coords={"time": time}) def time_dayofyear(self, calendar): _ = self.da.time.dt.dayofyear def time_year(self, calendar): _ = self.da.time.dt.year def time_floor(self, calendar): _ = self.da.time.dt.floor("D") xarray-2025.09.0/asv_bench/benchmarks/alignment.py000066400000000000000000000031601505620616400217410ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized, requires_dask ntime = 365 * 30 nx = 50 ny = 50 rng = np.random.default_rng(0) class Align: def setup(self, *args, **kwargs): data = rng.standard_normal((ntime, nx, ny)) self.ds = xr.Dataset( {"temperature": (("time", "x", "y"), data)}, coords={ "time": xr.date_range("2000", periods=ntime), "x": np.arange(nx), "y": np.arange(ny), }, ) self.year = self.ds.time.dt.year self.idx = np.unique(rng.integers(low=0, high=ntime, size=ntime // 2)) self.year_subset = self.year.isel(time=self.idx) @parameterized(["join"], [("outer", "inner", "left", "right", "exact", "override")]) def time_already_aligned(self, join): xr.align(self.ds, self.year, join=join) @parameterized(["join"], [("outer", "inner", "left", "right")]) def time_not_aligned(self, join): xr.align(self.ds, self.year[-100:], join=join) @parameterized(["join"], [("outer", "inner", "left", "right")]) def time_not_aligned_random_integers(self, join): xr.align(self.ds, self.year_subset, join=join) class AlignCFTime(Align): def setup(self, *args, **kwargs): super().setup() self.ds["time"] = xr.date_range("2000", periods=ntime, calendar="noleap") self.year = self.ds.time.dt.year self.year_subset = self.year.isel(time=self.idx) class AlignDask(Align): def setup(self, *args, **kwargs): requires_dask() super().setup() self.ds = self.ds.chunk({"time": 100}) xarray-2025.09.0/asv_bench/benchmarks/coding.py000066400000000000000000000010101505620616400212160ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized @parameterized(["calendar"], [("standard", "noleap")]) class EncodeCFDatetime: def setup(self, calendar): self.units = "days since 2000-01-01" self.dtype = np.dtype("int64") self.times = xr.date_range( "2000", freq="D", periods=10000, calendar=calendar ).values def time_encode_cf_datetime(self, calendar): xr.coding.times.encode_cf_datetime(self.times, self.units, calendar, self.dtype) xarray-2025.09.0/asv_bench/benchmarks/combine.py000066400000000000000000000045611505620616400214050ustar00rootroot00000000000000import numpy as np import xarray as xr from . 
import requires_dask class Combine1d: """Benchmark concatenating and merging large datasets""" def setup(self) -> None: """Create 2 datasets with two different variables""" t_size = 8000 t = np.arange(t_size) data = np.random.randn(t_size) self.dsA0 = xr.Dataset({"A": xr.DataArray(data, coords={"T": t}, dims=("T"))}) self.dsA1 = xr.Dataset( {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T"))} ) def time_combine_by_coords(self) -> None: """Also has to load and arrange t coordinate""" datasets = [self.dsA0, self.dsA1] xr.combine_by_coords(datasets) class Combine1dDask(Combine1d): """Benchmark concatenating and merging large datasets""" def setup(self) -> None: """Create 2 datasets with two different variables""" requires_dask() t_size = 8000 t = np.arange(t_size) var = xr.Variable(dims=("T",), data=np.random.randn(t_size)).chunk() data_vars = {f"long_name_{v}": ("T", var) for v in range(500)} self.dsA0 = xr.Dataset(data_vars, coords={"T": t}) self.dsA1 = xr.Dataset(data_vars, coords={"T": t + t_size}) class Combine3d: """Benchmark concatenating and merging large datasets""" def setup(self): """Create 4 datasets with two different variables""" t_size, x_size, y_size = 50, 450, 400 t = np.arange(t_size) data = np.random.randn(t_size, x_size, y_size) self.dsA0 = xr.Dataset( {"A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))} ) self.dsA1 = xr.Dataset( {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))} ) self.dsB0 = xr.Dataset( {"B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))} ) self.dsB1 = xr.Dataset( {"B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))} ) def time_combine_nested(self): datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]] xr.combine_nested(datasets, concat_dim=[None, "T"]) def time_combine_by_coords(self): """Also has to load and arrange t coordinate""" datasets = [self.dsA0, self.dsA1, self.dsB0, self.dsB1] xr.combine_by_coords(datasets) xarray-2025.09.0/asv_bench/benchmarks/dataarray_missing.py000066400000000000000000000035201505620616400234640ustar00rootroot00000000000000import pandas as pd import xarray as xr from . 
import parameterized, randn, requires_dask def make_bench_data(shape, frac_nan, chunks): vals = randn(shape, frac_nan) coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])} da = xr.DataArray(vals, dims=("time", "x", "y"), coords=coords) if chunks is not None: da = da.chunk(chunks) return da class DataArrayMissingInterpolateNA: def setup(self, shape, chunks, limit): if chunks is not None: requires_dask() self.da = make_bench_data(shape, 0.1, chunks) @parameterized( ["shape", "chunks", "limit"], ( [(365, 75, 75)], [None, {"x": 25, "y": 25}], [None, 3], ), ) def time_interpolate_na(self, shape, chunks, limit): actual = self.da.interpolate_na(dim="time", method="linear", limit=limit) if chunks is not None: actual = actual.compute() class DataArrayMissingBottleneck: def setup(self, shape, chunks, limit): if chunks is not None: requires_dask() self.da = make_bench_data(shape, 0.1, chunks) @parameterized( ["shape", "chunks", "limit"], ( [(365, 75, 75)], [None, {"x": 25, "y": 25}], [None, 3], ), ) def time_ffill(self, shape, chunks, limit): actual = self.da.ffill(dim="time", limit=limit) if chunks is not None: actual = actual.compute() @parameterized( ["shape", "chunks", "limit"], ( [(365, 75, 75)], [None, {"x": 25, "y": 25}], [None, 3], ), ) def time_bfill(self, shape, chunks, limit): actual = self.da.bfill(dim="time", limit=limit) if chunks is not None: actual = actual.compute() xarray-2025.09.0/asv_bench/benchmarks/dataset.py000066400000000000000000000012771505620616400214170ustar00rootroot00000000000000import numpy as np from xarray import Dataset from . import requires_dask class DatasetBinaryOp: def setup(self): self.ds = Dataset( { "a": (("x", "y"), np.ones((300, 400))), "b": (("x", "y"), np.ones((300, 400))), } ) self.mean = self.ds.mean() self.std = self.ds.std() def time_normalize(self): (self.ds - self.mean) / self.std class DatasetChunk: def setup(self): requires_dask() self.ds = Dataset() array = np.ones(1000) for i in range(250): self.ds[f"var{i}"] = ("x", array) def time_chunk(self): self.ds.chunk(x=(1,) * 1000) xarray-2025.09.0/asv_bench/benchmarks/dataset_io.py000066400000000000000000000610021505620616400220760ustar00rootroot00000000000000from __future__ import annotations import os from dataclasses import dataclass import numpy as np import pandas as pd import xarray as xr from . 
import _skip_slow, parameterized, randint, randn, requires_dask try: import dask import dask.multiprocessing except ImportError: pass os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE" _ENGINES = tuple(xr.backends.list_engines().keys() - {"store"}) class IOSingleNetCDF: """ A few examples that benchmark reading/writing a single netCDF file with xarray """ timeout = 300.0 repeat = 1 number = 5 def make_ds(self): # single Dataset self.ds = xr.Dataset() self.nt = 1000 self.nx = 90 self.ny = 45 self.block_chunks = { "time": self.nt / 4, "lon": self.nx / 3, "lat": self.ny / 3, } self.time_chunks = {"time": int(self.nt / 36)} times = pd.date_range("1970-01-01", periods=self.nt, freq="D") lons = xr.DataArray( np.linspace(0, 360, self.nx), dims=("lon",), attrs={"units": "degrees east", "long_name": "longitude"}, ) lats = xr.DataArray( np.linspace(-90, 90, self.ny), dims=("lat",), attrs={"units": "degrees north", "long_name": "latitude"}, ) self.ds["foo"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="foo", attrs={"units": "foo units", "description": "a description"}, ) self.ds["bar"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="bar", attrs={"units": "bar units", "description": "a description"}, ) self.ds["baz"] = xr.DataArray( randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32), coords={"lon": lons, "lat": lats}, dims=("lon", "lat"), name="baz", attrs={"units": "baz units", "description": "a description"}, ) self.ds.attrs = {"history": "created for xarray benchmarking"} self.oinds = { "time": randint(0, self.nt, 120), "lon": randint(0, self.nx, 20), "lat": randint(0, self.ny, 10), } self.vinds = { "time": xr.DataArray(randint(0, self.nt, 120), dims="x"), "lon": xr.DataArray(randint(0, self.nx, 120), dims="x"), "lat": slice(3, 20), } class IOWriteSingleNetCDF3(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() self.format = "NETCDF3_64BIT" self.make_ds() def time_write_dataset_netcdf4(self): self.ds.to_netcdf("test_netcdf4_write.nc", engine="netcdf4", format=self.format) def time_write_dataset_scipy(self): self.ds.to_netcdf("test_scipy_write.nc", engine="scipy", format=self.format) class IOReadSingleNetCDF4(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() self.make_ds() self.filepath = "test_single_file.nc4.nc" self.format = "NETCDF4" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_netcdf4(self): xr.open_dataset(self.filepath, engine="netcdf4").load() def time_orthogonal_indexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4") ds = ds.isel(**self.oinds).load() def time_vectorized_indexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4") ds = ds.isel(**self.vinds).load() class IOReadSingleNetCDF3(IOReadSingleNetCDF4): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
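        # Rough sketch of the skip mechanism (helper defined in
        # benchmarks/__init__.py): _skip_slow() raises NotImplementedError when
        # the ASV_SKIP_SLOW environment variable is set to "1", which asv then
        # reports as a skipped benchmark. Illustrative invocations (assumed,
        # not part of this repo's tooling):
        #   ASV_SKIP_SLOW=1 asv run   # skip the demanding IO benchmarks
        #   ASV_SKIP_SLOW=0 asv run   # run them; setup() then writes the .nc file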
_skip_slow() self.make_ds() self.filepath = "test_single_file.nc3.nc" self.format = "NETCDF3_64BIT" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_scipy(self): xr.open_dataset(self.filepath, engine="scipy").load() def time_orthogonal_indexing(self): ds = xr.open_dataset(self.filepath, engine="scipy") ds = ds.isel(**self.oinds).load() def time_vectorized_indexing(self): ds = xr.open_dataset(self.filepath, engine="scipy") ds = ds.isel(**self.vinds).load() class IOReadSingleNetCDF4Dask(IOSingleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.filepath = "test_single_file.nc4.nc" self.format = "NETCDF4" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_netcdf4_with_block_chunks(self): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_block_chunks_oindexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks) ds = ds.isel(**self.oinds).load() def time_load_dataset_netcdf4_with_block_chunks_vindexing(self): ds = xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.block_chunks) ds = ds.isel(**self.vinds).load() def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks(self): xr.open_dataset(self.filepath, engine="netcdf4", chunks=self.time_chunks).load() def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="netcdf4", chunks=self.time_chunks ).load() class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
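        # requires_dask() below skips this benchmark the same way when dask is
        # not importable (it re-raises the ImportError as NotImplementedError).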
_skip_slow() requires_dask() self.make_ds() self.filepath = "test_single_file.nc3.nc" self.format = "NETCDF3_64BIT" self.ds.to_netcdf(self.filepath, format=self.format) def time_load_dataset_scipy_with_block_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="scipy", chunks=self.block_chunks ).load() def time_load_dataset_scipy_with_block_chunks_oindexing(self): ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks) ds = ds.isel(**self.oinds).load() def time_load_dataset_scipy_with_block_chunks_vindexing(self): ds = xr.open_dataset(self.filepath, engine="scipy", chunks=self.block_chunks) ds = ds.isel(**self.vinds).load() def time_load_dataset_scipy_with_time_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_dataset( self.filepath, engine="scipy", chunks=self.time_chunks ).load() class IOMultipleNetCDF: """ A few examples that benchmark reading/writing multiple netCDF files with xarray """ timeout = 300.0 repeat = 1 number = 5 def make_ds(self, nfiles=10): # multiple Dataset self.ds = xr.Dataset() self.nt = 1000 self.nx = 90 self.ny = 45 self.nfiles = nfiles self.block_chunks = { "time": self.nt / 4, "lon": self.nx / 3, "lat": self.ny / 3, } self.time_chunks = {"time": int(self.nt / 36)} self.time_vars = np.split( pd.date_range("1970-01-01", periods=self.nt, freq="D"), self.nfiles ) self.ds_list = [] self.filenames_list = [] for i, times in enumerate(self.time_vars): ds = xr.Dataset() nt = len(times) lons = xr.DataArray( np.linspace(0, 360, self.nx), dims=("lon",), attrs={"units": "degrees east", "long_name": "longitude"}, ) lats = xr.DataArray( np.linspace(-90, 90, self.ny), dims=("lat",), attrs={"units": "degrees north", "long_name": "latitude"}, ) ds["foo"] = xr.DataArray( randn((nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="foo", attrs={"units": "foo units", "description": "a description"}, ) ds["bar"] = xr.DataArray( randn((nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="bar", attrs={"units": "bar units", "description": "a description"}, ) ds["baz"] = xr.DataArray( randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32), coords={"lon": lons, "lat": lats}, dims=("lon", "lat"), name="baz", attrs={"units": "baz units", "description": "a description"}, ) ds.attrs = {"history": "created for xarray benchmarking"} self.ds_list.append(ds) self.filenames_list.append(f"test_netcdf_{i}.nc") class IOWriteMultipleNetCDF3(IOMultipleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() self.make_ds() self.format = "NETCDF3_64BIT" def time_write_dataset_netcdf4(self): xr.save_mfdataset( self.ds_list, self.filenames_list, engine="netcdf4", format=self.format ) def time_write_dataset_scipy(self): xr.save_mfdataset( self.ds_list, self.filenames_list, engine="scipy", format=self.format ) class IOReadMultipleNetCDF4(IOMultipleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
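        # setup() materialises the ten per-period datasets from make_ds() as
        # test_netcdf_*.nc via save_mfdataset, so the open_mfdataset timings
        # below read real files from disk.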
_skip_slow() requires_dask() self.make_ds() self.format = "NETCDF4" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_netcdf4(self): xr.open_mfdataset(self.filenames_list, engine="netcdf4").load() def time_open_dataset_netcdf4(self): xr.open_mfdataset(self.filenames_list, engine="netcdf4") class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.format = "NETCDF3_64BIT" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_scipy(self): xr.open_mfdataset(self.filenames_list, engine="scipy").load() def time_open_dataset_scipy(self): xr.open_mfdataset(self.filenames_list, engine="scipy") class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.make_ds() self.format = "NETCDF4" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_netcdf4_with_block_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ).load() def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ).load() def time_open_dataset_netcdf4_with_block_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ) def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.block_chunks ) def time_open_dataset_netcdf4_with_time_chunks(self): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ) def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="netcdf4", chunks=self.time_chunks ) class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
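        # The scipy-engine variants below additionally switch dask to the
        # "multiprocessing" scheduler for each call via dask.config.set.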
_skip_slow() requires_dask() self.make_ds() self.format = "NETCDF3_64BIT" xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format) def time_load_dataset_scipy_with_block_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.block_chunks ).load() def time_load_dataset_scipy_with_time_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.time_chunks ).load() def time_open_dataset_scipy_with_block_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.block_chunks ) def time_open_dataset_scipy_with_time_chunks(self): with dask.config.set(scheduler="multiprocessing"): xr.open_mfdataset( self.filenames_list, engine="scipy", chunks=self.time_chunks ) def create_delayed_write(): import dask.array as da vals = da.random.random(300, chunks=(1,)) ds = xr.Dataset({"vals": (["a"], vals)}) return ds.to_netcdf("file.nc", engine="netcdf4", compute=False) class IONestedDataTree: """ A few examples that benchmark reading/writing a heavily nested netCDF datatree with xarray """ timeout = 300.0 repeat = 1 number = 5 def make_datatree(self, nchildren=10): # multiple Dataset self.ds = xr.Dataset() self.nt = 1000 self.nx = 90 self.ny = 45 self.nchildren = nchildren self.block_chunks = { "time": self.nt / 4, "lon": self.nx / 3, "lat": self.ny / 3, } self.time_chunks = {"time": int(self.nt / 36)} times = pd.date_range("1970-01-01", periods=self.nt, freq="D") lons = xr.DataArray( np.linspace(0, 360, self.nx), dims=("lon",), attrs={"units": "degrees east", "long_name": "longitude"}, ) lats = xr.DataArray( np.linspace(-90, 90, self.ny), dims=("lat",), attrs={"units": "degrees north", "long_name": "latitude"}, ) self.ds["foo"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="foo", attrs={"units": "foo units", "description": "a description"}, ) self.ds["bar"] = xr.DataArray( randn((self.nt, self.nx, self.ny), frac_nan=0.2), coords={"lon": lons, "lat": lats, "time": times}, dims=("time", "lon", "lat"), name="bar", attrs={"units": "bar units", "description": "a description"}, ) self.ds["baz"] = xr.DataArray( randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32), coords={"lon": lons, "lat": lats}, dims=("lon", "lat"), name="baz", attrs={"units": "baz units", "description": "a description"}, ) self.ds.attrs = {"history": "created for xarray benchmarking"} self.oinds = { "time": randint(0, self.nt, 120), "lon": randint(0, self.nx, 20), "lat": randint(0, self.ny, 10), } self.vinds = { "time": xr.DataArray(randint(0, self.nt, 120), dims="x"), "lon": xr.DataArray(randint(0, self.nx, 120), dims="x"), "lat": slice(3, 20), } root = {f"group_{group}": self.ds for group in range(self.nchildren)} nested_tree1 = { f"group_{group}/subgroup_1": xr.Dataset() for group in range(self.nchildren) } nested_tree2 = { f"group_{group}/subgroup_2": xr.DataArray(np.arange(1, 10)).to_dataset( name="a" ) for group in range(self.nchildren) } nested_tree3 = { f"group_{group}/subgroup_2/sub-subgroup_1": self.ds for group in range(self.nchildren) } dtree = root | nested_tree1 | nested_tree2 | nested_tree3 self.dtree = xr.DataTree.from_dict(dtree) class IOReadDataTreeNetCDF4(IONestedDataTree): def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. 
# Improve times and remove errors. _skip_slow() requires_dask() self.make_datatree() self.format = "NETCDF4" self.filepath = "datatree.nc4.nc" dtree = self.dtree dtree.to_netcdf(filepath=self.filepath) def time_load_datatree_netcdf4(self): xr.open_datatree(self.filepath, engine="netcdf4").load() def time_open_datatree_netcdf4(self): xr.open_datatree(self.filepath, engine="netcdf4") class IOWriteNetCDFDask: timeout = 60 repeat = 1 number = 5 def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() self.write = create_delayed_write() def time_write(self): self.write.compute() class IOWriteNetCDFDaskDistributed: def setup(self): # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. _skip_slow() requires_dask() try: import distributed except ImportError as err: raise NotImplementedError() from err self.client = distributed.Client() self.write = create_delayed_write() def cleanup(self): self.client.shutdown() def time_write(self): self.write.compute() class IOReadSingleFile(IOSingleNetCDF): def setup(self, *args, **kwargs): self.make_ds() self.filepaths = {} for engine in _ENGINES: self.filepaths[engine] = f"test_single_file_with_{engine}.nc" self.ds.to_netcdf(self.filepaths[engine], engine=engine) @parameterized(["engine", "chunks"], (_ENGINES, [None, {}])) def time_read_dataset(self, engine, chunks): xr.open_dataset(self.filepaths[engine], engine=engine, chunks=chunks) class IOReadCustomEngine: def setup(self, *args, **kwargs): """ The custom backend does the bare minimum to be considered a lazy backend. But the data in it is still in memory so slow file reading shouldn't affect the results. """ requires_dask() @dataclass class PerformanceBackendArray(xr.backends.BackendArray): filename_or_obj: str | os.PathLike | None shape: tuple[int, ...] dtype: np.dtype lock: xr.backends.locks.SerializableLock def __getitem__(self, key: tuple): return xr.core.indexing.explicit_indexing_adapter( key, self.shape, xr.core.indexing.IndexingSupport.BASIC, self._raw_indexing_method, ) def _raw_indexing_method(self, key: tuple): raise NotImplementedError @dataclass class PerformanceStore(xr.backends.common.AbstractWritableDataStore): manager: xr.backends.CachingFileManager mode: str | None = None lock: xr.backends.locks.SerializableLock | None = None autoclose: bool = False def __post_init__(self): self.filename = self.manager._args[0] @classmethod def open( cls, filename: str | os.PathLike | None, mode: str = "r", lock: xr.backends.locks.SerializableLock | None = None, autoclose: bool = False, ): locker = lock or xr.backends.locks.SerializableLock() manager = xr.backends.CachingFileManager( xr.backends.DummyFileManager, filename, mode=mode, ) return cls(manager, mode=mode, lock=locker, autoclose=autoclose) def load(self) -> tuple: """ Load a bunch of test data quickly. Normally this method would've opened a file and parsed it. """ n_variables = 2000 # Important to have a shape and dtype for lazy loading. shape = (1000,) dtype = np.dtype(int) variables = { f"long_variable_name_{v}": xr.Variable( data=PerformanceBackendArray( self.filename, shape, dtype, self.lock ), dims=("time",), fastpath=True, ) for v in range(n_variables) } attributes = {} return variables, attributes class PerformanceBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj: str | os.PathLike | None, drop_variables: tuple[str, ...] 
| None = None, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, use_cftime=None, decode_timedelta=None, lock=None, **kwargs, ) -> xr.Dataset: filename_or_obj = xr.backends.common._normalize_path(filename_or_obj) store = PerformanceStore.open(filename_or_obj, lock=lock) store_entrypoint = xr.backends.store.StoreBackendEntrypoint() ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds self.engine = PerformanceBackend @parameterized(["chunks"], ([None, {}, {"time": 10}])) def time_open_dataset(self, chunks): """ Time how fast xr.open_dataset is without the slow data reading part. Test with and without dask. """ xr.open_dataset(None, engine=self.engine, chunks=chunks) xarray-2025.09.0/asv_bench/benchmarks/datatree.py000066400000000000000000000006551505620616400215620ustar00rootroot00000000000000import xarray as xr from xarray.core.datatree import DataTree class Datatree: def setup(self): run1 = DataTree.from_dict({"run1": xr.Dataset({"a": 1})}) self.d_few = {"run1": run1} self.d_many = {f"run{i}": xr.Dataset({"a": 1}) for i in range(100)} def time_from_dict_few(self): DataTree.from_dict(self.d_few) def time_from_dict_many(self): DataTree.from_dict(self.d_many) xarray-2025.09.0/asv_bench/benchmarks/groupby.py000066400000000000000000000144231505620616400214560ustar00rootroot00000000000000# import flox to avoid the cost of first import import cftime import flox.xarray # noqa: F401 import numpy as np import pandas as pd import xarray as xr from . import _skip_slow, parameterized, requires_dask class GroupBy: def setup(self, *args, **kwargs): self.n = 100 self.ds1d = xr.Dataset( { "a": xr.DataArray(np.r_[np.repeat(1, self.n), np.repeat(2, self.n)]), "b": xr.DataArray(np.arange(2 * self.n)), "c": xr.DataArray(np.arange(2 * self.n)), } ) self.ds2d = self.ds1d.expand_dims(z=10).copy() self.ds1d_mean = self.ds1d.groupby("b").mean() self.ds2d_mean = self.ds2d.groupby("b").mean() @parameterized(["ndim"], [(1, 2)]) def time_init(self, ndim): getattr(self, f"ds{ndim}d").groupby("b") @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_small_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.groupby("a"), method)().compute() @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_large_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.groupby("b"), method)().compute() def time_binary_op_1d(self): (self.ds1d.groupby("b") - self.ds1d_mean).compute() def time_binary_op_2d(self): (self.ds2d.groupby("b") - self.ds2d_mean).compute() def peakmem_binary_op_1d(self): (self.ds1d.groupby("b") - self.ds1d_mean).compute() def peakmem_binary_op_2d(self): (self.ds2d.groupby("b") - self.ds2d_mean).compute() class GroupByDask(GroupBy): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.ds1d = self.ds1d.sel(dim_0=slice(None, None, 2)) self.ds1d["c"] = self.ds1d["c"].chunk({"dim_0": 50}) self.ds2d = self.ds2d.sel(dim_0=slice(None, None, 2)) self.ds2d["c"] = self.ds2d["c"].chunk({"dim_0": 50, "z": 5}) self.ds1d_mean = self.ds1d.groupby("b").mean().compute() self.ds2d_mean = 
self.ds2d.groupby("b").mean().compute() # TODO: These don't work now because we are calling `.compute` explicitly. class GroupByPandasDataFrame(GroupBy): """Run groupby tests using pandas DataFrame.""" def setup(self, *args, **kwargs): # Skip testing in CI as it won't ever change in a commit: _skip_slow() super().setup(**kwargs) self.ds1d = self.ds1d.to_dataframe() self.ds1d_mean = self.ds1d.groupby("b").mean() def time_binary_op_2d(self): raise NotImplementedError def peakmem_binary_op_2d(self): raise NotImplementedError class GroupByDaskDataFrame(GroupBy): """Run groupby tests using dask DataFrame.""" def setup(self, *args, **kwargs): # Skip testing in CI as it won't ever change in a commit: _skip_slow() requires_dask() super().setup(**kwargs) self.ds1d = self.ds1d.chunk({"dim_0": 50}).to_dask_dataframe() self.ds1d_mean = self.ds1d.groupby("b").mean().compute() def time_binary_op_2d(self): raise NotImplementedError def peakmem_binary_op_2d(self): raise NotImplementedError class Resample: def setup(self, *args, **kwargs): self.ds1d = xr.Dataset( { "b": ("time", np.arange(365.0 * 24)), }, coords={"time": pd.date_range("2001-01-01", freq="h", periods=365 * 24)}, ) self.ds2d = self.ds1d.expand_dims(z=10) self.ds1d_mean = self.ds1d.resample(time="48h").mean() self.ds2d_mean = self.ds2d.resample(time="48h").mean() @parameterized(["ndim"], [(1, 2)]) def time_init(self, ndim): getattr(self, f"ds{ndim}d").resample(time="D") @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_small_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.resample(time="3ME"), method)().compute() @parameterized( ["method", "ndim", "use_flox"], [("sum", "mean"), (1, 2), (True, False)] ) def time_agg_large_num_groups(self, method, ndim, use_flox): ds = getattr(self, f"ds{ndim}d") with xr.set_options(use_flox=use_flox): getattr(ds.resample(time="48h"), method)().compute() class ResampleDask(Resample): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.ds1d = self.ds1d.chunk({"time": 50}) self.ds2d = self.ds2d.chunk({"time": 50, "z": 4}) class ResampleCFTime(Resample): def setup(self, *args, **kwargs): self.ds1d = xr.Dataset( { "b": ("time", np.arange(365.0 * 24)), }, coords={ "time": xr.date_range( "2001-01-01", freq="h", periods=365 * 24, calendar="noleap" ) }, ) self.ds2d = self.ds1d.expand_dims(z=10) self.ds1d_mean = self.ds1d.resample(time="48h").mean() self.ds2d_mean = self.ds2d.resample(time="48h").mean() @parameterized(["use_cftime", "use_flox"], [[True, False], [True, False]]) class GroupByLongTime: def setup(self, use_cftime, use_flox): arr = np.random.randn(10, 10, 365 * 30) time = xr.date_range("2000", periods=30 * 365, use_cftime=use_cftime) # GH9426 - deep-copying CFTime object arrays is weirdly slow asda = xr.DataArray(time) labeled_time = [] for year, month in zip(asda.dt.year, asda.dt.month, strict=True): labeled_time.append(cftime.datetime(year, month, 1)) self.da = xr.DataArray( arr, dims=("y", "x", "time"), coords={"time": time, "time2": ("time", labeled_time)}, ) def time_setup(self, use_cftime, use_flox): self.da.groupby("time.month") def time_mean(self, use_cftime, use_flox): with xr.set_options(use_flox=use_flox): self.da.groupby("time.year").mean() xarray-2025.09.0/asv_bench/benchmarks/import.py000066400000000000000000000007641505620616400213040ustar00rootroot00000000000000class Import: """Benchmark importing xarray""" def 
timeraw_import_xarray(self): return "import xarray" def timeraw_import_xarray_plot(self): return "import xarray.plot" def timeraw_import_xarray_backends(self): return """ from xarray.backends import list_engines list_engines() """ def timeraw_import_xarray_only(self): # import numpy and pandas in the setup stage return "import xarray", "import numpy, pandas" xarray-2025.09.0/asv_bench/benchmarks/indexing.py000066400000000000000000000150361505620616400215750ustar00rootroot00000000000000import os import numpy as np import pandas as pd import xarray as xr from . import parameterized, randint, randn, requires_dask nx = 2000 ny = 1000 nt = 500 basic_indexes = { "1scalar": {"x": 0}, "1slice": {"x": slice(0, 3)}, "1slice-1scalar": {"x": 0, "y": slice(None, None, 3)}, "2slicess-1scalar": {"x": slice(3, -3, 3), "y": 1, "t": slice(None, -3, 3)}, } basic_assignment_values = { "1scalar": 0, "1slice": xr.DataArray(randn((3, ny), frac_nan=0.1), dims=["x", "y"]), "1slice-1scalar": xr.DataArray(randn(int(ny / 3) + 1, frac_nan=0.1), dims=["y"]), "2slicess-1scalar": xr.DataArray( randn(np.empty(nx)[slice(3, -3, 3)].size, frac_nan=0.1), dims=["x"] ), } outer_indexes = { "1d": {"x": randint(0, nx, 400)}, "2d": {"x": randint(0, nx, 500), "y": randint(0, ny, 400)}, "2d-1scalar": {"x": randint(0, nx, 100), "y": 1, "t": randint(0, nt, 400)}, } outer_assignment_values = { "1d": xr.DataArray(randn((400, ny), frac_nan=0.1), dims=["x", "y"]), "2d": xr.DataArray(randn((500, 400), frac_nan=0.1), dims=["x", "y"]), "2d-1scalar": xr.DataArray(randn(100, frac_nan=0.1), dims=["x"]), } def make_vectorized_indexes(n_index): return { "1-1d": {"x": xr.DataArray(randint(0, nx, n_index), dims="a")}, "2-1d": { "x": xr.DataArray(randint(0, nx, n_index), dims="a"), "y": xr.DataArray(randint(0, ny, n_index), dims="a"), }, "3-2d": { "x": xr.DataArray( randint(0, nx, n_index).reshape(n_index // 100, 100), dims=["a", "b"] ), "y": xr.DataArray( randint(0, ny, n_index).reshape(n_index // 100, 100), dims=["a", "b"] ), "t": xr.DataArray( randint(0, nt, n_index).reshape(n_index // 100, 100), dims=["a", "b"] ), }, } vectorized_indexes = make_vectorized_indexes(400) big_vectorized_indexes = make_vectorized_indexes(400_000) vectorized_assignment_values = { "1-1d": xr.DataArray(randn((400, ny)), dims=["a", "y"], coords={"a": randn(400)}), "2-1d": xr.DataArray(randn(400), dims=["a"], coords={"a": randn(400)}), "3-2d": xr.DataArray( randn((4, 100)), dims=["a", "b"], coords={"a": randn(4), "b": randn(100)} ), } class Base: def setup(self, key): self.ds = xr.Dataset( { "var1": (("x", "y"), randn((nx, ny), frac_nan=0.1)), "var2": (("x", "t"), randn((nx, nt))), "var3": (("t",), randn(nt)), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), }, ) # Benchmark how indexing is slowed down by adding many scalar variable # to the dataset # https://github.com/pydata/xarray/pull/9003 self.ds_large = self.ds.merge({f"extra_var{i}": i for i in range(400)}) class Indexing(Base): @parameterized(["key"], [list(basic_indexes.keys())]) def time_indexing_basic(self, key): self.ds.isel(**basic_indexes[key]).load() @parameterized(["key"], [list(outer_indexes.keys())]) def time_indexing_outer(self, key): self.ds.isel(**outer_indexes[key]).load() @parameterized(["key"], [list(vectorized_indexes.keys())]) def time_indexing_vectorized(self, key): self.ds.isel(**vectorized_indexes[key]).load() @parameterized(["key"], [list(basic_indexes.keys())]) def 
time_indexing_basic_ds_large(self, key): # https://github.com/pydata/xarray/pull/9003 self.ds_large.isel(**basic_indexes[key]).load() class IndexingOnly(Base): @parameterized(["key"], [list(basic_indexes.keys())]) def time_indexing_basic(self, key): self.ds.isel(**basic_indexes[key]) @parameterized(["key"], [list(outer_indexes.keys())]) def time_indexing_outer(self, key): self.ds.isel(**outer_indexes[key]) @parameterized(["key"], [list(big_vectorized_indexes.keys())]) def time_indexing_big_vectorized(self, key): self.ds.isel(**big_vectorized_indexes[key]) class Assignment(Base): @parameterized(["key"], [list(basic_indexes.keys())]) def time_assignment_basic(self, key): ind = basic_indexes[key] val = basic_assignment_values[key] self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val @parameterized(["key"], [list(outer_indexes.keys())]) def time_assignment_outer(self, key): ind = outer_indexes[key] val = outer_assignment_values[key] self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val @parameterized(["key"], [list(vectorized_indexes.keys())]) def time_assignment_vectorized(self, key): ind = vectorized_indexes[key] val = vectorized_assignment_values[key] self.ds["var1"][ind.get("x", slice(None)), ind.get("y", slice(None))] = val class IndexingDask(Indexing): def setup(self, key): requires_dask() super().setup(key) self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50}) class BooleanIndexing: # https://github.com/pydata/xarray/issues/2227 def setup(self): self.ds = xr.Dataset( {"a": ("time", np.arange(10_000_000))}, coords={"time": np.arange(10_000_000)}, ) self.time_filter = self.ds.time > 50_000 def time_indexing(self): self.ds.isel(time=self.time_filter) class HugeAxisSmallSliceIndexing: # https://github.com/pydata/xarray/pull/4560 def setup(self): self.filepath = "test_indexing_huge_axis_small_slice.nc" if not os.path.isfile(self.filepath): xr.Dataset( {"a": ("x", np.arange(10_000_000))}, coords={"x": np.arange(10_000_000)}, ).to_netcdf(self.filepath, format="NETCDF4") self.ds = xr.open_dataset(self.filepath) def time_indexing(self): self.ds.isel(x=slice(100)) def cleanup(self): self.ds.close() class AssignmentOptimized: # https://github.com/pydata/xarray/pull/7382 def setup(self): self.ds = xr.Dataset(coords={"x": np.arange(500_000)}) self.da = xr.DataArray(np.arange(500_000), dims="x") def time_assign_no_reindex(self): # assign with non-indexed DataArray of same dimension size self.ds.assign(foo=self.da) def time_assign_identical_indexes(self): # fastpath index comparison (same index object) self.ds.assign(foo=self.ds.x) xarray-2025.09.0/asv_bench/benchmarks/interp.py000066400000000000000000000041121505620616400212620ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . 
import parameterized, randn, requires_dask nx = 1500 ny = 1000 nt = 500 randn_xy = randn((nx, ny), frac_nan=0.1) randn_xt = randn((nx, nt)) randn_t = randn((nt,)) new_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100) new_x_long = np.linspace(0.3 * nx, 0.7 * nx, 500) new_y_long = np.linspace(0.1, 0.9, 500) class Interpolation: def setup(self, *args, **kwargs): self.ds = xr.Dataset( { "var1": (("x", "y"), randn_xy), "var2": (("x", "t"), randn_xt), "var3": (("t",), randn_t), "var4": (("z",), np.array(["text"])), "var5": (("k",), np.array(["a", "b", "c"])), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), "z": np.array([1]), "k": np.linspace(0, nx, 3), }, ) @parameterized(["method", "is_short"], (["linear", "cubic"], [True, False])) def time_interpolation_numeric_1d(self, method, is_short): new_x = new_x_short if is_short else new_x_long self.ds.interp(x=new_x, method=method).compute() @parameterized(["method"], (["linear", "nearest"])) def time_interpolation_numeric_2d(self, method): self.ds.interp(x=new_x_long, y=new_y_long, method=method).compute() @parameterized(["is_short"], ([True, False])) def time_interpolation_string_scalar(self, is_short): new_z = new_x_short if is_short else new_x_long self.ds.interp(z=new_z).compute() @parameterized(["is_short"], ([True, False])) def time_interpolation_string_1d(self, is_short): new_k = new_x_short if is_short else new_x_long self.ds.interp(k=new_k).compute() class InterpolationDask(Interpolation): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.ds = self.ds.chunk({"t": 50}) xarray-2025.09.0/asv_bench/benchmarks/merge.py000066400000000000000000000046131505620616400210660ustar00rootroot00000000000000import numpy as np import xarray as xr class DatasetAddVariable: param_names = ["existing_elements"] params = [[0, 10, 100, 1000]] def setup(self, existing_elements): self.datasets = {} # Dictionary insertion is fast(er) than xarray.Dataset insertion d = {} for i in range(existing_elements): d[f"var{i}"] = i self.dataset = xr.merge([d]) d = {f"set_2_{i}": i for i in range(existing_elements)} self.dataset2 = xr.merge([d]) def time_variable_insertion(self, existing_elements): dataset = self.dataset dataset["new_var"] = 0 def time_merge_two_datasets(self, existing_elements): xr.merge([self.dataset, self.dataset2]) class DatasetCreation: # The idea here is to time how long it takes to go from numpy # and python data types, to a full dataset # See discussion # https://github.com/pydata/xarray/issues/7224#issuecomment-1292216344 param_names = ["strategy", "count"] params = [ ["dict_of_DataArrays", "dict_of_Variables", "dict_of_Tuples"], [0, 1, 10, 100, 1000], ] def setup(self, strategy, count): data = np.array(["0", "b"], dtype=str) self.dataset_coords = dict(time=np.array([0, 1])) self.dataset_attrs = dict(description="Test data") attrs = dict(units="Celsius") if strategy == "dict_of_DataArrays": def create_data_vars(): return { f"long_variable_name_{i}": xr.DataArray( data=data, dims=("time"), attrs=attrs ) for i in range(count) } elif strategy == "dict_of_Variables": def create_data_vars(): return { f"long_variable_name_{i}": xr.Variable("time", data, attrs=attrs) for i in range(count) } elif strategy == "dict_of_Tuples": def create_data_vars(): return { f"long_variable_name_{i}": ("time", data, attrs) for i in range(count) } self.create_data_vars = create_data_vars def time_dataset_creation(self, strategy, count): 
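        # asv times the whole call: building `count` variables with the
        # strategy selected in setup() and assembling them, together with the
        # shared coords and attrs, into a Dataset.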
data_vars = self.create_data_vars() xr.Dataset( data_vars=data_vars, coords=self.dataset_coords, attrs=self.dataset_attrs ) xarray-2025.09.0/asv_bench/benchmarks/pandas.py000066400000000000000000000033431505620616400212340ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . import parameterized, requires_dask class MultiIndexSeries: def setup(self, dtype, subset): data = np.random.rand(100000).astype(dtype) index = pd.MultiIndex.from_product( [ list("abcdefhijk"), list("abcdefhijk"), pd.date_range(start="2000-01-01", periods=1000, freq="D"), ] ) series = pd.Series(data, index) if subset: series = series[::3] self.series = series @parameterized(["dtype", "subset"], ([int, float], [True, False])) def time_from_series(self, dtype, subset): xr.DataArray.from_series(self.series) class ToDataFrame: def setup(self, *args, **kwargs): xp = kwargs.get("xp", np) nvars = kwargs.get("nvars", 1) random_kws = kwargs.get("random_kws", {}) method = kwargs.get("method", "to_dataframe") dim1 = 10_000 dim2 = 10_000 var = xr.Variable( dims=("dim1", "dim2"), data=xp.random.random((dim1, dim2), **random_kws) ) data_vars = {f"long_name_{v}": (("dim1", "dim2"), var) for v in range(nvars)} ds = xr.Dataset( data_vars, coords={"dim1": np.arange(0, dim1), "dim2": np.arange(0, dim2)} ) self.to_frame = getattr(ds, method) def time_to_dataframe(self): self.to_frame() def peakmem_to_dataframe(self): self.to_frame() class ToDataFrameDask(ToDataFrame): def setup(self, *args, **kwargs): requires_dask() import dask.array as da super().setup( xp=da, random_kws=dict(chunks=5000), method="to_dask_dataframe", nvars=500 ) xarray-2025.09.0/asv_bench/benchmarks/polyfit.py000066400000000000000000000017751505620616400214630ustar00rootroot00000000000000import numpy as np import xarray as xr from . import parameterized, randn, requires_dask NDEGS = (2, 5, 20) NX = (10**2, 10**6) class Polyval: def setup(self, *args, **kwargs): self.xs = {nx: xr.DataArray(randn((nx,)), dims="x", name="x") for nx in NX} self.coeffs = { ndeg: xr.DataArray( randn((ndeg,)), dims="degree", coords={"degree": np.arange(ndeg)} ) for ndeg in NDEGS } @parameterized(["nx", "ndeg"], [NX, NDEGS]) def time_polyval(self, nx, ndeg): x = self.xs[nx] c = self.coeffs[ndeg] xr.polyval(x, c).compute() @parameterized(["nx", "ndeg"], [NX, NDEGS]) def peakmem_polyval(self, nx, ndeg): x = self.xs[nx] c = self.coeffs[ndeg] xr.polyval(x, c).compute() class PolyvalDask(Polyval): def setup(self, *args, **kwargs): requires_dask() super().setup(*args, **kwargs) self.xs = {k: v.chunk({"x": 10000}) for k, v in self.xs.items()} xarray-2025.09.0/asv_bench/benchmarks/reindexing.py000066400000000000000000000025461505620616400221260ustar00rootroot00000000000000import numpy as np import xarray as xr from . 
import requires_dask ntime = 500 nx = 50 ny = 50 class Reindex: def setup(self): data = np.random.default_rng(0).random((ntime, nx, ny)) self.ds = xr.Dataset( {"temperature": (("time", "x", "y"), data)}, coords={"time": np.arange(ntime), "x": np.arange(nx), "y": np.arange(ny)}, ) def time_1d_coarse(self): self.ds.reindex(time=np.arange(0, ntime, 5)).load() def time_1d_fine_all_found(self): self.ds.reindex(time=np.arange(0, ntime, 0.5), method="nearest").load() def time_1d_fine_some_missing(self): self.ds.reindex( time=np.arange(0, ntime, 0.5), method="nearest", tolerance=0.1 ).load() def time_2d_coarse(self): self.ds.reindex(x=np.arange(0, nx, 2), y=np.arange(0, ny, 2)).load() def time_2d_fine_all_found(self): self.ds.reindex( x=np.arange(0, nx, 0.5), y=np.arange(0, ny, 0.5), method="nearest" ).load() def time_2d_fine_some_missing(self): self.ds.reindex( x=np.arange(0, nx, 0.5), y=np.arange(0, ny, 0.5), method="nearest", tolerance=0.1, ).load() class ReindexDask(Reindex): def setup(self): requires_dask() super().setup() self.ds = self.ds.chunk({"time": 100}) xarray-2025.09.0/asv_bench/benchmarks/renaming.py000066400000000000000000000014201505620616400215600ustar00rootroot00000000000000import numpy as np import xarray as xr class SwapDims: param_names = ["size"] params = [[int(1e3), int(1e5), int(1e7)]] def setup(self, size: int) -> None: self.ds = xr.Dataset( {"a": (("x", "t"), np.ones((size, 2)))}, coords={ "x": np.arange(size), "y": np.arange(size), "z": np.arange(size), "x2": ("x", np.arange(size)), "y2": ("y", np.arange(size)), "z2": ("z", np.arange(size)), }, ) def time_swap_dims(self, size: int) -> None: self.ds.swap_dims({"x": "xn", "y": "yn", "z": "zn"}) def time_swap_dims_newindex(self, size: int) -> None: self.ds.swap_dims({"x": "x2", "y": "y2", "z": "z2"}) xarray-2025.09.0/asv_bench/benchmarks/repr.py000066400000000000000000000045211505620616400207350ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr class Repr: def setup(self): a = np.arange(0, 100) data_vars = dict() for i in a: data_vars[f"long_variable_name_{i}"] = xr.DataArray( name=f"long_variable_name_{i}", data=np.arange(0, 20), dims=[f"long_coord_name_{i}_x"], coords={f"long_coord_name_{i}_x": np.arange(0, 20) * 2}, ) self.ds = xr.Dataset(data_vars) self.ds.attrs = {f"attr_{k}": 2 for k in a} def time_repr(self): repr(self.ds) def time_repr_html(self): self.ds._repr_html_() class ReprDataTree: def setup(self): # construct a datatree with 500 nodes number_of_files = 20 number_of_groups = 25 tree_dict = {} for f in range(number_of_files): for g in range(number_of_groups): tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g}) self.dt = xr.DataTree.from_dict(tree_dict) def time_repr(self): repr(self.dt) def time_repr_html(self): self.dt._repr_html_() class ReprMultiIndex: def setup(self): index = pd.MultiIndex.from_product( [range(1000), range(1000)], names=("level_0", "level_1") ) series = pd.Series(range(1000 * 1000), index=index) self.da = xr.DataArray(series) def time_repr(self): repr(self.da) def time_repr_html(self): self.da._repr_html_() class ReprPandasRangeIndex: # display a memory-saving pandas.RangeIndex shouldn't trigger memory # expensive conversion into a numpy array def setup(self): index = xr.indexes.PandasIndex(pd.RangeIndex(1_000_000), "x") self.ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) def time_repr(self): repr(self.ds.x) def time_repr_html(self): self.ds.x._repr_html_() class ReprXarrayRangeIndex: # display an Xarray RangeIndex shouldn't trigger 
memory expensive conversion # of its lazy coordinate into a numpy array def setup(self): index = xr.indexes.RangeIndex.arange(1_000_000, dim="x") self.ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) def time_repr(self): repr(self.ds.x) def time_repr_html(self): self.ds.x._repr_html_() xarray-2025.09.0/asv_bench/benchmarks/rolling.py000066400000000000000000000120151505620616400214300ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . import _skip_slow, parameterized, randn, requires_dask nx = 3000 long_nx = 30000 ny = 200 nt = 1000 window = 20 randn_xy = randn((nx, ny), frac_nan=0.1) randn_xt = randn((nx, nt)) randn_t = randn((nt,)) randn_long = randn((long_nx,), frac_nan=0.1) class Rolling: def setup(self, *args, **kwargs): self.ds = xr.Dataset( { "var1": (("x", "y"), randn_xy), "var2": (("x", "t"), randn_xt), "var3": (("t",), randn_t), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), }, ) self.da_long = xr.DataArray( randn_long, dims="x", coords={"x": np.arange(long_nx) * 0.1} ) @parameterized( ["func", "center", "use_bottleneck"], (["mean", "count"], [True, False], [True, False]), ) def time_rolling(self, func, center, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): getattr(self.ds.rolling(x=window, center=center), func)().load() @parameterized( ["func", "pandas", "use_bottleneck"], (["mean", "count"], [True, False], [True, False]), ) def time_rolling_long(self, func, pandas, use_bottleneck): if pandas: se = self.da_long.to_series() getattr(se.rolling(window=window, min_periods=window), func)() else: with xr.set_options(use_bottleneck=use_bottleneck): getattr( self.da_long.rolling(x=window, min_periods=window), func )().load() @parameterized( ["window_", "min_periods", "use_bottleneck"], ([20, 40], [5, 5], [True, False]) ) def time_rolling_np(self, window_, min_periods, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce( np.nansum ).load() @parameterized( ["center", "stride", "use_bottleneck"], ([True, False], [1, 1], [True, False]) ) def time_rolling_construct(self, center, stride, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): self.ds.rolling(x=window, center=center).construct( "window_dim", stride=stride ).sum(dim="window_dim").load() class RollingDask(Rolling): def setup(self, *args, **kwargs): requires_dask() # TODO: Lazily skipped in CI as it is very demanding and slow. # Improve times and remove errors. 
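        # Dask-backed variant: after the (possible) skip below, the data gets
        # re-chunked so the rolling benchmarks exercise dask's blocked code path.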
_skip_slow() super().setup(**kwargs) self.ds = self.ds.chunk({"x": 100, "y": 50, "t": 50}) self.da_long = self.da_long.chunk({"x": 10000}) class RollingMemory: def setup(self, *args, **kwargs): self.ds = xr.Dataset( { "var1": (("x", "y"), randn_xy), "var2": (("x", "t"), randn_xt), "var3": (("t",), randn_t), }, coords={ "x": np.arange(nx), "y": np.linspace(0, 1, ny), "t": pd.date_range("1970-01-01", periods=nt, freq="D"), "x_coords": ("x", np.linspace(1.1, 2.1, nx)), }, ) class DataArrayRollingMemory(RollingMemory): @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_ndrolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.var1.rolling(x=10, y=4) getattr(roll, func)() @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_1drolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.var3.rolling(t=100) getattr(roll, func)() @parameterized(["stride"], ([None, 5, 50])) def peakmem_1drolling_construct(self, stride): self.ds.var2.rolling(t=100).construct("w", stride=stride) self.ds.var3.rolling(t=100).construct("w", stride=stride) class DatasetRollingMemory(RollingMemory): @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_ndrolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.rolling(x=10, y=4) getattr(roll, func)() @parameterized(["func", "use_bottleneck"], (["sum", "max", "mean"], [True, False])) def peakmem_1drolling_reduce(self, func, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): roll = self.ds.rolling(t=100) getattr(roll, func)() @parameterized(["stride"], ([None, 5, 50])) def peakmem_1drolling_construct(self, stride): self.ds.rolling(t=100).construct("w", stride=stride) xarray-2025.09.0/asv_bench/benchmarks/unstacking.py000066400000000000000000000035071505620616400221360ustar00rootroot00000000000000import numpy as np import pandas as pd import xarray as xr from . 
import requires_dask, requires_sparse class Unstacking: def setup(self): data = np.random.default_rng(0).random((250, 500)) self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...]) self.da_missing = self.da_full[:-1] self.df_missing = self.da_missing.to_pandas() def time_unstack_fast(self): self.da_full.unstack("flat_dim") def time_unstack_slow(self): self.da_missing.unstack("flat_dim") def time_unstack_pandas_slow(self): self.df_missing.unstack() class UnstackingDask(Unstacking): def setup(self, *args, **kwargs): requires_dask() super().setup(**kwargs) self.da_full = self.da_full.chunk({"flat_dim": 25}) class UnstackingSparse(Unstacking): def setup(self, *args, **kwargs): requires_sparse() import sparse data = sparse.random((500, 1000), random_state=0, fill_value=0) self.da_full = xr.DataArray(data, dims=list("ab")).stack(flat_dim=[...]) self.da_missing = self.da_full[:-1] mindex = pd.MultiIndex.from_arrays([np.arange(100), np.arange(100)]) self.da_eye_2d = xr.DataArray(np.ones((100,)), dims="z", coords={"z": mindex}) self.da_eye_3d = xr.DataArray( np.ones((100, 50)), dims=("z", "foo"), coords={"z": mindex, "foo": np.arange(50)}, ) def time_unstack_to_sparse_2d(self): self.da_eye_2d.unstack(sparse=True) def time_unstack_to_sparse_3d(self): self.da_eye_3d.unstack(sparse=True) def peakmem_unstack_to_sparse_2d(self): self.da_eye_2d.unstack(sparse=True) def peakmem_unstack_to_sparse_3d(self): self.da_eye_3d.unstack(sparse=True) def time_unstack_pandas_slow(self): pass xarray-2025.09.0/ci/000077500000000000000000000000001505620616400137375ustar00rootroot00000000000000xarray-2025.09.0/ci/install-upstream-wheels.sh000077500000000000000000000030701505620616400210670ustar00rootroot00000000000000#!/usr/bin/env bash if which micromamba >/dev/null; then conda=micromamba elif which mamba >/dev/null; then conda=mamba else conda=conda fi # temporarily (?) 
remove numbagg and numba $conda remove -y numba numbagg sparse # temporarily remove numexpr $conda remove -y numexpr # forcibly remove packages to avoid artifacts $conda remove -y --force \ numpy \ scipy \ pandas \ distributed \ fsspec \ zarr \ cftime \ packaging \ bottleneck \ flox # pint # to limit the runtime of Upstream CI python -m pip install \ -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple \ --no-deps \ --pre \ --upgrade \ numpy \ scipy \ matplotlib \ pandas \ pyarrow # manually install `pint`, `donfig`, and `crc32c` to pull in new dependencies python -m pip install --upgrade pint donfig crc32c python -m pip install \ --no-deps \ --upgrade \ git+https://github.com/dask/dask \ git+https://github.com/dask/dask-expr \ git+https://github.com/dask/distributed \ git+https://github.com/zarr-developers/zarr-python \ git+https://github.com/Unidata/cftime \ git+https://github.com/pypa/packaging \ git+https://github.com/hgrecco/pint \ git+https://github.com/pydata/bottleneck \ git+https://github.com/intake/filesystem_spec \ git+https://github.com/SciTools/nc-time-axis \ git+https://github.com/xarray-contrib/flox \ git+https://github.com/h5netcdf/h5netcdf \ git+https://github.com/dgasmith/opt_einsum # git+https://github.com/pydata/sparse xarray-2025.09.0/ci/policy.yaml000066400000000000000000000010431505620616400161200ustar00rootroot00000000000000channels: - conda-forge platforms: - noarch - linux-64 policy: # all packages in months packages: python: 30 numpy: 18 default: 12 # overrides for the policy overrides: {} # these packages are completely ignored exclude: - coveralls - pip - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - pytest-hypothesis - hypothesis # these packages don't fail the CI, but will be printed in the report ignored_violations: [] xarray-2025.09.0/ci/release_contributors.py000066400000000000000000000023651505620616400205540ustar00rootroot00000000000000import re import textwrap import git from tlz.itertoolz import last, unique co_author_re = re.compile(r"Co-authored-by: (?P[^<]+?) 
<(?P.+)>") def main(): repo = git.Repo(".") most_recent_release = last(list(repo.tags)) # extract information from commits contributors = {} for commit in repo.iter_commits(f"{most_recent_release.name}.."): matches = co_author_re.findall(commit.message) if matches: contributors.update({email: name for name, email in matches}) contributors[commit.author.email] = commit.author.name # deduplicate and ignore # TODO: extract ignores from .github/release.yml ignored = ["dependabot", "pre-commit-ci"] unique_contributors = unique( contributor for contributor in contributors.values() if contributor.removesuffix("[bot]") not in ignored ) sorted_ = sorted(unique_contributors) if len(sorted_) > 1: names = f"{', '.join(sorted_[:-1])} and {sorted_[-1]}" else: names = "".join(sorted_) statement = textwrap.dedent( f"""\ Thanks to the {len(sorted_)} contributors to this release: {names} """.rstrip() ) print(statement) if __name__ == "__main__": main() xarray-2025.09.0/ci/requirements/000077500000000000000000000000001505620616400164625ustar00rootroot00000000000000xarray-2025.09.0/ci/requirements/all-but-dask.yml000066400000000000000000000011431505620616400214640ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: - aiobotocore - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - coveralls - flox - h5netcdf - h5py - hdf5 - hypothesis - lxml # Optional dep of pydap - matplotlib-base - nc-time-axis - netcdf4 - numba - numbagg - numpy - packaging - pandas - pint>=0.22 - pip - pydap - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn - sparse - toolz - typing_extensions - zarr xarray-2025.09.0/ci/requirements/all-but-numba.yml000066400000000000000000000017611505620616400216520ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: # Pin a "very new numpy" (updated Sept 24, 2024) - numpy>=2.2 - aiobotocore - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - dask-core - distributed - flox - fsspec - h5netcdf - h5py - hdf5 - hypothesis - iris - lxml # Optional dep of pydap - matplotlib-base - nc-time-axis - netcdf4 # numba, sparse, numbagg, numexpr often conflicts with newer versions of numpy. 
# This environment helps us test xarray with the latest versions # of numpy # - numba # - numbagg # - numexpr # - sparse - opt_einsum - packaging - pandas # - pint>=0.22 - pip - pooch - pre-commit - pyarrow # pandas raises a deprecation warning without this, breaking doctests - pydap - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn - toolz - typing_extensions - zarr xarray-2025.09.0/ci/requirements/bare-min-and-scipy.yml000066400000000000000000000004501505620616400225630ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: - python=3.11 - coveralls - pip - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - numpy=1.26 - packaging=24.1 - pandas=2.2 - scipy=1.13 xarray-2025.09.0/ci/requirements/bare-minimum.yml000066400000000000000000000004311505620616400215650ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: - python=3.11 - coveralls - pip - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - numpy=1.26 - packaging=24.1 - pandas=2.2 xarray-2025.09.0/ci/requirements/doc.yml000066400000000000000000000017141505620616400177550ustar00rootroot00000000000000name: xarray-docs channels: # Don't change to pkgs/main, as it causes random timeouts in readthedocs - conda-forge - nodefaults dependencies: - python - bottleneck - cartopy - cfgrib - kerchunk - dask-core - hypothesis - h5netcdf - ipykernel - ipywidgets # silence nbsphinx warning - ipython - iris - jupyter_client - jupyter_sphinx - matplotlib-base - nbsphinx - ncdata - netcdf4 - numba - numpy>=2.2 - packaging - pandas - pooch - pip - pre-commit - pyarrow - pydata-sphinx-theme - pyproj - rich # for Zarr tree() - scipy - seaborn - setuptools - sparse - sphinx-autosummary-accessors - sphinx-copybutton - sphinx-design - sphinx-inline-tabs - sphinx>=6,<8 - sphinxcontrib-mermaid - sphinxcontrib-srclinks - sphinx-remove-toctrees - sphinxext-opengraph - sphinxext-rediraffe - zarr - pip: # relative to this file. Needs to be editable to be accepted. - -e ../.. 
xarray-2025.09.0/ci/requirements/environment-3.14.yml000066400000000000000000000021761505620616400221420ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: - aiobotocore - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - dask-core - distributed - flox - fsspec - h5netcdf - h5py - hdf5 - hypothesis - iris - lxml # Optional dep of pydap - matplotlib-base - nc-time-axis - netcdf4 # - numba # - numbagg - numexpr - numpy - opt_einsum - packaging - pandas - pandas-stubs<=2.2.3.241126 # https://github.com/pydata/xarray/issues/10110 # - pint>=0.22 - pip - pooch - pre-commit - pyarrow # pandas raises a deprecation warning without this, breaking doctests - pydap - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn # - sparse - toolz - types-colorama - types-docutils - types-psutil - types-Pygments - types-python-dateutil - types-pytz - types-PyYAML - types-setuptools - typing_extensions - zarr - pip: - jax # no way to get cpu-only jaxlib from conda if gpu is present - types-defusedxml - types-pexpect xarray-2025.09.0/ci/requirements/environment-benchmark.yml000066400000000000000000000006411505620616400235020ustar00rootroot00000000000000name: xarray-benchmark channels: - conda-forge - nodefaults dependencies: - bottleneck - cftime - dask-core - distributed - flox - netcdf4 - numba - numbagg - numexpr - numpy>=2.2,<2.3 # https://github.com/numba/numba/issues/10105 - opt_einsum - packaging - pandas - pyarrow # pandas raises a deprecation warning without this, breaking doctests - sparse - scipy - toolz - zarr xarray-2025.09.0/ci/requirements/environment-windows-3.14.yml000066400000000000000000000017511505620616400236300ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge dependencies: - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - dask-core - distributed - flox - fsspec - h5netcdf - h5py - hdf5 - hypothesis - iris - lxml # Optional dep of pydap - matplotlib-base - nc-time-axis - netcdf4 # - numba # - numbagg - numpy - packaging - pandas - pandas-stubs<=2.2.3.241126 # https://github.com/pydata/xarray/issues/10110 # - pint>=0.22 - pip - pre-commit - pyarrow # importing dask.dataframe raises an ImportError without this - pydap - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn # - sparse - toolz - types-colorama - types-docutils - types-psutil - types-Pygments - types-python-dateutil - types-pytz - types-PyYAML - types-setuptools - typing_extensions - zarr - pip: - types-defusedxml - types-pexpect xarray-2025.09.0/ci/requirements/environment-windows.yml000066400000000000000000000017431505620616400232460ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge dependencies: - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - dask-core - distributed - flox - fsspec - h5netcdf - h5py - hdf5 - hypothesis - iris - lxml # Optional dep of pydap - matplotlib-base - nc-time-axis - netcdf4 - numba - numbagg - numpy - packaging - pandas - pandas-stubs<=2.2.3.241126 # https://github.com/pydata/xarray/issues/10110 # - pint>=0.22 - pip - pre-commit - pyarrow # importing dask.dataframe raises an ImportError without this - pydap - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn - sparse - toolz - types-colorama - types-docutils - types-psutil - 
types-Pygments - types-python-dateutil - types-pytz - types-PyYAML - types-setuptools - typing_extensions - zarr - pip: - types-defusedxml - types-pexpect xarray-2025.09.0/ci/requirements/environment.yml000066400000000000000000000023051505620616400215510ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: - aiobotocore - array-api-strict<2.4 - boto3 - bottleneck - cartopy - cftime - dask-core - distributed - flox - fsspec - h5netcdf - h5py - hdf5 - hypothesis - iris - lxml # Optional dep of pydap - matplotlib-base - mypy==1.17.1 - nc-time-axis - netcdf4 - numba - numbagg - numexpr - numpy>=2.2 - opt_einsum - packaging - pandas - pandas-stubs<=2.2.3.241126 # https://github.com/pydata/xarray/issues/10110 # - pint>=0.22 - pip - pooch - pre-commit - pyarrow # pandas raises a deprecation warning without this, breaking doctests - pydap - pydap-server - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio - scipy - seaborn - sparse - toolz - types-colorama - types-docutils - types-psutil - types-Pygments - types-python-dateutil - types-pytz - types-PyYAML - types-requests - types-setuptools - types-openpyxl - typing_extensions - zarr - pip: - jax # no way to get cpu-only jaxlib from conda if gpu is present - types-defusedxml - types-pexpect xarray-2025.09.0/ci/requirements/min-all-deps.yml000066400000000000000000000024631505620616400214740ustar00rootroot00000000000000name: xarray-tests channels: - conda-forge - nodefaults dependencies: # MINIMUM VERSIONS POLICY: see doc/user-guide/installing.rst # Run ci/min_deps_check.py to verify that this file respects the policy. # When upgrading python, numpy, or pandas, must also change # doc/user-guide/installing.rst, doc/user-guide/plotting.rst and setup.py. - python=3.11 - array-api-strict=1.1 # dependency for testing the array api compat - boto3=1.34 - bottleneck=1.4 - cartopy=0.23 - cftime=1.6 - coveralls - dask-core=2024.6 - distributed=2024.6 - flox=0.9 - h5netcdf=1.3 # h5py and hdf5 tend to cause conflicts # for e.g. 
hdf5 1.12 conflicts with h5py=3.1 # prioritize bumping other packages instead - h5py=3.11 - hdf5=1.14 - hypothesis - iris=3.9 - lxml=5.1 # Optional dep of pydap - matplotlib-base=3.8 - nc-time-axis=1.4 # netcdf follows a 1.major.minor[.patch] convention # (see https://github.com/Unidata/netcdf4-python/issues/1090) - netcdf4=1.6 - numba=0.60 - numbagg=0.8 - numpy=1.26 - packaging=24.1 - pandas=2.2 - pint=0.24 - pip - pydap=3.5.0 - pytest - pytest-asyncio - pytest-cov - pytest-env - pytest-mypy-plugins - pytest-timeout - pytest-xdist - rasterio=1.3 - scipy=1.13 - seaborn=0.13 - sparse=0.15 - toolz=0.12 - zarr=2.18 xarray-2025.09.0/conftest.py000066400000000000000000000050661505620616400155520ustar00rootroot00000000000000"""Configuration for pytest.""" import pytest def pytest_addoption(parser: pytest.Parser): """Add command-line flags for pytest.""" parser.addoption("--run-flaky", action="store_true", help="runs flaky tests") parser.addoption( "--run-network-tests", action="store_true", help="runs tests requiring a network connection", ) parser.addoption("--run-mypy", action="store_true", help="runs mypy tests") def pytest_runtest_setup(item): # based on https://stackoverflow.com/questions/47559524 if "flaky" in item.keywords and not item.config.getoption("--run-flaky"): pytest.skip("set --run-flaky option to run flaky tests") if "network" in item.keywords and not item.config.getoption("--run-network-tests"): pytest.skip( "set --run-network-tests to run test requiring an internet connection" ) if any("mypy" in m.name for m in item.own_markers) and not item.config.getoption( "--run-mypy" ): pytest.skip("set --run-mypy option to run mypy tests") # See https://docs.pytest.org/en/stable/example/markers.html#automatically-adding-markers-based-on-test-names def pytest_collection_modifyitems(items): for item in items: if "mypy" in item.nodeid: # IMPORTANT: mypy type annotation tests leverage the pytest-mypy-plugins # plugin, and are thus written in test_*.yml files. As such, there are # no explicit test functions on which we can apply a pytest.mark.mypy # decorator. Therefore, we mark them via this name-based, automatic # marking approach, meaning that each test case must contain "mypy" in the # name. item.add_marker(pytest.mark.mypy) @pytest.fixture(autouse=True) def set_zarr_v3_api(monkeypatch): """Set ZARR_V3_EXPERIMENTAL_API environment variable for all tests.""" monkeypatch.setenv("ZARR_V3_EXPERIMENTAL_API", "1") @pytest.fixture(autouse=True) def add_standard_imports(doctest_namespace, tmpdir): import numpy as np import pandas as pd import xarray as xr doctest_namespace["np"] = np doctest_namespace["pd"] = pd doctest_namespace["xr"] = xr # always seed numpy.random to make the examples deterministic np.random.seed(0) # always switch to the temporary directory, so files get written there tmpdir.chdir() # Avoid the dask deprecation warning, can remove if CI passes without this. try: import dask except ImportError: pass else: dask.config.set({"dataframe.query-planning": True}) xarray-2025.09.0/design_notes/000077500000000000000000000000001505620616400160255ustar00rootroot00000000000000xarray-2025.09.0/design_notes/flexible_indexes_notes.md000066400000000000000000000667221505620616400231050ustar00rootroot00000000000000# Proposal: Xarray flexible indexes refactoring Current status: https://github.com/pydata/xarray/projects/1 ## 1. Data Model Indexes are used in Xarray to extract data from Xarray objects using coordinate labels instead of using integer array indices. 
Although the indexes used in an Xarray object can be accessed (or built on-the-fly) via public methods like `to_index()` or properties like `indexes`, those are mainly used internally. The goal of this project is to make those indexes 1st-class citizens of Xarray's data model. As such, indexes should clearly be separated from Xarray coordinates with the following relationships: - Index -> Coordinate: one-to-many - Coordinate -> Index: one-to-zero-or-one An index may be built from one or more coordinates. However, each coordinate must relate to one index at most. Additionally, a coordinate may not be tied to any index. The order in which multiple coordinates relate to an index may matter. For example, Scikit-Learn's [`BallTree`](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html#sklearn.neighbors.BallTree) index with the Haversine metric requires providing latitude and longitude values in that specific order. As another example, the order in which levels are defined in a `pandas.MultiIndex` may affect its lexsort depth (see [MultiIndex sorting](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#sorting-a-multiindex)). Xarray's current data model has the same index-coordinate relationships than stated above, although this assumes that multi-index "virtual" coordinates are counted as coordinates (we can consider them as such, with some constraints). More importantly, This refactoring would turn the current one-to-one relationship between a dimension and an index into a many-to-many relationship, which would overcome some current limitations. For example, we might want to select data along a dimension which has several coordinates: ```python >>> da array([...]) Coordinates: * drainage_area (river_profile) float64 ... * chi (river_profile) float64 ... ``` In this example, `chi` is a transformation of the `drainage_area` variable that is often used in geomorphology. We'd like to select data along the river profile using either `da.sel(drainage_area=...)` or `da.sel(chi=...)` but that's not currently possible. We could rename the `river_profile` dimension to one of the coordinates, then use `sel` with that coordinate, then call `swap_dims` if we want to use `sel` with the other coordinate, but that's not ideal. We could also build a `pandas.MultiIndex` from `drainage_area` and `chi`, but that's not optimal (there's no hierarchical relationship between these two coordinates). Let's take another example: ```python >>> da array([[...], [...]]) Coordinates: * lon (x, y) float64 ... * lat (x, y) float64 ... * x (x) float64 ... * y (y) float64 ... ``` This refactoring would allow creating a geographic index for `lat` and `lon` and two simple indexes for `x` and `y` such that we could select data with either `da.sel(lon=..., lat=...)` or `da.sel(x=..., y=...)`. Refactoring the dimension -> index one-to-one relationship into many-to-many would also introduce some issues that we'll need to address, e.g., ambiguous cases like `da.sel(chi=..., drainage_area=...)` where multiple indexes may potentially return inconsistent positional indexers along a dimension. ## 2. Proposed API changes ### 2.1 Index wrapper classes Every index that is used to select data from Xarray objects should inherit from a base class, e.g., `XarrayIndex`, that provides some common API. `XarrayIndex` subclasses would generally consist of thin wrappers around existing index classes such as `pandas.Index`, `pandas.MultiIndex`, `scipy.spatial.KDTree`, etc. 
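As a rough illustration of what such a thin wrapper could look like (class and method names here only follow the proposal sketched below and are not final), a minimal wrapper around a single-coordinate `pandas.Index` might be:

```python
import numpy as np
import pandas as pd


class PandasIndexWrapper:
    """Illustrative sketch only: wraps a pandas.Index built from one 1-D coordinate."""

    def __init__(self, index: pd.Index, coord_name: str):
        self.index = index
        self.coord_name = coord_name

    @classmethod
    def from_coords(cls, coords):
        # build the index from a single 1-dimensional coordinate
        ((name, values),) = coords.items()
        return cls(pd.Index(np.asarray(values)), name)

    def query(self, labels):
        # map label-based indexers to positional indexers
        return {self.coord_name: self.index.get_indexer(labels[self.coord_name])}


idx = PandasIndexWrapper.from_coords({"x": np.array([10, 20, 30])})
idx.query({"x": [30, 10]})  # -> {'x': array([2, 0])}
```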
There is a variety of features that an xarray index wrapper may or may not support: - 1-dimensional vs. 2-dimensional vs. n-dimensional coordinate (e.g., `pandas.Index` only supports 1-dimensional coordinates while a geographic index could be built from n-dimensional coordinates) - built from a single vs multiple coordinate(s) (e.g., `pandas.Index` is built from one coordinate, `pandas.MultiIndex` may be built from an arbitrary number of coordinates and a geographic index would typically require two latitude/longitude coordinates) - in-memory vs. out-of-core (dask) index data/coordinates (vs. other array backends) - range-based vs. point-wise selection - exact vs. inexact lookups Whether or not a `XarrayIndex` subclass supports each of the features listed above should be either declared explicitly via a common API or left to the implementation. An `XarrayIndex` subclass may encapsulate more than one underlying object used to perform the actual indexing. Such "meta" index would typically support a range of features among those mentioned above and would automatically select the optimal index object for a given indexing operation. An `XarrayIndex` subclass must/should/may implement the following properties/methods: - a `from_coords` class method that creates a new index wrapper instance from one or more Dataset/DataArray coordinates (+ some options) - a `query` method that takes label-based indexers as argument (+ some options) and that returns the corresponding position-based indexers - an `indexes` property to access the underlying index object(s) wrapped by the `XarrayIndex` subclass - a `data` property to access index's data and map it to coordinate data (see [Section 4](#4-indexvariable)) - a `__getitem__()` implementation to propagate the index through DataArray/Dataset indexing operations - `equals()`, `union()` and `intersection()` methods for data alignment (see [Section 2.6](#26-using-indexes-for-data-alignment)) - Xarray coordinate getters (see [Section 2.2.4](#224-implicit-coordinates)) - a method that may return a new index and that will be called when one of the corresponding coordinates is dropped from the Dataset/DataArray (multi-coordinate indexes) - `encode()`/`decode()` methods that would allow storage-agnostic serialization and fast-path reconstruction of the underlying index object(s) (see [Section 2.8](#28-index-encoding)) - one or more "non-standard" methods or properties that could be leveraged in Xarray 3rd-party extensions like Dataset/DataArray accessors (see [Section 2.7](#27-using-indexes-for-other-purposes)) The `XarrayIndex` API has still to be defined in detail. Xarray should provide a minimal set of built-in index wrappers (this could be reduced to the indexes currently supported in Xarray, i.e., `pandas.Index` and `pandas.MultiIndex`). Other index wrappers may be implemented in 3rd-party libraries (recommended). The `XarrayIndex` base class should be part of Xarray's public API. #### 2.1.1 Index discoverability For better discoverability of Xarray-compatible indexes, Xarray could provide some mechanism to register new index wrappers, e.g., something like [xoak's `IndexRegistry`](https://xoak.readthedocs.io/en/latest/_api_generated/xoak.IndexRegistry.html#xoak.IndexRegistry) or [numcodec's registry](https://numcodecs.readthedocs.io/en/stable/registry.html). Additionally (or alternatively), new index wrappers may be registered via entry points as is already the case for storage backends and maybe other backends (plotting) in the future. 
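As a purely hypothetical sketch of what a numcodecs-style registry could look like (none of these names are part of the proposal):

```python
# Hypothetical registry sketch, loosely modelled on numcodecs' registry.
_index_registry: dict[str, type] = {}


def register_index(name: str, cls: type) -> None:
    """Make an XarrayIndex subclass discoverable under a short name."""
    _index_registry[name] = cls


def get_index(name: str) -> type:
    return _index_registry[name]
```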
Registering new indexes either via a custom registry or via entry points should be optional. Xarray should also allow providing `XarrayIndex` subclasses in its API (Dataset/DataArray constructors, `set_index()`, etc.). ### 2.2 Explicit vs. implicit index creation #### 2.2.1 Dataset/DataArray's `indexes` constructor argument The new `indexes` argument of Dataset/DataArray constructors may be used to specify which kind of index to bind to which coordinate(s). It would consist of a mapping where, for each item, the key is one coordinate name (or a sequence of coordinate names) that must be given in `coords` and the value is the type of the index to build from this (these) coordinate(s): ```python >>> da = xr.DataArray( ... data=[[275.2, 273.5], [270.8, 278.6]], ... dims=("x", "y"), ... coords={ ... "lat": (("x", "y"), [[45.6, 46.5], [50.2, 51.6]]), ... "lon": (("x", "y"), [[5.7, 10.5], [6.2, 12.8]]), ... }, ... indexes={("lat", "lon"): SpatialIndex}, ... ) array([[275.2, 273.5], [270.8, 278.6]]) Coordinates: * lat (x, y) float64 45.6 46.5 50.2 51.6 * lon (x, y) float64 5.7 10.5 6.2 12.8 ``` More formally, `indexes` would accept `Mapping[CoordinateNames, IndexSpec]` where: - `CoordinateNames = Union[CoordinateName, Tuple[CoordinateName, ...]]` and `CoordinateName = Hashable` - `IndexSpec = Union[Type[XarrayIndex], Tuple[Type[XarrayIndex], Dict[str, Any]], XarrayIndex]`, so that index instances or index classes + build options could be also passed Currently index objects like `pandas.MultiIndex` can be passed directly to `coords`, which in this specific case results in the implicit creation of virtual coordinates. With the new `indexes` argument this behavior may become even more confusing than it currently is. For the sake of clarity, it would be appropriate to eventually drop support for this specific behavior and treat any given mapping value given in `coords` as an array that can be wrapped into an Xarray variable, i.e., in the case of a multi-index: ```python >>> xr.DataArray([1.0, 2.0], dims="x", coords={"x": midx}) array([1., 2.]) Coordinates: x (x) object ('a', 0) ('b', 1) ``` A possible, more explicit solution to reuse a `pandas.MultiIndex` in a DataArray/Dataset with levels exposed as coordinates is proposed in [Section 2.2.4](#224-implicit-coordinates). #### 2.2.2 Dataset/DataArray's `set_index` method New indexes may also be built from existing sets of coordinates or variables in a Dataset/DataArray using the `.set_index()` method. The [current signature](https://docs.xarray.dev/en/stable/generated/xarray.DataArray.set_index.html#xarray.DataArray.set_index) of `.set_index()` is tailored to `pandas.MultiIndex` and tied to the concept of a dimension-index. It is therefore hardly reusable as-is in the context of flexible indexes proposed here. The new signature may look like one of these: - A. `.set_index(coords: CoordinateNames, index: Union[XarrayIndex, Type[XarrayIndex]], **index_kwargs)`: one index is set at a time, index construction options may be passed as keyword arguments - B. `.set_index(indexes: Mapping[CoordinateNames, Union[Type[XarrayIndex], Tuple[Type[XarrayIndex], Dict[str, Any]]]])`: multiple indexes may be set at a time from a mapping of coordinate or variable name(s) as keys and `XarrayIndex` subclasses (maybe with a dict of build options) as values. If variable names are given as keys of they will be promoted as coordinates Option A looks simple and elegant but significantly departs from the current signature. 
Option B is more consistent with the Dataset/DataArray constructor signature proposed in the previous section and would be easier to adopt in parallel with the current signature that we could still support through some depreciation cycle. The `append` parameter of the current `.set_index()` is specific to `pandas.MultiIndex`. With option B we could still support it, although we might want to either drop it or move it to the index construction options in the future. #### 2.2.3 Implicit default indexes In general explicit index creation should be preferred over implicit index creation. However, there is a majority of cases where basic `pandas.Index` objects could be built and used as indexes for 1-dimensional coordinates. For convenience, Xarray should automatically build such indexes for the coordinates where no index has been explicitly assigned in the Dataset/DataArray constructor or when indexes have been reset / dropped. For which coordinates? - A. only 1D coordinates with a name matching their dimension name - B. all 1D coordinates When to create it? - A. each time when a new Dataset/DataArray is created - B. only when we need it (i.e., when calling `.sel()` or `indexes`) Options A and A are what Xarray currently does and may be the best choice considering that indexes could possibly be invalidated by coordinate mutation. Besides `pandas.Index`, other indexes currently supported in Xarray like `CFTimeIndex` could be built depending on the coordinate data type. #### 2.2.4 Implicit coordinates Like for the indexes, explicit coordinate creation should be preferred over implicit coordinate creation. However, there may be some situations where we would like to keep creating coordinates implicitly for backwards compatibility. For example, it is currently possible to pass a `pandas.MultiIndex` object as a coordinate to the Dataset/DataArray constructor: ```python >>> midx = pd.MultiIndex.from_arrays([["a", "b"], [0, 1]], names=["lvl1", "lvl2"]) >>> da = xr.DataArray([1.0, 2.0], dims="x", coords={"x": midx}) >>> da array([1., 2.]) Coordinates: * x (x) MultiIndex - lvl1 (x) object 'a' 'b' - lvl2 (x) int64 0 1 ``` In that case, virtual coordinates are created for each level of the multi-index. After the index refactoring, these coordinates would become real coordinates bound to the multi-index. In the example above a coordinate is also created for the `x` dimension: ```python >>> da.x array([('a', 0), ('b', 1)], dtype=object) Coordinates: * x (x) MultiIndex - lvl1 (x) object 'a' 'b' - lvl2 (x) int64 0 1 ``` With the new proposed data model, this wouldn't be a requirement anymore: there is no concept of a dimension-index. However, some users might still rely on the `x` coordinate so we could still (temporarily) support it for backwards compatibility. Besides `pandas.MultiIndex`, there may be other situations where we would like to reuse an existing index in a new Dataset/DataArray (e.g., when the index is very expensive to build), and which might require implicit creation of one or more coordinates. The example given here is quite confusing, though: this is not an easily predictable behavior. We could entirely avoid the implicit creation of coordinates, e.g., using a helper function that generates coordinate + index dictionaries that we could then pass directly to the DataArray/Dataset constructor: ```python >>> coords_dict, index_dict = create_coords_from_index( ... midx, dims="x", include_dim_coord=True ... 
) >>> coords_dict {'x': array([('a', 0), ('b', 1)], dtype=object), 'lvl1': array(['a', 'b'], dtype=object), 'lvl2': array([0, 1])} >>> index_dict {('lvl1', 'lvl2'): midx} >>> xr.DataArray([1.0, 2.0], dims="x", coords=coords_dict, indexes=index_dict) array([1., 2.]) Coordinates: x (x) object ('a', 0) ('b', 1) * lvl1 (x) object 'a' 'b' * lvl2 (x) int64 0 1 ``` ### 2.2.5 Immutable indexes Some underlying indexes might be mutable (e.g., a tree-based index structure that allows dynamic addition of data points) while other indexes like `pandas.Index` aren't. To keep things simple, it is probably better to continue considering all indexes in Xarray as immutable (as well as their corresponding coordinates, see [Section 2.4.1](#241-mutable-coordinates)). ### 2.3 Index access #### 2.3.1 Dataset/DataArray's `indexes` property The `indexes` property would allow easy access to all the indexes used in a Dataset/DataArray. It would return a `Dict[CoordinateName, XarrayIndex]` for easy index lookup from coordinate name. #### 2.3.2 Additional Dataset/DataArray properties or methods In some cases the format returned by the `indexes` property would not be the best (e.g, it may return duplicate index instances as values). For convenience, we could add one more property / method to get the indexes in the desired format if needed. ### 2.4 Propagate indexes through operations #### 2.4.1 Mutable coordinates Dataset/DataArray coordinates may be replaced (`__setitem__`) or dropped (`__delitem__`) in-place, which may invalidate some of the indexes. A drastic though probably reasonable solution in this case would be to simply drop all indexes bound to those replaced/dropped coordinates. For the case where a 1D basic coordinate that corresponds to a dimension is added/replaced, we could automatically generate a new index (see [Section 2.2.4](#224-implicit-indexes)). We must also ensure that coordinates having a bound index are immutable, e.g., still wrap them into `IndexVariable` objects (even though the `IndexVariable` class might change substantially after this refactoring). #### 2.4.2 New Dataset/DataArray with updated coordinates Xarray provides a variety of Dataset/DataArray operations affecting the coordinates and where simply dropping the index(es) is not desirable. For example: - multi-coordinate indexes could be reduced to single coordinate indexes - like in `.reset_index()` or `.sel()` applied on a subset of the levels of a `pandas.MultiIndex` and that internally call `MultiIndex.droplevel` and `MultiIndex.get_loc_level`, respectively - indexes may be indexed themselves - like `pandas.Index` implements `__getitem__()` - when indexing their corresponding coordinate(s), e.g., via `.sel()` or `.isel()`, those indexes should be indexed too - this might not be supported by all Xarray indexes, though - some indexes that can't be indexed could still be automatically (re)built in the new Dataset/DataArray - like for example building a new `KDTree` index from the selection of a subset of an initial collection of data points - this is not always desirable, though, as indexes may be expensive to build - a more reasonable option would be to explicitly re-build the index, e.g., using `.set_index()` - Dataset/DataArray operations involving alignment (see [Section 2.6](#26-using-indexes-for-data-alignment)) ### 2.5 Using indexes for data selection One main use of indexes is label-based data selection using the DataArray/Dataset `.sel()` method. 
This refactoring would introduce a number of API changes that could go through some depreciation cycles: - the keys of the mapping given to `indexers` (or the names of `indexer_kwargs`) would not correspond to only dimension names but could be the name of any coordinate that has an index - for a `pandas.MultiIndex`, if no dimension-coordinate is created by default (see [Section 2.2.4](#224-implicit-coordinates)), providing dict-like objects as indexers should be depreciated - there should be the possibility to provide additional options to the indexes that support specific selection features (e.g., Scikit-learn's `BallTree`'s `dualtree` query option to boost performance). - the best API is not trivial here, since `.sel()` may accept indexers passed to several indexes (which should still be supported for convenience and compatibility), and indexes may have similar options with different semantics - we could introduce a new parameter like `index_options: Dict[XarrayIndex, Dict[str, Any]]` to pass options grouped by index - the `method` and `tolerance` parameters are specific to `pandas.Index` and would not be supported by all indexes: probably best is to eventually pass those arguments as `index_options` - the list valid indexer types might be extended in order to support new ways of indexing data, e.g., unordered selection of all points within a given range - alternatively, we could reuse existing indexer types with different semantics depending on the index, e.g., using `slice(min, max, None)` for unordered range selection With the new data model proposed here, an ambiguous situation may occur when indexers are given for several coordinates that share the same dimension but not the same index, e.g., from the example in [Section 1](#1-data-model): ```python da.sel(x=..., y=..., lat=..., lon=...) ``` The easiest solution for this situation would be to raise an error. Alternatively, we could introduce a new parameter to specify how to combine the resulting integer indexers (i.e., union vs intersection), although this could already be achieved by chaining `.sel()` calls or combining `.sel()` with `.merge()` (it may or may not be straightforward). ### 2.6 Using indexes for data alignment Another main use if indexes is data alignment in various operations. Some considerations regarding alignment and flexible indexes: - support for alignment should probably be optional for an `XarrayIndex` subclass. - like `pandas.Index`, the index wrapper classes that support it should implement `.equals()`, `.union()` and/or `.intersection()` - support might be partial if that makes sense (outer, inner, left, right, exact...). - index equality might involve more than just the labels: for example a spatial index might be used to check if the coordinate system (CRS) is identical for two sets of coordinates - some indexes might implement inexact alignment, like in [#4489](https://github.com/pydata/xarray/pull/4489) or a `KDTree` index that selects nearest-neighbors within a given tolerance - alignment may be "multi-dimensional", i.e., the `KDTree` example above vs. 
dimensions aligned independently of each other - we need to decide what to do when one dimension has more than one index that supports alignment - we should probably raise unless the user explicitly specify which index to use for the alignment - we need to decide what to do when one dimension has one or more index(es) but none support alignment - either we raise or we fail back (silently) to alignment based on dimension size - for inexact alignment, the tolerance threshold might be given when building the index and/or when performing the alignment - are there cases where we want a specific index to perform alignment and another index to perform selection? - it would be tricky to support that unless we allow multiple indexes per coordinate - alternatively, underlying indexes could be picked internally in a "meta" index for one operation or another, although the risk is to eventually have to deal with an explosion of index wrapper classes with different meta indexes for each combination that we'd like to use. ### 2.7 Using indexes for other purposes Xarray also provides a number of Dataset/DataArray methods where indexes are used in various ways, e.g., - `resample` (`CFTimeIndex` and a `DatetimeIntervalIndex`) - `DatetimeAccessor` & `TimedeltaAccessor` properties (`CFTimeIndex` and a `DatetimeIntervalIndex`) - `interp` & `interpolate_na`, - with `IntervalIndex`, these become regridding operations. Should we support hooks for these operations? - `differentiate`, `integrate`, `polyfit` - raise an error if not a "simple" 1D index? - `pad` - `coarsen` has to make choices about output index labels. - `sortby` - `stack`/`unstack` - plotting - `plot.pcolormesh` "infers" interval breaks along axes, which are really inferred `bounds` for the appropriate indexes. - `plot.step` again uses `bounds`. In fact, we may even want `step` to be the default 1D plotting function if the axis has `bounds` attached. It would be reasonable to first restrict those methods to the indexes that are currently available in Xarray, and maybe extend the `XarrayIndex` API later upon request when the opportunity arises. Conversely, nothing should prevent implementing "non-standard" API in 3rd-party `XarrayIndex` subclasses that could be used in DataArray/Dataset extensions (accessors). For example, we might want to reuse a `KDTree` index to compute k-nearest neighbors (returning a DataArray/Dataset with a new dimension) and/or the distances to the nearest neighbors (returning a DataArray/Dataset with a new data variable). ### 2.8 Index encoding Indexes don't need to be directly serializable since we could (re)build them from their corresponding coordinate(s). However, it would be useful if some indexes could be encoded/decoded to/from a set of arrays that would allow optimized reconstruction and/or storage, e.g., - `pandas.MultiIndex` -> `index.levels` and `index.codes` - Scikit-learn's `KDTree` and `BallTree` that use an array-based representation of an immutable tree structure ## 3. 
Index representation in DataArray/Dataset's `repr` Since indexes would become 1st class citizen of Xarray's data model, they deserve their own section in Dataset/DataArray `repr` that could look like: ``` array([[5.4, 7.8], [6.2, 4.7]]) Coordinates: * lon (x, y) float64 10.2 15.2 12.6 17.6 * lat (x, y) float64 40.2 45.6 42.2 47.6 * x (x) float64 200.0 400.0 * y (y) float64 800.0 1e+03 Indexes: lat, lon x y ``` To keep the `repr` compact, we could: - consolidate entries that map to the same index object, and have an short inline repr for `XarrayIndex` object - collapse the index section by default in the HTML `repr` - maybe omit all trivial indexes for 1D coordinates that match the dimension name ## 4. `IndexVariable` `IndexVariable` is currently used to wrap a `pandas.Index` as a variable, which would not be relevant after this refactoring since it is aimed at decoupling indexes and variables. We'll probably need to move elsewhere some of the features implemented in `IndexVariable` to: - ensure that all coordinates with an index are immutable (see [Section 2.4.1](#241-mutable-coordinates)) - do not set values directly, do not (re)chunk (even though it may be already chunked), do not load, do not convert to sparse/dense, etc. - directly reuse index's data when that's possible - in the case of a `pandas.Index`, it makes little sense to have duplicate data (e.g., as a NumPy array) for its corresponding coordinate - convert a variable into a `pandas.Index` using `.to_index()` (for backwards compatibility). Other `IndexVariable` API like `level_names` and `get_level_variable()` would not useful anymore: it is specific to how we currently deal with `pandas.MultiIndex` and virtual "level" coordinates in Xarray. ## 5. Chunked coordinates and/or indexers We could take opportunity of this refactoring to better leverage chunked coordinates (and/or chunked indexers for data selection). There's two ways to enable it: A. support for chunked coordinates is left to the index B. support for chunked coordinates is index agnostic and is implemented in Xarray As an example for B, [xoak](https://github.com/ESM-VFC/xoak) supports building an index for each chunk, which is coupled with a two-step data selection process (cross-index queries + brute force "reduction" look-up). There is an example [here](https://xoak.readthedocs.io/en/latest/examples/dask_support.html). This may be tedious to generalize this to other kinds of operations, though. Xoak's Dask support is rather experimental, not super stable (it's quite hard to control index replication and data transfer between Dask workers with the default settings), and depends on whether indexes are thread-safe and/or serializable. Option A may be more reasonable for now. ## 6. Coordinate duck arrays Another opportunity of this refactoring is support for duck arrays as index coordinates. Decoupling coordinates and indexes would _de-facto_ enable it. However, support for duck arrays in index-based operations such as data selection or alignment would probably require some protocol extension, e.g., ```python class MyDuckArray: ... def _sel_(self, indexer): """Prepare the label-based indexer to conform to this coordinate array.""" ... return new_indexer ... ``` For example, a `pint` array would implement `_sel_` to perform indexer unit conversion or raise, warn, or just pass the indexer through if it has no units. 
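As an illustrative sketch only (assuming the hypothetical `_sel_` hook above, which is part of this proposal and not an existing pint or xarray API), a pint-backed coordinate array might convert indexer units like this:

```python
import pint

ureg = pint.UnitRegistry()


class PintCoordinateArray:
    """Illustrative sketch: a unit-aware duck array implementing the proposed ``_sel_`` hook."""

    def __init__(self, values, units):
        self.data = ureg.Quantity(values, units)

    def _sel_(self, indexer):
        if isinstance(indexer, ureg.Quantity):
            # convert the label-based indexer to the coordinate's own units
            return indexer.to(self.data.units).magnitude
        # indexer carries no units: pass it through unchanged
        return indexer


coord = PintCoordinateArray([0.0, 1000.0, 2000.0], "m")
coord._sel_(ureg.Quantity(1.0, "km"))  # -> 1000.0
```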
xarray-2025.09.0/design_notes/grouper_objects.md000066400000000000000000000302001505620616400215360ustar00rootroot00000000000000# Grouper Objects **Author**: Deepak Cherian **Created**: Nov 21, 2023 ## Abstract I propose the addition of Grouper objects to Xarray's public API so that ```python Dataset.groupby(x=BinGrouper(bins=np.arange(10, 2))) ``` is identical to today's syntax: ```python Dataset.groupby_bins("x", bins=np.arange(10, 2)) ``` ## Motivation and scope Xarray's GroupBy API implements the split-apply-combine pattern (Wickham, 2011)[^1], which applies to a very large number of problems: histogramming, compositing, climatological averaging, resampling to a different time frequency, etc. The pattern abstracts the following pseudocode: ```python results = [] for element in unique_labels: subset = ds.sel(x=(ds.x == element)) # split # subset = ds.where(ds.x == element, drop=True) # alternative result = subset.mean() # apply results.append(result) xr.concat(results) # combine ``` to ```python ds.groupby("x").mean() # splits, applies, and combines ``` Efficient vectorized implementations of this pattern are implemented in numpy's [`ufunc.at`](https://numpy.org/doc/stable/reference/generated/numpy.ufunc.at.html), [`ufunc.reduceat`](https://numpy.org/doc/stable/reference/generated/numpy.ufunc.reduceat.html), [`numbagg.grouped`](https://github.com/numbagg/numbagg/blob/main/numbagg/grouped.py), [`numpy_groupies`](https://github.com/ml31415/numpy-groupies), and probably more. These vectorized implementations _all_ require, as input, an array of integer codes or labels that identify unique elements in the array being grouped over (`'x'` in the example above). ```python import numpy as np # array to reduce a = np.array([1, 1, 1, 1, 2]) # initial value for result out = np.zeros((3,), dtype=int) # integer codes labels = np.array([0, 0, 1, 2, 1]) # groupby-reduction np.add.at(out, labels, a) out # array([2, 3, 1]) ``` One can 'factorize' or construct such an array of integer codes using `pandas.factorize` or `numpy.unique(..., return_inverse=True)` for categorical arrays; `pandas.cut`, `pandas.qcut`, or `np.digitize` for discretizing continuous variables. In practice, since `GroupBy` objects exist, much of complexity in applying the groupby paradigm stems from appropriately factorizing or generating labels for the operation. Consider these two examples: 1. [Bins that vary in a dimension](https://flox.readthedocs.io/en/latest/user-stories/nD-bins.html) 2. [Overlapping groups](https://flox.readthedocs.io/en/latest/user-stories/overlaps.html) 3. [Rolling resampling](https://github.com/pydata/xarray/discussions/8361) Anecdotally, less experienced users commonly resort to the for-loopy implementation illustrated by the pseudocode above when the analysis at hand is not easily expressed using the API presented by Xarray's GroupBy object. Xarray's GroupBy API today abstracts away the split, apply, and combine stages but not the "factorize" stage. Grouper objects will close the gap. ## Usage and impact Grouper objects 1. Will abstract useful factorization algorithms, and 2. Present a natural way to extend GroupBy to grouping by multiple variables: `ds.groupby(x=BinGrouper(...), t=Resampler(freq="M", ...)).mean()`. In addition, Grouper objects provide a nice interface to add often-requested grouping functionality 1. A new `SpaceResampler` would allow specifying resampling spatial dimensions. ([issue](https://github.com/pydata/xarray/issues/4008)) 2. 
`RollingTimeResampler` would allow rolling-like functionality that understands timestamps ([issue](https://github.com/pydata/xarray/issues/3216)) 3. A `QuantileBinGrouper` to abstract away `pd.cut` ([issue](https://github.com/pydata/xarray/discussions/7110)) 4. A `SeasonGrouper` and `SeasonResampler` would abstract away common annoyances with such calculations today 1. Support seasons that span a year-end. 2. Only include seasons with complete data coverage. 3. Allow grouping over seasons of unequal length 4. See [this xcdat discussion](https://github.com/xCDAT/xcdat/issues/416) for a `SeasonGrouper` like functionality: 5. Return results with seasons in a sensible order 5. Weighted grouping ([issue](https://github.com/pydata/xarray/issues/3937)) 1. Once `IntervalIndex` like objects are supported, `Resampler` groupers can account for interval lengths when resampling. ## Backward Compatibility Xarray's existing grouping functionality will be exposed using two new Groupers: 1. `UniqueGrouper` which uses `pandas.factorize` 2. `BinGrouper` which uses `pandas.cut` 3. `TimeResampler` which mimics pandas' `.resample` Grouping by single variables will be unaffected so that `ds.groupby('x')` will be identical to `ds.groupby(x=UniqueGrouper())`. Similarly, `ds.groupby_bins('x', bins=np.arange(10, 2))` will be unchanged and identical to `ds.groupby(x=BinGrouper(bins=np.arange(10, 2)))`. ## Detailed description All Grouper objects will subclass from a Grouper object ```python import abc class Grouper(abc.ABC): @abc.abstractmethod def factorize(self, by: DataArray): raise NotImplementedError class CustomGrouper(Grouper): def factorize(self, by: DataArray): ... return codes, group_indices, unique_coord, full_index def weights(self, by: DataArray) -> DataArray: ... return weights ``` ### The `factorize` method Today, the `factorize` method takes as input the group variable and returns 4 variables (I propose to clean this up below): 1. `codes`: An array of same shape as the `group` with int dtype. NaNs in `group` are coded by `-1` and ignored later. 2. `group_indices` is a list of index location of `group` elements that belong to a single group. 3. `unique_coord` is (usually) a `pandas.Index` object of all unique `group` members present in `group`. 4. `full_index` is a `pandas.Index` of all `group` members. This is different from `unique_coord` for binning and resampling, where not all groups in the output may be represented in the input `group`. For grouping by a categorical variable e.g. `['a', 'b', 'a', 'c']`, `full_index` and `unique_coord` are identical. There is some redundancy here since `unique_coord` is always equal to or a subset of `full_index`. We can clean this up (see Implementation below). ### The `weights` method (?) The proposed `weights` method is optional and unimplemented today. Groupers with `weights` will allow composing `weighted` and `groupby` ([issue](https://github.com/pydata/xarray/issues/3937)). The `weights` method should return an appropriate array of weights such that the following property is satisfied ```python gb_sum = ds.groupby(by).sum() weights = CustomGrouper.weights(by) weighted_sum = xr.dot(ds, weights) assert_identical(gb_sum, weighted_sum) ``` For example, the boolean weights for `group=np.array(['a', 'b', 'c', 'a', 'a'])` should be ``` [[1, 0, 0, 1, 1], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]] ``` This is the boolean "summarization matrix" referred to in the classic Iverson (1980, Section 4.3)[^2] and "nub sieve" in [various APLs](https://aplwiki.com/wiki/Nub_Sieve). 
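As a rough sketch of how such boolean weights could be built from the group labels with plain NumPy broadcasting (variable names are illustrative):

```python
import numpy as np

group = np.array(["a", "b", "c", "a", "a"])
unique_labels = np.array(["a", "b", "c"])

# one row per unique group, one column per element of ``group``;
# entry (i, j) is 1 where group[j] belongs to unique_labels[i]
weights = (unique_labels[:, None] == group[None, :]).astype(int)
# array([[1, 0, 0, 1, 1],
#        [0, 1, 0, 0, 0],
#        [0, 0, 1, 0, 0]])
```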
> [!NOTE] > We can always construct `weights` automatically using `group_indices` from `factorize`, so this is not a required method. For a rolling resampling, windowed weights are possible ``` [[0.5, 1, 0.5, 0, 0], [0, 0.25, 1, 1, 0], [0, 0, 0, 1, 1]] ``` ### The `preferred_chunks` method (?) Rechunking support is another optional extension point. In `flox` I experimented some with automatically rechunking to make a groupby more parallel-friendly ([example 1](https://flox.readthedocs.io/en/latest/generated/flox.rechunk_for_blockwise.html), [example 2](https://flox.readthedocs.io/en/latest/generated/flox.rechunk_for_cohorts.html)). A great example is for resampling-style groupby reductions, for which `codes` might look like ``` 0001|11122|3333 ``` where `|` represents chunk boundaries. A simple rechunking to ``` 000|111122|3333 ``` would make this resampling reduction an embarrassingly parallel blockwise problem. Similarly consider monthly-mean climatologies for which the month numbers might be ``` 1 2 3 4 5 | 6 7 8 9 10 | 11 12 1 2 3 | 4 5 6 7 8 | 9 10 11 12 | ``` A slight rechunking to ``` 1 2 3 4 | 5 6 7 8 | 9 10 11 12 | 1 2 3 4 | 5 6 7 8 | 9 10 11 12 | ``` allows us to reduce `1, 2, 3, 4` separately from `5,6,7,8` and `9, 10, 11, 12` while still being parallel friendly (see the [flox documentation](https://flox.readthedocs.io/en/latest/implementation.html#method-cohorts) for more). We could attempt to detect these patterns, or we could just have the Grouper take as input `chunks` and return a tuple of "nice" chunk sizes to rechunk to. ```python def preferred_chunks(self, chunks: ChunksTuple) -> ChunksTuple: pass ``` For monthly means, since the period of repetition of labels is 12, the Grouper might choose possible chunk sizes of `((2,),(3,),(4,),(6,))`. For resampling, the Grouper could choose to resample to a multiple or an even fraction of the resampling frequency. ## Related work Pandas has [Grouper objects](https://pandas.pydata.org/docs/reference/api/pandas.Grouper.html#pandas-grouper) that represent the GroupBy instruction. However, these objects do not appear to be extension points, unlike the Grouper objects proposed here. Instead, Pandas' `ExtensionArray` has a [`factorize`](https://pandas.pydata.org/docs/reference/api/pandas.api.extensions.ExtensionArray.factorize.html) method. Composing rolling with time resampling is a common workload: 1. Polars has [`group_by_dynamic`](https://pola-rs.github.io/polars/py-polars/html/reference/dataframe/api/polars.DataFrame.group_by_dynamic.html) which appears to be like the proposed `RollingResampler`. 2. scikit-downscale provides [`PaddedDOYGrouper`](https://github.com/pangeo-data/scikit-downscale/blob/e16944a32b44f774980fa953ea18e29a628c71b8/skdownscale/pointwise_models/groupers.py#L19) ## Implementation Proposal 1. Get rid of `squeeze` [issue](https://github.com/pydata/xarray/issues/2157): [PR](https://github.com/pydata/xarray/pull/8506) 2. Merge existing two class implementation to a single Grouper class 1. This design was implemented in [this PR](https://github.com/pydata/xarray/pull/7206) to account for some annoying data dependencies. 2. See [PR](https://github.com/pydata/xarray/pull/8509) 3. Clean up what's returned by `factorize` methods. 1. A solution here might be to have `group_indices: Mapping[int, Sequence[int]]` be a mapping from group index in `full_index` to a sequence of integers. 2. Return a `namedtuple` or `dataclass` from existing Grouper factorize methods to facilitate API changes in the future. 4. 
Figure out what to pass to `factorize` 1. Xarray eagerly reshapes nD variables to 1D. This is an implementation detail we need not expose. 2. When grouping by an unindexed variable Xarray passes a `_DummyGroup` object. This seems like something we don't want in the public interface. We could special case "internal" Groupers to preserve the optimizations in `UniqueGrouper`. 5. Grouper objects will exposed under the `xr.groupers` Namespace. At first these will include `UniqueGrouper`, `BinGrouper`, and `TimeResampler`. ## Alternatives One major design choice made here was to adopt the syntax `ds.groupby(x=BinGrouper(...))` instead of `ds.groupby(BinGrouper('x', ...))`. This allows reuse of Grouper objects, example ```python grouper = BinGrouper(...) ds.groupby(x=grouper, y=grouper) ``` but requires that all variables being grouped by (`x` and `y` above) are present in Dataset `ds`. This does not seem like a bad requirement. Importantly `Grouper` instances will be copied internally so that they can safely cache state that might be shared between `factorize` and `weights`. Today, it is possible to `ds.groupby(DataArray, ...)`. This syntax will still be supported. ## Discussion This proposal builds on these discussions: 1. https://github.com/xarray-contrib/flox/issues/191#issuecomment-1328898836 2. https://github.com/pydata/xarray/issues/6610 ## Copyright This document has been placed in the public domain. ## References and footnotes [^1]: Wickham, H. (2011). The split-apply-combine strategy for data analysis. https://vita.had.co.nz/papers/plyr.html [^2]: Iverson, K.E. (1980). Notation as a tool of thought. Commun. ACM 23, 8 (Aug. 1980), 444–465. https://doi.org/10.1145/358896.358899 xarray-2025.09.0/design_notes/named_array_design_doc.md000066400000000000000000000667611505620616400230270ustar00rootroot00000000000000# named-array Design Document ## Abstract Despite the wealth of scientific libraries in the Python ecosystem, there is a gap for a lightweight, efficient array structure with named dimensions that can provide convenient broadcasting and indexing. Existing solutions like Xarray's Variable, [Pytorch Named Tensor](https://github.com/pytorch/pytorch/issues/60832), [Levanter](https://crfm.stanford.edu/2023/06/16/levanter-1_0-release.html), and [Larray](https://larray.readthedocs.io/en/stable/tutorial/getting_started.html) have their own strengths and weaknesses. Xarray's Variable is an efficient data structure, but it depends on the relatively heavy-weight library Pandas, which limits its use in other projects. Pytorch Named Tensor offers named dimensions, but it lacks support for many operations, making it less user-friendly. Levanter is a powerful tool with a named tensor module (Haliax) that makes deep learning code easier to read, understand, and write, but it is not as lightweight or generic as desired. Larry offers labeled N-dimensional arrays, but it may not provide the level of seamless interoperability with other scientific Python libraries that some users need. named-array aims to solve these issues by exposing the core functionality of Xarray's Variable class as a standalone package. ## Motivation and Scope The Python ecosystem boasts a wealth of scientific libraries that enable efficient computations on large, multi-dimensional arrays. Libraries like PyTorch, Xarray, and NumPy have revolutionized scientific computing by offering robust data structures for array manipulations. 
Despite this wealth of tools, a gap exists in the Python landscape for a lightweight, efficient array structure with named dimensions that can provide convenient broadcasting and indexing. Xarray internally maintains a data structure that meets this need, referred to as [`xarray.Variable`](https://docs.xarray.dev/en/latest/generated/xarray.Variable.html) . However, Xarray's dependency on Pandas, a relatively heavy-weight library, restricts other projects from leveraging this efficient data structure (, , ). We propose the creation of a standalone Python package, "named-array". This package is envisioned to be a version of the `xarray.Variable` data structure, cleanly separated from the heavier dependencies of Xarray. named-array will provide a lightweight, user-friendly array-like data structure with named dimensions, facilitating convenient indexing and broadcasting. The package will use existing scientific Python community standards such as established array protocols and the new [Python array API standard](https://data-apis.org/array-api/latest), allowing users to wrap multiple duck-array objects, including, but not limited to, NumPy, Dask, Sparse, Pint, CuPy, and Pytorch. The development of named-array is projected to meet a key community need and expected to broaden Xarray's user base. By making the core `xarray.Variable` more accessible, we anticipate an increase in contributors and a reduction in the developer burden on current Xarray maintainers. ### Goals 1. **Simple and minimal**: named-array will expose Xarray's [Variable class](https://docs.xarray.dev/en/stable/internals/variable-objects.html) as a standalone object (`NamedArray`) with named axes (dimensions) and arbitrary metadata (attributes) but without coordinate labels. This will make it a lightweight, efficient array data structure that allows convenient broadcasting and indexing. 2. **Interoperability**: named-array will follow established scientific Python community standards and in doing so, will allow it to wrap multiple duck-array objects, including but not limited to, NumPy, Dask, Sparse, Pint, CuPy, and Pytorch. 3. **Community Engagement**: By making the core `xarray.Variable` more accessible, we open the door to increased adoption of this fundamental data structure. As such, we hope to see an increase in contributors and reduction in the developer burden on current Xarray maintainers. ### Non-Goals 1. **Extensive Data Analysis**: named-array will not provide extensive data analysis features like statistical functions, data cleaning, or visualization. Its primary focus is on providing a data structure that allows users to use dimension names for descriptive array manipulations. 2. **Support for I/O**: named-array will not bundle file reading functions. Instead users will be expected to handle I/O and then wrap those arrays with the new named-array data structure. ## Backward Compatibility The creation of named-array is intended to separate the `xarray.Variable` from Xarray into a standalone package. This allows it to be used independently, without the need for Xarray's dependencies, like Pandas. This separation has implications for backward compatibility. Since the new named-array is envisioned to contain the core features of Xarray's variable, existing code using Variable from Xarray should be able to switch to named-array with minimal changes. 
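As a rough illustration of what "minimal changes" could mean, the sketch below constructs a `Variable` with today's xarray and shows a hypothetical standalone equivalent. The `NamedArray` name comes from this proposal, and the constructor signature is assumed from the minimal `dims` / `data` / `attrs` interface described in the Plan section below; the final API may differ:

```python
import numpy as np
import xarray as xr

# Existing code built on xarray.Variable:
v = xr.Variable(dims=("time", "space"), data=np.zeros((4, 3)), attrs={"units": "m"})
print(v.dims, v.shape, v.attrs["units"])  # ('time', 'space') (4, 3) m

# Hypothetical named-array equivalent (sketch only; import path and constructor
# are assumptions based on this document, not a released API):
# from named_array import NamedArray
# v = NamedArray(dims=("time", "space"), data=np.zeros((4, 3)), attrs={"units": "m"})
```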
However, there are several potential issues related to backward compatibility: - **API Changes**: as the Variable is decoupled from Xarray and moved into named-array, some changes to the API may be necessary. These changes might include differences in function signature, etc. These changes could break existing code that relies on the current API and associated utility functions (e.g. `as_variable()`). The `xarray.Variable` object will subclass `NamedArray`, and provide the existing interface for compatibility. ## Detailed Description named-array aims to provide a lightweight, efficient array structure with named dimensions, or axes, that enables convenient broadcasting and indexing. The primary component of named-array is a standalone version of the xarray.Variable data structure, which was previously a part of the Xarray library. The xarray.Variable data structure in named-array will maintain the core features of its counterpart in Xarray, including: - **Named Axes (Dimensions)**: Each axis of the array can be given a name, providing a descriptive and intuitive way to reference the dimensions of the array. - **Arbitrary Metadata (Attributes)**: named-array will support the attachment of arbitrary metadata to arrays as a dict, providing a mechanism to store additional information about the data that the array represents. - **Convenient Broadcasting and Indexing**: With named dimensions, broadcasting and indexing operations become more intuitive and less error-prone. The named-array package is designed to be interoperable with other scientific Python libraries. It will follow established scientific Python community standards and use standard array protocols, as well as the new data-apis standard. This allows named-array to wrap multiple duck-array objects, including, but not limited to, NumPy, Dask, Sparse, Pint, CuPy, and Pytorch. ## Implementation - **Decoupling**: making `variable.py` agnostic to Xarray internals by decoupling it from the rest of the library. This will make the code more modular and easier to maintain. However, this will also make the code more complex, as we will need to define a clear interface for how the functionality in `variable.py` interacts with the rest of the library, particularly the ExplicitlyIndexed subclasses used to enable lazy indexing of data on disk. - **Move Xarray's internal lazy indexing classes to follow standard Array Protocols**: moving the lazy indexing classes like `ExplicitlyIndexed` to use standard array protocols will be a key step in decoupling. It will also potentially improve interoperability with other libraries that use these protocols, and prepare these classes [for eventual movement out](https://github.com/pydata/xarray/issues/5081) of the Xarray code base. However, this will also require significant changes to the code, and we will need to ensure that all existing functionality is preserved. - Use [https://data-apis.org/array-api-compat/](https://data-apis.org/array-api-compat/) to handle compatibility issues? - **Leave lazy indexing classes in Xarray for now** - **Preserve support for Dask collection protocols**: named-array will preserve existing support for the dask collections protocol namely the **dask\_\*\*\*** methods - **Preserve support for ChunkManagerEntrypoint?** Opening variables backed by dask vs cubed arrays currently is [handled within Variable.chunk](https://github.com/pydata/xarray/blob/92c8b33eb464b09d6f8277265b16cae039ab57ee/xarray/core/variable.py#L1272C15-L1272C15). 
If we are preserving dask support it would be nice to preserve general chunked array type support, but this currently requires an entrypoint. ### Plan 1. Create a new baseclass for `xarray.Variable` to its own module e.g. `xarray.core.base_variable` 2. Remove all imports of internal Xarray classes and utils from `base_variable.py`. `base_variable.Variable` should not depend on anything in xarray.core - Will require moving the lazy indexing classes (subclasses of ExplicitlyIndexed) to be standards compliant containers.` - an array-api compliant container that provides **array_namespace**` - Support `.oindex` and `.vindex` for explicit indexing - Potentially implement this by introducing a new compliant wrapper object? - Delete the `NON_NUMPY_SUPPORTED_ARRAY_TYPES` variable which special-cases ExplicitlyIndexed and `pd.Index.` - `ExplicitlyIndexed` class and subclasses should provide `.oindex` and `.vindex` for indexing by `Variable.__getitem__.`: `oindex` and `vindex` were proposed in [NEP21](https://numpy.org/neps/nep-0021-advanced-indexing.html), but have not been implemented yet - Delete the ExplicitIndexer objects (`BasicIndexer`, `VectorizedIndexer`, `OuterIndexer`) - Remove explicit support for `pd.Index`. When provided with a `pd.Index` object, Variable will coerce to an array using `np.array(pd.Index)`. For Xarray's purposes, Xarray can use `as_variable` to explicitly wrap these in PandasIndexingAdapter and pass them to `Variable.__init__`. 3. Define a minimal variable interface that the rest of Xarray can use: 1. `dims`: tuple of dimension names 2. `data`: numpy/dask/duck arrays` 3. `attrs``: dictionary of attributes 4. Implement basic functions & methods for manipulating these objects. These methods will be a cleaned-up subset (for now) of functionality on xarray.Variable, with adaptations inspired by the [Python array API](https://data-apis.org/array-api/2022.12/API_specification/index.html). 5. Existing Variable structures 1. Keep Variable object which subclasses the new structure that adds the `.encoding` attribute and potentially other methods needed for easy refactoring. 2. IndexVariable will remain in xarray.core.variable and subclass the new named-array data structure pending future deletion. 6. Docstrings and user-facing APIs will need to be updated to reflect the changed methods on Variable objects. Further implementation details are in Appendix: [Implementation Details](#appendix-implementation-details). ## Plan for decoupling lazy indexing functionality from NamedArray Today's implementation Xarray's lazy indexing functionality uses three private objects: `*Indexer`, `*IndexingAdapter`, `*Array`. These objects are needed for two reason: 1. We need to translate from Xarray (NamedArray) indexing rules to bare array indexing rules. - `*Indexer` objects track the type of indexing - basic, orthogonal, vectorized 2. Not all arrays support the same indexing rules, so we need `*Indexing` adapters 1. Indexing Adapters today implement `__getitem__` and use type of `*Indexer` object to do appropriate conversions. 3. We also want to support lazy indexing of on-disk arrays. 1. These again support different types of indexing, so we have `explicit_indexing_adapter` that understands `*Indexer` objects. ### Goals 1. We would like to keep the lazy indexing array objects, and backend array objects within Xarray. Thus NamedArray cannot treat these objects specially. 2. 
A key source of confusion (and coupling) is that lazy indexing arrays and indexing adapters both handle Indexer objects, and both subclass `ExplicitlyIndexedNDArrayMixin`. These are however conceptually different.

### Proposal

1. The `NumpyIndexingAdapter`, `DaskIndexingAdapter`, and `ArrayApiIndexingAdapter` classes will need to migrate to the Named Array project since we will want to support indexing of numpy, dask, and array-API arrays appropriately.
2. The `as_indexable` function which wraps an array with the appropriate adapter will also migrate over to named array.
3. Lazy indexing arrays will implement `__getitem__` for basic indexing, `.oindex` for orthogonal indexing, and `.vindex` for vectorized indexing.
4. IndexingAdapter classes will similarly implement `__getitem__`, `oindex`, and `vindex`.
5. `NamedArray.__getitem__` (and `__setitem__`) will still use `*Indexer` objects internally (e.g. in `NamedArray._broadcast_indexes`), but use `.oindex` and `.vindex` on the underlying indexing adapters.
6. We will move the `*Indexer` and `*IndexingAdapter` classes to Named Array. These will be considered private in the long term.
7. `as_indexable` will no longer special-case `ExplicitlyIndexed` objects (we can special-case a new `IndexingAdapter` mixin class that will be private to NamedArray). To handle Xarray's lazy indexing arrays, we will introduce a new `ExplicitIndexingAdapter` which will wrap any array with either `.oindex` or `.vindex` implemented.
   1. This will be the last case in the if-chain; that is, we will try to wrap with all other `IndexingAdapter` objects before using `ExplicitIndexingAdapter` as a fallback. This adapter will be used for the lazy indexing arrays and backend arrays.
   2. As with other indexing adapters (point 4 above), this `ExplicitIndexingAdapter` will only implement `__getitem__` and will understand `*Indexer` objects.
8. For backwards compatibility with external backends, we will have to gracefully deprecate `indexing.explicit_indexing_adapter`, which translates from Xarray's indexing rules to the indexing supported by the backend.
   1. We could split `explicit_indexing_adapter` into 3:
      - `basic_indexing_adapter`, `outer_indexing_adapter` and `vectorized_indexing_adapter` for public use.
   2. Implement fallback `.oindex` and `.vindex` properties on the `BackendArray` base class. These will simply rewrap the `key` tuple with the appropriate `*Indexer` object, and pass it on to `__getitem__` or `__setitem__`. These methods will also raise `DeprecationWarning` so that external backends will know to migrate to `.oindex` and `.vindex` over the next year.

The most uncertain piece here is maintaining backward compatibility with external backends. We should first migrate a single internal backend, and test out the proposed approach.

## Project Timeline and Milestones

We have identified the following milestones for the completion of this project:

1. **Write and publish a design document**: this document will explain the purpose of named-array, the intended audience, and the features it will provide. It will also describe the architecture of named-array and how it will be implemented. This will ensure early community awareness and engagement in the project to promote subsequent uptake.
2. **Refactor `variable.py` to `base_variable.py`** and remove internal Xarray imports.
3.
**Break out the package and create continuous integration infrastructure**: this will entail breaking out the named-array project into a Python package and creating a continuous integration (CI) system. This will help to modularize the code and make it easier to manage. Building a CI system will help ensure that codebase changes do not break existing functionality.
4. Incrementally add new functions & methods to the new package, ported from xarray. This will start to make named-array useful on its own.
5. Refactor the existing Xarray codebase to rely on the newly created package (named-array): this will help to demonstrate the usefulness of the new package, and also provide an example for others who may want to use it.
6. Expand tests, add documentation, and write a blog post: expanding the test suite will help to ensure that the code is reliable and that changes do not introduce bugs. Adding documentation will make it easier for others to understand and use the project.
7. Finally, we will write a series of blog posts on [xarray.dev](https://xarray.dev/) to promote the project and attract more contributors.
   - Toward the end of the process, write a few blog posts that demonstrate the use of the newly available data structure - pick the same example applications used by other implementations/applications (e.g. Pytorch, sklearn, and Levanter) to show how it can work.

## Related Work

1. [GitHub - deepmind/graphcast](https://github.com/deepmind/graphcast)
2. [Getting Started — LArray 0.34 documentation](https://larray.readthedocs.io/en/stable/tutorial/getting_started.html)
3. [Levanter — Legible, Scalable, Reproducible Foundation Models with JAX](https://crfm.stanford.edu/2023/06/16/levanter-1_0-release.html)
4. [google/xarray-tensorstore](https://github.com/google/xarray-tensorstore)
5. [State of Torch Named Tensors · Issue #60832 · pytorch/pytorch · GitHub](https://github.com/pytorch/pytorch/issues/60832)
   - Incomplete support: many primitive operations result in errors, making it difficult to use NamedTensors in practice. Users often have to resort to removing the names from tensors to avoid these errors.
   - Lack of active development: the development of the NamedTensor feature in PyTorch is not currently active due to a lack of bandwidth for resolving ambiguities in the design.
   - Usability issues: the current form of NamedTensor is not user-friendly and sometimes raises errors, making it difficult for users to incorporate NamedTensors into their workflows.
6. [Scikit-learn Enhancement Proposals (SLEPs) 8, 12, 14](https://github.com/scikit-learn/enhancement_proposals/pull/18)
   - Some of the key points and limitations discussed in these proposals are:
     - Inconsistency in feature name handling: Scikit-learn currently lacks a consistent and comprehensive way to handle and propagate feature names through its pipelines and estimators ([SLEP 8](https://github.com/scikit-learn/enhancement_proposals/pull/18), [SLEP 12](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep012/proposal.html)).
     - Memory intensive for large feature sets: storing and propagating feature names can be memory intensive, particularly in cases where the entire "dictionary" becomes the features, such as in NLP use cases ([SLEP 8](https://github.com/scikit-learn/enhancement_proposals/pull/18), [GitHub issue #35](https://github.com/scikit-learn/enhancement_proposals/issues/35)).
     - Sparse matrices: sparse data structures present a challenge for feature name propagation.
For instance, the sparse data structure functionality in Pandas 1.0 only supports converting directly to the coordinate format (COO), which can be an issue with transformers such as the OneHotEncoder.transform that has been optimized to construct a CSR matrix ([SLEP 14](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep014/proposal.html)).
     - New data structures: the introduction of new data structures, such as "InputArray" or "DataArray", could lead to more burden for third-party estimator maintainers and increase the learning curve for users. Xarray's "DataArray" is mentioned as a potential alternative, but the proposal mentions that the conversion from a Pandas dataframe to a Dataset is not lossless ([SLEP 12](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep012/proposal.html), [SLEP 14](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep014/proposal.html), [GitHub issue #35](https://github.com/scikit-learn/enhancement_proposals/issues/35)).
     - Dependency on other libraries: solutions that involve using Xarray and/or Pandas to handle feature names come with the challenge of managing dependencies. While a soft dependency approach is suggested, this means users would be able to have/enable the feature only if they have the dependency installed. Xarray-lite's integration with other scientific Python libraries could potentially help with this issue ([GitHub issue #35](https://github.com/scikit-learn/enhancement_proposals/issues/35)).

## References and Previous Discussion

- [[Proposal] Expose Variable without Pandas dependency · Issue #3981 · pydata/xarray · GitHub](https://github.com/pydata/xarray/issues/3981)
- [https://github.com/pydata/xarray/issues/3981#issuecomment-985051449](https://github.com/pydata/xarray/issues/3981#issuecomment-985051449)
- [Lazy indexing arrays as a stand-alone package · Issue #5081 · pydata/xarray · GitHub](https://github.com/pydata/xarray/issues/5081)

### Appendix: Engagement with the Community

We plan to publicize this document on:

- [x] `Xarray dev call`
- [ ] `Scientific Python discourse`
- [ ] `Xarray GitHub`
- [ ] `Twitter (X)`
- [ ] `Respond to NamedTensor and Scikit-Learn issues?`
- [ ] `Pangeo Discourse`
- [ ] `Numpy, SciPy email lists?`
- [ ] `Xarray blog`

Additionally, we plan on writing a series of blog posts to effectively showcase the implementation and potential of the newly available functionality. To illustrate this, we will use the same example applications as other established libraries (such as Pytorch, sklearn), providing practical demonstrations of how these new data structures can be leveraged.

### Appendix: API Surface

Questions:

1. Document Xarray indexing rules
2. Document use of .oindex and .vindex protocols
3. Do we use `.mean` and `.nanmean` or `.mean(skipna=...)`?
   - Default behavior in named-array should mirror NumPy / the array API standard, not pandas.
   - nanmean is not (yet) in the [array API](https://github.com/pydata/xarray/pull/7424#issuecomment-1373979208). There are a handful of other key functions (e.g., median) that are also missing. I think that should be OK, as long as what we support is a strict superset of the array API.
4. What methods need to be exposed on Variable?
   - `Variable.concat` classmethod: create two functions, one as the equivalent of `np.stack` and the other for `np.concat`
   - `.rolling_window` and `.coarsen_reshape`?
- `named-array.apply_ufunc`: used in astype, clip, quantile, isnull, notnull` #### methods to be preserved from xarray.Variable ```python # Sorting Variable.argsort Variable.searchsorted # NaN handling Variable.fillna Variable.isnull Variable.notnull # Lazy data handling Variable.chunk # Could instead have accessor interface and recommend users use `Variable.dask.chunk` and `Variable.cubed.chunk`? Variable.to_numpy() Variable.as_numpy() # Xarray-specific Variable.get_axis_num Variable.isel Variable.to_dict # Reductions Variable.reduce Variable.all Variable.any Variable.argmax Variable.argmin Variable.count Variable.max Variable.mean Variable.median Variable.min Variable.prod Variable.quantile Variable.std Variable.sum Variable.var # Accumulate Variable.cumprod Variable.cumsum # numpy-like Methods Variable.astype Variable.copy Variable.clip Variable.round Variable.item Variable.where # Reordering/Reshaping Variable.squeeze Variable.pad Variable.roll Variable.shift ``` #### methods to be renamed from xarray.Variable ```python # Xarray-specific Variable.concat # create two functions, one as the equivalent of `np.stack` and other for `np.concat` # Given how niche these are, these would be better as functions than methods. # We could also keep these in Xarray, at least for now. If we don't think people will use functionality outside of Xarray it probably is not worth the trouble of porting it (including documentation, etc). Variable.coarsen # This should probably be called something like coarsen_reduce. Variable.coarsen_reshape Variable.rolling_window Variable.set_dims # split this into broadcast_to and expand_dims # Reordering/Reshaping Variable.stack # To avoid confusion with np.stack, let's call this stack_dims. Variable.transpose # Could consider calling this permute_dims, like the [array API standard](https://data-apis.org/array-api/2022.12/API_specification/manipulation_functions.html#objects-in-api) Variable.unstack # Likewise, maybe call this unstack_dims? ``` #### methods to be removed from xarray.Variable ```python # Testing Variable.broadcast_equals Variable.equals Variable.identical Variable.no_conflicts # Lazy data handling Variable.compute # We can probably omit this method for now, too, given that dask.compute() uses a protocol. The other concern is that different array libraries have different notions of "compute" and this one is rather Dask specific, including conversion from Dask to NumPy arrays. For example, in JAX every operation executes eagerly, but in a non-blocking fashion, and you need to call jax.block_until_ready() to ensure computation is finished. Variable.load # Could remove? compute vs load is a common source of confusion. # Xarray-specific Variable.to_index Variable.to_index_variable Variable.to_variable Variable.to_base_variable Variable.to_coord Variable.rank # Uses bottleneck. Delete? Could use https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rankdata.html instead # numpy-like Methods Variable.conjugate # .conj is enough Variable.__array_wrap__ # This is a very old NumPy protocol for duck arrays. 
We don't need it now that we have `__array_ufunc__` and `__array_function__` # Encoding Variable.reset_encoding ``` #### Attributes to be preserved from xarray.Variable ```python # Properties Variable.attrs Variable.chunks Variable.data Variable.dims Variable.dtype Variable.nbytes Variable.ndim Variable.shape Variable.size Variable.sizes Variable.T Variable.real Variable.imag Variable.conj ``` #### Attributes to be renamed from xarray.Variable ```python ``` #### Attributes to be removed from xarray.Variable ```python Variable.values # Probably also remove -- this is a legacy from before Xarray supported dask arrays. ".data" is enough. # Encoding Variable.encoding ``` ### Appendix: Implementation Details - Merge in VariableArithmetic's parent classes: AbstractArray, NdimSizeLenMixin with the new data structure.. ```python class VariableArithmetic( ImplementsArrayReduce, IncludeReduceMethods, IncludeCumMethods, IncludeNumpySameMethods, SupportsArithmetic, VariableOpsMixin, ): __slots__ = () # prioritize our operations over those of numpy.ndarray (priority=0) __array_priority__ = 50 ``` - Move over `_typed_ops.VariableOpsMixin` - Build a list of utility functions used elsewhere : Which of these should become public API? - `broadcast_variables`: `dataset.py`, `dataarray.py`,`missing.py` - This could be just called "broadcast" in named-array. - `Variable._getitem_with_mask` : `alignment.py` - keep this method/function as private and inside Xarray. - The Variable constructor will need to be rewritten to no longer accept tuples, encodings, etc. These details should be handled at the Xarray data structure level. - What happens to `duck_array_ops?` - What about Variable.chunk and "chunk managers"? - Could this functionality be left in Xarray proper for now? Alternative array types like JAX also have some notion of "chunks" for parallel arrays, but the details differ in a number of ways from the Dask/Cubed. - Perhaps variable.chunk/load methods should become functions defined in xarray that convert Variable objects. This is easy so long as xarray can reach in and replace .data - Utility functions like `as_variable` should be moved out of `base_variable.py` so they can convert BaseVariable objects to/from DataArray or Dataset containing explicitly indexed arrays. xarray-2025.09.0/doc/000077500000000000000000000000001505620616400141115ustar00rootroot00000000000000xarray-2025.09.0/doc/Makefile000066400000000000000000000204271505620616400155560ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXATUOBUILD = sphinx-autobuild PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " rtdhtml Build html using same settings used on ReadtheDocs" @echo " livehtml Make standalone HTML files and rebuild the documentation when a change is detected. 
Also includes a livereload enabled web server" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " dummy to check syntax errors of document sources" .PHONY: clean clean: rm -rf $(BUILDDIR)/* rm -rf generated/* rm -rf auto_gallery/ .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: rtdhtml rtdhtml: $(SPHINXBUILD) -T -j auto -E -W --keep-going -b html -d $(BUILDDIR)/doctrees -D language=en . $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: livehtml livehtml: # @echo "$(SPHINXATUOBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html" $(SPHINXATUOBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: html-noplot html-noplot: $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/xarray.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/xarray.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. 
The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/xarray" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/xarray" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 
.PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." .PHONY: dummy dummy: $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy @echo @echo "Build finished. Dummy builder generates no files."
xarray-2025.09.0/doc/README.rst
:orphan:

xarray
------

You can find information about building the docs at our `Contributing page `_.
xarray-2025.09.0/doc/_static/
xarray-2025.09.0/doc/_static/.gitignore
examples*.png *.log *.pdf *.fbd_latexmk *.aux
xarray-2025.09.0/doc/_static/advanced_selection_interpolation.svg
[SVG figure: "Advanced indexing" and "Advanced interpolation" panels, each with x, y, z axes]
xarray-2025.09.0/doc/_static/ci.png
[binary PNG image omitted]
¦XQ_Y͍vhWmι^ώΑĜΏ΄'ΎX–μŠb™bA±ζαW„Ϊξ―]Q¬gŸ}φβΕ‹ΜΨΤϋ_nlΚQθt:fΖ`ss39w‘Šeš,ηΞ"έA{ŠEhΜ‘ώog΄gnΕk,͈πH7Χ' DV’ΠΨ8†’₯2#4Pΐγ ό#2ͺ―›{=ŠYX°'ŸΟσ M(©r 'Ίφς iΐ›ηαν,M?ΥΕζδzIΉ¨WJυ• q ·έΜχŒ$I…–ͺ§ηP¬Ε/…ε₯qαKΛ«ΧΣI D1š™ρŽuk8…0Φ&ϋΡΒ¬XΈbυWJθ4+O€ωπ}3˜±”:S%π„a1ω΅=Fnθ ½,5ίΛ›*c†Ό±’Ύ(SαυΩDbΙ]³h;βϋη4ΟqϋH=\©H‹|½θ鬁‘1Š=9ΎJμa1‘5(ΏΡθ° l»5!>AΝΊœθQμˆ συρ¦’ –€WΆ;k0Ίφ²€0_SΡZNΕσ¬»u’.΅œ‹YάC5‘ςͺΰAωνσώeUGλ‹)GŠr©@Ύσ’ω»«GΕ³N°MΧΨ{©23•ša™›wͺ“;lTyiφ6q΄4RΊ-­XqΝ4νpͺης‘έY›6KΓ£vV~Ϊcd“½=pιΓΒjβŸ42Ž>e”9eJωΙQvγfrΚQκ”©Ζςγ†ΔΒ8Ι€ §»%HΣΞv6ίG2Ήqλ¦νG?U]<%ό_F±με](˜bMΟάmͺR=«Ω{“ύQΩ4Ό.»oYlχ’·UA‡u_ŒQŸ·žΊΑΝ0|ξuh»Fγφ¨^‰ο~.Άη•mš€Λ† §΅m#QΉ}―l‘Ž_ώΟώ΄―ŒP,(–₯_‘89ΜR±ψ|>·ΪΚ`0<όσNόκ—Ώό%7ήEΒΟΟ€ q¬XuiTg1’¬gžŠΥQJύ-ίΝ/LΆ+_FI/”νž²Ž,ς’Δ$Ε‡θώhp~£žξ^ΧdψS^,Ξ)Ϊ! γΡ'2Κa%'Ζβ*ža» ²#„λ—zM…ˆΎΑ‘‘²ŠY™7ΘHRΙuχœ+Φ’—Β:WŠ$κ+“Ψ˜2γ.π “”ηH|ιcNiI±FNHi{ ρψ‡FŠ΄SSέv‰¨€„’τ‚’dΊ‚rθ›>&O’›‡WpDBFLX°Ώώ1΄€gNΕrrϋ4UbRuIΩ;r2Δk¨+ϊg5LθΫΛs⩁)wοYώŽ΅N†§ˆί&GΠ™ ”€ηδ—·¦u΅ΙΤUΌύ%Ω»³€ώ|ϊŠ5Ž̘<Α²hώAEsX'cuG²#˜ίˆ”»*κ†iq=B‹k˜‡hnwJ”†Ηεϊ€UyΛ8/ΕΦ—lژšsφκ΅U³βpBτΦδSτ‚₯K™­ ϋ/5u©”Wy RρξΛT C—sβ€›vŸiκΡjT§ίO +Ήt‹JJy<+<πΔTš!mο7— S·Šχ6u™ϊζ(ΉDή'½ZMOλι‚Τπ„ΚζΫχ¦Ζ»Λί“Šχ_7NM?¨b}˜(EΌΠ=:}oϊVλ‘Ti€ejv*Ν8uk ιTα¦θm‡Rvκ(ŸθEΐ|λήt—–ΫΓ|½2΄<Ά7¨r’ipZωνXτφžeω:%9Ζ0σE₯jIΠΉ±™ Γ½ι©ΙθδξWφŒœλ1*ϋoŸ;ΣJ¬*νΫ»kl<κνžΧ>ϋ’Z=xϋδ)Ν+[ndχA±žzΕrΡ―lλ­·ήβv³ˆŠŠšσΓkΧevΕ ‘˜˜Έπ‰‚§β©N°{5-͎bϊλK"|θa‚yΕΚsβΕ1ρι§θa’z”€ύ£>λn~ρΥCΊˆπ|n ΦΟ„2Wdώδ―-—x³S­εdB‘βIρ‰―¦{±WςύΉ΅U¬™πύ3μ/€!Κ±†δGR>δ\±½Άk‡θ³"*Η¬Λ;√u°TΖx)i\ ΕbΟrˆ9«ε­‰½θFRGŸ;^+#2Ι§«n¨Ššη.ŸΠ2ΦΔδί-΄lNΕrvϋjRx–V?T[\PR|ͺ}„.£ŒΙŒ ;„YΥOK™,V#+©Σӎ-£οBRν΄έ3Tα¬hNκdf¬\bϊ³—¦*|RδϊωΗ ΊΌ?+•Ώ“—sθΜ§mΪ©ΉΛr ‡:²ωTI^εUr€ςΨΆπΈΓM¦Ι£MUy{/4ίSVn ίΌq*ΊPWσ€RΪΚΘUΆ†g›―25€Ί¦%yΠ^Θ ί\bή”b\{M9@‰ΥDΑ‡ Xδι+ee•GσO¦[ ₯τš,ο̲©9Μ'€ω(ΦπhPlwPΝττΜτή]έKvΡNΕτ:n.νKϊ–ϊwS•j 7QpzFέoPr+Έ “Ρow―’*uιΎΈ6ξwZΏ½­œ€b=݊εΊ_Ω(֞={˜ΜλtΊ_|qNΕzα…Έέ‹‹‹η₯Xn^ώΑv» OIΥk΄·£ {@XQγψ½y*–₯Οά›’§xyˆv·™ε„'3­ƒ2 ¦…TOλδbΊk+;₯ξΧh uωTGΩ7«ΑFNZvΡ‚ZΤHΦίSCΝ+“” q=ζΰWd‰ΉŠέ™fŽΦb-N)¬/ќhv$‹Μ„]72Ρ£j γb*–_Š‚“Ο3("{gMWρz*©˜σf=eͺΓ&«Ωζ’b9»}¬¬zϋ†Ε'η—UΧup·ιAΛβVNλ ΜLB^,έzg7ηEsR'v‹™ˆhWι]cTέyιΒ™ύyq΄?l*ΈΤ{Ϋ©½άΎš·YšP5`g#ŠRK_²ώάvF?έΙJšφβ>ρ†­ οWομ΅LS]HΫ,o;\ώΙΥkSΦb=ΕΪTΪΙ}₯9›]Rοp jJΣΥέ|΅ρτ‘\qτΆΒz­Σ|˜bέΤQŠυΩτ΄a"*Ύϋ΅S·Ν_џΥm‹όJv4iŸϊ΅΄ΎεI½―$υ,y‹Υ*ϋŠeΠ§mο^’¬‰;3vkzβaŠυx(VmχΧv%j^~e£X₯₯₯Lζ‰>Ν©XK–,Q©TΜ)•••σS,Λ=Ω…aβ]ςFŸfν(θI]ίW&ο1Ί<σΠrUO]ILhΟrρ ‘“³œ˜§ΓΝt‰LcešŠ;šΗ‹‘b)'μ F˜KΜΉeEcΆ?¦1.λs»Φ]†dίjϊ"›I™iΨτyzE?^_LΕ Κo1έτ‰³ρ<{βM-cc†›HfLχ“Έ¨XΞnߌ‘γDF¨ΐ’ΉϊE¦ΧhT±τ]εY’ ?«§ΐJ±,ŒΣ’9«»Š₯ͺ`_ψΦνζYp£ΝΗσθ½ςFΩΛθε̍4ΉΦΞFΫ)qš²χ95ψ³Ρςγφ τπ‘QΣtaξ¬MΡδΓ­q»«κŒ¦ε[εϋσhρ§ξ+Ώ:Ί(Šυa·ΛŠev­¦βΤpΩΡζi'ω0Εj»Ή<Ά7ΊωξτΔΨΪΨξηb»—Xπά[έόγz[ΕΊ1²jK7€NΡgTλ'’“œ*ω|Lθψ@P%cK’ΤΡ5z-λιQ,•š―_Ω(ΦΑƒ™Μφ·ΏS±~σ›ίŒŒŒ0§;vlαk±¬ϋχ6ΩeH^’²žω'ΘΞ€„ζȍ͍Ÿ1k],εΔΞψO©yόG|@‘PԚiΡڎbεΣ‚!ΚΆR”Ό>€X“?b±`ΖΧ44^i.§—Z9T,;EَbΩ­»ŠΥ³ΠQ¬ι)ΝΠ¬Q—Ϋ­…qΔ::νΨΛ›G±’₯ ΗUΣsVY|žuαšj WmΝΥo*£Θ{gkδΆ3½Φ«Β΄]WΛ75|sαyν\ŠΕeς‘*ΦhΧΥOΏθΆLŠšΈ±πΣQ'ω0ν.Gϋž‹8:ƎY½V9ΩΪo΄D©»k£X­gn,Ω2p”›ι7EŸθ\±Έ>ΙMύΡγšWb{£šf XOΕDA‘Z€_Ω(Vff&·k͚5s*ΦͺU«Έ΅Xyyy‹€XΤϊ“0ϊ”υv²œ%hšgUFχ)Gτ¦‰Xμ*&AŠ\g΅Š)½ήΌ‰šnΗHE£Όϊ|m]Ϙύ΅X‚“Θx—β”\ώy{ΏqnΕ’ΣaWF9S¬Ε/…5jk±Hέ΄¦+Fΐά‘~n?6ύgγ=έA±ΟτνΈΒœ;ΦR#―iθ6mGNmΏ1fgΑS!Α;ι{*"ΌψΆk±μέΎώΖͺ’όόβ:S%θ2ΣPQ,jq €^π6Η#Φs0Μb“ VπΨQGcW1έΆέbͺFμ*;Ιξ,B*’΄Z‹e·NLŠεΏ«έrΤ”Y‹₯˜ίZ,cSqbΈtί§620€H‹–&S‹εhEaš(zOε†sk±ΆK#-&^;U˜\  ^tνΨΆpiI=·‹ΰΥΚ΄ν‡/iι΅X G-7 –¦”MΝr8z±0£ΌΓό›R¬$V±šŽ›M©ΤεWœNœԟΌrۜμ΄>)έ™ŠυT¬Εβ΄jKuζόΚF±BCCΉό«ΥκίύξwNόκ₯—^ϊφΫoΉγΉν1Ύb‘~pKQέ‡Ž8¦ž{q—‰ ŒZj‹6>½{[Lώξœψ ΘΠ5tΏ3¦¨ΊΝΐŽƒύΧH“ΣRBι½ψx"vΓ½‘šjί=';F$`¦5φΟΜ©X΄ ˜v­p¦X?B)\ΪQ0ΐ708T–‘, fχ’`vή»^BίΎghJr’Δ?(,„ή1OVσPkΖP—Cΰ"eYωΙ1τNzAΩ΄i«ciΟτ‰S3daδξ˜=ΔτJ7‘DFκ'Ϊ―ΟrGAG·ͺ:JHΒb2ςwηη'‹©RσΒΚ:,δKeU-Ζ9vG€²κ@ͺH‘Q0Sύ"“σ‹’ΧEDW‰L.mθ·Σ`Έ’G$€Δ„XΝY°7‘Κj~uΟƒν(¨Ύ”'Lάχα'MέΧ::λU9οl O8\‹ρŠΌΘθάΫ¨±&mΗ…Όm©‘³ΚΝ; &fgv<šΌykr=¨5 
Θά,+ΈΠ€T]£‡€Δ»/iΨ·ΖνU4«΄Ϊ!Uσ'‡’ιWriOgmo―ΌΤ6@ν(¨ΌϊaVbdekΚΚ¬ΘΈΒς†ξή!­FΥύiiV$Β[6Šε$“u’ΰ­Ζ<™4ς½Γ§¨W_:ΎoΣFiBeχ”³|p¨X¦WOϋjŒ}υpώ·[’φΚΰςΨΎuςΙ¦›Σκ~ύΡ£κeok ϋ}¬^«NϋζvλΰŒφ«Wbϋ’>»­6~qωfО‘¨νέΛώ5ͺ»k_±Ύ"ΙF}<Ω48­4(>δΗφΕ5cλiΪξ‚³¬ψ•b½πΒ ΜΫ„ϋϋϋΏϋξ»φφv>ŸoΧ―άέέ[ZZΈΒ~ύχ/Ώόςβ)΅95Σ•τ±3]pφβ.v} u z9z‘oXJΩ•±~EvΥaM“'0krjλΖ‡Όέψ‚ IΎάB‡zjŠbB©Χ@Q_­Ο(nΤZŒ>YΘΙpsYš$ˆ€ΰΑχ†Εμ’wθnYa9‘Žžk7‡bύ8₯°΄ζεKœα˜2ST'gί‹% “i7-!3tœb³η–QήN"1›.6oΡ‰qΩ‡OwLΝZ‹ε$“y-Φ”ΊρΓ‚άMR¦’ς O΅j§η€CΕβ^oΕΌŸ*fJm°š7¨l²οΕΪΛΟΪϋ­I„nŽEmλ!ΎVI~ΩNŸ«Τ,ο^ί·κΐˆBw·υ³ώε[z–οS۟(8ΣZ{sν?©d©λnοOͺ½χb=u; ³ς.X»Ώ²Q,‰‰‰Δ— 7lΨ011AD«££cυκΥάώώώD½Θη–…ΝΞΞζ˜S±ΐc€N!£Ά¬¦ΆΡŸqaߎG vΈ&tώΓ5†ώŽφƺڍi€‘σLRΜϋ=3#¬¨-ψ±b=!―v‘I½HΡ₯KAϊΥ―~ΥΪΪJ,K$₯§§ [Ύƒψη?ωέ»w-KͺT*ύλ_3ί’tHj$M₯͏“buUg₯8*KΡηc–ΈiΩ›P"ΛΙOO£6Ψδ‹v_™ο#f¨Λ’Ζξ')tHjxŠ€b=½ŠEΠ ž9+ηVd1AτΙΫΫ["‘Ψ(Φίώw??ΏgŸ}ΦςCr.I€ƒG(ΦΣXΖ;wϋΤύΔ‘lΖ²\ r9—€€UX@± X¬ei;―wημάΉj•ΏΝˆ–έ ǐ#Ιρδ,r.ό (Λv]֍~J΄\‡υW@± XΞF΄&υ†ρI½sΘ1ΉŠΕ@± X( Ε‚bA±P,(ŠΕ@± XP, Š€b=FŠΥͺTžr XΕ` Š€bA±P,( Ε‚b XP, Š€bA± X(λ‘`ΌswRoŸΤ;‡CŽΔ3 ŠeέψδώΑΞλέCŽ'gαΙŠΕ²Ή Pr•³sηͺUώK—.ύŸΉ‚CŽ$Η“³ΘΉΡŠΕbύͺOέ欜ΏbΕΜ?ΘYδ\’, (λžf`8’έ‘«%K–¬Z΅***jσζΝλΧ― …/Όπ‚έ-’IP¬Η[±Ύlj¬–oΟ-K δδΗ†―[\_Υy½{φψΥK/½”——g0lŠ6==]TTτςΛ/ΟΛ"ι`]P¬ΗU±χάxg[Nψ©]ΘWδ€9ΉΡ?˜³s§₯,ύμg?KNNrR@­V›‘‘ρ‹_όΒςD’I OP¬ΗO±ώ]-gTj{NΑ—M͜M‘Ι‡Μ·•Υrη«°:―w―ZεΟi±¦άάάϋχο“R|ύχLq˜Ψόψέwί>χάsάΉ$’š +² 5%Ιβ0?o7>Ο/8$&»¬^Λ~klHςέάωG΄Ά'κδb>ωΚ[Vcυy]Z9ήm}E“χLφ”…xπέ"žΔ’-”žHκv‡–υ<Μd΅ςͺωg5LΌ€gΏΥ~RΎqί₯)Ϋ³}Ύ8e¬ά΄qΫ‡]xάΰ§£oψ΅ΨξηήbYφvίkωiŸλΥΣυ*ΣSIοvΏVu›ό[ύ™fIμΐΙ)(ΦS¬XŒ_mx+ρμ…ΟCΎ"Γœ3©7)βVa=σΜ3ΩΩΩĝH˜μ(˜o‰kΛ"gq+²Hj$Mη}YEVϝ’(7oObYμΏΔΗΊ~ZΕ9οιήψύ~™8ŸBκΚ7£αΗΊ’‘.#ΐΝ+En|Ίk’.۟€)H‘λ,©'V±ΊŠΦπέΔUτ#fhΜ ¦Κ%“,ΘfΟg'†Λς}rυZ—J©μ¬Ώp4Y*ΜΊΠ;νL₯¦TWΟ+Z5ΣP,xŠ+θΤ€’mJΡ6yξΛΡμΓκε±έ―μώbΒ…ΣϋGV½;€˜ϋ;_|9z²λ ŠES1~5η<@rcYŽŽŸΤ)↑άέέΗΖƘ"Όψβ‹Ώύο Ν;χώϋοηζζ›:sζΜή½{π‡?<όσχξέc\ΛΣΣ“K€Ftͺ1)Ύ”Sy‡dΙ;˜ξμp{y-]^τίΕ2Ε«Ž%Ύχh)VKΎˆ”W\9φ#]Qߐ,ΰ?}Š₯-Sͺ²·λA“zR«­(ΘƒS¬{ӚŠς$z„u, ŠΞ$oܚW?eωαhύα„χJΞ«τΡ*(<ڊυ•Υά(m‡vΥ–ξεLhηόγι—Λ’]Q,3P¬§]±’³MŽOqΫ]Γ˜uYs*Φ³Ο>{ρβEflκΏύ/76ε(t:3c°ΉΉ™œλšb±}Y^„uΨΨUžŸ~ ͺqθλz 5³Ž/-k¬J_μΙησ’˜ΝܟΫGT$‹EΎ^τZ`dL’GOR{πΩρ4BP~£ρž<ιv7Θ³Β<=ΌcΞΣOμuωŽΨ0_o7Ύΐ?"£¬Ε,?ύŸ—ΘΒ¨+_₯7²_υWJ¨4Εu%β`ΟΓΫ?"[ΉΧ_“θνζ!πΥ Ϋ·Ύςr€hG‹νW":MS—ΧX›μGr.)θ22W +©;Ÿ/^ΐγ{{Kw+Lυ©WΛ βCINˆM‰³ΛΠYU€xΊ›+Α3©–­L―”κ+b’UQI#9 Ά€<ŸΰΠ„’Ί!s†K3"‚ι”}Bd% ϋ•©2›KγC€φBR«:τcu{₯A€2½B³=ŽΌNΧ^–ιλεM.‘#―;h£XΪ:ΣEyΒ°˜όZ:uYŸžψΧlν«€ZfΉ™΅M›ΣιΚ<ΥLλOg/$©’…™ΙfTΔPΠ¬βVΚ·0Ε"WiΦ4μ^Oξ—€lhz>HU#/’δΛλ™‹2giΛΦσ­t±…Ά#iΉi€Ξa²FcN°›EΓŸc[¦;?(Ώ}ήΏΑΊͺβˆb}1εθkΕ­ίΏ-RVςιεηΖKπ݊ζO'ΛΆ†oάΊ)u_ωΥQϊx'_QίjͺrΆ₯Fn”FJ·₯+rE΄υΖI·†G§&œiͺ? Ε€GP±¦gξ6U©ž‹Υμ½Ιώ¨l^—έ·,Ά{ΙΫͺ ΓΊ/Ζ¨Ο[OέΰfΎvŠš]₯νΫ£z%ΎϋΉΨžWΆi’.&œNΤΆDεφ½²…:~ω?ϋΣΎ2N@±žlΕϊ²©™YεΔ―˜=0Z}Λ|Β¬Λ"':W,>ŸΟ­Ά2 Ο?ΌΏϊε/ɍw‘πσσsI±Œ΅2ϊd>ζΑ‹ιγΊ <ΓdeΕ9_ϊǘStjš*1Ι? "){GN†x€]~£o/Ο‰§z’ξή!²όjIRŠ$κ[ίP"TΑ!a‘;>'‰W‰i“ IΘߝ#υ§V€HΛιΞτΔ¦#‘–ΏC&’εΘW΄`ŒœR?†…GΚ’$ΤY€Ϋ½^“$ ς’~τM³·ˆιy{ΕWΟS±NΡWτψ‰ίq°lwl0υ#?²ψ:=Λ‹ξ1{‰OΞΙO'κβaZWs]Ύ;!Œ²,‘8+χΩι¦ο.  
|ƒ#CeΔ"Fj2ό)‰ ηνQÏΌΠ’ڎ:J#© ω…ΙvεΛΒ¨Ϊ#_1VΖΘ†―(,(4^#’]Ξ;4Fβ/Š—Ε₯ξ‘έΑΊ1f•”›—Hœš!‹φ ,ΛΠΈ‹m‘$½ (™ΎhPU™ύG$<Ϊ–[ŒSέάω‘₯jΫ‘Uζ™rΪΨΎ#ˆΎGQhZIρŒP?ϊ₯֎,šbΡWτ ρ„’Π°Œκagυά_)ej,(&…Τž―0€ΎkςαΉΛq²Q²δϊJsςΛ[¨«HυΪχΥ9Ήέy(Q—{θ“Vε-£SΕ2^;ž)Ν;έe΄ωΌώύ­αΤδύ—{Ι£15piΆpιΎO΅ΞΏΊ7ΪtxΣΖΔ΄•ΪގKϋS·Ff+4τE{ΟζFnάVx±[3€½V_™ωΡ0(<‚ŠuoΊKˏνa>Χ^ZΫT9Ρ48­όv,z{Ο²|’c˜ω’R΅$iθάΨΜ„όw0άύʞ‘s=Feνsgϊ_‰U₯}{Χ‘bG½έσΪ‡c_τO«oŸ<₯yeˍμ>(Φ­XΜ*,»ΎdιWIιΩδίΜ‡Wϋ%ωδίφφ½°T¬·ήz‹ΫΝ"**jΞ7―]»–ΩƒDbb’KŠ5\αa6ΕT,οˆ#LOΪ H₯Ώ τ`Ί†ZΪδƍ‘ Υ”ŸjaτΟΛͺλΜτ©ι‹=μ'L”·Ύ‚9½‡NaΖI:NdΔΔHcv1½p¦ήτίϋοM0Βγ!ΪqΕ`KqN―§~μΨζfeh*¨κβΊώ.+wΕέm¬3μζ›ΖΊŠDT1“?g—Μ΅œ*Ϊ}°BΑ”ρJ>΅$‰›(ΘV&ί?£a„[“J'Uΐ _hΛ%ήΤuk Ԏ9ρβ˜ψτStΝwΠ#`D6z,*S˜‘ :ϊκβ0zHΔ/EN[A9=Άi_E†ͺ¨9iœ€»Š˜s₯ͺ3w­Ž.Ξx­L@όAR>ΔMfνf&³±Γ›’2ν%Ψۚ ˜˜±ͺ.ΟXv­«¬Δuu‹€XμέDE-ϊ9λ™ύŠU>“™$ΚΉb9MΦqΓθ8fΥήυΩԌ_ŸΉ~ώΝ¨.οΟJeφγΏ“—sθΜ§mΪ©YŠ₯ΉΈoΣζ¬Ώ™š­^υ{·†G~z‹k$Š΄[3£NΏžΞ’Ff±NEe£­2nγΆCTσP•Ώ',h5}₯<žΕ€GS±†Gƒb»ƒj¦§g¦χξκ^²‹v*fr`ΗΝε±}IίRnͺR-α& ’žGΏAΙ­ΰ2LFΏέ½Jnp¨XΤ₯ϋβΪΈKίiύφΆrŠυD+3$ewm•]ΏβΦnΩψ²T¬={φ0™Χιt/Ύψ✊υΒ /p»»¨XβG±<Š[Qψe(Œ¦%%ξήΎaρΙωeΥu]#F‹6{ŠΕ“T™μ‚ŒΪΆ_£ν©ΙπuŸ₯@$A=Ϋ‘eLXαfΧΡך8O1₯(˜K3Σσu³λσ OzΒήΔΒK˜έh΄.K q†±jm;>’ˆ€μ’JE£Κb{ϋŠΌγŠΝ-ΘN©ϋιJ¨Λ§:ίΎYΦrοM ΡχΪ€yld¬Ζ°·,†­[Vrbεv†ςκθJφ”›ζR²RΚ( ]EΤΉλZ:?]Ε”`xǜ7˜WXTs>Μ³”„«IŒώ»šm„'’ΤΩΡΌ`jΖζb*VθAυάυ<.aŸƒ•νΈ’XΞnŸ³†aG±eυ mvAΏΚF՝—.œΩ_'₯\kSΑ₯ήΫf•:ίP™šW?jwt‹ς¨χΞ(ΉφΣ­ϋγ€qu;ϋκφΥΌΝ„γ*‹ρ4κ“4Ήvϊvc^΄4‘jΐόΥ‹‘Xπh*ΦM₯XŸMO&’β»_;uΫόύIPΡV±Θ"ߎ&νSΏ–Φ·<©χ•€ž%o±Ze_± ϊ΄νέK’5qgΖΞuMO<Œβ@±uΕbΆ―pέ―¨ΏK+Vii)“ωΑΑA’Os*Φ’%KT*sJee₯‹i°3YkΪθ‚b™:—tΪό9;B"¦ϋλ*ΣΊn°β<­4όxϊΟν†Ž‘oσΪΏΘτ­Ε2χI?x ίrQŠ)ZήTŠέ1"_ΎΥWVŠ\ĎS1#i&γb‚IΔΖ£θ~­ΊrQ±Φq#cuY:ΤP#ς4―=σφ-cΩU,n‘7°6«ˆΌQsσκJbBx–Εj±S™Μ–tΔΈ,WI1Š₯`μΛέ4ΨΒTΉ}¦²τ“pJΓϊκ,˜Jλ―€η FTτΥΕτΈbεμͺz›n`‡¦f6Δ”: ½~QΛ[ΜM•tRΟCUζΜ0wXŽS9U,§·ΟIΓ°£Xͺ ¦€ ίΊέ¬:£ΝΗσΔ˜±&Z₯6lάLνηžyaΐ‘bYŒGMOwJŠ΅:ϋjόr&΅GΌ5€qΗΊ§G©―(ΧβΞϊk±ΰQU¬Ά›Λc{£›οNOŒ­ν~.Ά{‰Ο½ΥΝ?·U¬#«Άtσθ}F΅ξŽzx":Ι©b‘ΟΗτ‡Ž₯Q2Ά$I]£ΧB±žBΕrξWŒb‘+ΦΑƒ™Μφ·ΏS±~σ›ίŒŒŒ0§;vΜ΅ν.ΖͺιUςΌ°²£ΥvE‘q6΅{„Ε2ϋέΩξόΖLp2udΩ>iŽ·Ϋ„•ΓϊΫj«%―ΰ1ΣΥΖ]P¬¦›ΞχO¨+jŸ·χΟt‡RεςUΧ57^Q€―ypΕ2ΘeήV=iKΕbFrΈMΗ1^³K`ΊIЍ‘G±ΈΕƚͺ’œψ zu\ȁ.—Λ4 "> PXVB‹Φ4gOš#W467~^Δވω*#Μμ(–Τ<ŠΕθ₯ε(?rχωZΛό4φ˜I†Tnως6z —΄ΪήΆ"G±Ži­Gl貘+ΉΞ^~T± sΧ3ϋ‡σQ,³1Z~λ$Y§ Îbυ,tkzJ34λwΤνΦΒ8ι¦;MŠ•š£PQ ±’sOtbυšΣl-”ZbΩωŠΕϊ¨΅W=`‰ζ–qzΚvkͺ©k±ΰΡάξBq΄οΉψ£cμ˜Υk•“­ύFK”Ί»6ŠΥzζΖ’-GΉ™~Sτ‰Ξ‹›|xSτΈζ•Ψή¨¦(ΦΣ5QpNΏrq’`ff&·k͚5s*ΦͺU«Έ΅Xyyy.nΪΞΎ€Θέ;(MήΒτw‡ΫΛ“θ ψτΠN‹Ϊω€ξϋI‹™­όŒΪΖzι@²bΜrωPΘ^Σj“³€Šτϋ«Šςσ‹λLcz΅ύ3δΕnΕ!ΰΦ’Μξ³σΩbL͞†κ³ Ευ„ήzxmˆήTΓ4οkŠelίμ° Λn6ΰΗΎΗ©l<³•b™–{Mλj“…L·»kzΌK^Z²#_Ξ).Σ{φMk0+_Z­s X¦Ε<ά”ΆώFyυωΪΊž1nF³ވ‚ώΡ4ΙΠuΕ²³ Νf-–hΦZ,Σ:7BZjδΥ5 ¬J±kΔ±Τp·ΆΚΕ΅X<±yΡ=·3žͺm>Rͺoίm™Ÿ‡₯XNκΩ΄o‡o3ƒΤP—eΉΛP-σf&Έ­t¬.Γς[ΗΙ:m=Γl7·hdΧb)ζ·ΛΨTœΘν?a±βN‘-M>«΅R©ιY[#ί«Ί65ΧZ,Υ…δ[3/N9ύŠZ‹%~ίΌΰjzfJ3@œV•ΏcΉΛxν£mX‹ biΏΉI>δ—λι©΅XΛŒ[Œ/Ν¨ο0¦+‰U¬¦γζS*uyΰ§'υ'―ά6';­OJcw&„b=-Ϋ]ΜιWowΚε_­VξwΏsβW/½τ·ί~ΛΟm1§bQ«σY)b^=,ΰΉώκaΚjͺΔΣμ&A€§ϋo$«=¬bψ‡Κ2’%τfzξ1ηΗθνΤθ·rω„ΕdδοΞΟOSίRCjζ]έψΌ`©,£Šθώ1»£ ·Ώ$#=+%”Κ‰ βH—ΉS.JΩ‘Ÿ- {νkRŠjΊ¨XΓτ ½}ΌzΈ^ΒμLš’œ$ρ šW©±Wτ π†Ε€fĈLϋv¨˜CoͺλSvμ*Ϊ‘B•(@ΖjOY½V|›\Ϊ0aG±θ:τ`weLO₯7Hδ‡νn1P –hΟτΙߝJ›€LQu›aαŠ5£­Žp; Ζ„ϋΡ3 Ω½ u9τ ’ R–•ŸC»zPΆBg½Ω±πΌύΧ‹ΝήQΉ›ΎΒ`ͺL›@ϊg0»>ŽΙ“θ πƒ©ό„‡„‰ΜωyhŠεΈžι©‰t‘!±Ι±a€©{š%ŠΪoω6ˆ|#ς σχ0:LΦiΓ`«Θ#€-ΝK ΟΠϋ[TQϋ[*:{Υ͏¦½“Š΅Xπ((–ιΥΓSηΎc_=œ?dh^\Ϋ·N>ΩtsZέ―?zT½μmΝ‘AϊϋΗκ%±κ΄on·ΞhΏx%Ά/κ³Ϋκaγ—oνŠΪή½μ_£Κ±»φλ›!’lΤΗ“MƒΣκAƒβ³A~l_\3F±ž†MΫs ,tβWΣ.oΪώΒ /0oξοοξ»οΪΫΫω|Ύ]Ώrwwoiiα ϋύχίΏόςΛ.+έ ¬―Hτχσ¦ήeδ›]Φ¨{-λ9΅Εi’!σφXόŽΚvσθ„©[T'gί‹% “α0tœΚ'έbOZ<…’ˆ΄ŠFΣ䱞³!~ήDό|C‹φϋΗς1aΜk΅|ΧHΣM—ΉR# ΰi ’€ŸθšPΙeAΤ;―‚v5/P±θω]</tA2B…TnύΓ2Κ[ΪιΡ vΝtΕ|yMσ&.ί5ρEυ¦ϊj(N•1 θ ά}ΎΛδ6ZEN$΅’ΜK”QkW±¨Šͺ)bλ/ZŸQΜή;z%ί°”²+cύŠlj²™WprΝΨ(±Ν†’Xu+ύD9Š–ρV+͌ںƒ)‘Aτ{±θ=),s;,{™v/wΠ mί‹ežcΝe©‘Τ½&ΙfTuθ-ςΓζGœ_ΫSCoX²¦ˆyxŠεΈž©"+vI™vΕ)ΦoΎ2jε»$ώTΝ„ΘΚ{˜}GΜ«&λ€aθšIKbžVυώvΖοBή‹Eώ›h=q¨09!1’Z΅Uœ˜›σ‘’Y;{Ν…F±OΌ1΅°ijΦZ¬ 
MΔ…I"[Ε©ϋN΄1Ώ|uΟς½XαΡ‰qΩGΟw˜ΎΊ=p©”ήx#:1αΟ4]=“@K‰.όĊŽފy?U\Ν”Ϊ`5oPΩ€eί‹΅₯—Ÿ?΄χ[“έ‹ΪΦC>|­’όͺŸ>W©Yί½$ΎoՁ…ξnλgύΛ·τ,ί7¦Ά?Qp¦΅φζΪRΙRΧέޟT{οΕzκ^=ό΅_:ρ+ζΥΓδ»ίZ*‰ΔΔDβK………6l˜˜˜ ’ΥΡΡ±zυjξ’^δsΛΒfggsΈ¨X‹ˆ+xͺ°•:0μρμ?ΒσθfϋrαE‡έŸžΩϋ™έΣB;Ώ―€Ÿ (Φc XΜΪ*±4ΡξΦν6G’ΓmςN˜Τˆ-]Ί”€_ύκW­­­Δ²D"QzzϊΨΨΨππ°ε;ˆώσŸί½{Χ²€J₯ςΧΏώ5σ-I‡€F„bA±³«§— θχt-†bi»“Rd φHΚ—«/ΕbWyΚ#3P,ŠυD(·"‹θΣΉšΟœŒ_1~ōwΝΖxη.‘’U«ό9‰ϊγ¨ΣιnήΌΉrεΚeΛ–Ν^‘uλΦ-˜“““ϊӟ,6ΐπ'©‘4‘XP¬Ηn Kž@ΏHΊa£Xs\Ž^θ#)ϋΙ1((k1-‹Y—υu_rγTδδGς!σ­ΏbΈΡ?˜³s§₯Dωωωυφφή½{·₯₯ε›oΎq€XF(Z~E!©α)Šυψ)cSΜΊ,»―ζœIHЍOv^οζ―XaσZα£G’‚ά»wΟF±t:ωΌͺͺjι₯Ο<σ χ9I€CRΓSλ±T,†/›š]-ߞS°α­DωωΡξώŽΠ ž9+ηVd1AτΙΫΫ["‘Ψ(Φίώw??ΏgŸ}ΦςCr.I€ƒG(Φγ­XŽρΞέ>u?q$›±,ƒœEΞ%)όΔ«°€bA±Λ v^οΞΩΉsΥ*›-»AŽ!G’γΙYδ\ψP,(–νΊ¬ύ”hΉ9λ―€bA±œhMκ γ“zηc0r Š€bA±P,(ŠΕ‚b XP, Š€bA± X( Ε‚bSΕΩ (–«ŠΥͺTžrlΖ©œψ £X€yOtδWP,(`!k±μϊ ŠXˆba» (ŠΕ@± X( Š€bA±P,Όzπ$Εr¦X? @ Δbͺ @ @  XP,@ Ε‚b!@ ( @ @@± XP,@ Ε‚b!@ ( @ @@±P/@ Ε‚b!@ ( @ @@± X@ ŠΕ‚b!@ ( @ @@± X@ Š…zA @ ( @ @@± X@ ŠΕB @ P,( @ @@±sΕϊξϋοΡ"@ (kή‘Φ}|£.ΏωπζΟ·―•K}އ‘+ώιdΔ›η6Ώu)+οj©\u©_―EE °‰‰Ι)uΰυžΎΞλέ< €!‘ζDλ1V¬ΛCW³šφωΉ„sNmψηΧϋΏΌΩŒί€@ $ξΜΜ¨Τύ€C<:>e$?άϋ/x|!zσ(dcκΆαΦθxκiZ€MA±3ΕϊΟpϋφ¦½―ΡVγUJΞΊ:ίͺ@ žrΏΊήΣGzΓ€[|η.αޝπCλ‘Θ iHtsΤ“φP, Šυc(ΦwίW”Ώy.f^reIπΩMεΧεψ݊@ β© •ΊŸψι OΟάOD±©ό¦E,‹43(Φc Xcw&·ΉlΑrΕΑ―ε_=|kz Ώa@1©ξ„_A±›ξ>ilP¬GZ±ˆ_νψϊΐƒϋImςξmόžE ρTΕ Νΐθψ΄Š΅ΨŒά% Šυθ*ΦwίχPΖ―lΘΏzxζώέE)φ}eΑ›|7wΎΫŸχ΄άwxTν{κπύ} ΌΞΜΉXž;u!ΙIύ#Ϊ\« Ϋόhu/ι"ώ/\HΓθ>ύξίήxui]>~)gfέ…Η εόˆΟΡ’ΆΊG·ͺ‡§μ‘έ\aΧ{ϊ Σw %P¬ΕfR›46(Φ£«XεJωCχ+†y―ΛΊœω*έ[rΒκ© XP¬G’ΞIϊϊ%ΥL=Śι<ύAΑšΞ™Ÿ΄ρ Φ—ώλƒc—uόΝ* Š…@<ρhvΗΑ“§XLΖ X¨b}­½φ ϋ[8η;/ίƒU±~˜™™λ§τ ο±B±vBw<Š=·Ώ}4pίΑ]ΈΟ΄=ύŒλ7E_#υ"έΨ³­Ν©LNΝΟHg‘ˆDΈ«νA{᳋Ε‚b!P,Ε‚b=%Š΅½iο"ωCFγΏζ‘›qΥW—λk.Oψ3Ÿωο秦/Χ·hfθP,(ΦCν:ΏšR@wΑ:¦Ξ½M5ΆW±5§W,;ŁbA±(€bA±žΕϊbπΚ|ί5_Ό*CλΎ^PξTϋΒ(Φ¬N'Χ£]½§Ε8¨Θ]+τα­ψ­Kέίδt‚“^)/|{έ›Μ*šWW‡K³·Œ»{Σ |ΆWϊ—@žŸχΊH²«¦—ϋ»χ«ιŽΤΪƒͺήs™k_χqσJU0ύl]Ϋ±μΨ΅³άΌΒΏ₯|6ȝwλίΡτΈά™Ν3·>Hψ+΅Ά‡·R•m‘8sdΣρΜMαΒ•>Tž.έuΊs|~UαDft_οϊψπΌWKχ*-λYχΥ‘LΙ_Ψκς{3ϊέΓ_ί²R}ΛΏχHϊ†ŸΙU3λβχΚ•Ž{Ίέ¬5ΥՌζbΑ–p?R]³KWέΙ]o―[Θ[AWέ_bί=X?0Χx€³Šbϊύʚ‚€θΥBκŽπ|WoH-ψXe~Gϊύ―3…teώγτ-£ςdvμκΧm+Sοc3¬Κ“žvq’ΰŒ¦~{¦«―|ƒͺ«nΊHχ/JWXΦrm~-V/ίβΓ΄„NΣ=ϊ*+IΠο½zSε –£³½Ι6۽ٍ-ξ9*Υθ{OηF½In ©ή¨w­΄zyGΕq±ΥQKέΘmŠ’*ŸY(’d=πCϊσΌ2fV¬”‹Sέ§3!ςσ"9pν–½΅ƒvšά€ςΈt­ΐΝ#0³Ω… »ό(9luΦΏ$]½ΉŠΕ‚bA±ζŒ¬¦ύ‹κW όzb)Φ›ΉϋSή°κΜyE•φ9P¬εώul˜χz πuΣ3s[[«Ÿ£χΆ.ιm!ιΠx˜/δGΊMΜAΆ#%LΙ•ψ0y kΌ>s5s°ίjΊwEύ;PzŽν¬OfHσsWS=~S~Hβυ\fΰd¬ŸιΊΝr—[Α•ΪguΖΕ[l»jΫΙά²uΉ;! 
ΗΌκe[™΅Yoψ½.0%"π# Ι΅ν.f”­σ™uuRWΝ3?ά―χρ¦ͺπyυυ@Ώ?§Κυσn±G’θ‹>¦3΅΅¦+ΎΉ—υ.fώιš΄|λ=ν·’ΛU΄u”ΟQΤΞ±Φσx\cΆ~<Η₯VχΓLσή΅L΅aX'ς[ΑάΗ{HFcžoΖΜJωTΙJ«ŒΉ½Ήη+£M“Λ|—½ΧŒbΝ•aΧ%g­Ξκ—€Λ7@@± XP,(–σP뇂ΞόγGP¬ΰ³›ϊυΪEQ,/ŸWίL=Φ€PΦl`;Ύ« •φ‹]θ˜π©IrHŸ‰ϊ ±@rRηΌχζζ!zχcjjͺϋ8Ϋϋχt[­z…pΛG΅JUo;50{0œιb;BUYW#]iκMZχΒέVˆήύtιE±’φΆœ)ςπι(¦WG:©rύ™ήΣoϋ1­¬―g\ ‡_χ₯Υvφ6}$a­&πέΛτ1­{„ŒΨl©‘ δώ`ιίθ.ڊθcΓlg]BηmuφΧl·OWξjZ)Ν][ϋΧ%β΄6Ÿώ“όxΫΞΏ0=?{έT₯₯?ρ”ώ[9EŽ1Κ3D<ΣΈŠ}{›³’†O³Ό2z3u»g†λwώ•νΤ&|¦·jW€[όQ§žIPΌχ†meΊ0QΠV±ξ«XYZžy‰ΊϊTϋGQVνaFΌYΆφωΆΨφ½τ€*©IΊ’†ι5c+Ek…TΊ”άhΞ₯oλ;[νΝoT2)Ψ›(θξΓ{=<σ\[―F)Οg›ξί>r°Y¬½βΈκΈ, SL5Η%tE­ΞW.ό!}πΖ<Œ™[»»ΟΪ]υ·f¨ΦΎŸ}6}’ώ­³*ο ζωUφv+τ.dΨ•GiξV·°›‹@ ]±δ2o7―ΉqΦWηγyξ‚δϐ¦Ά\Μw -λYΜξ~]FΙΆΒψ*‡Ί8ŒοΆΎ’ŠΕϊ λγuOι_‘€Ά~‘λό° 7κE±θYvΦύK>OZ3cO±f>~›5’1ρ™qΊΧ{{5Ύ†;ΕδN|֝ΈΎΞΚT…E>§ΊΏώͺΠ6`d»lΗ60}©·ε36‰sB2£H°]Γv‹q CF_OžŸ™’ϊnώΕ—«ΒaηΟ"Ο¦k™Κ5¬€σuη0›Fοpφ―μM?˜{πξ|Ώ€Σ½F>nπΐ]χΟΉ_™r6u2šgyέφ=lζ7ΎΕhΌ˜ΐt _Ο¬q<€γΈ’Έ­=¨2ŸΦΜvdy[j¬ΊΡ€ΝpΏ²ϊΨYζΚ\€b™ eq―θ<™K²—πήτ8›'™w‹©—₯΅¨2NΡ§σ6?FM τ‘œΣ›+je*UσS,6YφB―3ν'³φώΌΛY«3έaΎ9_e:YηζCϊΰyώ3—Whnνœ;‘ζ4εθωu%Γσy”·Ί…έ\ρ„+VKΎΘ3©ŠΕ‚b-0ς›?Ώ"±ρ³χœ™w΅tQΛ²`¬‘¬0-€±;ŠΕcPsήXϋΤΜ§k•:Wk]ΉΕϊΣΆ‡~Y_[υu6ΆΊW֞ώhឝ»ΉQΜξ+b™*»‰3ϋΉ™& ύ Hbώ,M9Ψ ΧͺΒα(–Τ"Ο¦rYtΆtŸ.=°—Ξžwhoδτ”9α_cv}t²A5ε‚b₯1w'2ύ,{λd΄βΥ#ͺTc'αΉ*j†;€]θΒ~\ΓZMχ’-*σ«ϋ³Žα*sώŠ5u2–-ΤαΑ\φ™w‹ΥΛ₯>¦Ϋ:SK ©FFc^₯–cΝ0kΙxL[—byΌ±³έ|#Kf1'vΎ£XŽ[wχ_}3Z²)–aέj«  xHΈ1/ cφ[;χl2γHά-πIUΨΈ³σ »π(ΉΠκvsΔ“­XΪ²υή‹XF(λΙU¬ΝŸo(~Eώ1ηΑ’‹ι‹’X–?ζΩ;R,j;„€oΪlWΰ#”~ΤitX>’Σ9`§Z™zΨφ »u)w­—½}ηg)–εŽ¦-Εb{ΜζωNNΛiU8μόYζΉ)Χ²φΓ`MΒj{ϋζs3ϊΘ1wώ㠞Νφo¦žμ›ωa‘Χ8ΜkXŒGYW…‘Z8(ύΙM|;άg‡}ά^§ΚΕʜΏb9(Τ\N2Λ QΓ&ʝoš”²}΅HiυžΞvεΟ:¦Ο=?Ε²άtΞdΌ S,ξΎ,‡ηύ>hc^@ΖΜνδ½z‹&w1Α˞Υ9Χj(lΞ /όQ²«XσΊΉβ‘P¬‰³ρ<°’ϊ†bY˜/΅›N@PL‘BΓlh©Μ π<Ό=ΧHw+ΪΛ¬KΫXš@oΗ$Ι.ο0Π棐™».ή1ηك름ѻζDiΓŽΛ'Eή!O_μΙησό‚#²δzζΫ’5|Ο$EΛ©Ώ?hW;υ‘½Π̝ޘμΖ—VλLŠE1«+]cω6Ύš*‘A‘ΰζ—"7 yMΤgϋσdgΗΜWι0]ESαΑΚo‡bA±~<Εr.E?{ΟD-ΐ―~jΕb—¦Έ½ώφI©[€«χΟΆ]»½7·"fK™ξγ’Χ­‡SμφuL«η©ι@ΤΥτ-ŠzΥΓ4­Hγͺbύ0x/Fnž glίμx-VKQ‡wΜ)ΓlΕ9%εyˆv\1LθMθ2ΎoVƒ}Εςˆ,V™?ι9f*Z~‚‹Z―Βκ)4ŸN)–@Vc.]ΟA’”€LC•eGuιlΦb‰J:Μ―•yρ}3~ZΕΪσώ>G³ΝΙWP¬'J±Ύϋώϋ?Œ˜S±lTjΑ~εs<μ§Εκζ^γσͺπ Wήγ4uΪ΄^|Sτ«ΤfΣζΥΒ,Σ jμ―Ε<Ά{£Ρ«©χGν?Β.Fη½.’ώ[ε’b‘OΚΝο«α^χδΆ:S‘›GU,@u:ΉΡz^-ΰy$G>rοz―fΚΘmΝη­ š_…$ˆ*Wύ°ΠλRΡwZ"4ΏΜ‡{/–ί¦γ½Ž ζ¨(λχbYάJΑΊmSσiW Q,ςIλάŠxo(ΪΩΔΐm‚Gg/œZr6ϋƒΕ}VΣ&™εXξ ±μ–—ΫΉ‘ΩFœά[X/άNq\»ϋSM{L‹}^}]πP‡˜η›1n(ϋΥΏΕσ²jΙΌΏ˜ήf’  v­2ηluP,β1W¬€τz‹o{ΚB=ψ!Ί¦υŠ>ί3΅Φz Εj(N“† <½Όy|ζΕ}ήb{ŠΥAɞπbεv‹/·˜C8rBκζœήh#Š‘–ͺτ˜0€Ηχζ1/6΄T,«υŒ4)΅`ΜΊtŽ·»0Φ&!LkψΙG±μZΦϊλΕzσάζyM\°_VŸ‘ότŠE:}χ§D―ύ3έ=ς έΦπ¨”}{«·ώΝ,f$\χžΞ•¬%}\ž0\ϊ―ϊϋN΅κ¨|½?>œzγ0ιύ#χ€ROύ]³r»˜ŠΕ5?ϊκ>D³£’φΚ»-ͺΚ¨,Q5ΉBΰχ—TωΰBZ¬•M­΄Ψΰή΄Ÿ»ΥήqφΚ;πYnԟ© δ½ώΖΊ΅-Ό>»8.ί}ζ6 WϊP  WΎ±.ήΊ’π>€Ζ<―ŒqC‚ΒόΆ[­ΗίέπI™΄Ίu)}₯sΪδ\Ι°Λ•ιΌΥA±ˆGT±’ΌM“β¬ Ε$Œb5Z)Vˆ+Šel/y»ωE¦WΦ6Άuu\οκ8ŸαοD±QŠw΄ω‹Ν-Λ”‰%MΪ.jsΙZiώχoσ“₯Iηύ'mŸΫ‰ΕŒ‚ΏύΔκ1¬,zοqξ>kbbUΆV₯|§U*"SΎοτΦ¦£»&S%Ώ~ΨΞbΉέώΘ=Τ€oSΖkͺ³Uή5¬3y;½νY•ϊž2Δ₯Ψt£bΩui±Q;ε΅JHψ¦Fg όAX*“ΚμκηŽμ•ή‚Ψ§4Γζζ€]qBwg¨uŽŸYy Ÿ:š¦Tl‹OΘ»ZW8Yb9τ5I»γBCεwΊ?[έξΥνΛτΣ:πέ~₯"­ΡιΈz$ΤϋΦ2κ=§@^=₯B{΄¨ΞθZCχήR•/½ρΘπύbKZ[ΎΟˆ’Xάgg5ž3]τγ—5φDΑ‡&ϋƒxJψ΄άϊίήuL%φΓyS'ε7NšX:±eƞpk©ΝP„&λ9ώx’ναΓα‘mk;•Eb=‘ρ«ή>ƒΨΑΔn&v6‹Δš~bΕηήΠ€GFΖ«=G–NCρήΘΨ’žκ4ob9Zςa ιUzΣ@KmA¬2rGa§Γ-ΡΚπ]ͺ€‚«-:ƒΩαt΄E„FΖ~sUgΠΥ—&썏SΖ?>±Žν ΩŸQ£—³‘ϊXœ8B­υ^h|Je§ήhhψ>-6&>T¬€nβQ}Ο©½‘н•š]{γ©C;C"³λξ:ΆΖ”He葝ΑΨΧΣPœ,=½ΥXΉΡJ©"ԝρΗδŽ&s—Rq¨Κ8±β">N+οΤwwV#V).½QΞSM6eΈͺ΄‘{ΐ¨Ώω]j\ΘΆŒκ‰s:ΚΝ£Ψ–ΰZ–ΓԘΉ;RρρYύŒΆ§Νa1žKU&T[FΕο­ Ωρ!JUnm§Ρ4 k<› ήiκUs ΒR•,ΦyGΪΩ±MF{ŠχG†DΖΗ§Σ‰<Ϊyj―χOΎ2ΞαΚΟΔVMϋNc0Ε–²3pb΅ΛzήΥύ€±ϋ¦Zl“ΘμΫ”‰%d›κTm§N|ΠyΎ—Ϋκί{dHT†ZΣc4€Ν[f˜,±€ίΠΘ€ΛήQ2kuκNΕώsFž0χοί‡Ών]Ίn½iπ³_X³’yψθφ֝ءΔ&v³_9%H¬g?±tβ(3Nύ½? 
K,6Ί†δΔΊ{5)άsL)#Ÿ0–]gsGKΘήRο¬ΧdΗ…Dεi<$ζͺ4Ε{“$Vxšοϊ7ηΉ;J:‘K©H½κ2r4dΔ…J,Gcv¨θ–±&35[-½‚ΥάΧcτΎ²C“’TΖ–Ό‰βwΰ«+ήΘΔfVυ½Gj=C1rΐ„¦5:€§ΔKkξmͺΡΖ#ήΧ—XQΚοVu:-5iŠΠDiXc&ΫΣ}’`ΌAοΦ$…)c‹}Λ2«UŠPΥw¦O΄T₯…ΌŸι™_ΤX–špͺΫσΖ½γQS¬Μ΄hΏφ–κ(pbΩ†υϊΛCβŠs/zςΔrΪJΩγ>yo໏Ηζ|‘`RΥ½·(εU’λ₯ς3eH€wVT*Ž^•uς~Μ²όKlpχώ)j=κc™—ΕS“AoςŽOΚ =Te™,±D’©"}λ?zυˆ22‘jΨ ΏJeέ»wΟxΫΤ‘λΦΆΆ‹ctΔΗTμHbw;•Ψ΅f₯―H¬η0±\ΗΔ‰“šDuΘχέ/±n„‹γHί₯ŸΘyγΚπώN:ή Qω]Αε:L˜X»K}έδ:(Ώ!ŸΣ%*’ΜWι„«‰%•@˜λ³ρXZ«2ͺvDΗ…oΫΊm§h<χ±ΌΆαΩ7½”Β#Π‹Θ«ͺς;yZ-ްχžΥ‹7xHrΠ2ΊαΚCζ‘Δύ—%ΗFdJ½c&ΫslbέΘ σDi»νLι7α‰Rb…&«οψΏ#ίΥeΟ–ήψ+£‘ηWGΞ–όψIN΄κ.ٟ'6x¨2Rͺϋ ₯i&–£Q|ξ; ύζκy(RD~§{oΩ{ΦSŽΊTωεψρμr“~τ“%–τfγ25ώt iŠΏNŽρΌT¨ύ”IΕ²\Ν S}''΅₯6#4ΠDπ„xψπ‘ΓαGΓV€YBμNb§zόσI¬η8±δ±#ιW©ΗΈNτK¬ϊl銠бΎ'Žμ=Ρς7$ Ε{₯1(_³ΘΙ8±|Ν~‰u·F:/ΒοKϊ«%/>D`Κ©?¦ŒH=ΧΠ=`Ύ3lΊš²mlb}Σ9½ΔςŸξB>(ίU€{h(ώΨuI¦3ZεŸX~£Cβp<ΦK3؞c«>[1ξ²4ΧkΦ:&>Ρ•X•£ώοΘχN}‰5ΕΚLXάdΣ]˜«BCw&6κŒΓ6ΧD„Ξ0±δΟέ7šδmΤ―5ξ½εcΏΔŸΕΑͺ‰5ΕG?YbMά€ξ…ˆ?JΕl΅Ζ`oj@ύΩΟ$–Σq3=Κ5Ζ(%ž‘q8ΰWεΡ£Gϊ)G±'ŽbΉχγήγT‰%ŸDͺΨ]ͺΏ{υˆ2>σ…$ΦσXΞΎ³±a‰GRγ=Έϋ%ΦθΥ€pω0€koŒw’Εѐ±3$Ί ΕsnΎ<ω΅XKFS†σΫN}-V\¦w¨ΰΞΥτUΉŽq'³‰}ο%Φ{ς xξ^’OΓ;ζΉΛοb3)fL~—!M,χhL–Εκ›Ρφœp-Vψ˜λΎŒί«BΒδqbM±2¦sρcΕ’Gπ$–\MG½C—Vι#σž(¨?+"°ήα͞”m“\‹΅μ΅XR›Ή.‡›nbMυΡOq-V˜rG±7ν κΤΔ„βN§λJ?ο^Χw.>Μ“‘SLΪ.ύ!JΘ,H •<ω«H¬η<±€c\iήjΟmˆύΛιhΙKPl“'²6Ά_=up§bo©t9αΘ^”OxθΞψ~hXίx6iBČF±\Κβ8΅ΆΗ8dΠ¨3βwO>£ΰξHιTζ]kcρgq₯Λ`BγŽTτ˜‡Z.$Κ>²KϊY•ώc¦‰q0»²uΐFbM±2Ξυ‘HiFΑζ½Ύ³:?yGΤΞ@³ƒHΣ₯„(“‹oH€ 9>#[j³’‹MϊeΊXΙΤ£ψw›‘ξ›δˆH₯"΅Ρ΅―€(Ε―ͺZΊ₯)ΝW3ΒCγ’Κ€=Η¨»šω±ψp‹ZΞι'ΦTύ˜eωΟ(hΥdΗ‡„'f^Ύ©Σέ¬Ξ΄<5T•¦Œψϊͺ΄kͺRφ§₯ ‰Κ3YSέkX­’ώω2ΈύlΒΗΙΕνό΅@b‘XΟcb‰£p•ΒwΎbI·Nj(ɐοv₯T(γγΣΞ6Έ¦‚˜˜NkKYƎm‘;AUwΛΧbΙ£ΣL,§­§2#1<\©‹Ο¨j©Ο χ› o Cšβ΄Dω&H"‡ς<χ§¨ΛSEˆ†ΗΕ¦–jξ8τΙαa‘αGkΜΣO,΅*DΎ©TΚ^Ο}±*zΌςέKΊ“Rrζεžη„ΉΔσλD™ΈΆ†ͺ¨Α;Ζτ·η˜Δ’oβT˜εΊYSbRA£ϋήQΏ<±&_i 7ζͺβ₯ϋb)γΎΉΪ’{HvΓΔw{χζ)ρ°Pi’ϊ„|±JΓΥΗβa;γεiKΜυEέΟ€U©ΔΆͺ<ιςr΄ͺ€JSKw “n•°kβjLϋZ¬)>ϊ1Λ{_,Η@]ΎϋΎX‘»rέχgsθ«²c₯ξŒψ8ϋ»v«£½4~[€"*―Ξ:Υ­‡Ε"i\Ρ{΄ζΌPΧ…m@b‘XΟEb=–‘aίAΈ|Ώ ”«3Ή.Εa5ίρ Y.§)Ζ\υŒΰ?ΦΟ Έ_¨ο–$‰υψ%Ο&Ÿ/έ…V―“ο•Χ0“Ω«uΕ;αήΤχtΝηŽD+CΦ˜ŸΉ Ab=_ϋύ°±»³ς›DEXbq7›H,kV6υUξΣ½BΒγv¨ ͺυ3œZΝwβ–R±->αλͺ–gρώB$ΦsE{Ρ±»F'ηΦ°1€Δ"±€Δ"±€Δ"±€Δ"±H, ±H, ±H, ±Ψ.$‰$‰$‰€‰@b‘X@b‘X@b±]H, H, H, H  €Δ"±€Δ"±€Δ"±H, H, H¬gk»4i;§/‰5UbYmDDDDDΔιKb‘XˆˆˆˆˆHb‘XˆˆˆˆˆHb‘XˆˆˆˆˆHb‘Xˆˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰Eb!"""""‰Eb!"""""‰Eb!"""""‰Eb!"""""’X$"""""’X$"""""’X$"""""’X$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆ$‰…ˆˆˆˆˆHb‘XˆˆˆˆˆHbύ&λdsyhΡ—Ώ\οΙƒ8χϋͺΨcΕ~ΛW$""""‰5·«ιΦΝ '#~μ­?d―ωΟΌΏ.=Ήq EήΜ…ΥψϜΏό!kυβΟ$φ^±σE‰ˆˆˆHbΝ‰ΔΗ¦L_/U_Μ_χ‡ :±6θΔΔ)‰57Φd­Ψc_<±.θ«·Ε>Le!"""’Xs"±Φηo}΅$/β₯§£H¬9΅>bο•%φdΎ+I¬§œX'›Λ₯σOΠWψ '–πyYόωŸΈ. 
‘ΔzΚ‰υ^aLPΦšŸυΔ}½ZμΟ|]""""’XO3±–§―ϋ\ ‰υΝj±?σu‰ˆˆˆHb=΅Δ½gΗΚ‘Gbαo!±Δž,VŒ―KDDDDλ)'Α€ΏΔr­_—ˆˆˆˆ$ΦΣΩϋͺž,±Φ+Ϋ‡œ£?DUd• Ή^§’!ζ•’=)έ¦ϋςΣuιNz",κ§΅Χϋ›tΉΚbOΕΥΦΫυf\H-2‹Νή|°Pϊαλ•§+ΜCΆvΫhGyÞ Zη#mJ‰ϋ)―ό#½¨ΏWZΚρξκύ6 ‰υΈφ4_Ώ6Θτ4ΛήΥ1rQo{ό™ϋξ^l³šΙsν΅—Τ³ΣB~/e½¨±tYΩ‘Δ"±fοDΑ5Ν6§]ۚΈζΤ$9±=cΐξΌί[ήp8κβαΓmڎαώOŠLXKΟ‹sšn—IŒͺI?y{Θω¨δyωΑε§΅œ¦Ύ3«•—RΕˎ<²ΧΥ+½£Xځ^­ξτΎκΓϋšμΈο΄ δ9hθiŠEˆ5r:-½ΪΫεϋ*”o–Ζμkο΅9‡Κ/m πΎZ΅#ύΆIλύΦ~ηύώŽΫ?>―|³μ“Œ~‹XJΣmmyύž5%JεOΝbƒΤ] “Ό)ͺ]l‘:mzΤ…ΔΨϊς&«Σf>σΎΌy—Φόh{Τ―5u4΅fν«ύd¨²τ:»Σf©ΟΈ"Φ?·Βά―’6ΰaW••g‰ίŽ ]L©‘ΆRF·XΛ₯Ϊ0 έĚ+iήΣ}cΒΟ­³χRˆˆˆ$‰5;Χbή“a²H«bοopΌ!UYΆi|Γ8ΪŸΆ{~ςξ>½xό΄kMέΕ:ΣΕƒ%ήQ£¬¦GN}λž₯β‰Χšz3Κ½ zWΩp&ε’7±œ#ϊTΟpΣϊ¨NߘΟψDœ|Ώ?‘”λφp©η·gδίή”~ΰ}υψήW€ΔrZ*ͺ7ωΚθΡ%Ύβzΐ©Δr›τŸBΛr΅œΪοXΣ+—.šD]yΧχĞïx~»α§iž_ο]ΊϋN)±ŠδZΣ 9mυϋнΉ«L1ٝΓg'žγΔι*MR―XΆ(hΩ’7~QΣ#\“ΊqΡζτoUŠ—_\Ήη²ίγϋK?|iΩ–ίh•±@Ήθ₯νίφϋŸ(hiϋN΅εν•β5ύqΥζέ… CλθυύΑΛ6ηάC^Ή,όο•―\΅hKaΟΈu3 ψBχΒφΦyνΛ?7z†\lMϊVǷϏhέœ9Pk–~ΓΡ“z­Γ§Ί θΆ>ν;έcΙΙι~u_Η‚«KG sοem[ž?”‘έύj|ΗόΚF΄ΝΖw’ΔSΪ_8p+Gο^΄φšaCBΗό­­σΆ·/Oι/q/Z~ϊ)sN~Ο«ϋ:μkeͺv•‰΅$Ώ;hGΫΌν―~eΚ95ν₯ΥξMm4…‰uΨΥΎ`WWt­ΥύψΑ‘Τ―Ί€ΧŒh{α@ολΦ©Nœόu 7LaŸvˆΝ%6cΠ§ϊΤ6iMv΄ώۊŸ΄-ψΚlΦτ½ ž[έχjtΫς"‹υϊ­=9fΟRύώΣ:zϊTΟςθΆy[ΕΦ§ΆLx)-ςƒc€ΟίΥυNιp—kΫ–κζ%KΞίZ} Sήμϊ έύψ|Χ‹·ΞΡ­Ξ7k-ό}ˆˆ$‰ε>pWQ{¦άu6šΜˆΉώx΅ o6hΗ4€œUΆι%Φ„«W؝#έRŠ,­”ΞΚΣwg)KΦΊΛ~銯τ6hz₯sηJ§s“oξΔ2Ÿήΰ ’S‡+ξ;G:₯ίNύΎ$–wdI¬a…”guΧήυ,tΟΙ§νVͺxΩΧ―‰/»iι©υn‹R/έwšΪ?Yκykuuή·ΆiŸήξ΄εB”βJ,ρ²§­/ύuοKΪ€ψ—΄)Ÿyš‰υΕO''۟ůžpbY€Άyc{žFJ¦ž+ι›_ZΆωx—ψχΆ/7Š:Ϊ’t‘­Π<κ”ΑV,ϊ°ΠθΉ+οƒeK’ΤFΏk±ΜU^{iγ~u—Yό{wΝ‘+^ްTν^))ŸΥ6ψνG+‚ί^υΦg”_€%yέ²u_w]·ΡΤ€Ά>7]μ·ϊG¨Ϊη«΄"ι_ˆθάV;*½xߝθm Ύ’ς©Ε΄΅myŽY:Kmx8:^¦wοn–NΖ3ΤκDt§JΗύΆœ”Φy1=ͺ6ιη]=σ·Ά/jP:”·XhugOί@pD[°¨2‹Γl‹n›Ÿ2(ηότθݍχδz'Ί-ψϋQι₯ΞχΜίήu@sO: πΊ!XtΒ4K^ν /š†ε3ξD2EίR[δ·/σω@ν Q•ϊ ν]ͺŽΙk²Χ±ŽDοn{΅p€KzVuQχ‚xΓE±‰ϊLΑξ‘'ιΉνΛΏ4ŠMm›4±μ΅…σβυ9χΜζΡ’|έόθήΣ¦±/εK,ϋΕόΞy»z3:Δ¦ΆnWοh VKΫJ[Φ%BΤ½ΦΡΤ#bσJŸ ΄ czszδL7Ά»}Γ…{ό}ˆˆ$‰5O6­9ψ°φG­M¬šϋ„4…VΣwd”ω%ΠΕ‹Σ2¬­Έ™UΆi²ι.€‘žGkŠEΈΛ”ϋ扱&7MύΎ%VύΎSώ‰%"ΠσΘ“1"±œrbI5`―xίΪ%ο"ΒRLbuΛί?ι[–Bή€RbΙAˆώ“κυOw+`e=f_M+±F.Δό1ΘΏmͺφ\Έ.]#%–bα‹ΫΏ5ΊώG»δEežk€βVαζWƜ·ψMw!ςiΩ’Έ fίγχ.‘_Κό]μ’7\•—ϋߊδά½/νDϋE6&kΖ.¨£ωVέoTθοδTˆΝI‘‚Ηΰ}ρ{ηGτd Ί:‘sw›ϋκ Σ_΄ΝK2uΉGήV‹Πxισ!χΣuΖε[Ϋ·5ΊΟάSηtΜ;2 Κn0έ3{N™λRwΟΫΥ'e‰λιG½KΏ—q€uAζΧθΦό/̞·l;ύεŒ«Γw–¦ο…­]©}ςۏθRι}Α©ϊ΄myαέ)+ΠλXΆ)΅ŒΏ^kBby·Γ€‰eήΣ¬φ ¦ [J.˜k'K,±άθVWΊ>iσ0j]‰΅C_β‘j*ΥΝ‹ο―΅Κ?ί₯?mβοxDD$±H¬ιX’~ΙξtΚ—?H‘KΣL¬MQ"q†šn¦GQœέΎαY—ΌCL—*މ­?S€—'xΠ_^ύξ kΚEΜ4±.ΝVbυ—Χ~ςώω=c<π­ΉKαŸXΪ1‰eλ?­χRηc֜~ϊ' Ž«¬Ηο«i%Vχ‰u‹W|€φύ€νkΕΒ?ξUJ‰΅(Xuet’0[±lKΑuΎί"W5ω«%ymΠΒߍuρΖδ›"u ·Ό(Kƒ*x•ͺαVι–žτš°,smοΌ­~ϊξ+…ξΨΧtjΔχoY‰‰θ–’B> /ωͺ]€”»+Μƒ"±Ά5ΫεFj[ν™ΚOoz5ΒWq¨‹j@²{ΏΏ΅z_Η‚θvΙν­sKνN¬6Ή©ά)%υήWwΔZν–Φκn€ς™NbEtyVΫύŸ’¬€·Ώ xώΐΤ.zD]6t±ΟΞ_φˆˆHb‘X«z{Τ΅άΓޝpκ 'ςθΚ›γO¨{½^;ώZ,ίoεΙχ\‰uRΊBΙ֟ξ›Α―0½ξΑ˜ΔςYœXn­tZDέ kκEL™XήW³σ±λuιlΓ‘’ λ§7ύ¦}·μNΫΕ(_bmŠνs’ Σ”υζ\΄έ[Y³W³X«’FŸ^X΅wΥ’ {F₯¬ Nrμ7&±^;x=Π»Žm^Ά9ί ^όei˜«λΨΖ•1U–ͺOVMœνύ&VΏ_b}ρ Λp©w~„.ϊΪ¨λι]έσ~6±βΗ$–tBέγ'ΦΔ·osΜ<±\#NΦ‹΅rz–οh]pd Ι ±|Ο}Z‰ε?μΈ“SfKκ˜Ρ}έΖίχˆˆHb=χ‰UxΈΒ&Rjό]†—–η6έ—Ξm{έ}Ρ”S«ΩόpΏέ›"f¨¨φΕΖρa§;±\>uφΞψ©CΊΛ±τ“υŠϊ*n~έoϊή2~mύΛE;΅ψNT•5Yl9Ÿ9QΠP-(˜38‹‰%Ÿ–dz:ͺ$³ύgF±δ3ύO+ω˜‰%Ÿ'Ω­ρ&ŠέΠ`rφι$–Ωd3Ωͺς[ž"±DSmνΞπ|d† =ξΨ“O|΅ΜsξŸΕrΊΤT’ŸτDΑmΡ~[5»}ή§&χ‰‚ΛrΟ0μ}kχ€Ϊ·II¬ηyφkυ¦GN›΅£’υtJ}ϊΑϊάγνυRkhS\Ηύ’šΜNηύފζΤؚτŒξ^ύp―ο„:‘"rLg’ΚΓή,Ϋs°S«φŒbPΎmwΪ;NΦ~ς~₯<Ϋ{"E,Vnί|­YΔF‡ξτΑκΔ¨K‡kλυΜ§4%Fυc\²εD›·ΝήXό’"«S>ϊ>vΙ*ρŸ/ϊ²¨’Ϊ¨Œκ>›‘oDu΄}Ύœ=†λ·€ι.YέΣ]$΄½π•Ω=έΕ,biK»ζEχζτΩ­Φ{΅·6|Ϊ9o»\q“&–C«ξvMwaΆΪš~μ›ΩtΣHšξbώ§΅tΎœ][Ϋ·|{Gt³}Ζ‰₯3ΎΊ½3¬Ϊb°ΚΣfTτ.ΨΡ“c’†€‚·ΆmΈ`5 ΫΗ?WJ¦φwͺε©&‡w'΅{ΖΣμOuΞ‹ιMmΉgUvΝ[iάKωOw!ΌKŸ£“¦»θτοhίpήj4±lβ#[pΔ(Ώ_χ'λμ―9ΆW•§αο~DD$±žίk±ΦΏY™Ut«Co³K+$έΈ£σtT™/––₯έvί’·£'wμ-zΧ―ΉςC“Εβ”:­·β§Os›ΉFΓR‹€{υΊον[Έ~CC³ι}d䇨“›ή―‘nXώ­ΌΠ 
Mβš“3žξbͺEœš2±δχu²_z_6ΧsϟιxδNΏ0±Δ†Eiά·–ΆX_ωΎ³“Ξδ!mΐšς&ιΑύ—‹ΕΌ/MŸθI,[»nΠόΣγa=󓢏v•ͺ”Α+V,zqΩ’`EΜρλ©&±δρ¨…Ώ[ΆωΈΑ:!±Diώ=hΡK«ΦEeUέςΏŽ+hα۞WΎUΈE©»»ΒΤyσ§ΊKυ₯λύΆŽωd۲ƝΊˆˆHb1έΕΟΈτ‚bαμ*ηά_Ξ=‘’αΦȈˆˆ$‰Ebύ]ϊ¬ς[υ•žyb»|/―Η½Ή0‰…ˆˆˆHb‘X$ΦsiΡα «Σ9Ϊ|²ώpTujFOΏΝi©«W>ι’!±I,‹ΔϊmκΊΪjΔu΅ΥˆΆΌ~Ο+OΎdH,DDDD‹Δš+‰…ΟΊ$""""‰Eb‘XHb!""""‰Eb!‰Eb!"""’X$’X$""""‰υά%Φςc]œσ6Ν€Ώ…Δ:ώΆΨŸωΊDDDD$±žZbY¬ΆwNξ\œωΝ€ΏΔZόυŸί9΅“―KDDDDλiŽbeώxzρ¦π™O¬άΥ‹>_•u₯―KDDDDλi&ΦνσڜMe᳝X‹Ύό³Ψ“ΕώΜΧ%""""‰υ4kΔrο’ΆNΊ"‹ΚΒg4±rE_ύIμΓbO±XωΊDDDD$±žfb Ν#4WΦζ|Έθ‹?-Ξ|‹Ω/π™I¬γo/ώϊϞϊ¦Ψ{«nΤ‰=™οJDDDDλι'–p`hΈχV_ΖεSŠ“;—ϋ«8€FœϋŠ}Uμ±bΏνΡχ‰}˜/JDDDDk$–πΈεΆiPίgθκΉ…ψ¬Ψ{Λ φ[±χς-‰ˆˆˆHbΝ­Δrywτž8XΎ;Š8ΗY%ώ9zΟΞχ#""""‰5w I,‹]I, I, I, ‘Δ"±‘Δ"±‘Δ"±‘Δ"±I, I¬ΉΒlb@DDDDDkvΚ[{Ο~Ÿ]§©(kRVœήl±ίc/ADDDDΔijw<²έ)Ab όόΎŸn·²— """"β4½ΰaEwH +ΕνqΥGΔVbGADDDDğU΄Γ#§SYω‰H +0›ΞFWυ\εŠ,DDDDDό™Ύ²ίππΡιΦ³""fΟWbέκ\qz³¨,Ζ²qŠρ«ŠΎω "‚Δϊ™ΚWύΩO·[-φ{Œh!""""’KQvΗƒa›₯²§NYω‰‡™φΥσ˜Xήλ²ΒΟοIϊϋΌΥˆˆˆˆˆˆ^E&ˆX˜ΡυW$ΐΔ ±H, H, €Δ ±€Δ ±H, ±~ώ?&,Ljo%IENDB`‚xarray-2025.09.0/doc/_static/dask-array.svg000066400000000000000000000344651505620616400203320ustar00rootroot00000000000000 xarray-2025.09.0/doc/_static/dataset-diagram.png000066400000000000000000001154521505620616400213040ustar00rootroot00000000000000‰PNG  IHDR‘QDΨtΪ pHYsΓΓΗo¨dtEXtSoftwarewww.inkscape.org›ξ<š·IDATxΪμ|ΤVςΗεF±WL5½Œ{oτή ½χήB轇!@(‘₯‘ž {%ε.=wχΟ%—K»τKΉχ£}ZΛ²$?ι½Ε2Μ|>σ{wg΅²VϊjήΜo ΫjP―ΙωάHκή3œzξz44444―[<υeΤ«^Ÿ%‘ϊlκΑψgEσˆm₯ΎŸσΉ·P_Ξώ_zΞΧ…POΡ}‡ηQ?» Νλ֎ϊ ΆΖΛ6€ϊkΤCρϊζλD½§ƒγ7Ÿύυ9_Τ―©·`?gQr‰οχΤ[Ίx]-κ“_[ΦίΏ(ΐηο›©‡xBfύκ $ΕƒdΑ<= ‘]Ϊ'”Τ€~υΪμχIΤχR?N}5υXέkQoM};υ{©_A=†ϊ*φόμgν’Ϊ•ϊhκwQ?Β.ΖmΨΜbν€ή\χΌ.•°aΏkCύjφό›tψdκwRœϊ&Ε—šF=W>ίZΕ—1 eΫܐ}ΦaΊ“ϊ΅,><7“‹Ξ³γ²#¨χQ|Kؚ5gΗ|+κύtΏ‡Lϋ꽨W3Δ„eτώμ˜mͺϋ}}έΟI rΰί±Τ ΨqͺYΆ] ΤQšΑχnυ‘:ΐ cσυAμ»Ǟ«Yυl‹m―ΟΎπy‡2xΌώΖΟ°}ΜcStηψΫ@½ϋyΌαpjp#±?€ŸŽ‘O¨WΉϊ°B·Pί(Ξ±ΗπT„†vi[]κΫ¨O}»€eP;I@Fΰ κ―²‹'Ψί©ΏN}υωμ„ύ;)ΑI:`„ ι§,!,7ώ‡½”ό•ϊέΤ'QΏƒAd3φψԟb~9{ώ?©ίΚ.W3€Ξdpz=υc  ω<υΉΊΟΫ–ϊ_Φ@τκO³“aΌήΗ>ϋ9φY#ρPΉ¨ ώΎ²‹χ)vΜΌ¦»αΈ‡ϊv,ždΏƒ›±―Ψ…ψCκo0€S°ΌKύMκgΨχi<{l»YRΨsξΦ½/|nΠe±ΰϋ²žϊLφ½ω‚=Ά˜ϊ·ΤO³οΓμΖ)–ΕϊϊKŠ/σ6ϊcμuΉ©Cύ!φή/κn,Χ±ΔΨχκ=ζ•ύ˜w‘pήYφpφ―ΒφϊJ‘Aμοk—Ν…Η’m€³z9ﻇ% œXΈξ;f‘ΥΨγAxŠBC»tΜΈ/;Αι2<ολ.ͺ‘σt―‡ηί¦ϋ.jθ.ͺΣ]Π‚ΨώVφσz‘ΊΗά4 Σmλ₯€φ-”]ψ'³ŸυΛρΑJ_&W³» hŸ½:ƒδIx˜\TΗΠOμζE»θΎΞnΘ΄cŽ[my;Oρ-w§²Ÿ«2π;Κ~Ύ•Α₯vOcY…γΪΕφ―ΐKe`Ω‡ύ|D—]Z§”,Η‡±Χ6@ΐ}μΪrΌ–ΔΎW`έ€ΆΣΐκφσzΆ ­uϋbuΈˆ 4„ν““μ|`™Δƒ›ι„Oe ΄‹έOe³WΨσ’<ΥΦ½OWv3Ywvή‚ύΏϊννΑώnpc~₯"Ύb„ΠŽμ|ύ3;ΖoѝΫ/cΗκvvƒυ »YюGΈ©zœϊ―Š/³Ύ™έ8΅bηDΈYkΎο™ίΕφΥί•Ωχ±μ;’νml{~f7AιΎ‘l›ΎgΓM^o;\ό“ρ0Ήθ Ԙ …μΰΫμΨ1B(Τ;ίnx>dtΩ'=΄E°οP-₯όLθΊ5+mΛ@wˆ.kΆNαΟ„ή«”³ƒ›·w.!…ΟžΘώ&°Ÿ „μ]f؎Ϋ„F°`Θz/Σ9œOK‚P8W>jx<™η"„Ύcx\ƒP(9ωI)­,’ΐŽ;Q…ς€Q†χέΛΎ‘μ=^e°ͺω?±Ζ'44΄J‘_±“.@Τ1}€ψΊΚλ0ƒ,P— '.¨!…J¨‡ϊ’τΐΊ²χΜή ~†¦¨Ύ ϋ»@αύ|vbŸ­ƒΠwY†Nš°Όt;I6b'θ?- TaΫ 'τ|Ά=γΨφ΅ΕΓδ’ƒP ]FJ_rb„ΠN 4΅cŽ:xΌŸ}΄›£‰ HΒM  @’Oeπ8ΠBo½7J‰κΐνme‘=Ω1άNY/(₯kB/v…FΕΟΩώΓn4‘Ÿ΍`73aη›΅ Πτž! 
B±o½*Β/μX΄ƒPΨ†Ÿ•’T°ζ Lέ@θl„Ύ₯”­«?ΒΎαμoηξƏΖSΪΕoqμ’£u₯Γφ*v'ϊƒΊAΊηΓΊPχσ₯tΓ€Ϋv„ž`'ι/Ω…|«R²\Δ2P³χ‚“),Wi²5W± Ίf %σ$‹wΧ°”y”]€³”’¬ξίΩΕ΄€]ψαωαΕΆ%œΓ† J–υϊŒmΟ_ £]|ϊ&;ξΞ°›.Θ Χ³€ΠvΑ…γθ!v,½Ι.”`°$C°D’γDέχC‘G°‹ΏΩcUL tϋnaΠϊΫFh,y…(ά°¬Ru½p“•c€Π0φ=ό‚Ας_Δ_" η3(=Π/χ¦K„Π:}Ϊpά„±››ύJIc^ξ©*ƒ’ ΄€m{cέγλΩί;΄…LιΨρ€θn¦„Ζ°ύ’Ζ~†dΖν:½ž}o΄l|ϋhί‹'ΨM—’‹χϋ yϊˆ$‘K[0xβ€€q·nΒq`[šΠ8‰ύϋ ΗoΨΉ³ϊ9eĊΝΛ#&Ηι1w.‰ΞΞ&¦MŽΥuζL‘–FΊΠEβτš7Τˆ‰Ρ_ηy @ϋ΄Ν"ϋŒς‰‰dD“ α8ΰΓ§“I‰…Rb MH%³Σ» ΗYίk P'‘,Μο'k]Q€WTs²’σPαX«Ί#έk6!+%Δ‚νιέ‚¬₯Ϋ'gY·a€FΥjϊcώ²J δ"Π` °Ο„ІζemΫ±#Yωθ£dνΉs}Ϊm·‘D .‹~X(ψ˜#GH+ TΛNŽ5pΧ.’2>YyζŒp¬ήΫΆ‘’Υ«ΙκΗŽU°r₯O4lKΪε—“aϋχ ΗZNΦΣ§“Q‡ ΕYC·)₯GυB]ηzΰ˜ΧtpJ>ω`υςΙΪ\ϋ™Ρ+Ι•­{Ώ,= όxΏΉdwΪ ςΑΚΓΒ±ξξ3›-K>^s½Pxύ Ζ“;{ΝŽυαͺ#δڌΛΘΙαK…?ί{Λͺϋύτ¨Β±ΰo·­moςμΤΝBqή^~€7ORυͺΑ‘Ϊ1?¨\ΰζπχZUtQ@hυ•Τ¬BBƒ‚BΡΠ+‹|š›‹Іζ1ƒlΠέκ ³S'α%x/hκ‚R2 ² ±xΝλͺ«„cΑηJ_ΈPΚΌ #Π΄ή½ύšΏ{7ιxΓ ^€P8ζO)A*€Ύ·κ°”%x™PzOί9*€~$˜α•  ϋ³‡‘‡‡.––΅τ<hΗνΥγ;©V]ςXώ4ςzΗ•Bηhϊ⅐΅•B5ύτΝΜLB:vDECσ €ή₯θ AΨΣj@e¨ŒX !”Q*@;¬]+ @3Έ‚ έ·ΟSšή§@7&ω»v©κ…cώΎ  €ώuε!!θ’Q Ί+u 4½Ύxœ0€Βλ½  ΫΫυ!Ž\ξImOτLώT@+„–ΠJ‘! @¨_₯ y3+KP„P44ο-Α«ЀΝ₯hΫY³€5!ΙΜ€€,1Πj@e-ΑΛΚ€Xg.Z$@΅&$™Z”ώ 4­`Πg@e(,γ"€–ί„΄/kθE  oQν` ‡PψώoΠJ ‘€ώΠ7X! Ν[: χΘΜ€Ά›=›,zθ!a{τ¨ B^PY]π^PYMHΠΠζ_‚7Π „Πp€*δ²”O¨Œ&$YKπz•Ρ5 ^PψΫA¬g¦lŠσΞςλό5 I&κa…ΠίβΒ}5 z­€¬ΠΧ ŠІζ &΅ ©f³f€ϋƍκ³[οΎe ‰<˜t\·N(xΊ-5ϊφ%]7mŽΛ¨Υ퍕·|9©3|Έ Ž’± n³α˜1*ΤŠΖ‚l1ΘVɈΥxόx΅”B(ݎΆ]ΊΨhA¨  πži΅κ“[{LSvάϊM&’ω‘)δζΞ“„β€.MD₯’Ϋ{LŽu(YŸ­‚£h,¨K]Σ¨˜άΥ{–p,€υΝ-» ǁmYΧ€#ΩΪ¦§”XπωΰsŠΔΉ­ΟLRΠ$Ρί„t–Υ€V…οήo±Z !TΠ?¬! Νͺf@Ϋ‘‘¨™/·ήyΓhά8uωV$xξ²e€ιΔ‰Rb΅Ÿ;W- `–M‘IF,ΨWΦ―Ž3βΊλHέ#T(5dο^υ&’οŽBqFmŠOIQ/΄ –ͺ­`υhv9δΤ¨εjΣ­_—;B!ΘΔ‰Δί‘ΤWΝΔ‰ΖΡ²²Po)>Χϊ¦Θ'Ηω₯ευςT(υАEdaLΊšιυΐΰ…*¬ΓΏ"q$FΦρΛ0=V0έ@=‘sT­n  • Bƒ€ώY§JKEECσΘ|Ϋ<%Γ$Sˆ~Πξέ*„Κμ‚]‚Χd˜½(D/Z : š S΅Ϊ΅I£‘#-τCh„ “σ„…θώY%MτΎσ₯Τ€jKπ²t@>οθ9CJ ¨,&Mˆ^–¨4!ϊΎ%ψ:aδΊv-Τc:²…°??Η@+„jϊΏ˜Π0ryF–ŠІVqζoBς"€ΚͺՄ车ͺΥ€z @₯uΑC (Ј„’Ωρ1cΌ‘L†Ι7 ι}A&PXRφ€jMH ²t@e(|.YMH°ΏaωέK2LoιΊΰΫΦ¬C& $G“{Bƒ4…&€—¦*δͺ•Ba ~ΉΒt@OI%λκ5GECσ €ͺ2Lκ(NΑl£l•Υ/s§We˜dκ€B™A t@s―ΉΖ κ“aR‚Τ&€χ›dκ€ΚPY: ²2 ²e˜evΑ‹6!©2L-“Kι€ή™6²2@¨@_`]π•BΛθ€~•_ˆІζUυš¨ ^Τ…ξ22  =·nυ€(Τ•BΤZ @etΑC&N†¨l•%ΓδU•-Γ$@;ιTλ‚―Z@+9„†° h)P„P44θrι€ΒΌL•!Γ$s ^€V6Π †PΏ¨Χd˜Ό  ^Υ=5b™‡–ΰ”Κ€κe˜<‘Π„τ»™h%…P @Λθ€"„’‘yΗͺλ—ΰ— ž ²t@‘ IΦΌl•₯*+š΅x±ΪΑ.@eΤ€¦P¬Ο€ζd˜*BΓ}P_ θ_%,Α{ @Αχ›+MTP/ι€BΙƒL•₯ͺΥ€‚ Σiƒ¨‡!2 ͺ “@+)„Βό2 @_3tΑ#„’‘yΓ@†Ιί„$Ί5 ^P˜ο5….xY P/θhŒΧ€šθ€V„ͺ 1%ΛPX‚χ$$ @‘ If ¨ •Ω„YKΟ(Λ€B’™½G!Τ ΥΝ΄B¨@λS}5#£ `"„’‘y@Υ hbq1Y!Ψ„4γŽ;ΤPKπ2tπž=jΌΜ&$Y*« j@½ ZΊΰ­t@+B@ο…xΪηI邇&$/f@AΨ^FΌL••}Ε!όeΥ€ΚΠ65βΥ&$3ΐτ „–  • B‘³)υ?κU©R&ŠІζ%x5Z;1‘΄₯π˜4gŽkO;–Δ ’ŒHπz#G’ΪC‡ͺ@+ βΐτ"ΘΊƊ½μ2uzŒ‰―0€4™0AJ¬šύϊ‘ζ“' >¨” Ž₯ˆ-J„θwοΆLM¦€- Bύšߌ\Υ·*οΦAX}qν,u^$8ΔYV7WΝμ‰ΖZ—IV5,TΑJ$Όώςθ4²ΆqαX°`Σ†f…?@œIdS‹Β±@Η¦Yœ–Hœ­»“ΥcΤγ΄uΪ䜍=„nhΣύBAθ,ΘZ-Α;Π/+$ά ΠΥ+GˆžBίΟΙ!Υ‚ƒBΡΠ$[Έ>*ͺ PYC2gΑΛ’‡ A²»ΰed@A†IfΌ P-ͺΠ‹@Αž=Ά€Y„ζ_{-©E/$ šξ bBτ’ΠΗΖ–¦zbΰrMΚ)MHΪΌ¬ (Œ•ΥSŒ„…θ *šΥ7!EW5]‚w‘§s'“œπz†Π  T UΘρaφ€Y„ώw•B6wVHhp…B¨–%U‚‚Ι£νSl³<ύOa™Ӑ„!„’‘I4Έί©h5 –ΰe¨L&PY: €Κ˜„Τqέ:ΟΙ0ΑmΝ΅ŽΣ-„κ»ΰyfΗΫ@huE’W|Ί”G† TFdf½ ²š@T«….ψc©ΓΙΥ­zΊ†P}<Ομx!zU2 ε£„BtWO€ς̎„jϊ;4!Aσ7!%JΠ+|PZ’Χ2 ²&!{@…›`ΧZκΣ*3€ͺKπm …u@A†IΆ¨Œ (€tuΛ••• °- DοYmΨ°”½υλ€κΤ%„jΠυKΚ!οyHT6€Κ’aΊΉσ$iKπ2e˜ σ,k<ΐ¬LΠ³Ί.x7z:Χ§j’„N7P7 : ΫL„θ/0„j]πj θ+!z§ϊƒn<Ομx Z‹z&»>k֘zkδH!Φω»ΤδŸ%™ϊۊ―‘l9υI•u ^Π‹]:Ξ½¦* @eλ€ΚhBZ­PΠ5tΑ;…Π‚}ϋ,u@Bθ-ΪΟc:  ε³3ΉΏ'u@oλ>MJ’¬%xOκ€°Χu ‘ω΄• ‘ –: N τw V2LBύ2L―štΑ;P@Y (ΟμxI ΧζW¨/e?Χ‘ώ‘"wke· Η³Μ·φϋXκΡμšΑs~₯ž€ϋ}ώΪΊΧλ3ιΪ A„ξχμοb³=/";_‡Ε4{¬žα½¬>wυΣΕ 2|¦`έ~ˆύώgΨ/ΪcΡμωAκX]Ÿ• ^PX‚χš½Μ&$UτθP ižΠ3gJ¨‰½… ¨  : !τίΪΌŒ ((@ŒΧTζ$$™*£ –ΰ!κ%…&€. 
@­t@@θ#ΩΙΈΨ$KPIΔTΥ΅’η…POX‚7Λ€^@εε…PΘ|Bτi Π/ΗgQŒz:υ©4₯KΩ |dυ›\@5‘ώ υ™Ÿ’ސα_ΰΈ`ϋt 2` ΨsίdΩg XίfKάί)Ύ‰aaΤ―aΩTxΝGΤϋλΆg·βϋ(υϩ̞―Αj6‹ ―ύžϊΖa`½ΨM†φΨv#bfηŸπ½₯ώwꍨocKς`c¨ίΖ’-_P…ϊκ‡ΩΟ°―` {φ|ί“lόΔφ_ΐ3ξαl#IΣμlrω½χͺKήn΄-ArΞ=χΕ}ψ0i2a™{όΈp,Π΄˜]ψΐΒ±@Σ.Ώ\-3P˜†ΐ.^ΫsκE· κw!‹ š’BΫDγhMH°o₯κBc»u³œ„Δ ‘)‹kd£ey}αnuωά­Ÿ½’¬m܁Ό6o§PpX‚‡εό7/ί-랾sΘ΅—‘·νŽucΗ δhΡXα8°-°’ύ’±Ξ/ά£f@΄eΔ‚ύ~zΤ ‘8―c©KλT ¨•(/„Ξ¨“NΖΔ΄³’—‘~΅Σε…Πiek@/0„ϊu@ν&!qChΣD2¬F=ςTϋtΩρͺ ]IύoΤΟSAξ,e—S†ϊUΤΫ°εφΧάΥbpu?υG@6g–Λοκg€Φ€Ύϊ§,YΚSΨγ9 XaΙϋ]κ‰μυs¨E½۞£,ΙΡ‰ύά—]κλx&±χšJ½%{έ$Ά©Τίgοgf‘Τ;2˜N`Ηύ~Άύ`SΨϋeΝaηυ ˆ‹Ω~hΟ~~”}ίb™ίDύ%C)ˆt½6ͺAj*ΙX°@­%tλ S‚ 'Ό! D—+~Ψ0hά8u^Ίh¬Έ!CH‹)S„γΐΆDSπ‚ŽsΡX°jQ@ƒ:WΡXΠ¬Ρ§p¬tz,E΄jU¦ Ι-„B4"+Λ@y ΄θΰA—žN@΄CB"Ω•9XOξΦW5,TΡχ€‹Ε_Ÿ­Β,€£h¬Ε΅³ΤΜ¬hΨ–Λ£ΣT؍΅;m™™’6όˆΖ‚LρܚνΥIH2bΑvνJ(g[J?²jtIRήK(δPhBκήX­ε™oχœΥ­ΊΨAθT @Ÿ™$Ž Γ΄©³BξΚ7;ήξρΟ*€Ί;…‹ν"(+¨S€ς@(,ΑOŽjHh“Μ5;ήξ9ΝΞ&Υ‚ƒέBh »/δ2i%‚Πwu°”Ε²ˆ5tΟIb™ΜΊlΰ«[zώ7L}ΝθΧΤ;³ηC¦1_—a+υΡΊη‡°&§™:½Kχ8έΏΨίpˎκΑ qΛܟ5dΉ!ΛϋͺΝgO£ώέg5BθΫΊγ%‰eo£tεί°ύΥ’νŸΖΊΨυΨσΫj ^P˜„$*Γ€uΑΛhBOaAV’Μ.xΘ€Βd%KπΦ―'έ·l‘c=e4!Α>’"D: ¨½ ͺUIΦζΝ\σήν„θ‘ Ύf~>Χμx+-¦Z›^‚4&I: 2Ίΰe-ΑΓ²ω}ζK’‡&$X‚—Ρ„Bτ ^(eΩΰσ‘aK<£ MH]Y΄VprOζ[(,B΅.xΘ‚ς̎·ƒΠcIz΅x+…%Ζίͺ…*δ@1pΤt@Χuδ›oλΗε YU€°`ΗκΠjAΑδΔvεf9ν τΗ’bu ~FtΧμx;ύΆ ΜŒkHΒ‚‚ά@h(Ϋχ³εΧLδΞ2ϊ„ξηΡlιωiΏΘΐ΄½BdΎhx>ΐΩX‘?³μ©Β–σ!Ξλ†ηΓίeƒB―΅€PΝ§,n$Ž2@ΥΗ}‹ϊ{,iθBυێůn‘]YΖχYέ{?Λΐ΄K :Γn“  2d˜@[N*@— ΦΈj*ST&€ΒητL (έ& @A΄nίΎ*Ί…P@{υ"dyfΗ[A¨  YYD«P¨υ€‚€Κ’a’Ω/[ˆ^€ΚΥ&!Attl;r:g²kΒd˜xfΗΫAθ}@GΗ΄%‹š›A¨Ϊ]]!»)<ή7ά=8j: η›o ²©ΛαrΌ Ώ Σ†„fδI›εσς Tλ‚šΖΰ™o‘ίQ]ί„<Ψ6ΩνrόD#1 tž·„PEΉŒ-q74ρ*mΖ 2ΫδΉ& χ-υ~&Ϗβ€Π9l‰;ΨπxVΧz«Iάϊ6ΩoYZΘκD[›Ό΅€(4!]̚tȟ• ηΖ―•*@e Ρ{M†IΣ•!D―eSeh‹ˆΈRBτn θAτ¬AΤ „ϊ΄m©Žz„ž€εκθκ₯»ΰέ@(θφne»ΰέ@θΟ+²ͺX!/OuΥϊgΌAˆή „ώXδƒFcΌΥ¦*½Λ'&ΤΪ(e'³šΞ[φΡ- 7dαυGΨσ'³ξt…-E749iΘ}μψ\hΈ)Θ1ΤνΒϋ f¨Ε„&&…ΪΓ`R³(¦μp'ιdΓ{Žy{γ»8”iΊŒnOέλ#ٍ¨&Ζ’buϋlλŠ¦”Υ@uέ„δΟ€ΚPΩ: ²2 ²„θe¨Μ&$ @=%DMH€Φ―_FΤ)„Ϊ Ρ;PύΌŒIHέΨΌ‹PhΞ‘Y*s­έ¦JPYMHš¨¬IHR„θ——Τ€'!ΉP˜o₯κB΅ θ9C,„’Ψκee˜œBθo@―ξn.ΓδB@W•Υ&ε€P-ϊΏ:&BτN!TΛZšΙ09…Π°lͺ@BΡ.UσΛ0©£8%,Α˞„δ΅Qœ^PΩ£8‘tΨώύΒ]πY¬ IΏοBνΤ „66LΠ Vϊ 5 ›[v“’0ƒΞnΡ ¨Φ/³ I€^—;BZΌ¬Yπ³ ’P’οΪ: Ρ—Μ‚w ‘°?Αb’=’4HΠ±š’3šδͺΫ 2LϚΘ09P΅ ž5!ρ̎·‹υσ _ΤLΏΥj@PΤ0ŠΣ)„€.ͺέ„hΧμx;…&$¨}Ϋ ‘h—*€ή"­ ^b ¨ 2`Vf’LνΌaιΆy³§ΊΰaIPΦ„d…Ό sΰcaΌ(„ζlίNBcb€Κ邇&$5 ^Πc]&K[‚—  ²ΊΰeΝ‚Χ(tΑ?j‘Κ ‘Λ Ι$uόΩρvΊΊq±%€iέ•;P^…¦!θ€|ίμx;)§ΥΕΦΣ™l ΄”¨€:ΠiΡ U}@@(Œτ\¦6!eB¬ ­RYu @«6h@ –-S³rnd…’TaH$xρΪ΅$Όwo’+ΈMΰŒ5ϊφU»ΧEcΑφ€θ;ΤoŠΖ‚ιE lϋM4,›7=Z88L³J;V,ύLνΊw/’ΏϊjK(δΠμmΫH΅””25 N!T­ΝΜτAB΅Xr]αhuΤ€[‡ Asj$©KΛ"qΐ‘fsVx[5¦h,؞Q©δ†γ…c€ό’ΈLu"’h,Θ4lP |C³ΞdM£b)±`4“  &ŽΝ’ΚP^=ž1† ¨ΩΌT’[=’4Xe  RLτMHn!2 32ςΨ8ΎΩρvρ.ψIφΣ™, Τ Π„τ²€ςB(θ€v­RΫ@y!τrσIŸjρδΌΝσBΡ.+%Γ4λφΫΥ1šnf‘Γ4€ι‚qΐϋξΨA―.λ‹Δ™}χέ*,B (Τ¨Šntΐ„B\ΡX•ν²q£pΨG-†μ¬h¬i·έ¦‚,dSEβΜ’ΫΤΌ¨H½W΅P¦˜.]H~ύΈΖvZ¨‘τωΩΫΘ+svΈφ;{ΝTAθ…[…β€__6Ιί„$‘'2Η©£8΅P=„mέM­ͺV{n!Τ "ϊΛJ_’Ύ,ΐ„Β—?γΓΒό5 n!τ§’b”gvΌ„ώΘt@ίa]π‘h—²UΣ–ΰ!*@euBˆ^FΦR€j2L²2 ²d˜d(Μ‚Ο8°Œ½[υΛ0隐ά@h™IH2  2e˜dι€ΚΠ[ΊNρ”(@:d@½$Γ: Ϊ|Kέ$$· Bτγb’JΙ0Ή…PΘ€Ž6ΡkMZΌ± Ι „ώfΕ)‘Ώ0PcRy±Ύ*Pͺο‚w‘?•P·ϊ=Λ€Ύ£“aBE»T Ίΰ)’FqΚ’•K¦¨L•%Γ$s§ 5•aΚ¦p¨˜θ€ΊP«IHN!TΠμlU†©ŸͺzPΈ–ΰ½&ΓP™5 ²d˜ ΦΥK5 ²&!€jP’Χ7!ΉPXzo’κBO°Pc,€ΠqmHP’‡%x=Μ9…PhBRe˜ΖσΝ{·ƒPm’YΌ]¬?Χ(d[7…B΅@_14!9…PMτ “.x§ 5 KΩ,xžΩρ‘h—€Ά‘Π„${ΌL•Υ„δE „¨t5tΑ;…Π˜Kπ&2LN Τ ο8θ™&$/¨L!zͺ5=AiR ™Ω€j~¦2 ‚Ž(―R«–­=„‚hMz‘Ίv-Χμx»IHZ<:z <Ύj [!z…Μg―πΖδތ1\³γνž5 «6°œͺΤ!Ά RΘSνa―<….ψ-]r€?ίΌwΫIH+²€@!χw+·‘BΒƒƒΙΣiφΰX„ΒόŒθr{Λv\³γν‡&€ ‘ ΘΩv©\³γνžs>3‹T v‘αŠRΏŠόΟ₯ϊ?§)­βι*­49οΌS©!4·S'rβ΅Χ„½pύzrσ3ΟH‰ΥyΣ&rο+―ΗΉϋ₯—H>…ΫtΣΣO“[ΆH‰΅μYŸΒ™ŒX‡Ξ#S‘kγ‰dΥέw Η9ώβ‹$44T͂ځcyͺι€B#Ομx;ΝΩ±ƒΤ ₯ zqšΤ(ωθΔ9aΏ­λ)qΐoΜM>8ώ˜pœŽŸUcΙΨ¦7άEξξ7WJ¬·έ@™ΌVJ¬s‹w’'–ν’λ‘ρ+Ι3kχK‰BR#λΫBayͺι€Ž‹Mβšo‘Z θ8κVbυ…1MΤiHΠόγB@‘ώr{7ΎΩρvͺι€Πς̎7‹•C!ΔθΏ-,t ‘0Šj@WΤiΚ5;ήBdS• Ομx;ύ*ηΜϋο“Π°0!-ΨΏί―Κ3;ήBswξ$1έΊ‘δ₯KI…YCǐoΟ½.μυš+%ψρΒIδλ³―ΗωϊΜ+δxΡd)Ϋτ·[N‘G‡,‘λύύΗΙ³³―’λυ΅ΙŽJ‰υττ-δνmΗ€Δ ‚Πǘ(€(Ομx;Υt@!¦έΔ$QΥd˜xfΗΫA¨^”gv|  TΣ…PžΩρv 
ͺι€ς̎·ƒΠεqΘS)i"ΛρυB+΅ΑΤ― Τͺψ΄o―£^!!!T"„ͺͺӁР ±@αρœ+―DEυ$„‚½^TB: ‚PU΄G‰ “„ώbaͺ(Υ2 Z’„~―J:5&o3PGίγ9y’5‘‘?"„’!„"„"„š@¨ z&·ͺ(ͺ?#„"„zB!σ9!Ά½*HΟ3;ήBΝd˜‘š¨Ύ Ι-„ώl2 Ι-„Β„€–±ξ tq|r.)kvΌ„j: ηuΠιBΏdϊw  “BΡBBBΝ T­ՍβP@i¬]cB(B¨Χ šΜt@έ@θύ™γLu@eC¨¦zf,ίμx;p4Ξ‚PΠ₯ iιBa ~qνκBΏ+(2•ar‘_°%x @BΡBBB‘ D_t@7mβšo‘ώ ¨‘3!!ΤK ςKͺ¨ l:…Πϋu5 <³γέB(d@―5P7 ΊΎ£Ή6©Sύf©―£ώνYΞ—γbKπFu‘ ΠΜτFZΧμx;…&€…΅“OuŠІŠŠ*BA†©vοήͺ =Ομx;…‘T[‚GEυ"„€N‘ϊ¨h:P;• ‘Ώ―VȞžζκBA†iC'ki(' Ί8_!οΜr^ϊ£ €:…PX‚_n N!κI?ΙΞ•­ŠIh‘‘‘„FΡ“ͺ€:Π˜.]|MH&ŠŠκΧ^Ν€>j™Ό:Ώ~Άε(N™ΊŒBθžφBτΌ πΉ‘£½6)/„(*  N t…=c ¨[ΥP­΅’η…ΠΩ1,3‘h‘‘‘’ 4ξΟκιιΆΚ ‘™0«gd˜f@BB½‘'³'‘žαl'!ρBθ©#Θ€šΝΥΞzžΩρn!τ5 ™‘ι£Ι3;ήξ9?―PΘ”τςΕρy 2 £Ϊ—P^… θˆυl”BA†©GΥΪΆΚ ‘Ÿdη‘αu,! !!!T„B|LΧj&”gl§„Βc0Φ³NώΆqBB+BA2 FqςΎ·ƒPX‚έΦ’;ΧP· 2LΓΪρν΄ƒΠ_ι{¬ι YY|c;ν τ[ΆΏ²ΨΉD“V:‡cΤfy u›“"r͎·ƒPθ‚ŸέP­) ΰΨN„P4„P„ΠKB5¦Τ5kΈgΗ[Ah.Σ…Xε͎GE­(}Œι€>@αQBΥΠθΆδφΤ\³γν 4ίB‘tW_”wvΌ„ώΚt@A†‰wvΌ„jMHuͺκΧMJγžo‘?¨: MΘ[ιΩ|³γm T“az=-+Π³γ±& !!τ…P½(Ομx;ΥwΑ—7Ά!!΄’ τ¬_t Χμx;ΥΛ0ρ̎·ƒΠc)ΓHΠpSύΙ0Λ7;ήB: "ϊ “az{–s±ϊŸ : "ͺι€jKπ"ͺΧ噏™P4„P„P„PjԁP£(B(B¨!τ Σ=Νt@E Τ¨*‘Η’‡©ΝQ‘ Κ@¨™ “[5Σu ‘€Ύ5ΣωΔ€ŸL„θέB¨YΌ[5κ€"„’!„"„"„BΝt@έBh‰(B(B¨Χ ΐΣΨοBΝd˜άB¨ OΜ,Sͺθι±|³γν ΤJΤ „jKπzε…PMˆώqC’…šΝΧ 2Ln Τ¨ŠІŠŠ*B 4ЍΉfΗΫA¨_Τ DŠκ%… (θ€ž2d3έ@¨•¨½%e™ηPccΤ€š¨έΨI!ONΰ›χn‘ίZ(„€.‰/  n τ{–}έDΤ)„€šΙ0!„’!„"„"„J„PX‚7P7j'DŠκ=Γ2 §Lj:B¨Ά―Ÿ+οBoMN&Ζ%ϋT‘?,χΝ‚·’w‘vκBύ“fςΗ τςςT}ΜB†Ι „ώΐj@_·’w‘€~l"Γ„ІŠŠ* B«Υ­.Α›¨SM]»Φt'B(B¨— ΄ex¬Zj%DοB$ ²P§zSς2>6I}2MΑ Ή²‹½=/„ήv™o ή @@θΙΡek@y!4ŽžfΖ&X¨=Ω6E]Ξ·Σε…ΠΣνRΙŠŠvIBh:=¨oxι%aΟΩ΄‰\ϋδ“RbnέJŽΎψ’pœΓΟ?O2)ΙΨ¦έO<‘­ŒX[OŸVA[F¬νgΟ’ΡGJ‰΅δΎϋΘ‚»ξŽsδΉηHHh( ©QΓVˆή „Φ*,΄P§:ΉΗ@ςΙ  ϋ½E“€ΔΏ3cωψθΒq>:ςΉ3s΄”mzϋͺ›Ιƒ=fK‰υΪͺλΘΩQ+₯ΔzfζVςάΌνRbΊ”ΌΈh—”X‘τΨͺW₯†Ώ IBgSp\«₯%€:Πiρ©κrώsΥ 48H!Žα›_„^–XΎ=/„‚½€ΪAhυ rͺ] Χμψς t`xrΎ‘y.mšHFΦ¬g+D‘ ΄–φή JΗjˆ@h‘υSSΙ ύϋ…½αW>{φH‰Υxρb2PBœϋφ‘ JΩ&Θ‚6‘Ϋ%#Vχ;I‹eΛ€Δ˜MY»VJ,€Ό-[„γ ΌφZLO’ p͎/ogxf¦*ιΔ3;ήξ9)tŸ“A-2Θ©A‹„ύ†Ζ½₯Δ?RΏ;99π α8'.TcΙΨ¦ΊΟ"7΅θ/%Φ}§’Ϋ’‡K‰uWφXrwξx)±nMBŽL”+$(˜{vΌέsΘOΊTk¨.Εσ̎·{,Αw¨R―ΤΌΫΩρvΊJ!Λ rγ@ΎyοvC θΜΜςΑΨ©D“S…%x°M2Χμx»Η_@†Φ¨GžOΙΰšoχœ—Σ3Hz¬ @θOQJ•*αˆ@hΈΛρ—τμx;Υt@γϋφεšo‘±5(Μ„βr<.ΗWδμx;}€5!‹Mβšo‘Π„5 “©‹ΞŽ·ƒΠ_Ψ$$˜¬Δ3;ήB‘–ΰA£”gv|  š–Ε7![΄ΰšo‘  ΊΆn3ΩρvϊAV.UŸT Ζεx4„P„P„Π@A¨Ϊ„Δt@yfΗΫA(θ’FwιBRW¬ΐšP„PΟB(d@@a žgvΌ„κ»ΰEgΗΫA(Μ‚_ΧΡ·Ο3;ήBΏ]ZRΚ3;>Pͺ—aβ™o‘Ru@“Ώεδq͎·ƒΠ)€Β„¦W3°& !!!4Pͺ¨ IB@aF}ή]ؘ„κYU4Ί­ΏTB@5PΡΩρv ]πt]π"ͺPžΩρ‚P @_KΛδšo‘š¨V*‘€BV“ΠBBB‘f: n!T ΨκUΥt@Οκt@έB¨Q4Pj&ΓδBΝt@έBθ—‹"Ζ„~oP5“ar ‘ca\c@±; !!!4@j%DοB‘4¦K?€"„"„zBKt@§p͎·ƒP3Π@@¨•¨5f@E τί‹}3κέ@θ¬Τ¨κB­t@έ@θG4Ζε@αχ(Ρ„†ŠŠ 5.Α‹@h&…ΝθNHύ{‘N(B¨W!TΠ3&’NN!τφΤj3Σ3&2L2!žg₯κB5=?“ovΌ]¬―—(dE‘BώΆΐωrΌ6ΦΣ B(Τ€Zι€:…ЏXτ+–EECEE „ζξΪεΛ€ξΨΑ5;ήB³Άn%Q:Ό={P¬!Τ³ϊ`–― ιŒ…¦¨KaφY• ‘ Γ΄Άƒ΅½ύΞ@B(tΤ/£Ϋχχ˝ׄ€.²’w‘°\YK+!z'j\‚— ‘?!„’!„"„"„}YTdΊοB#‹‹I$Πόk―Ε‰I‘ž…ΠQΡ‰₯šD tz|šš΅PY 2LvBτΌ »Δ@@θς"ίΆύγrηI(μΑXO»IHΌ:1²*ΓτivΧμx;ΥΠ4ŠІŠŠ*B³wμ Υ32l”BΣΦ―'α4–Y!!Τ+z_ΖX3Ό±-€ςBθ ν/#Γ£ZS°Ι5;ή-„‚耴ς'!ρ@(,ΑC¬σ3ωfΗΫ=η«% —R’u‘Π„4Ίf}ΛYπN `±g΅x[ε…Πw2²ΙˆΊ¦P\ŽGCEE•‘•ѝ;«Ξ3ΆΣB‘ ©fA©ΣΏ?ΎGυ,„ΒόΘVά³γν š.«Υ’loΩƒkvΌ[Υj@‡·γΫi‘ί²eσωΉ|c;ν TmBκΰΛΞ:•hΊΰ!ΫΘ3ΆΣBΏΜχι€Bφ’gvΌ„‚ Στθ²²N3œ†ŠŠ(՚@B‰wvΌ„j]π©λΦqώGE½ΠͺΦ€F·U›‘D!tR\2Ή'}4Χμx;Ν‹nl ‘ZόψgΗ[A¨Ύ”wvΌ„€Β’ώ'σλ„€€ϊjZ&χμx+Υ7!q͎·PhB‚XΙΜΉ ³γBΡBB/Iυ7!]}5Χμx;Υbς̎GE­} K›„4…kvΌ„jϊTΑ Ωρvzcϋ!€^h„)„€n꬐sγωfΗΫA¨¦ͺ-Α‹@(θ NΕκ`ΠW™¨„ϊd˜š¨πΘ3;ήBυMH<³γBΡBBB]@¨QTBU& <³γBB+Ba’^†IBoKN&Δ΅χλ€Š@((ΌGvTBύΩ "j&ΓδB‘t@5u‘?hi!z·ϊ/ @³rΉfΗΫAh‰hΧμx„P[kB=Ξδwρˆz‘‘—8„ϊu@u2Ln!4sλVŸ¨Nˆ!!Τk P£¨[-ѝΙ5;ήBoJBζΡψ/Ν)S MH›;—mBr‘V: n τ¦ϊιηc;A†i‰‰½ύ’-Αdar‘f2L‘BΆ“ϊΝΊŸ«Q”ϊD=„P„ΠKBΥ%x€Ί…Π,ϊχTu@ Bτ‘‘^‚P+P7j₯κBoNͺώό"Pccθ€B θ+SωfΗΫAθwΛ¬u@B¨QΤ „ϊ…θMΊΰB¨¨Sύ˜e@]πB/V‰¦Τ?§Ν~ξEύίΤ« κ!„"„^’ͺ―ε™o‘™LˆήL!!Τ+ :*Ίm™Qœn –ΰ­t@Bθ1  ³κf”ΚŠj Π]π―Nγ›o‘ε Ρ;P˜„dΤε…ΠΛΝS3 oXΘ09P€C;P'j ‘ΒAύcŸ`·Pί‹˜‡Šz‰BhΥΨXΣ ¨MZ²Δ·o‘ŠŠκmZ=š ΡOαšo‘»ZχVk@­t@@θΡ€ΑeΤ‘ΑΎ₯ξΧ¦σ͎·ƒΠψ ρΝ|σήν τ‘Q 
YYμΕι$@h=Μ‰K°Υε…Π{[·/WˆžBο£±`9ίJ—γ…  σzκ1, š˜ηqM*,$ΫΞφ”΅kΙΪ‡’+sγF²υΜα8TνΧ¬‘²Mk|dmΪ$%Φ’{ο%EŽeΔZvβ°oŸ”XΣnΉ…LΌι&α8Wž:EBBCIXd€*HΟ3;ή΍Cj”Y‚w ‘γŠzͺΰ!κwf‘όΦ€‘δ­­7 ΗyλΚΙ­ν‡JΩ¦—–μ!χO–λΩ9ΫΘ#ύ/—λ±Ρ«ΘΉρk₯Δz¨χ\ςΤ”MRb…‡ϊUjZŽβt‘3λd¨0«5!‰@θ”Ϊ)j<³y€Πΰ @ς̎·ση'+dH[{ε…ΠΣc29­l (/„FΠΏΕs)ι\³γνόΑ6Ιͺx|yBτ<z°Iu²Τ•ς̎$„F*aοοTςͺ_„LΣ‰ϊ?¨Ο€~žz(bžΗ!΄If¦ ’ή|ιR2ξϊλ₯Δj½b™zμ˜pœΙ7ίLš.Y"e›ΖΟΫ%#ΦπC‡T8–kδαΓ$ŸΒ•ŒX}χμ!=wνŽ3εΖI0…PήΩρvΟ¦ͺ))j7<Ομx»η΄[°€“aν ΘSΣ6 ϋ±Φ₯ΔΏ‘Iς䔍Βq Ζ MϋHΩ¦3Γ—“ΫΪ“λδΐ…δžό Rbέίey ϋ,)±ξΚGAtž”X!τΨβoχX‚οT΅>y(kΧμx»η@R—ͺ ,Ηz:™oχψ–)dNΆBnΜ7οέξqašMc=>ή],'³γνX[«>9Ω6…kvΌέγ°?0Όy)5“kvΌέsON%aAAB₯„}p‘Bh Ά$υ•ˆxΈΛρؘδB3™(LBβ™o‘Ι«V‘Z99*„βr<.ΗWδμx;—L&ΧNαšo‘7΅χuΑO‹Ožo‘Z θ‘ώ|³γν T’?ڟov|  d˜ ‘ιΪΖ­ΉfΗΫAθ'Ωyj θΦ-ΈfΗΫA(hœŽ¬UT₯η1\Ž·΄}Τ‘ή!!!Τ„κu@yfΗΫA(h=ΞΣΧ¬ΑšP„PΟB(tΑOˆm― Ρσ̎·ƒP @A†ItvΌ„j£8a žgvΌ„Bj&DΟ3;>Pϊ%P£Ι3;ήBυMH<³γν τ΅΄,υ}^IΟΔΖ${[Mύ,υD<„P„P„PΗšΕt@5!zMY½šΔcΌ`~lLBυ,„ήΑt@΅P½YΥΝRTtvΌ„~Ητ|³γνΐd˜V‚θ|ΎΩρ‚P˜C2!zΥt@ΏdMH" ώ³c©cc’₯SκοQx‡ŠŠκBΝt@έB¨  έ»“B  ΨκU½ƒι€>£«Ϋt ‘ǘ¨ ‚Pt¨„jΩT}Ό[-ΡΖQξ ²–ϊIHn!τ @uMHn!τ<}ΝτθςuA!vΗΫΫPκ?SŠzMΔ;„P„P„PGκPƒ “MY³†ΔtλF @‰&„PΟBhΙ$€Y\³γν τ–”a¦2L²!T’7vΑ»PΘ€./,+ΓδBAKty‘BΪΔ9‡Π―™¨Qˆή „~b‘κB߀ϟݐ|WPˆMεtΒ7ǎx„P„P„PΗ 5 Q j"ΓδBUνΪ΅€"„"„z Bo£:>Ά=yΖD†Ι)„€ZΙ0Ι„Πο–Zλ€:…P¨P3&§ͺh‘/¦ΣεxMˆώ“IHN!ΤΈ/‘ q:3&|SPˆ:‘h‘‘‘‚P΅ ©sg ¨„Z(B(B¨— Tλ‚ΚBΤ „Β(ΞΩ6 ) BΝ–ΰέBθW‹²†5!ρ̎·‹υw–uZ ΩΚΝΤ)„€B=©•¨…&€94&dhQ¬ !!!4@Z3?_•a²ƒL^­™—§Ž5P„P„P―@θΰZ-Ι$@gp͎·ƒP’Ÿ[/Λ? >PΊ΄Π| ή „|‚ ΣΗσωfΗΫωyΎlκWKœ7&­ͺΫL…F} ¨[S«Ύλ_6BτΌ Ϊ€™΅šͺ„ІŠŠ*B36o&Υ32ΚLmΏ|9©AOΞVŠŠκ…eσ5›Ϋ(/„n7Hλω‚ €Κ€PΘ€ŽM.„,NI·P^…:R˜ͺΪ’N»γGΧ¬οP˜¦Τ§zΌν$$^}!%CΠd ‘h‘‘‘ –ΰ#‹‹U-PžΩρv ]𡊊HέpvovΌ„~Λt@‘”gvΌ„B θκb…όk‘sPΘ0.ͺݘ|•Λ5;ήBa4!AV•gvΌ„ΒοAτ½Μœ 2;! !!τ’„Π¬mΫό: <³γν š „θyfΗ#„"„V„€BΦ„θyfΗΫA(,ηΟf: <³γν to›Ύ€nh„)„ώ‡5!€ς̎·ƒΠo™ “Φ/‘Π5 _.v.V5ΣΥ–ΰE τS€ς̎·ƒΠσιΩ>PΊ}<³γBΡBBB]@(d@#uBτ"šΊvm©.x„P„P/Bθ*€–LBΠ[™ Σ‹ (E tiBΙ‹n\BcοBU-*­κBo‘/–Ύ”BAζhΎAΤ-„~ΚfΑλk@έBθy¦ϊ-“aBECEE „fnέJ’θ T―κBŠŠκE½]Υ-Pυ逦—‚I·Ί/±Ÿ  ―t˜W¦&τ?2Ln ΤJΤ „€Βvι»ΰy!& ™ Ρ»Pm ήΨ„δBα3 : ‘h‘‘‘’!ΤLΤ „š(B(B¨Χ :׍]πn j@Ν&!ΉP=€“ŒKπ" °ΈΖB†Ι)„j“ŒΚ‘PͺŽβ4Ρu ‘ V2LN!τuέ,xžΩρ‘h‘‘‘. 
Ίΰ­t@B¨Ω(N„P„P―A((Ρ?i"ΓδBoVgΑg™Κ09…Pm ^P=„~±Θ§j N!`t@?šΗ7οέBΞj@2L<ϊ~vŽΪ„d%ΓδB?1Τ€Š@(θ\ Π ‘؏†ŠzρCh•θh[!z'Ϊzξ\΅ šxfΗ#„"„V„&T­₯f@Ÿ΄Πu‘W·μIζΥΛ²Τu‘Χ΄κU@5 VΘ‚\λIHN τh{u‘w υuΑΉΨY,€Π˜Π02+6|•Γ5;ήBonήVΝ¦Ϊ ΡσBθ4<χK PΜ„’]rΪΆ €l¦ΰ!κΙkΧ’•< %VϊϊυdΣΙ“ΒqΦ?ςi·z΅”mZ~βΙΨ°AJ¬ΛοΎ›P ’kΡργ€ο΅ΧJ‰5ωζ›ΙΈλ―޳αα‡IHh( ‹Š²Υχδ…Π„Q£TI'+u ‘c Ί“σ›oφ;3FI‰~KΫΛΘ› Η8ƒX2Άι……א{ς'J‰υτŒ+ΙC}ζI‰ufΔ rvτ*)±μ9‡<1q½”X‘Α!€Q΅H[!z^…ϊΟιρiδω’Ω\³γν|"…β% σΛ¨‘ΑA yeίμx;n²BΖ$Ϋ(/„ž£™™>&§±Bk‚σY\³γνόD›duRyBτ<ΊΏI2>²%€ςBθK€Jp°„F*aοοTςͺ_ŒP©„ k’Dlι§ΤGΔ«Ϊ:/¬’π(κmW­" οΉGJ¬ ΄+ξΏ_8Ξ2 މ+WJΩ&ΗΤuλ€ΔšuΫm$gσf)±ζάq‡š9–kΜΡ£dψ‘CΒq–ίwŸ ‘Ό³γνž: amΫ’μνΫΉfΗΫ=§νόωD‘'οΡ9]Θ««ϋν)Γ₯Δ?ΦzyeΕ>α8―,ίGŽ΅$e›ž™΅•ά•3NJ,½ΊΟ’λΤΰEδτΠ₯Rbθ<œ΅RJ,€PήΩρvρΠNUλ“G²'r͎·{,Αw©Φΐ2›κdvΌέγΠ?+K!wα›χnχ8,ΑΓ$€§&Ί‹εdv|y“`zΡΩv©\³γν3=‹t«Z›ΌœšΙ5;ήκρχsrH«κΥ@£>!΄΄USBF+AΏ„)Α?7TjΕ βαr<.Ηcc’kΥt@ Κ5;ήB“ιΝHtQ ’ŠΛρΈ_‘³γν TӝQ'kvΌ„€.K(,ΣU/B5PX>η™o‘šθCψfΗ B?ΝρΥ€ήΠ¬-Χμx;}‹ι€n§οΗ3;ή B?€Ϊ’@gPΒεψ«„Œ ;δϊί_ͺ(!£\ξ4„P„P„Π]π<³γν ΄Nώ$kΣ&¬ Eυ,„ήš2\Ζ‹ηp͎·ƒP}’θμx;Υt@A†‰gvΌ„~¦Σε™(Υj@yfΗΫA(Ρk: <³γ­ τ£ά\\@/Z₯:œξ2‰h‡ŠŠκB‘μš΄.xΥ΄ψπalLBυ,„€ΞΠe,E T•ajX„(ύ†θͺbί(NžΩρvΰψ™¦Ί˜ovΌU,hbjεBυΚ3;ήBa ~LBb: n!τc  M«Uύ/P»θΊγ€œΜ2 h‘‘‘ξ TΝ€κTB@λ@ŠΒξx„PΟBθ-†IH"jΠ@AθΧLTί„δBΥIH…₯e˜ά@(hnC`Ν„j£8ΏΠ5!Ή…Π7ό: \³γ­ Τ°?KΒ5’Κ„t "B(B(B¨kMPX‚7tΑ»P#€"„"„zB©: ™e‡ά@θ~ „~m‘κB³ΰέBθΏKτOκΏ;…ΠΏε”P·ͺι€»ΰBθΠ„Ÿιwκ3%]σ/5Τ€ŽFœCEEu ‘j’…¨S5P„P„P―A(θάzY¦2LN!2 KLT6„jϊα<ΎΩρvΰ]π j&ΓδB Ίϊ+N τo†%xυ¨I,' ]π­}*+zQA(Π‘@¦ύ!!!TBύΊoΧμx;ΥΧ€’X=B¨W!Τ@Bθ>‹ ¨l…Π56BτN b}±ˆovΌ•Γ”§ό@Η4”ϊ Όϊ© €:…P¨c N tRL#€!„–ΠQt‡όΔ2 ΈŠŠκBkΠ;~;u‘5rs-!!Τ+:°fsu§=/„ŽŽn«vΑΏl ² ti‘oΩόγy|³γν|E‘/ϊΕ"ΎΩρVKψ kωj@€†°λ„.ojΊοB‡Χ¨GfΖ$˜Žβt‘η3³HΝΰΠ@h₯‡P–Υ(Κ0!„"„"„ΊƒPΘ€†gfΪ(/„&-]JjY(B(B¨ 2 #’ΪXŠΗ;Π=­ϋYu2ΘΛΕσΈfΗ»…Πο–ω&!i]π" MH D_ή$€ς –ΰ όΊ] \ Kπ0 ι‹r&!ρ@θΫΩ€W΅x5‹Ι3;ήκρJ ΡΟ `UZPͺ€ύ― ‘‘‘Β 2L‘ΕΕj#Ομx;M^΅ŠDwξ¬Φβμx„P―Bθν©ΓΙψΨ$ξΩρv MH“βRȎ–|³γ­ ΄ ύn©/s9)ovΌ„~ΖΊΰyF€ΪA(hΎ5€– ‘Z θͺ:ΝΈfΗΫAθ[@§F7TΕθyfΗ[AθΗ₯u@g°*₯DSu%dέ!Ώ‚h˜2Ρ !!!Τ5„j: –<³γν T«…±žΌ³γBB/4„ήFfΑŸΛŸ& ‘ϋΥ&€|u¬'Ομx+=”:„D„T±„PMτγω|ΰh‘š($Ομx+…Χη5τΧ€š¨-„κk@yfΗΫAθyM4ΏkvΌ„t@gψš_ι Τ Γ„MH‘‘‘ξ!T―Κ3;ήBυ]π<³γBB+Boυλ€Ξαšo‘€BΟμx+=˜z©ͺ‚)„u@E Τ¨κBuϊ'ΠP‹λš)„e˜D `t@΅P·Π‹j9u@BBB₯A¨QTB5-b2L‘‘^„PmΌV*‘zε™o‘ ‘a*€~Cύc#„ͺ2LKΛ0Ή…P3P7jΠ6j ‘f: n!τ “.x7 Π‹BQ!!!T„B4Ά[·RMHn!j@a ΎH§ŠŠκ5ΤwΑ»…P#€Ί…PX‚Χhwκλ!–ΰ@?˜Λ7;ήBa‘κBΏZ\J†ικr΄ „ώ-Α‘—Ο5;ήBA† €Σ(ιδB@Ϋ”Θ0ΝΎ€ΧόJ‘, ]πΏb!!!TBU5‘ar‘*€φλWFˆ!!ΤKκΠ,ςœA†Ι „Z Ρ;…ΠΓ@ΩΌ ŠBΏe: Ξε›o‘°Ρu‘j|#?€ξΰΠRZ’Νηšo‘ηYΤLSΤ „€&–Π „Π2P@Γ”±ˆi‘‘‘!Τ {χr͎·ƒP?€šΘ0!„"„zBo5Ι€Ί…PUˆt@Md˜œ@θδΑ€fhUŸo©wΣ]Tύό ί²Ή•½…PUτ ΎΩρVϊΰH…6ςwΑ_mΡ„d ‘ofd‘…΅“ΟMΤ)„Ύ•ž­κ€Z ΡσBθρΔ$}tŽrαe†< ‘Ί θ/@Η)(Γ„ŠŠκBCkΥ"1†%x·ΪjΞŸ½!ŠŠκ%mP΅–ͺέi%DοBΧ7ν€NB²ε…ΠQρνΥ.xzΎžzΓ5ΰαΠ`…\‘ηλ‚η™o‘ϋzϋ`ΦNˆžB­&Q₯4ΤΑuν…Hϊ·˜A‘Ρ @@θ&mΘ΄h{!zέΧ΄5‰ «Hυ4„2¦ŸYτRΠΩΩdΑέw {λ+Ȍ[o•«νͺUdώ]w Η™{Η€Ε²eRΆiΪ-·v«WK‰5žQϊϊυRbMΊι&‘ήȈuفdΐή½Βqζέ~;  %UbbLgΑ;…ΠF£G«Ο³’ηΠδ… IPp0‘ށ<Ώ`§°ίnˆ”8ΰ75οGž›·C8ΔΈ©E)ΫtnΒ:rGΪH)±ΞŒXNξλ0UJ¬‡zΟ#χ] %ΐ©Α‹€Δ !ΝͺΗΨ ΡσBθτψtryύΫIH<z$m(©αΠ©w2Ή<€χης͎·σg')dBjωBτεAθWKόMHnT…Π( ‘eηr͎·σγ­Ϋ«ΣΎ.GˆΎ<ύ΄Dτχ P BŠRΒ>Ψ©δUχ ˆΤPͺ@τWΠKBې­§O {ςΪ΅dυƒJ‰•±aƒš‘³ιδI’DΑQΖ6­zΰ’Ήq£”X‹Ž'…[·J‰΅δΎϋH Ž2bM=vLdΡ8›y„„Pεo Žτ†$¬];ξΩρV§Cœ5Τ ΫΈΒδ­­7 ϋ™£₯Δ =Ώωα8η7_On₯±dlΣK‹v«&#Φ3³ΆRpœ/%ΦΩQ+ΙccWK‰Ωμ'&­— ”wvΌέs΄νO:V©Oεœoυ8Τ€²%ψX¨blLr ‘ΌB!)€Ζ7;ήn’Nt‡ƒ%xΛΖ$· Kπ0 ι\RΧμx½ΠMH–™ΠH%μ}―@hu%dHτ;[‚ΗP\ŽΗεx\ŽΏp³γν Tλ‚O9RBΣW$a5kjY\ŽΗεψ o‘ ‹ζ«²N§ τPIΤ€v΅ΉC¨6 ιαQ|³γ­ TΧ§Λ ¨4…&€Y1 δ¦ζmΉfΗ[AθG@[–θ\σ=΅OthO€t""B(B(B¨' TP¦Κ3;ή B@«ΤͺΠ›[„P„P/BθD€B<Ομx+=TZ΄[9Χ!…&€•L†‰gvΌ„ΒΌ$†Π7u]π<³γ­ „θ[—θ€ΞφΘ5ί3Κj@Α (B(B(B¨§ Τ¨κBu ΕξPgΤ!!Τ‹ͺPžΩρVj’ͺ B5P­ Ι-„B΄ΐ™hΐ τMƒ “[5θ€ΞρΠ5ίͺΧEEEEυ „¦¬^]FΤ „B ¨@‘Π½B(B¨Χ ΤLˆή „Ij¦υλ€^Α7;ή B‘Τ Γ*αΊζ BΟ§g«£8υ: n Τ Z‘MHž„Pƒθ8Δ0„P„P„PO@¨•¨SM§ Λj@Ρ¨‚Šκ55“ar ‘GΣ†Zι€J‡P+P§ ZT ;%¨+};Γ\Τ)„~X6κ΅.ο …PΤECEυ$„ZMBr ‘kΦh]π χ1Φp’CEυ „ξOμ―NB2Σu‘ ,j¦*B?[h­κBΏ*•  Ž!τ  
Σ’šκ€:Πss΅P―h…B(kBBP4„P„PoA¨ΩΌΜΙΡ2 Ώ15B(B¨' τ:Φo%DΟ ‘cλ€hϊ“ u‘ω\Vh­Κ ‘+‹Jθ5’Τ„ΎKtjTCς•…(/„ŽŠ¬GZ•4!Νυ0`©:‘B)€fMH γ@ΡBB=‘­Fq:Π”%KHP˜Ιh‹γ!!΄Β!΄WxcD_±’ηΠ½ν’° `·PGΊ΄ΐη_.ζ›o'DŸηoB‚ hHk\ 3ε§G'X(/„ώ-7—Τ υb’'2‘ @ΓP4„P„POAhςΚ•$";ΫίοBuMHΏZd@BB=‘P:>6Ι@y τHIόw ’+!ϊαIφΚ‘ %2LΘ€rC(ΡΓ$€/mFqς@¨n 2 σ*Α5‚Bh %dhp‰ύxD.4„P„PO@(,ΑΓR|l·n\³γ­ TΠΘH³&$„P„POAθumΩu3ΈgΗ[Aθα…&€‚Χ[Υj@gdς͎·‚Π―ΛhX―kΆ ]πsb‘Uu›q͎·‚P’O¬\zA!T_ZM ™‚Έ…†Šκ Uk@ϋχ'ω{φp͎·‚P€Nΰ¨3BE­ψ|2†„)­ΪSΒ5ΐB΅.ψΟ―ΰ›o‘e5 »j ‘*€Ζ6"_δp͎·‚Pθ‚o‘-ΑΟ«DΧό RΚ„θΥ€’‘!„"„V<„ꛐxfΗ[A(Θ0ι&!ρ(B(Bh…@¨6ŠσεσΈfΗ[A(tΑλ2 =%]L!t@?Ώ‚ovΌ„P†Ι1„‚ θ€~Αd˜άB¨ €U2 h&”θ @' f‘!„"„zB]πn!d˜t5 \BB/(„€.Q»ΰηr͎·‚ΠλΣ†‘Z>Πο$,ΑΫBθgΊ (Ομx+¦βΖν‚η†Paš“PJˆή „κ–ΰ5 dΧό€B([‚Χ(vΑ£!„"„V<„jͺoBr‘kΧj2L₯ξTκ!!τ‚A¨?ΚΤ-„^_Z΄›δk@)…&€•u‘°_AZBίe: Ζ&$§ϊ Π6₯k@ƒ+α5?`Jtˆn τ³pκ―QOCEυ„¦¬YCβMt@B¨@α"ΰFκ!!τ‚@¨VjΤu ‘Ύ ¨(7„j: fBτN τ›%₯–ΰ/4€–‚PMΤLˆή „ώ-/O―:―V@jBu: E½d-‚ϊ§Τ³BB½‘ ¨Πx !z'Ϊvξ\½ύ—_„P„Π€C¨ f2LN tk›žΪΌˆ(„~2ί^”BφWHAι&€ ΈͺϊLJΊͺϊo &^=Φ²-iQ½Ίvξ™[ΙAAz&Τ ŠMH—¦Υ’ώ*»IϋB±ΦκFE½pZ«V™%x7ΪhθP\΅ͺV*"vŒŠP­VΓVˆžBaR5ŸΊ¨hΉ¬…yφ: <ϊδ…Τ­α—aΪUP?„Φ !£”ͺu‘w΅I"Q!‘•M†ι‚A(ΠΛ‚|ηeΤ½΄ 2ί ¨zWκΥΈ!4!=Œ½ώzao²x1vπ ”XΝ—.%cŽŽ3κΘhΡ")Ϋ4„S‹eΛ€Δ°w/I\ΉRJ¬Aϋχ“¬₯ΔκΆs§z3!g4…Ξ`z¨GŠθ1Α3;ήVˆή'Γδ΄ ΙB‡$ζ’ΗΗφ›šχ“όϊ„žδ±Ρ«„γ@ ˆ%c›N\HŽ%–λA μwfŽ–λ^ Ωχu˜*%Φι£Θύ]gJ‰LZEΔ©]π<³γ­\ν‚•ήo ‘!A ωΗε|³γ­ό›₯~&ΠέJΰe˜l!ΐργœ\ΩρvBτΊΠω ,¨₯Tωπ ’.¨†Re˜nόδ°KήΒ]-Η§uθ@?Ό°]sξœ”Xω[ΆƒΟ='ηΐ3ϐŒ €lΣՏ?N ·n•kΣ©S*πɈuειΣdδαΓRb]qό8™{ΗΒq{ϊiΚ=;ή@A†©Dt’„:#B'uλO>:|Ώ°/˜(%ψi#Ι‡οŽσΑuχͺ±dlΣωΝ7¨€&#Φ++φ‘3Γ—K‰ΩΛgfm•λΡΛ“η\-%V(½ρβo Qaίήy€ΙU•oόΛn*)$ Τ@ˆ€@H (1„€z‘*ˆ4)‘ˆ„ͺ’‚ Š„"¨(U₯W‰„* πG@z―ξΌ9ίuΟήΜξΞξΞΞΜΞώ~Οσ>»sΛΉwξ=sο{ΎΣϊeτ+exq=w|sλ^kμ„τί*0 «Ž/fξψζΦ©ό˜8 “ θ!5d˜ΠAΦλΙΩ6Ήo{Q'€:λρžͺα1 Π!Ju<Υρ•œ;Ύ9Ί`¦Pͺ㩎―ŠΉγ›3‘κ„”LΕ9₯L/Ž™Pυ‚ί¨±|5Π›ΠΔ€~μΠZκdΣαŽIή >†‰™Š ­-ͺ^πΙ8 {”π%€ Ε„V₯ έh@ί*£ν ]`@Whb@{VΙΛ°έ&t^Sͺh]…΅ υ6 ™έΣθ˜PLh-™Πq³f₯γ€~­Δ9L(&΄κLhb@ί±Dίi&τ΅…# =«θeΨ.ϊl0 «5Ά­EΪ!κ½ΰίΓ€B³YΔl^ΠχƒFaB1‘]Ξ„Κ€zτ“N0 ˜PLhΥ™ΠΔ€κεώε Ό8ΪlBΥ )ˆώ§URί!ϊό„ ££ύ4θΠ6Xν2‘‹XύVΙ8 PhŽ]ƒ~΄&Ϊ₯L¨Ϊ€vp zL(&΄K™ΠΡ7F@Ώ\‘—F›L¨" —k ©gΎΫdBŸ tT΄Φ:!5gBΫΤ&Τ θGέŸ₯Š ­Έ §^π1šUΑw˜PLhU˜ΠΓ05Άέ¬‚ο€’Mh2gUl@ΫdBŸm: Σ!έΰί¦Hh?«ίΪΗΕ€&Z{&tΰΈqι8 ₯μ„„ Ε„V‘ έΏaσA#Σa˜¦TψP” =|ύ… h―*~―eBΎ|Γj΅9 SIL¨wBzί«ΰ1 € Ε„Φ– ]λΨc³™ΤΦ¨νŒ0‘˜ΠŠšΠsΧάΆ‘§υ(η8 6‘ͺ‚³DUΪθ“ ?3Z5‘†i™ή}2zh7zηeBXύ οL(&΄φLhnΠr5tΗ„bB+fBgWfΠ™P Γ4y…ͺλ„΄jΠχ‚^ Ϊ°­&τι…‡aκNlZm*Zg=²™φΔ&&ZS&tΑ8 Uπ{•ρ%€ Ε„VΔ„^°φφ ƒ+3h»M¨" 7Π³¬²m@ϋ{auŽŸΟ£’ϊL0 c«ΰ­λfοό#‘> Σ‚q@ϋXέ~F/xΐ„bBkΙ„Σ8 ΣGeŒ€bB1‘3‘Ρ€.hͺ—ϋζUφ(hBsτΜ F@Wσ¨ηk^h½,h“VžMθό 6 έΡ€ΆhB½|6ύ^†L(&΄–Lθ:MΗέ£9L(&΄¬&41 t³*|,dBߘـ ώ¬ PΝi>#θF?‡Ηƒf-^δώ ™Π‚]΅qΠoucƒUΠ„ΊeX0‘˜Πš3‘ΙTœ2 »W(Ώw z&΄*Lh…gBΚ³zΠπΦLh2ύ=ZΞ*ψq;>½n±­xυl+MLh2}wλ„Τœ m&4ΠiΑ€~dΡ`B1‘΅cB“NHy΄R Ε„–Ε„Κ€z΄m@•η/υΰ^-™Π΄ЁAϋέοQΟΗ<κΉXόŸ ?~|ύΔ# έ&‘Π`@§ϊAΐ„bBkΗ„.0 ƒg*=έ&Ϊι&τόFͺ^πS+”Χ— :ΫΝ׍elΆ:ώΕΓΚn@Ηωω½ΣΑ¨g³&tξ:λd½ΰ?Ε€.lB‡Zο‚} ˜PLhMšΠ^€to«|;#L(&΄SMθ}TΪ€³Ψ‘GΖξ.k}ηkλλ¬aό2e1 ƒ<κω@.κ9€ΔΗ™3°ΎΎadγTœ‡m›˜Π>Vχn}cΠ½Ή,P1ΊΞ€I ΏΎυΦkόQG5όβϊλK’Φ€cmΈ(’ަ3ϋ¦›Φ;ςΘ’œΣΟ»a£γŽ+IZ?ΌςΚ†)'T’΄ΞΈζš†έ‚9.EZΗ\|qΓa³gw8 oΌ±‘gϞYΗ†j1 3‘;O™ήp――ι°ΞΩhΧ’€#ύlmξ»πͺ§sί…W†΄f”δœξψΡω ³7ί§$iέxΒY ΏΫεΘ’€uυ'7όρSK’Φov8¬αΊ#X’΄zγγyΎtq7ŸŠli£iEξw­Ÿsgφ‚Ο’žκyύΆ?―E6ŒSVMLh0Ÿοφ° α’4,oύ―έΜ–Ϊ΅½ΪΚFμί‘ύSmm#φ+UZ₯<―mJ”ΦζΆδξΣmΔΧK‘ΦԐ֖ΆΤ>₯Hk [ziΆτ^₯Hk‚-~ΐΊ6xΓ6™ΠΊΊΊ†ή}ϋvXu½z5τξΣ§Ί i,H«瀴κ«4­ž½{—$­^!^₯H+Ξ‚”΅έ»Š^ L¨ΜBίή}:¬ήυ=K’Ξ‚΄κκ«0­ή%ϋŽ}zυnθΣ³Wi ι”,­ϊ^ Ξ­iyž—Ρ*ηLH<š(γϋˆΕ^εmω½]m3θ’υœλιίηŸ”αšΜI hΎ³‘­m鉣lΐ'2 unD*•B§aΌ-Φ¦ΞίE¨€ˆΜυAGUYbhΠ)άΤIΊ!θeΚΛ‹X¬ύe‹CνΦNΓutΠ·Kh@Σ¨η[ώZeώ+’»§]ˆύmΉ!λΪΰsϋ[ύoΒΗ‹*₯κ­ξβA6h1~i΅Io(Ύ4ίοYαsΚ’žU κ ˆŒ¦’O[Œ~ͺ ΎO…Ο)‹zͺκ›ώšά*€ͺΨΥΞσΙ Wέ|φ«ΰω Άε|ؚF=ϋs«j›©Ž=κQvΦ‹Vπ\²¨§†υΙ’žŸηΐδ ]­ςΥsΠυΠ<έ+ZεΫBSσy―ΕΞ=2ŸC*t:’œκuŸF=α@Ζ!#j+¦ήίΛpI HΦwƒ1KQχβ‹C›)ΈdΞA=Κ'ZcΤσ un4‡ͺκ4dΛΌ Ο‚ώh1’Β0%ΠSέ„~ŽKQ1ΦσίλΗAYŒL—›αΫ›>eD= ΤΉωΤKMΣπ=αζ”αR »ΈιΞ₯(;«Yœ3ύS»r…ž—Ή~έbΤs,·:Š^jίσ—K6pτj\H8ΠM(ν‰ΛΗ ώ[όΔ 
‹εξΰ£j~E=ŸΆ¦QΟ~ά(5¬qXUΥίhqΨ—z.M·η;Ηy„ΞgΩΔ|κ78ŒΗΞG=_ςκΚά(ύE€‘Ϊ)"²8—₯ΫrzΠ \†Ne˜Ύ‚ξ Ϊ¨ŒΗ^ΚγσrΠήά¨KΝ zΕ_ŽκΑΈݏΩ‡ΰ³Έ›OEšηM+ΣqΣ¨§ ›φσΙ-€jBmΉΫΫ‡izΐ^\šnΑUAwpJŠšΏ(ϊψ¦|ύΎΚ1JΕ~άg¬iΤ“ί2T=ι¬(/ZŒžŒΰ²Τ4·]Νe( ΞH#QhΌήΗ½0WΧΙΗΜG=³ίνŠάθŠdcΞ·8pφeώ’ƒΪCΥ.ΰ2t΅―άΧ ΰ|Ώ³g αΏΡg¨'Τ κ=?Ν_ps”‘\jηƒ~Μeh2šŠvj¨£—έφιδίcυό?‹QΟΈPΛhFŸXœΟϊ y.K—Gχσx.C›PΈ’ŽOύΗΝggΜ–±Ζš‰4κΩ“[έ MͺhθγΖτ ]Uέͺ3ΪA\Š’Q^ŸτΆΕ(δ N:NυԌJ/ψρJUπΫΖš­;°¦ΕiaSfψr€ΞfΣ Ι\θ κr/IE…˜΄k±„›Π]ΉE™ΟΏ[ŒΛ ι€γd³œ½δΏ«ΞŠzjD„Iά֚η»AGη–]t —Κΐο‚Žδ2@g“½8_3¦νJŒvΊ—’YΦΊΕb=ελ%;α½έhfm―ŸχίΣrψ½.· …ΪζΌ }rΛ~τK. ” ύHΚF:=θιA«ρnB7ΰR,„ͺ0ΥΤδc7Ÿ1TΩ(7›κΤτi™/Š„ύΫ\σhœΪurΛ¦zA‡&TΠ™ φΒ;ΓΕAEH§Uοa¦­>¦Έ %jέΘklb’Ώ₯ž[=›"‹z>ηFtΩ2Οuƒž°ΞΗ*‡:΄½^ PΣΫ—oΞ%‚Nd?‹νη* ΣƒV/;Ί ]ŠK± ΄~Άš]£Δι―βfσk:φn%£QšJτ«άϊšEΥξ?jfέ!Ϋρχε2A' (¨jxΆεR@΅ΐτ ΥΗ7ό^tηΡ²‰ωTtr\'δω,κωO‹5KTΙw— Φ°OCω)Τc½ΐ³L3λΥΡν―^ΰκΝε‚’ότη λΈP­€ΣƒώΫb„h.KΩ9ΦbtΊ;2ΜσΎ]A•0νўΆΖ­–¨gsœt§u^o(?κ<§!τvoe;έσ‚nζω %BMξ΅8’Θ .T;Š1=hεPΗ”»αCRρ½ {J˜ίMN£žOzήVεΧCmBUeϋ+m*ƒš:=εΜbΠΈΟZl#ϊ­ EΈ„ΠΞηΘvΗ3Ύ*h1. t΅ œNzΏΕ^φ<;—_=ΦMΎλ7…oZμ1,ΓXŠΘδͺnj_ ϊΠͺ;κΩ»X¬•8ΗbϋUθZd‘}΅ΓΫ©ϋOυηφ?ΝΜŠ/ΔμτΕi„χ6F]€.Žͺ2σΣƒΐeι°ΨF·–QAζ`ΉͺŠr7λxπ4κ©6΅OΈΑνκm+%>Αβ@ω·θΏG¨NΤφN=άg[£ωηΦ±N†=<½K-Φ<γUσ¨6¦΄ί΄0Ɵ ΧXlR7Χ?χγς@-‘φ$Іͺš0›/{₯¬’"£ρ§~A+¨ΉΑ³ώGgPVΩ€ ]9κYŒΙžζζFΧNm[ΥΡΰt‹ΩΆ²8Άμ*F»―rΠ?h€Ε‰φ°©Ό-θ/Dͺ΅D'SΡQ5Ωω›› 5™zΔΝ‡‚š G‘/°°±Ε¦š`aˆ1τWWe Ε©Χ ϊ’@ΎγΟ‚{½p’vτjG¬™ΈVη’A­ShzPEsi:ΜƒAΧΨwR΄FΡNMϋΌΏ ϋtА₯QΟΗ­ϋy;Β ΙαAΏ°ΨζKΉΤΫm‹Mh^wύΫ―}{₯φŒχΉd~·8Ν䙣΄»AYf£³€Ώ”ΏthΠ‰~NΏφgΣυžGŠΡΎOͺk’υχψχΪ_ϊYTς½΄™†r‘Ψh/€β&τχnFπσ{Λ#y}δRi~G΄#4κ™΅+žHv¨Iτ›W~υθVΐ€ͺMήΉΖόι°0λzώP{›Ωζ›nVO0Ζ΄€ Σƒ~ΘΚ„NκηͺNBΩθ—Ήn ꫨηά€`’Ο΄ΝκΘ$ά›DU«ΆŸgCΎAΛ…UΝ«vmTnέIΗΣ₯ ­’Mϊ¬1=hΖ¦nΘΖVρ9θ…΅χUtvBΤNλ-m~έεŸγύύ‚ώjtd„βPO{Ί‘5ΥΡh κΌΆ—ΪΣƒ6²½›ΠUxnΛ%ζσFkۜζYΤσA#κ (ŸΏζRM`ΐ ΫΠ–χ†žCl°η£-Ή,ΠΊϋτ ϋΉI«¦q³ω―ΥC]mzΫY$‹zͺVυdώkHΉΘσՍ\ h#*΄¨Y—& Έ™Λ₯’»Nͺ)Χ>ͺ’sYάΝ§ €†9)Ά©Δ`Ώw[Σ¨g²5`ͺη“}ΈΠώμωg—JMw›TσOΏ\αsΰΧXSΔ©σŒ"ΝΪαμMŸαΈ 5Τ¦OΝp¨Š‡φ ™Œ>€ ¦Λ«υιA54Ν:ΆΪΰμ&Xs±οf­O§θχu³šF=ΪΒ`£G<΄υ–_”Λε"TΖGνΙ­λYίν«ΎΛIo7Žκς¬ίΪ΅L£žoψk5 »Mͺ_°8;ΣΠ.ό}ΤΈώΊ2σgAΟ}έZž<`ΈΕjϊ§Œ¨'ΐ–rƒ$#šMΪ{akΊΚKΚ|L™φζf§IΫδΚθΏn1κ9–,ΠHWŸTσ#]η±€›ϊ§­iΤ³Y  e˜΄m䣞šώN㴎ᴦ-Ž­,ŽΙzCΠΆ£Κme‚Uw“‘Ό άδ§UϋK:Φ4ΛΞ…ΠPl―»ž³8’G‘&ςqruΒό;·*…JιΏ΄8=θ«ώ e°c€ζ‘ρœε…ΥρA;έbqrŠu“ν΄~ι*ώšŠxUn'T ’}¨Ε*θt`vRupjnΈ§ ƒΦkη1'ύΊ ΫοZDTα/A£ΉPλy‘K#Hh†―Zλ“ts‚&'ητH‰Σ_ΑθHΧ&τΐά2εM97YΆ•?#³υj΅\ΠQώμ;Δ—λύ(θWΆpυxΤΣsλχς`ΒξηŽ?ΥΝeF/ίζό  ƒŽ·ΨαR¬ιϋ₯ltFΠ9A{XΣ±’7 šβΟξσ‚Ξ Z+·ΏΆωΉΕΡM~΄6ΩΪ‹σ‚Ž+°NΥχoωƒͺ&TΟ?΅²Ν§ΉH@!τΒΐ_”…α„ ƒͺΐΉ¨³ΰWόQn6J‰~£_δ–wΊ 57ˆ n*EZίΛΧ=θ…ϊ5+Ϋ™AχξΛΥ2ζW³Η=ε†υ›Ϋτ“¬,θQ7˜έζF8›yλϋ^°ΩΗ ηƒ&ωΊ|uΌΜͺ:°Ξςο7ί i2—šΙξV?Ϋχ¬1Ϊ«|όšE‡Ώ΄?Ω:ΒρώΜO'ΈKΠ;ηXΟ(fΪΈΦrgŸEšYŸšΠ~VxΈ©ζLh?k~:D•τs›» Ѝk έ ΉεΚω(”ςφ-δ>ηz^`έΟ[Κογό―¨ϊ\bTRšΟ‹‹Έtna₯fςρς#Rω‹A§φέΟg@nύP~…"₯ƒά ιϋΡD§8Ίš͍Z0‘G$ΫβΛΞKЦžθ+ί¨ι²Ιϊqnϋ'&4SYPΥΦ?«R{Υ}›ω© Uή{%θΙzέϋΟS,ϊLξωό˜§cn’/!{@)YήD“rΛoς’°y)ό]—Œ©"MiΥζίΌτ~Ώ?tUbίΒ┣;ϋΓύ}πͺ΄=2gBυ€žνΗQ$λΉbή„ͺwϋ“ž¦ΞλτΔΌκ…|₯§σ–—θζvΧ<OςmKόΐΝκ?<\lM«&χχ|ψ€½[rRFΰ»#CΟ}Ωb€σŸ'“Π2 φRbB7τ|š‘YΌ.·8ήοΣ~ΌkέΘf†φOο!_ΛΔ¨^ηΏέ} |Ήͺko·Ψ±ζΟi΅μnΎμA7ο$¦š7‘ŸχηΫΔLθ’ν§ϋ=HΡ3νΌΔ$*ΜLt”ίΣ5˜7™j›Ί±?Λσ§ςό6Ia(oBWτσ[>—–ςΧ‰ Ν›Μ«-Φ$˜ηρ=X°·Uw{XθB¨'ύ―Z0¦Σ“‡ΫΚ#§_KΆά¦“όΕ©(ΛWύEœ±ΎGz²Žχ—œ ύΔΝl½ΤgόΑ\Θ„οζaΊΏ”—ρu‰P•ΡœδΌ³¦Υ`P{ττc¦ω΅N9+)$αŒ™΄lbBwΛ₯‘‚OΥίΔσYμ€:‘€ γη·D.­Η=h™Πσrλ―:)ω¬tNτίΓΗΦ4² Π.vςtVtœGtͺΑΑώBηͺ‹r²SriζMhφ νiΜπp}bBŸO>›?ΰώٌ U[ͺΩΉτm} ωwƒέƒώ’έ±•ν.,ρQώ½ΓW;ϋrλe^Ξ™Πν’ΟCύΨ«%Λ†ψv-™ΠksΗω³5m(#ͺθΪ6ώ›ΉΝbΆ9:Џ9яŸιŽΔ0(jͺˆά d™’Mh?žά’,λ¨ έŸ»ƒZ8—bLhšW.χό7‘‹x!³dϋΕΌ0ΆiLhŠΪ½Ξ'»@GΡφ ΨΙx>•Ό{ψΛRU*jϊ/‹=νSϊ΅VL¨’7jχ°§q§?΄‡&&τζ\{i»GͺκFEB_OτΆ5V σγ(’ϋ —ήεVΧ4už'Žhe»› ΌXχLς«^ΖΏΟ­Ÿθω΅obBΗ'λΧτυωφ•o΄bBσM.OŒΝ`/T=δ.Uο«ωΐ9-˜Π5ό<ζΉΩL•]χέώېΙ9ά*3r@5›Π³άθ)ϊωm‹Ν2T33²„&TMΤ\βz‹νFUXX˟…ΕšΠ™ΦΨ&Y5Mχ'ϋη;&©ΰρ¨η‘εόόI ώ­™P΅·V•όβώ|UmΦ­d(?χRώ†nφ²™υΠyΥ<§ΉΑKMθ.­˜Πδ"TΩK{XbBσ+Oχύ¬€ ½ΖΟ£5TuΆoς°‡ΪFχψΪVΆωΕΆ•)Š~ΞρgYŒ8ζσσ›Ιg™Πtψ²Άp›»ΎnτZ2‘ΏjΑ„μΏ‰Τ ^ي ]ΒΟγsE\«‘nTήtƒεŸϋ\ΚŠšοS 
[binary image data omitted]
xarray-2025.09.0/doc/_static/index_api.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/index_contribute.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/index_getting_started.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/index_user_guide.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/logos/  [directory]
xarray-2025.09.0/doc/_static/logos/Xarray_Icon_Final.png  [PNG image, binary data omitted]
xarray-2025.09.0/doc/_static/logos/Xarray_Icon_Final.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.png  [PNG image, binary data omitted]
xarray-2025.09.0/doc/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/logos/Xarray_Logo_RGB_Final.png  [PNG image, binary data omitted]
xarray-2025.09.0/doc/_static/logos/Xarray_Logo_RGB_Final.svg  [SVG image, vector data omitted]
xarray-2025.09.0/doc/_static/numfocus_logo.png  [PNG image, binary data omitted]
[τΠ–‡¬(4!$ΜΎž ―¬!ȎͺΊΓ•€bχ(½²|έύ²YŸΙN/†CoμqQ{c²Ιtβ܁Β»ΪύήkŽ“w6` d₯z[Ο0cη©^Y€l΅‚¬Ζ;K§ε7μU’t†»‡ΩO$ΘήL鈒ξp]@ρRT^Y~'€,@Φd΅―…W6o,@6‘ΌΥ³·οj7y·σƒ.›d#Ωtc±άΉ»Χ ­fUxg{ έΒ+{³7ΝπC½%uΕ~>@σ P{ˆΘd-'Q—¦7 ›θ ֟sšΌΫ³N’ρ^Y€lD Ϋ:ωhδpӐUΌU€,@ΆΪAΦυ}2ΊΌdZ&Υπ‚΄AVŠή>^Ω?…6₯NsΟsd²1xecσΖdΔΪ =nψn“8ŽΜzΩ d₯°‚E…>5¬³·„zzΥ@ ο+„οόd),αΐTΓ YPμο5χWΦΟ#ΘπΚΎ”–7 ›θ VΓ1”¦οw@6%-Υ-<Σ°Ύ§4CY€,@Φ°LJ1³”β^Ω›…κ«d% XΠ'ύ4ςΚ:^±½7Θd£œDΒ¨žα#zίΈΌ±ΩΔAvK‘Ώ-⿚lγΏ²€lλ²' —EιΘd²Κo4Pθ“ *]aE€¬ (Žπy–{όbe²Ωˆ'Q§ΔιΘ&ΤY—–Ρ(μs‹ψ―εlγΏ²α@Vͺ+ΪUύ‘a]ΡρΓ+ωΥ@ Υ~o€Σ€μ―~±ιU²P,$τ–I¬¬,²Ω€“¨ΓίΑ§BK˜ΔfdsγyhδΣmLίρXΫμِ [‚Ν]9ŏI==EΰεWOY€,@VQ6₯ƒUxRhS>”p·Τ6|ed£τΚd²!&QoͺΌ±q†dSρ<μ#4Οβ»Ϋtِ Ϋ:α “ΨξΆh‹6‰gΘd²žί]‡€%œ”Ϊ†―,¬‹π.qΟXYW Θ+ϋycύb²²ωY©³$τΐliγ•ΘYΙ3΄§Σ2]Ϊ\Λd²Θd}Ώ3€μ8Ši―zuΕ‰Ό[άΪ+ Θ†˜DΙ±²±ΗΖdSλ¬)Όΰ&‹χ€ΣošL½²6 ›DϋJhd%yΠ%υC€Ž& €,@ λϋύm'4Λ²Œξu6ΖdΛ€’ŸΠ‡AΌ²Y€lΘAωŸBŸ½±1†dSυ:μ`‘Φ‰<~λ›ze Avl!l…ΪύF%ŠΝN’ ͺϊ⹚g"Ρ3s_]ΘϊdK»§—šhX7"r˜ι²&@ υύ‡M΅,£gΙ$@6―,@ rΥ_h'jσIyΛ²©xzσ kϊ·˜ze AφV‘’ο°K1F;ζπ–²oAάK‚ΤόMθž“ών΄° +Υ ₯άcQ/_:iƒ ½εY€,@Φ·ŒϊΜ²Œήκ e›[Š»ΐ»Ζ­NϋΘd#˜DΥƝ© ›ΊΧ‘†7(˜ΎλoB™xe Aφ/‘Ÿ5š$τƒΠΣB#“Ψp¨˜Π‘ΦzXθ;~&έσώdKu²’Πχυr™s‚aό2@ 5 ^·,£χ…ΊdΥιι>@:ίi_Y€lΰΆ·ίΘβrͺ³€+ώ7@ΆAΆ3K }mρΎ:ƒ;y.½1,Țκ‘Υ“šXIŽ„!Όα1μσϋ‚¬δm#tE}ΠςηP›έΩΨA–¨;@6χυώ@6,P”–Ά }asΪ{Υ²Ωά@6αΞΊ|ϋB‹χ₯˜Ϊ½ςXʎdI—*A߁”‰`LDΟξ²%oμfBXΤΗm6›π\ {>@Φ·œΪ =`‰Ήw…μψ*«χ‘Η²˜΄Μkδ•%˜ΎΛϊΞ Θςα¦Y€,@6cvyπŸ,;μ^!\Ÿk ύ >•ΤwΐΟή(to +ΥCoή0bZΏ πσŽ{€μ©–m&1#ͺ hΪ =nYNŸ -šIm0­j‘Φ­ +²Ω@&-q-)τ•I¬¬€¬€μlΪ8Θd3Ψi·lυB—ZΎχεN / Dq}βΨ(`π.‚ΛAΆ^θƈžύw‘aͺg— Ά–4˜g›ΝφXLΙΌeΣ cΫ¦v §Χ;»Νω†Kό^Σ'΄εp›rϊVh‰¬υ R½Sφ‘?-ίιβ* -x I±²΄ρζ\―lFAvηΌδŠδwZŠ7vθήι[‘E²ΩάƒlΙ;·œΕ €i||ͺ2Δ@κn‹ιϋΨ/…ٝ؛φω'us·[ΧωξZ‹I΅υ‚ž$Δ˜ƒ„š-ξI½_jΗpšMΘΊρwlͺvΊ2”SΜ·œr’Ύο–y£[ψpŽ%«dK“’“ƒ„_dΝ½…« ύδ“Α`C‘½γY °·ηx0έ½ήꙐ­η£3½Κξμ(βυ²ΩŒzeiYν Λr˜Κήάΐ¬ΔnΒPhSΖ“xΗyhO·χF ±yςο₯8Yn·½„Ε΄π‘zm-§Aκκg‘²RN νhΉ@z+«<ΔΤ–hΰCRΣm‘ΪΔ%/ +y? ΄Ζϊ€ΦM_;Θrω­κ³!b mxΘzζ†ςBozΌK3y»£x€,@6£^YΎ–!-œd}+ΐ¬/ΔΰwZΪUτz‡,%°9+όQ\im–Ν₯6»Vο6ιŸ Ϋ5ΐf<Y /²:\ΰ=nI-φ3Ά΄\€#(Dg™Τκ:/ λΐliΒ*>^ΩIœφ¦9!ν+τ±όν›εΜDa ŸŒ‡<<Β²Θd³\”?σͺƒήw€YOˆ]ζ·ΨTΧ5ͺ2um²Ν\@> Λζ*BψXJA΅ΘΦ0ΌΫ^BΒ €φ· ηΈ΅}²–©"ζΆτ―eD.©•Qž@Vς6]γŽΣτΘ6 =θs?J.^ŸU―¬tŠΪ?}ήγσ(rΘd²9θΠ)-Τ„Ω‹ήCq=UΒωŠXJOeY‚Ψ!φ#]š³ΌOƒ…&Z>Ο<މNΥΫ(½CWή„Φ TcˆhJ »o€ŒӜεζΛΙ΅Κrf€2’wΨ ’Uρ{“ƒ(R†ύΎβΖI—₯ ϋyY)Vv˜Π/!’tG ²€S|ξχ…ΠβYτΚJή؎œ‘ΐλ=ξjγ@ ›I+ό†τβPκš8‘|MOƒJpBPΓq₯AΚqj™‡;BΟ^ˆxΐμKΣλ.•οV6c΅pΞΩn^mSΩ8ξΥφt*Xη΄ΚI*£lς"½'Τ'tLvωͺΔβN™d%μ’όΩN²<]―pβέΞΩ`ΠE™ ώ—6ΘJ`ν—{•Κχ`ϊΫ¬ye%oμpƒδη‘εΔΘd3 aε;mβϊ= 7ηΤβ@NžΩŒΧD@ςΔΆeοε/ΚοoŽα¬‹„$H;*ΐ³‘Ξu&*)@šΤ>ΙϋHΐηΏΠ>\Ω<‚άgŸ –x¨TF8Ξ5H]αΔC‡θί\}Ι¦Bπ&»Άi―ΨΈžm(gh°-£Ο…ϊ§B’7u`Ά+;Œ7R₯²X/(τžΟ=Ÿ§δθI'77|~ΪDw₯Οσδ•W Ν ΜΦs β̝ό\N‹ΤΏβf]#₯·Ί”΄%ΐςύωΕΑ>†2smnω!ΐ3δf—€λΥUΖGˆρmaξ¦&π!y5ƒ?ύ<,Ιr’Κ¨VθΈ™ iυΆ γitΥΥζͺΔίκΠ1-Ο¬λΩ £"]OΊT½ΛyYΙ‹Ψ†Ÿ-5uy4/68²qΥI7πΖι=“}ž:*3*2@ ›y0+ βνΨ‹2/`gOηoT7[AήY—–΄oφi ¨ΨΫλ ΟυΫΒcGΆaRζ‚Ν.χ6ηt7)[ ψ— O\ΠΫB«&QN.ˆέ+`6‡Β7Ϋ+π1Θzˆ•7ΫΡ&Ί“ξ\ΟΦO聀eD@Ύuκ›ας ²’>εŒύ=M•Όšr[―ϋ’Χv`B $Οv‘ϋ|ž»™žŒμ„2€,@6σ€VΎa© θDΚŠ~ραO²ΗΊ6φ£)#κ7 τχBcœ `… nΤ«`»2 ύβYΙ;Ί°Θ–€Ÿ@τΧχžΘωe»F΄Šς)€Ωϊ_ΐp9ΕΪΐ €ζϊ.vεμ&HPϋY’¬ύDϋg* q$―(-£Ω||qϊ›Φς ²<υΚF ²’‡x7‘™>χžΙπXO0IοΡ²ίΘ4 Ά’“ Κk Ao”!Y€lž<@Π-€gΆ°2tsεUΐΓ>XΰΗεAΊ‡—9k’τπΈvςήγ jύ‹›Τ 6YŠw›/=Syw眷O„œ(Μ)Ϋ|eXΖRϋoΓ1‘-!h§¨+_π(#Ÿς!oρ’ΌλώσΟΧΜΧ±4ΕΜ“lœ:'…΄ecΪ†ΚΛ©ŽΣkνΟ‘%sB–ΣsNύeαβ<ƒ¬Λ+{gš +=KWC―μ|ϊX}’a.ˆ₯zΞΰY)uΨRQ{²ΩΌΑ¬4(tcr0 εαSŠž˜ ­`dοΰ€Γ-eα ΎΏi”aαƐοRxΚ—{5o[†= σyΫΤPFήν&†Žu9σΕ‹7ΝΉυPΠƒ%$H[%ΐ‘΅ΊψΚW„Nsr΅Άzβ›ψύΛΛh~―k=ƒήR\Ζ ι]”υΊSφ–€&΅£zŽΝr’ϋχ ›pΠΩ’ ΥπίR¨Ο{°·υ8ngγy’π}n·r&YπΖVΘJžΠB€ ²·>ΗlŽρ]‘Ν€€‘B’€Zΐ’ϊqv…i†Οχh1eXĐ ΘζdΛ7€ΡŽχCΖ,Κ’M!γψ¬JΟKŒGΩd9ΌRš ΅yΩσ³1Β²~γγ0Ϋ§žO³-Λo’œ\“!ϊΖξ ˆ=4γ’“Ω‡„²Ό¬O`7!MUΐμΦBž@&§μšΜή„ήz™σ}‘Ο…&1Δ7‡ΈΟ ±±gSΘd+fΛwτΣλυx©2‰εiΪΘ1EθKN:"Η/>ΰC| εۜζηΡٜP\έVΞNꌧsyζςfπ{Υ;ΖvΔo9ΜξQ&ƒ€υ¦ΠΠ¨'Vd¨Wˆcš³’Ÿ9 C]fs^‚,acΦaBΚεΊ“WvTάογ‚Ω yyΏ%Γϊ޳$’ί ­pοloήέύ]…ΖΛ™hΗg.’§Γ\υΨƒwŠΟΘ‘‡q7g#brΗΑΦp ­·rRFΝ<±[6Žvιš ΅ε 
`_ζπϋ}‡Wmj3ύν‚μyY)>ΥΔ+;*‰χqΑμ’Bwœ’•†žVŒ‰MΰΔ1€,@Ά `ΆwσΖ’νηIσλ6tεν3W=RXΖޜ±!ΛeN)—–/΄½DNG+/§%Έ­ΟΜp9MεΚΐRΘΓΏ9ΞΆ&Ιcs²ΩŠ…Ωω±&Že}$ηˌ~šΕΩvžoCZN-3¨Gςΰ]ΛρΎYΚJπ²ΠξΌa/ρMt.˜mΟ›‚ήΚXHΖ,Žߐ-HΠ’Ν8ΤgZΏίy΄Χc>& •²’WΆ§ΠσYΩΒsΉ2 :M賐1­AEΠ.Z1Lͺ/€,@ kB”FkNg5½‚v&μ?Έε`}κ‘`dcφ8–r™ΏΔ›pϊ€]抢Ύ'η?βΓ‚xͺŸgzΧ4ΚIQ6hoŽΰhη¨6ΎΙ›GΚέχ[i λςΚξ屌?*χqyg @{΄Π3†§…e#x…=°ΛSV‚€½°Y€,€ΦΘ:qκ’ρΌι*―ϋ{—v*œΞUIλγ]kΗGΘŽεƒ9 yΜ&rž9KEM– xz„€Oα°—s?>ξΉMΫ¦ζ’NOˆΠMάL‰πxΩfŽΟ₯>ώ>y%žεϋϋεtu†§zXθtΪɞ'˜Ό²Ϋρ! JοD}Η,ΌOj]‡4 -Β`»§Π)BW έ+τΝœ†λMNΓEaχ ]#t†Πώ ύ9ίl&ΰUΡφΦβχzPΣφNΙΣ$*dYPΰ;=ΚβͺΛAΆ'OH‘ΊI₯‡yGz{€l$-o#t₯Π‡ƒΪYœ²λfΆEŠ;™3 R‰Υay=ΦrΆŠU9Nt4{Ÿη€ϋ_²§p k*ηwόoοrˆy\Οδk¬.΄@1ύQBΗΗάΦΫržUŠW=œOR#Οΰ+άφΏϊASN_pYΎΐ+g1ΰ―Ε^Ε†<•“¦Υs;Z‰W;ώΕύυΉ―ρD²PFS₯r"}Οmι}N·wηo>€σε.RΜh‘³Ά€5NYEKΜm4"¨j$š›#˜Ό²΅šwͺΟβϋhΌ΅r΅s„P8‘{ϋˆ[Ÿ¬ΐk5΄½<–…tNx£&™~M‚ύFgw4@6jε/ݜ η8ŽnvΒˆ&±Ηύ‘ν<ͺΙϋ°Υp™΅g0ΐ'ΐ­,i%ώπNϊφΕrΞΈΧ5"h+¨ O¦ F -γQN½+΅œ<Κ¨ŽϋάΞΗͺ*£•Ω½8O~ΪϊθJkKεΰ΄ίΘ’GΠKŘS4nogίΗλωκ+cΰZMm/oeαœHτ˜SRŽ­P_aƒΨ²9šO›z‹r {.§Ϊ™ΔήΐΩΛΎΩΩUy.ΦΌ.Γͺ‚Λε„2‚Α`0XυyψΩ#Ež—υyωωD‘σ85ԝ €ΟΈτ0Ϋuœΰ_Ό“{#φμτ)ΖV²χƒΑ`0 –ΤΊ5?δΦ3ŒΆγΝ5²ΪρΏΥ+aή ƒΑ`0X& K0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0X¦¬νε7Χ΄=ο*{‰ί]άmΑoyΏΘξCΧ2ΎΆλo­žΛη·Y―ΫJmK±ά3ζΊN΅‰’Œ¨“ΌŠΫRj}^ΜύH’ύ]ϋ:£²Χτ'qυWžΧ ;6φ©—y•χσΩΨs¨i:υόš¦œKZHhC‘Γ…Ζ]#t£ΠUBg  4L¨—σχβwτ{ίF,*Wό}ƒΠšB› mζ#ϊ›…Vεgj4Ήί§žgrΥ}Wͺ₯k)½λo‡8+žΙΰύϋm*=ύη χ}"‹ςΊν ΄’ΠB§ εΊ½Vθ\‘£…6ZΠ¦n=ΪRw‘‘BϋsΫΉΒuΏ£„6κKehΩ–j…–7¬c*σu„–κΑνΓκύψž  ΨH½γͺλLτ%­eDu²…aylΐίΟ¦ί·β~}ΦΕζάΦ;r[­αΎQˆϊu_5‘:‘ΆBΓ#Ό.}Sm mIϊV°({Yτ›5θY5}^/ώN7χy.ϊ›΅… uεw·ξG"Λαg;^θ"‘„Ί@θŸ<Ξυ.{F˜*έ‹ϊ“~B[ ΐΧΏžϋΊK„NΪQh)ͺ7ΣϋIcζ0WΩoΞύx?αί.b €φΪ€iΈμ6g sή‹ήΙ£Ž₯ίο•γ­«!tŽΠu\ζ4.΄_»½e™·w=£mΩ δ~>{Ο₯7ʍb †χ…ώjρΠB―qΡ§ΨHΔυ|H‘gψs 4KhŠΠGB· ν ΤΙλ~RC|Πβ>²θ77ytκ»Ν“ώφMηƒ2ϋˆ·ε²-όΎ™€26₯Ύ΅n©ηo\w/*Ÿο΄Φ.΄9―ϋqΩΣxχΊ«^[xν²1τν³Ÿζ•ŒΊ8`ΦΥ c8΅ύ6Η;wΏ~ t/*ο}ΪmΛδώmA?Χ‰λΫ}ρ ϋŸναCΜ€(ύΥŒϊ…ΎUŒ·ϊw)―ίΝΨΡbϋ>²—ΆA™Θ~’μΞΘ&5θ”Όu΄„πOΕΜ3θX{ΝΘ @v«Ω ‘<ΕΛF5š-,Ȏ·Y‹B= >D@Vϊπ;ςrš_έz•ω3~g~7.‹0χϋ©2ΊN1"-θ7‘C αy ƒΥ /m5<‘1™†YZ-XZΩ₯ VlτΈ²ŸExέwœ0•hAφΉ Ϋ,υΫ~Χ'πΩΫt…'@»«a‡ΚG>Ž―gύŸ³τνΥ”ΖΝέΉοz/š0miΠΟ₯²ŸR»΅ΩŸœςχοCνAΆTζΓB²Κ,gσ+σ° {6@6Yo,-γ=«ω©³Ό˜γOφγψΙ«Ψελš|ˆd?ηεύΨ»IqF§ρ2Μgπ9߁MΧύ|@v2ΗiR Σ…]̝T­ΘΞβ%NΏ$-uμϋh–»¨ΎKhΗ<Ζeςަ¬Oφ|·ΦކΛ4mγdε:>Œλοui9FΦΛESτΩ?Ή-Κu<–ΫΓχšη£₯±υ &b:}ŠίηBΡΏ/]₯ ;MŠM,”ύέύμ…Ÿ£ρΞ6U ;›Ϋ·W\ΔΛΟ H »)ώώMϋ‘eK5ύΘό]t:QsέKω¦}]χš€cd½Aφχπλσh2Wo ²·s³?ǜžΞΛΔiΎλοyDM”߁{y·¦o~žΓζδ:9WΌs,«ςω€q“`μgχE^φ>˜ου1Q•ί ΞμέΟE ²)ΎEΥ·Acpg mαίΦϊŒV +•9Ε\?ζα=Ύ„bώΦΖsΫwνΧΞ^ ο2ׁμG>εV(»M²Ι 8Π™£Eгη³//4¦Zή03Aρ›ίπn :}Ύ8.έ«Π9δŽxž’S\Ξ}?}Λ‰Χ’ίŒΉΒ~Η¦d[ΈLΊψΌ2 Ϋ –΄©ι4ޝ‹›Κ΅(‡n¨:ŠE•‘₯ŽfUBχo_β8²:Εύϊπ@>O1‘:\; xƒμDΗ3<[j'΄ ·₯يίψ΄σι„u {@Π6U% ϋ7΄ε~ζ $žj0ΉPμŒb_dR'^Sά\ ™τ€4Yί«tέ&Ύ†}_UΘZpž'ΘΎλταϊ<Θ‘鳩NΟΠ|+7©<Ώ΄»uήτύ\Τ ;ΩSΓ΅Θ~c‰v [*σ4a]χσ5έeήΐq±ί)~s†O™λ@φτσΩpΪI’d}ΐYœο Ά^ƒ½έΏ½ΐ§θ@φgφ7ζŠ"Ήv'φΥΔπ龟Θφ,ά'`Ήνκ±Ij;η2)•ΐrSΕ@Μ3νNϊθΚΛz†]·Wc;PŽξTžί>χ[PΪΜΰφrv ²Kx΄₯ΞΟΝ§]{wΒ:=(h›ͺ"TV'σ·ƒίyζ»Ω ’ͺ:[SΥwQ?Σ>Θ½ψΊ‘ϋ*ν²ΟӁμQσΥi9 ί€i KΖ²£χΊ‡'K5ESΦζΦPŒcSœIΉ~EήΘ¦Š1.Ÿ0”·ο&~•Σ¨.a²=θ@Ά‰ϊ10Θ6πζ6ΥJξR>γΛh͊_Χ {#ϊωμ 8K(T /»(w˜J0K彊߾¦xŒ@Φ I₯ϋΥςXχοnvF °υY}Ί«Δ@Φ,*z±υuΫAγ©8έd;2xͺ`΄ƒΟύΌ:ά!AVί–Ό6mγ3Σƒ,–’όAVU₯zY”γςζk―A6’:αϋxƒl€{EΥWlΈ>O²ͺ:-Ε4ŽT|/δΉά6†>O΅ΰ¨"XΚ«v₯6ΧΘcˆ{²G_·†&6φΐΉ&‡+~χ†žcΩpνΑ d?wΨB?ΩΎjδΎΧ5Ε½ ϊρe„’νjϋ_E?ŸΉg#E'3Ο‰υΨΉ)uR§h–­— d]χ;N3³κœ!Ζw&v7)mx©CμξλZΓ;PέΏ½Υc\?Ν‡‘glm©³ΩBβBΛn;E²ϋ Τ<οuͺ8A€l| +΅»φŠ8ϊžΦΘζ dK«B4ίΩq1τy·+ξs …=ψτu*ΪΟ% «jo;+`^ρά­ήχΝύ…Τ ¬ mαUέdΧδΠEχ}ώiΐ*K1ΈΚΏ£”ld+cΐΩ_ρAd0Ψ)\θά=έω}” [ΊίŠH;ih²²€ΫŠ1sσΗξ&²]ςU»¨› œŒΥόΆQΣΉS¨ΙTΕoŽχΝ-ΨΪΩ¬©Ω-~¬Η•5ΘJχλΚΫTqΞν² ƒlιΏM€dσ²wF‡ Όwz"~ΖρŠϋœ€Ν7^κλΦ•Vgζ2 Qε5•r³ͺ²_¬l0n«ˆ“%οξJ²ο9^Tυd·Sδnvώe>H'KυΌ3@Ά2œc4;<ΧσR°‡bg* (ΫΗ²{)ξG»}ϋe d§ͺ’C' ²=5K1χzu~tžςt »±f ;Μ0(€τ«2œAΏD dK;œŸΣx¬;dSΩ:ΕFC€l^AΆυwέ4ρ€q€μεš>«³f[ϋΊv)g«YžγθuΩK4![K0\²”eeX…μΌβf] 
4C€μΎŠoœώχVe>@‘„~»+@Ά2œΣ‚€¬Τμ‘I±²cL {°&Χ]ŸŒl Ÿ°Σ¨ψˆ“YΪ5όe Υ·'<@v€π3<ΊW²x (a@Ά&˜Ό΄]²²‘ρξڐ‘€SdoΥZΩάzdζŒ'σMPcθσUάg&gΔPοω(•[‡βnwyӐzΗώΥΆν» AΆ°_¦·μˆΰ4qA@φ( ,›€¬ͺ d+ddKGCΖ²cбL§*~χΊ΄ΎGR »„ζΔS=C³qΛdΝΪ’d―πΘη[½ [šάΡ&ˆ!œiΒFCtŠ$ ²Rά²jσ)mY ›/•κtEM.ΟCchw+kΰκN3Y_„ΤX2-Θς_=Ί*Θ«θ?¨\Χ©ύΓ•Φp6{RkŠ™-Ζ\dO ²ͺΥΙέ²•²ggd₯Τݞ”BεΝ€Ζ”Σo5sš™―κ\yσ²ΫsΩήΒ’Nσ_ ΣγΩqΉΩΣ²π³P&s8”e²₯¦²·».QυNΏu΄’?ΉΓ+ηhbι·*dcNΏ₯θ³ ž΄fEšΒbhwm±ι·Kρj]‘_½•wή€τ[O)Ύg‹ΉKc` ۍcžo“Κύ&έJodΓ‚l'MΏBG:U‡Ι#λ›ΤX³άκZp’"τ[>+w λ©Ž«v=D:ΥH₯Ϊ(—ρ₯AρΚ {m‚ KΛ»{sNΗ ω°Ž]ψpŽϋ©ufqήβ6¨Αχρ¬ΣwΟ9ΘΎΗ'θ…ισt {'«oΟ€zq8Α±ŠœΐrŸXΣ!0€Ν4§9Ιc͚eρ°vš` kΩ―Ζ²ΏήT {7‡:½€ψ>:₯²ΑΛ\²7qωΤ);XεƒμϜ˜ϊ:ΦmΌργkΝΊ39ύ—.Wnp˜ΚΛ9Χ)tψM@φxnΔξδ3†Κά€lΐϋU;ΘΎΔ`¨jWΧsV‡8@vΎώu–Ίž³€Τ&²Νœβhk¦bΙΉΎοMήΨΩNŽ―³Ω9œ“VU'γgR% ϋ+χ§ΊvpΆΑιi:ύ€wό?Μ€ψΟ& œΣ>™L΅½ςOFzxf Ωnvε2«ρېΘ†ψž’ΩΏ:uίeX$ΘήΛ¨ƒ\\0½°²’4Θ†(sΘ~αΡ§ήΐk²Υ ²6šΙ1/νc€Ω…8όf–G;Έ±Ψ?ϋΔΝV ΘR:°‚l—ϋ‰ŠŒTΘސ­n₯@όIbPΠDφΌΙαrσμp|‡Yά±~ΙΛK²Ύαe²φ!@Άp αtUrh€lΕ‚μ/š6υwˆϋU9ΘNηεν±\§7π·φ†ζάϊB,γržGλAΆ™ΛΦΥΙ'6ιΉr²~}ή“ξ\ά ϋ#ΗΰΎΟωζ‘žA7XE³νx"9Υ@ž`ΐ2[ς²sωyuίΖ;^‡;ψ€l[.σΕ9¬|£Tk]<Η ;Ν£ά&r­b₯N{m)ΗεώXετeΑωώΉώtƒΓ‡Ό)k1'V«\ύy_d©γyT‘οΆ‡Π6يΩ“8/k?M»κΘ.Γ§FZjSήα]›`ϊ­ΑόΜ΅ΌA¨-ΫΓlg*~χ¨σέhΌx Ku΄χύ4j_% ϋ±“ΣZίηυUΗj²…”Vέx³ΧζμΡsώ#β )P>wy*°Zv"|κ³xsqO{Λ;ΘΠ――ω6ϊs;idIg*²%¬ΖχΘ+Θήγџτη#™±Ω«ŠAφyηtΏΤKRƒCχoΰςtνβ˜ηπ€»5@Ά"Aφΐ”o]Βνι/KΝfοA’ydη+£RΚ¦vχ>Wα=z³}²¬τ[Ετ[=cJΏu€+₯Z/ t™ΦοςΞΠ?ν³w2”+‘»B@v²3>DŸ~Λ ²Λ+<τc~ Ώ {od­DωnIˆΠ ²εzw~'ωoh³Γώ Οoα@„  ;.D<κΥiˆPφ›sŠ'χυV€ξρEˆ` ²±ˆ ΐx±βοώ+&ι‘-{‡‹―—aΆ/§Q𫁣SŠΩ¨7η=ύV\"ΈAΆŽ3 Έϋζν‡ε%ύDΘ;’6­ΑΑdΊ* Ι] ΚβΫrg#λfN1†“½²sDmžςΘꏨ-Οz’¦>YQ΅η^εΗ­“υߜܭ1oτ*†(Vρ\‘]ΈU©βΓt‡Ώ„9aIώύ’Lεd―菨uƒl ηλuέ} Omά'{νΟ(νܚ8’ uƒl‘C/Ε/.Δ0ΈsψΝΛȎVόφiΥocΩ+²ž KΙίO:ΥR§σ»Τ¦ ²εߍεK1–λd3 ²ς©Š­ήυE4'zύ+8YΝ‘ΗΚ͌ @'“ρ”a7€μz<ΈΗ„u*dΛW&Ϋ08ΊγΧg%² kb£wΘVȞ dΟ»Jεa:Φΐ3ΘͺRœΨ€μiš½φ λ3pω€μ₯ž‘8@φέ\€¬ί’™"Œη 5Π·ΖJŠ’ϊ­²9Ωsά ι?:Fν•uυ·μ@Ζ‡j,9_ίAίN xδ°Υ©‘‹Ζ²ΓybζNw5¬R@V±29‚Σoy~ {BHύ@ρΫ]²•²ΗΩΦA «Y—‡i€Ζ;7Θφαόφ ΫZ·ηivυZƒ¬ίΐε²gΖ²ν9fY•GΈsΦA6γύJ˜~ˆβ˜gZ¦ή ›-Τ‰{Ω~RGΚ·ΞR1ž3Σό^τΊιR·•<³»σζΗω6 jϊ+uY9²dη5iΟd ²ύ9e@ΆBœΓΗDΞr>`/šΤ@χQόžM«d]εθ@ΆηvtίλΟΤ*₯Ί«ψνγ ;\3λ>ΪoΰβrZ]“OτdMa@Ά ·UΊ§vΩΤ<²+2πΐ#›G-υƒ5+B‡Ε²΄„ύΆPœSzցœOΧtBσm.yœlkϋQ-?Ε_Υya[Η²žRδΩέ5οΆΉFi½―φ>₯~goMύτ©Du}ΫP8ΞLAvi~υ˜δ]ζk+ΌΰS|ΌΰΩ 8=3QƒNηκ‚Wϊ’>ΌΓ[υq΅Ι<ΘƐGΦ'΅ΠjŠ%Σ8A–t€b€ŸΙ©΅ζ;ŠQϊπζ83χ‘Ÿϋx€l=§sRγ>@u"›tΏF>‚ΨύΫχ|:xouΥ±+1ϊhΝςΰ>§ϊd"lnAΦ]'ε}J;φΨ©6 u@ِ WYύJΤJŠ>o‰ ~Vε£’έύΦ(Ÿώn9$ΡκΠ†šΎnqEΤNιTοqŸήΌͺΪ·Π&q)¬G{θ₯Ιm²tjά³šόΔέ|Κό@E@aw Yτσ™pHgiΞή½ψρΣ)cʚΤr:ŒΉŠ°„½|rΎ_/ξ Σt“xωΏ&Θ~ΟπξόL½8&v(wμ_jžqkΟx^o%ψοκΣ¦z:ρΌ4ε²?π†Ύn\=Έ- ΰτ<—iB=ήα³ίmAφ/'ΦΏt?z˜ٜsύρwχ)‹nΉSν@ΆΤηύ[³JΣ/8ΩύΆβ$UˆΑαάΗϊ„εωXδiŠΏ?Χ£'m(eFυ{œHνk!GxUρχίςŠ$AvŠλ[Τ©»Ηž;-9;ΥlB6ΩAš} ?1δΚ¬B+ΖλhBδ¦sώνš {»;τt"AH`ΐ)5,w΅'τD―rrɊXΖμμ[› ΑΔk’x©Q Θ–ΚΊŸμ?Θ–όοk<Ας χ*OBΎΥ΄ƒΙN§`Άp+ ¬:Πwψ~o°—Nu?j_g—ιτ“"ΘΞb/Τ«R}~α‘ːξw*ώο¨Ω―₯ϋιDΎE•‚μ,žDΚeτ·Ήιšz!PΨ±θmΡ·ΘΞc―šWΌΞ)Ψ†T8ȚφyΤ7- Θ–ϊΌuίέl§^#Œ“•ξΧΖc?Β\φˆΎΖερ³f,{·θ±Τ―>5i2»ήοkn_―r;Mσ·§λŽΓdη(ΎEΥ·ρ…™E².―μ(+-Ο|€&ΞΆ™oVyΟc"νΰSζ:βσΪΦ(€lRƒN)Ž€ >σu9ƒ7€@Ϊ k*ϊ»E²xKΕ€=Θ–όkbΊL4—vλ S²Υ±wrzΐϋ5³χ·»nCšΘΪθ/ψΊϊ:䲦ڧJAΦVλx¨*ζΠdmϊu*dME“½Ε’Y©Ο#―ή+šϋ’=ΉΤΗφb0 Zz:dJχYHγρ3Υ=ΌΚιηό‰dME«b"Ω1ˆΛΩd]¬Aθ"ΝΡΒ¦γϊ2eYS]Mrΰ)}”4Ÿδ“&JπRJ―Η²ͺ“T^Šd ΡίπΩέTΗ/z‚lι#ξΜ›WδߞG£wΌε©[”ΑGeuλγ=qmήڍbΐψžOνn•uš f&ϊΣΑμĞςΓƒΆςΨyk’}«d― Q>ΏrγEO•Ÿ²gΘ‰Œ)ΘΆΥτ]E²iΒ zY‚μ !ΚβKέXξΔw„T―‰AνΣή€B˜Σ=6ΨΆ(6Α>RgςtΘ”ί§οœΩ²_Ζ \cΰόιΜ^bχuξτΜ ήϊΫSC΄‡Ι> λΊί d]Ό1F‘έ¦Ρπ·9ψKK‡Μˆ5(σšKS] Mrΰ)?―–wpΟΠσ)ΗKN“4™—Jξ嘣~ͺ‘ €wlO1Π=Αύ‚ ƒ‡χd΄λ>¦zŠw—vΤ|Δλ1ώή"ΓΣΠ 0Tψέ“ͺ|…1ΑlWŽΌ†AύGWέNε:—p˜Φ­ζ~}yΟ<ᙀΈίη\'sόu]1ÁG°<ΧC-Έ`RΗΈ£½œίmm jT<ξΉ&'^Ÿ ]‘6₯:yΪ°<&0@ό#΄χ6}Κ>¬ΓΆN mb9CmΤτ]£ƒΖ>{τUOqzΓ. 
[X y:@»|’Γ©Πτy+sΏ0ΑυŒΫ€μpO&HεN›²–c%JΟκLː‰ζPθ~α°«[9,«q»›>«p8Τ³Όϊυ»tŸ?ωή/rϋY§θπιw€‰ΞXEϋ8ΙkΣvwΫ νaΌG¨I_Γ σœκΦ6±4{€'πoOΤmΎ°J Η}ΛύΗ§ ΦΣ₯r‘ΗΉ+8&Ά£IύJ'cή°μθϋ; ›n˜AAν80{οΆ,h9NΟΤΖ\ΈY Q₯d‘ξS―Έ©|ŽFu½ο2™βLpγίFΨΉžΏ'w&«Hu;„λΌ©Ψ, Φγ~ μ}XVqΏ~άQ[έ/`70ό–Ϊ-ν.5ŒΣ“ΰΉ1„κͺdλ|kuAκΕγ{΄Q­!Θκϊϊ }—O;6ξ#μσtœjmΫ³kFC’ί‚b<λΒKΪ+KύΟςΞζd9£ŠαXζqŸŽœžk%ι>«π½»„θη¬Ϋ]ί†_{h°ύ\c`CoHQζm¬² „ΠsΎ>ίΜa‘k³¦ͺΘ¦5€ψ₯”pΛζ£§U)+Œξc*wκ―k<»ΆŒHΥwύ&yΏΠuΰ½"kW•œ~+αzIͺNβκ»’Ίnj}žΟσωφ1~ ΦύOΐΎ.υ~NS†q}aΪƒoyΆ‡L-HΏƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`0 ƒΑ`° ±?v£MΧ•ωIENDB`‚xarray-2025.09.0/doc/_static/opendap-prism-tmax.png000066400000000000000000002231011505620616400217710ustar00rootroot00000000000000‰PNG  IHDR &${οsΞϋώΞyžχό½ηΓΒΒΒΒΒΒΒΒ«ύ½ΈaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaaaaaaaXaaaaaaaaXaaaaaaaaXaaaGΐ.Ύψβy°χΌη=‡ρ…ςηŸώ~ŸeΗ±±±Οώσ―ύληϟξwΏϋ‘‡:ΠKάrΛ-#\vΩeρ‡……`………½ΤΆeΛ–l«V­ϊπ‡?|ζ™gvΥΆ~ύϊΓψB]3νλ_ΊθŽ;ξΠSz‘ς©_ώς—'žxβΏψE=΅wοήw½λ]§œrΚψΓ‡~ψίψΖ«_ύκ%K–μχ%ώϊ―ϊmo{[y¨V«oqXXXVXXΨΛi_|ρ›ίόζ—ΰ…žyζ™ΣN;mΏ/νΫ·οψΟ=χ\‘•ήuΧ]β°ΫoΏέ ΄Χ›ήτ¦ηž{nφΎϊ§ϊ™Ο|&ήΗ°°°¬°°°£ °O\Ύ|ω‹z‘O|βguΦΔΔΔμ§nΌρΖO<±»»›‡ίύξwuόΙΙI7ψΥ―~₯-kΧmμ(δ:υΤSτ£Εϋ€v4–γ‰ ?τWyςΙ'EH·ήzλμ§„\§Ÿ~ϊ%—\β-7έt“χττxΛχΏ}m™%άΌy³Άς“Ÿ|λ[ίzI'iΉί— ΐ {eΦa±}μcΏϋ»Ώϋμ³ΟΞ~κϊλ―?ρΔ…Jή²cǎΣN;ν_ώΛΩέέ=66&:γŒ3R·έv[cί;οΌSΫΟ;οΌ₯K—.[Άμ’‹.Γ[nΉ%ήΦ°°°¬°°°W4`‰ŠφξΟf·ά·oŸŸΥΊ· ‘~ς“ŸμχψgŸ}φ_όΕ_46vuu½γο ωφ·Ώύη?ΉVξΊλF3‘Ψ<°kΧ.oΉΰ‚ DcρΆ†……`………½’λΠs°Jε…Rξαϊλ―Υ«^%š}π΅kΧͺρβΕ‹χϋΫΆmλλλΣΚ₯KΥμ±Η{ΑΛωя~€–ΓΓΓρΞ†……`………½r«ΏΏ©ύΩμtυ7ϊΩM›6yϋ\πΎχ½oΏξwΏ;ώόέ»w—GFFn½υ֝;wzΛwΎσN8all¬±ϋγ?ήHΊϊήχΎ'˜ΫοlaaaaXaaa/`½ε-o9rΗξΉηN;ν΄+―ΌrΏΟ~δ#ω³?ϋ³ΖΖ;v§œJ5>>.|ο{ί;{χ«ΊjήΌy†ΉgŸ}φάsΟ=όσγm ΐ {™λˆ&Ήχχχ7D­JΣKώ󟟽ύӟώτλ_ϊ_όβK—.ύ“?ω“SN9eυκΥ<500πδ“Oς§ΧπππgœρΦ·ΎUΗΏχή{?τ‘|ςΙ]]]ρΆ†……`………½œvΙ%—Ρ°„;'œpΒƒ>Έίg_ϋΪΧ~λ[ߚ½}rrςK_ϊ’ΰι·ϋ·?ψΑώϊΧΏφSώί{›εΪϋϊϊ>ώρΏα oΠ‘Ξ?ό'žx"ήΣ°°°¬£ΫΠ›Ά}βŸx>‹½ο}ο›?ώΫίώφeΛ–Ε§!,,,,,,,λEΨΥW_ύoώΝΏm||όΉηž{η;ίωoνΏέ°aΓ5Χ\#Μzζ™gβ€u¨vΡE]~ωεε–‡~XP555ΕΓχΏύW]uU| ΒΒΒΒΒΒΒ°ΥΞ=χάΖρο~χ»ε &ΡΥ>πψ@„………………`’=χάs―}νk?υ©O}φΩΏχ{ΏχΝo~sϞ=_ϊ—>ωΙOΊΝOϊΣsΞ9'>aaaaaaaX‡dL>«Ώϊ«΅kΧ.Y²δMozΣ—ΏόεΟ~φ³_|±Ϋόμg?ϋύίύψ@„………………`ͺ{}ρβΕ―yΝkΎπ…/4ώΑzη;ίˆ°°°°°°°¬ίΔz{{ηΝ›wΥUW•9XW^yε?ψΑƒο888φ2ϊπpšω¨εΛ{qνqωqωqνΗΐεΡQζ#οΜΡƒN°Ž{ΰN?ύtOόε/ωΊΧ½ξ‘G9υΤSwνΪΕΖχΎχ½/8‹πενkΪνTNNΛ㰟=ž―=.?>ωρΦ`…`½BmbbβMozΣ§>υ©7.]Ίτ¬³ΞϊΑ~πμ³ΟžsΞ9Ÿόδ'Χ­[wΝ5ΧΆ°b˜‰ΛΛkΐš£ύωςζθA'XGSXπύο)§œrζ™g~η;ίac__ίωηŸςΙ'Ώγοxψα‡_π ΡΥΖ΅ΗεΗεΗ΅``…`f‹6=.?.?=λνγι?œ£Η€€€]m\{\~\~\{VVXVVτ³qωqωqνXGώς?ϋ‡sτp°°’«kΛΛkΐšaύηhŽnVVt΅qνΗψεξUΖ»Χ€€€€ύl\~VΌυqω/'`}ζΏψGsτ|‰mΫΆ}όγ?ύτΣΟ:λ¬―|ε+»wοφScccgžyζΒ… cΤΐ ΐŠ6ύ•{ωgν21ΦˈYρɏO~VÞ{ξΉ?ω“?ωπ‡?Ό~ύϊΗόμ³ΟώΪΧΎζg?χΉΟΝ›7οΦ[oQ;++ΊΪΈφ¬xχγڏΐϊ«ςΟΡ~|q•jǎ<\΄hΡYgΕ:ΌuΖg```EWΧώJ–ο~\{ΦΛXcccΛ–-σΓΏϋ»Ώ›?ΎVvοήύΆ·½ν‘‡zσ›ί€€€]m\{VΌϋqνΗ`]ϊ_ύγ9ϊ‘ΏΦ³Ο>{ήyηύωŸΉΦ―ΌςΚ‹.ΊH+XXΗ `νΫ°|οΦ5{ϊΧξiυμθέ=Ψ·«½mͺΣ‘Λ&&;[vŒŽN<½mt`ηΔΖ‘1­Θ{Ά'ΧʚΡΥ#‡ΖuλΗΤFwŒMn™Πz{\»γ›†ΗΨ"οέ>ͺCΩi<2>©»Z#Λ6―άΊS½Ϊγ“}ΓiwΞ–ΧVuV t&ΪQg’mΡvν‡έ­Ξ³qΥ:Žžύυ3#ΪK-Υ ΧmΗ΅ξν]­ͺεc};ξ[?τ‹§žYψλgX.Y7¨kΏ§wPλςŸ<±Ργ}ςο<Όι[¬Ώκ‘r­οΡMZωϊή―ά»ξΛwχh)ΧC΅Α΅~Ε² Έžϊβ]OΛ?χ«΅Ÿ½c–₯k‹όβE«εŸώε―εŸόE·W>zΛ“ή΄ς½7ψ³Ÿ<ώ§?^%+ς}έcςwύΰΡ?ϊώ#οψήΓς·]ύ+ήΒFϋΩ»¬τρνϋ_χε%ς³Ύqߟίτ€./nYεh6oϊΦ3Ώ~―όŒ―ή#Wc\Ϋί|ε8Ν΄‘–r=ΤFœ6>fγdΚ—£e@F\{Φ Ϊώώ?™£ϊk}υ«_}νk_Ϋ›ν oxqΓ¬¬¬¬¬¬€ŒΈφ¬ί°Ύώυ―ΏκU―Ίλ»΄~ήyηέtΣMl`Ε,Β¬γ°Φ?Ίοιe`Φξν›’·¦&'pΐ$ˆΡŠΨ^ιΝK9P².χ½\Τ T6°΄jCBb)HK Ԙà h†F'ΔL‚5ΝέτZΪKŒΕΎ=™t4Σ•^NΫETςh¦AGΣFωŠ-m=”kΉvΧ™XF+―°QΙ»jΐκΞπwΟΊΑΏ[= YwK`έ·~PΌΕφ›WυΛΓ“ύ7άϊύΗ6 ›DKf,-'Kλ¦+œ–Ϊ.Ψ‚’ŒS ΖςvΉΡκ »>φσU¬ 
xύγ–v4ξ¨M›μπd6*[ΐmμ¦(Žoΐb%+=λν―ώ?™£Κ«|ιK_]έ~ϋνZοοοŸ7oήόΪN8α„“O>ωΒ /Œ;+++++ #=λPλͺ«:逓/^ΜΓ}ϋφm©­――οo|γ~πƒνΫ·Ηΐ€uΜ–ΈJΛg{Ω·ζώΔXWμθέ³mΓΤā1Π‡<`ιPeαœiα%B©9IΗi0–9¬D15fέPθσ ΐŠkΐ:DϋςύOηθ?ώϊυλO<ρΔ+―Όrhhh°Ά²Aδ``````dΔ΅k€υ•ζŸΞΡ~όkΉfήL;α„°°ŽΐΪ»jqςξ»…V{ŸΊ7ΦϊGχn^EΆ{§ΣI\5>²{Η€θJτ£-»ΪΫ„8P θ#ΰ ΦfΐZ—qG$ΚNφ:₯’Η՞"0d=•ΐ/>”Žz¨%qFμ#ΨηδχξŒG:CςߍYΞ^ΧiΛΛ4vAVHfΏgέ ό Cb,ΉθJXΏzz› ΜΊo}X<”ϋ) μζUύΒ,a“–„ΏΨfΆ°ri…Ϊ.~}ρ§Ζ~pΐr”ΥFΪ»£™Ϊ«όδϋE2βΪ°Ž`…`````dΔ΅w€υυφ·ζθ1ΰ``c^₯Ίoyjo_wε›WML&΄iA<ΔΤΘΧCμihεμu”Ϊ9Gž{+S”Άχδœt­ŒNthΣΚΟΆr.˜V©SUΖι€!4ίξ³kš±J†+ΙoΏ!ΞςdΑΤvξ:av+‡dΔ΅```…``````½»ϊœ7G7λx¬=ΛoCέ*e¬Λ³ΜΥ ₯«Ύns+Iɝr„Ή ©ξB+ΉMv„©¨!μ.z JΨΪ™ϊšώ°†κОπˆ€`.œ˜μ j₯6Z—« @“‘z5TgΈΛ§: ³΄;'γTzΗ(²V]ϊ°U—>θkΊ2cX8!<½zg:˜EΈ°«Θ/…%c=ΦΧΦ΅?±΅­Γj£X5ž}a;$΅š±|Η©`HΞ;yξΤ.Ԋ–W,Ϋ@ΥBVΐ/"†V=΄€V©/Μ‚!,‘g”€`)η’Ο,z/‘€εƒDIΛ:U₯T•sαˌψ°X‡η 5›κD’œ^" &G!Μh₯‡lρz9ΪA¨άΜί`”ε„—€€€€€€€€ulΦ5ΣΌ9z ΈXΗiˆpΟΚ;cX¦ Λ“oZ)Ίbβƒ{W-N-Ε^Ή₯V*Ξrάpj|DΤB‘@ρ\Eώ8‰δ–Vλ€sΤF° ωυv,rιP’%AΓρΌ.c£(Š€ ΡΚΑDΪk#‡΅8K¬Vwϋ§OnΥεΌ»…τΖΟΊ[ώ&"ΨAx:#.•X?ψŸηΝΡcΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ:,€΅ό6‘“ IΐT1SξμΫ³mÞώ΅»F†woί΄wλš}W8> fκ°,ξνλͺΕBNNΌUδu¦tΒ,E XΖ,Ψh€Ξj',ΨιtόpjrbjbL[hi(°\ِ„w90d½S‘‘Aδ$΄T{T–e Sz‘ZXΔ²VΎRRλ5τš†ρ:Œ,@ͺαh=2―Ɯ‰Λ‡Θ“:αέ€eΑ"₯%cα~xΕ² p©ρ₯¦ƒ(b(ι1R€£$­h»sΐΝX¬ό»E«uωZz/mΤa8ZR„’Κ(‘Άθψ%Š•ς€NooH0ΜΞΗ/Λ'l„φ\ί/`5ξ ^–V`,9·ϊ¦UύΊόψ뀡±πΧΟπΤ ͺ³z―ƒ0βς_i€υ£ε„9z ΈXXXXXXXXXaX‡°c­XdHΊ\€•4v ˆ™Xx/θ*V3ΆΈώΡ=ŽZ‚MT‘!ZWbS IΓ3;'Q e»–²RΡΨdδψΙ3]M°…¬v™‘ŠΓ’,j‘Q€‹,u=+|ι…A‚•4@©5sτNrž>€Υ_KͺBo8Ό…„σβ΅€Ο¦α$²*Μ" T5ά9ςV)= ]Ι­ƒJζ;P… )ƒ·–b,0 Ζ" ήnφ‚ΊD__ΪKH ΐ‚%” ʈ‘ͺΑX,XDK“Κξ\£Λάν«KvΡ G΄„Ϋ¬μΰς8ΞXŸ-θΠΘyo4Μ¦¨Fΰ,UβTITa¬FpΊ2`Yƒ€¬nΈπwΙΊΑύ޲zƒ0βς_™€υ“υ„9z ΈXXXXXXXXXaX‡ °Hu§ŽV9­ZLΒϋžVOŠτξι_›’ΩŠQςٚΩΥ=Υa ‚ˆg€,-E$B+ΓΐH,·!72>œ^J0°;VΡ>16^K3ŒδŠ:¦4`ŽΚ<ˆ‹ŽδWο© ψh₯]+‘κ$IΜ'†hΦ!b(Ζ…θPjΰ”yl*Λͺ €ωσ₯¦εύΉŸνΝ™υΦ#u™2VHtŠ’[hΤΒ PQBm‡©1Œ…”°EθP8εœw q‡Ιy/εΚϊΠr€J‰AErjμ˜ZJ!ƒ’B 1KΛ†b'Οr"€U£ KΩ”‘ΓFˆ°D+λGυJ½Π2|Yͺ‰–8Րchx™Ϋ^ͺ382¨Ϋ‹šF@FΦQ X?ύίNœ£Η€€€€€€€€€5ΓnϊίOœ£Η€€u\Φξoή³bQ XU(pΛSϋ6­LtU+‘ξΫΈ"Ε ,q²h7τuƒVB ’c’Q‚€Γz‘ν\°y¨–ipBΊΰiͺΦ_°š(ΰe8«’„ςŒV»ΖΪcu¦C„:†Hr½ΰIΔ •e€ \ w!₯&΄\t₯s~Ή„³œ’:„3Δ&\ӚŒ~­Έ~ŽΓ…ͺςΤ‘ΥMΓ‰ΜΊg–Φρ‹Z€Τ’£"'ΫΡ&%|ιΡΜνwΔPΓ9U’΅ΏΠ#%η*ΉΚŽ6)!-ܚαΡCa\E‘H"ΑΔύΖΞ™ς—-^Ϋ¬tΘ1·ξθμϊ6θ2P$§δͺR΄‘ƒϋ”JP‡Κ3iΠΥ~‚,Σ$ͺϋΧ€€€€€€€€€€Υ΄Ÿύ―š£Η€€€€€€€€€€€uψkΧ=Χο~d‘‹”,¦&Iχ5χ£η>]΅0Ο:¬ΚfΩχ$΅}“¨ETpˆi•JΠ»’Œΰφ<‹°=^Νϋcž`Κ¬Β빁$WΉΌ`‰VΣ>2œH+?₯fθZ‘PEφ•α¦LœbR‘62‹ΖR»@„’t°Θž}o΅§«ΏfΘ 9‹½|ΘΣΘΘ?fυζT-;JZ₯ΌΣ°¨c(Χ -4// Šΐ,ψ`Ήd!™X Υ g»Ε±œŒ…°»\λhe5r³(bBiiή²7`k6`AWΘ¦{Š_9-‘u'f•κνΈ; w‹iƒNk°Q#«‘ fxςLΙc•—ζ,’tstη>Κκσΐ\T*΄ Β8φ.ί_ήW,`-ό?_5G7+++++++k†έφ4G7λΈ,‹jƒ–kO€΅bΡ4T•³kOΫ7HΣ z:LDSͺ•α uO΅³PrΝe)8MW5`υ«$―&Ζ*9­*ΨƝƒzˆΞ;V kANπΘ…Ϊ;Ϋμ₯χ­",Θ…τΤ³υ”F;x‘ŠμXy‚Μ΄Τ»i μI€ΥίžΰžŒŒOί—5τ˜JDΤ°΄μΚ ο½9dΙάCχΞΔ]Έ°AWΈ%―fsΥ숑e±J­f3u -똳ή`¬ο§¬rς ωΖτΣZ{έ"XEθ…oϊΦ³ΎqŸ‹Ψ’5βu:1-}|λΡ7ζ ²n`‚™fƒ”£ ν+$―t“ηŽ ¦!­ˆ‘“—―ο/uN½…ΉΐΈΦ°Β°°°°°°°ŽAΐϊ»ϋΥsτp°Žwΐ"Υ½Κ^G«=«Ί“όήΰͺ ­2]%ΐjυΘ§2Ω΄³ϊΐ1’‘G Ρ†ε NM%ΐJQΑΙ*±šω™΄Κν₯°DWνmΪ’Ϊg),PuA¬‰JΌΕΉ"€l’Έν+πΘβUV’X! D ΠψE”°U{‰YήN=š^b»αœΰ/Μ"€θόzŽο\x¨Ξ€EαBm!K2ΎUζέ#Σ)‹±ξNrŸ-‚5[ΛΘELΠ^V*΄PVι0–‘ Ωχ’„/„΅,±¦›kΤ1t}ΐ²€ 3ίA.Χ%Τ΄"·τ°ΚH_CΞͺ!άe„’kXτEαΪR6ΣύΡϋΡγ}s„ ζ…@ωT₯Œ,οcυςI`žŠάΏ‘JΧ–¬°¬¬¬¬¬¬¬c °ύ?'ΟΡcΐ ΐ ΐΫuί ,βƒΣ€΅qE•η^'Ή§‡9Γ]h5žεΜ`-€N@v¬‹'΄Ύ«¬J™ wW¬ΣΨj―β€’(Ήp '\˜·οnbH½Γ1·Υ• a―‘Z±ͺU —$ΊB™« C§Ί†ˆ?–N’=i‘σWΙ5+‰ ”μ ͺ(’Yο4g9lG=[Τ/Σ«w&Ύ,\8;ΙέΥ k@΅Αci©ˆΏ_±lNΪ»“܁­2tXΖ Α,Κ@›π…‘–ΐάlξ)1 'Voρp6o•Ήν1hvΤoΆ—€ejΘΩCW&-]ΎtΏh…ψ>N΅ΑA†§50‘«~ΗC§ΰ˜Ό|βϋLO‘3‘₯L‚XΎ f`…```````M€uΗ?xΝ=ά¬¬±©;―© jε»Y(O˜΅ό6*’Z¨V컇ΆŠoD6ςRο˜ `‡v¨KΛ]“©GfufŠ/T‘Α° Ί U˜U’ΦΞΑέ;΄D¦Š―%¦jΖBΉTΖιΑO„ Ɂ\ZoΚ ω}7L-ΎΆͺŸCΆϋΚ;]mXci%ρΦΖ»·o4d΅L ΏPŠ…R»FD²«“ϊ-«ψ U§—™μBrb©„S0V‘κ^ɐf}^q|rέ¦κϊ9œƒ³Ρ!$K-θ΄!›ώ"c½!%*Gˆ΄w-½…dτ ι’%-…ADδήO· ! 
K9ppŸ’Sμ5ΎΒR_ξ;δΏ#Φ`Ίš X’%Ρ•–„ A.=d»ΩK¬@¬Π‘Δ2σΖ*΅ΚψxAW₯0)Έ`}ϋώ^tΔLZ–Z³uJEK0“–δΌ#:Ϊ(‰Cτάv$OαͺRX‘Œςrε7€«,Kш{z£n…λα8Π£7B·REάΩ£μ[Ϋz³‰eˆΥ^ ΄zCQαΦ[L4B£Ϊώπώπ`½Œ—―7ΡΉν–&@μO‚Eƒ½|A#:Xόκ·N™£Η€€€€€€€€€€€uΈK>Ήπryη—W₯ε’«“pCM8E]η\H΄.MύΛ䔀I4ΦfαxE¨‰™DiΔώϋh6̞ڈΓx¨§rƒ΄1^ςjDkzχL]ΜaD” Β³r•§je"G0Ε+ŒChSA?‚˜‰-έ™o(¨ƒπχΎΙ’-j@{ "VΣ-π­™tΕ–BB₯wΥ'C;J›9PΘοyjŽ>X_Ηs.dF!άPŠΏ—€U2–PΓ ‰ΉΔ‘MO/ίΌ/Ν"όk + j tUκWD³+ –JZθΒ{z`γ€%·Ν©2DXjΈ3αN ’K3NyJ&ΑA‚°ά0—€,€υΘζ&*/Y1ρ4"Ό~7υήιΣUB6‘b‹Ώ΄LΝdlΤ^Œί|˜ͺ€uD]wž!?‡t-ΒΆFΤ:οοΛXχΞ?}ŽnVVVVVVVΦΛX»wο>ηœs{μ1.]ΊτοxΗόωσθώθα‡Ž!;먬DW·|U.΄JΎψZΉHkχC?Ϋ³όΆ1ΌAJrίςTB™φ6βnH΄S|PPΥΚςNTlΔχ mM1Αα­:‡=uTχ½[ΧTΐ4tό±ZD]Ψ”ΑΞϋΒ^Ud0«g₯ΧΕκJ…N΄wώ8h…ήΊΦXΪΘPΧ]Ηψz³Ί•z:€I #ΚΡ:ωζj―–j―1LO°t‘A5&έ:X,Š!zΕ*\VήrΆ;‘F bΝv―eΔ"ε8T9–»π™Ηr†s ͺα³c…’σ%aA²ΏΛΖj—όΰ±ΝΟg%w0ΊrD―δ‘ΕυTIΠΫ‘+C•qͺ|X*Z5ͺ(šκά² }βΊRΣ•oZ)*Ζv€°¨ ­ˆ9 δaΥ!ΒF”ƒx=蚫τ¦w՟XΚr6oήΌεΛ—λaΙ'ŸόΓώpλΦ­ίϋήχ„YΫΆm‹Q;++++++λΨ¬ϋOύύΰΗοννύΓl¬;ξΈγw~ηwάΰτΣO_ΌxqŒΪXG}ˆpκΞkδU†ϋ=Χ'΄Κε¬έޜ«―;ΡUζΛ΄LC'Υϋ(&ΘS"­΄N’j,£<έΕ€Ϊ]±τΘN~w­γF8\θ !9˜%Dp’;B‚_€ΐ—Β dykI¬ΠOΑdΪt°„&Jξ0V#œg6"ΈίJ‚κς…dΝ—qΐ2{½LZG«έ+e¬έΉ;Ϊ ƒκ2ug"΄ ΎΦ ’δŽΓFOlm3ʚ¨Κψ ιŠ₯½δc™Ηΰ2ˆϋ£[:³7 uύiaχ¬#δzγžμίIˆ/c©ΤΐϋK·Γ{ΚΧό8¬ |ε+_™šš2`mάΈρΥ―~υ]wέυάsΟi©υM›6Ε¨€€€€€€€uμΦ―{Γύ_Θ€%»υΦ[O8α„W½κUΪ¨υ²°Žΐͺb…·|5EΌ9…ο_ ‘––)DΈζώ”α>Π ΑTμ’ΙF€‚Φ¨•E΅DΥ“b…Ό]΄gdθωZh΄ δα`VΞy—οές”\+&‘›k"Ύ0;/ό",8΄Υϊ’UNύ`ŸΆkί΅Μ8¨s͈]†²JŒ₯Ύ 0βί{†1ϊ5²€΅Qmzκ$w­‘iX"]}0WZ€ΨbYβΠ μ¬ΡœkoΊ,B„–E‘” ¦ z²½»`DMK\S„!qp|cNxιVp |αl‡$Θ|·)ΒFXWKMˆ (b¬Ώ]ΆΡ* e2;h尝A§ŒρνWm‘L~°$»PŠ£:ήόρ’x©:1°Pjfι]δ±qΣ-c|ΊoI€₯±Φ!Z=eΙGl΅nΚ2εΩΈμ‘ΈLcη3Πpqω` GμΕZ>*ejΌwΖ°Ž`•΅y7ωήι£’·Ύ«ψΙδ_q/{’ϋKX===σηΟΞwΎ³f͚oϋΫ§žzκ† bΤΐ ΐ ΐ ΐ ΐ ΐ ΐ:vkΩgΞΡ_,`]zι₯^x‘·_pΑ_ψΒbΤΐ:κkθŠOΙ'\&΄JDuΟυ{–ί–…+νYyG1\yΗή§ξϊ€¨1»L’ƒ‹₯φn^ΕK$¨ͺ₯JυP{Y¦AL£3$„‡Xƒs~΅E/Ρ»½Κ"Χζπ!B2…-JLƒvΤ:+NTG}”ΒAΪNΤ™†ρΞη™°@1 μXP‡’X%l‘Ήμ,xFGη>«'σwΧϋaGc–ΦΛIϋŒρ#΅^ DΑ\WΨrL°α΄DΕŽΉnyŸ.κ,=Ϊΰ*=4ܘ~Ώ+½Δ&ƒWYcJ+“֝±ξ#—ͺ‘œr ₯C‰V±%tΤτιX!€…”«΅^IrhΣ0₯oδ¨EGΕs@dΖ Ί‚°‰νβϋύŽΓXF2ͺX2VΦat½}D‡ ϊGίDG Ι…Ÿν/Xι¬}θCίϊΦ·Όύk_ϋZΙ[aXXXXXXXG=`=τΖ³ζθ/°Ύτ₯/}τ£υφ|δ#Ϊ£vΦQXγ7|QNp0Uzξλήχτ2qUέ΄ ο©~NNu―r€4sρ“£K‚‰Umœ‘-Σ€Uˆ…κ˜]Ι΅ήםH‡[iN μYξ‘*€SgΦvi—Ν«¨ξ¬“ιΤE¦yVN˜’Θ a>†™ξ¬° v!؝ΓΧFΧΖ`£ŽκΞ«£]„D"$WΆ&P$G•–zJ' ₯ j²3₯kΧ©±Ε2 ¬kί2JˆΠαKt% ‰ς°β*=¨C›žLμžU"ΪωοF.‚SŽU•b€¨e 3:ΌΚόnsUιzΚJ 7<‘ήϊ<ΆΩIε^ip+”E£Λ΄τFrϊl]†VYτΖΐ€uͺ8—DΥ –h…˜*€εI³C„p•£„Φƒ‡½έ:Ÿ U#hk/ƒƒ₯ h)+ΊίZΞ–εao‡Γz·ΎdqάΚΘ4t΅FΚί6ζf'Άϋ‡Sω;κ_βˆΏωsτ X½½½'t~τ£-[Άh©υυλΧΗ¨€€€€€€€€υ›VzΡ‡>χάsηϟw½+”ά°ŽΐΒS†ϋ# U)r·mCJl_¨HK+ڎξθ³½€’ζq½­kͺ Νd—ηšΝΥ-Κΰlί΄_ΐJGΘDU1V½’2ά©Ω,tΫ1P‘τf!Σ,‘ΧTιPj―—€«†rβ9Œ0E<€1€%d–EΖΣIΎN†·Vnέ©ν(/PZGWpύ™±ικΈλ^t©œα±tn΄Ϊ³ώφ΄Δ(F‡υκδ&;[Ή'ΣZ¦0˜EvΌa‹nΊ,cέ=Ί#Δ ΛπQB»c^Vv³\φΨΞ-"Β”Ÿ>™4f…Ye‰hgš;*Η₯xΜX%`ΡΈ &–R ₯[J΄΄2ͺ1‹‡ελZ”AK΄UK) .­¬“MY›R\„‚’"τ@ΦZ ₯ψ€Ίd ήͺœ} ο2Ÿ%?DσΦhή{Π`bΦάsΫ=cΖ€UΞ2ρ»ά δςk{( Gt°xτχώΕ=ά¬¬¬¬¬¬¬¬¬°¬¬¬¬¬¬¬#i½υΝsτp°°²’;υ﹞iƒb,&θ‰φvߝR―΄d"αͺΕi£ΈŠ){(­3―P+(Qe΄š―ν›πͺ‘gΦeΣS­ž}›VκΣΓχάQ$>Q]…Ί7°Ώ° ιSΡΨœVκZ–ŸΛhQΜ:ήKο£ΰI_RΔυΐ¨ΦKz–[Fί{ό|*cΈό|šύžΎ€΅βΏ7G7+++++++++μΈ¬ /Όπ’K.aύ²Λ.›W؍7ήψ›ΦδΒΛε_^…S|poχέΙW-N€υΠΟHrO*Yέw§$τ‘α*lWKR₯pe s}@rή+ΐΚ> XΫ6ΈaEfcν©ΞtΥΒκb―Μaΰ”^BΨ‘ΡBK4·&κZ‡Ϊ"ž#2ˆTΐ—‘ρIλKi˜q~:¬£ž«ΊF'Hχ¦_C7¨•«¦ή­–Ό§ZnΤw—.τϊhΤδ˜™cR?ϋΔΦΆϊY‚’œžΞΣθFF<±EΌ΄δ)-υ¬^˚[†"…E/ fΝ@WΖ†H³HuΧι9xAάͺ,g’²Ϊ»C‡2|₯(”…Α«±._˜eŽΑ‰Κ5τ₯Jόr<"xρ’Υ³ƒ€³‘jΏϊμeJ{ω~u½„Sl±Z½ΉŠ‹r!Bœΰ)t•₯ΊΣ»―%χΦ8UΦ΄V;ή5SιΚtuθ"IƒωΓ|#δΖG½dnΒR₯ΈΪϊ¬ιzC]@\ε ƒΪχ”Z„Z&ΞΞί2 -8,Ψ[—:ε› fyΙχϊe¬•τϋsτ “¬£Μ-Z$2`]pΑίώχ‡k›šš ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐzΆsηΞ7Ύρκ_ύ+–>τΠCsNώόr…EW‹’v?²Ί’£ΡPΕW-ή·ώΡ”δ>2άΞ™γh $=!QNfG1!­CWY£!-7?Ÿ“άΣΖj&6œeΝwB~+)ζΨήF;δ!pQ―€Ξht’2PC Ψ9ΈFTŠjΥD…z{W†!<]†rθ°1ͺ‘“λ$Ε7ΠΰEς;ιν 5ΥujEKS»§μζ_tδηλjt$·RdPE’;΄€- ΊΌ(\8Uߍρ\ZΣ³dΑ›«(tOνΜw‹;pb]5]Š*§Ž›ˆd1r“kΩq²Ή5ŠXΖ]Λ22HXΠ "‹Φ θς΅‹£o ν8ΖβΔΥ £R΅‘Μdͺœπ~oh²—tεσq@ΠΌUŒei{œτ²ψ ΅ˆ 'z<ύpn;χŸ·[KΖέيގߑ'rπΩϋ‰‰&χΞ 
w΅¦UŸ7;μ±Βc°τ.CWΌγ|ό.S‹PK“4}οcΟLŠβ[LdwΗiςrβΒKXOΎλμ9zΠIΦΡdβͺΛ/Ώό’lz8>>>oήΌώώώ¬¬¬¬¬¬Γh«ώυΜΡƒN°Ž{ψα‡Ο>ϋμ]»v]|ρΕΦͺU«N8α„K/½τo|γώαήzλ­Ώy’ϋ’λδ)Ιύώx ’Ϋ^Qp Γ}o_7ωγ σςΙ.OʟC[“6©|ΓrrΫΡ#MŠ ΦΰfθͺJk§tψœ/Œ Ft,e©η§ˆEςZθ…¦˜`­;jΣΎ”TOdΧς£“0£ƒ¨ƒ#‡]‡κΞ=k{Ί!ϊΌ"‚jFl…ϊJ­33ΏLUΦxi­rΙΧεސ-jζ!ϊ(ŽκτA‹/p]:«“Υ•:tωBOδv7m/Γ…Zθ2…]YƒΤΉ·¬8>H@°,Ÿ§-–ΑΤΊΐψ€6₯ώ `EΡ&ΥΨσ«§·λςo[ύ ΰRζ’;ηcΙX₯¨•H­ЦC©eκφ ι‡Ωh΅_ΖςΉ•ΕuzΊ^ηψ[…ΥS#W*ͺB„Ž’ιΜ=ΦIy.§&πasΐHίS~„,i~€ͺ a;HcBΜΦι­χ:φˁςΖvOρΗεD΅ξβžόP‘>i© K`b ϊ όΡ;»vΫ³[,΄ΑO– „r<·€+Ίn-ύ˜ ΐ ΐ:Ό&]-[ΆŒ±¬… R{Ό§§η†nxυ«_½dΙ’¬¬¬¬¬¬9ZχŸΎmŽt€utΨ7ΏωΝO}κS¬ϋ,’„nsΩe—}ΰ8ψq†‡ΗΥΣΝφ©nΑχ<΅t_kνžΥμzxαžUK’―\Ό§λž½=λ)-χ{LΛ}ύ«wMŽMeλhΉgdhώΦ{·mά·ρ ωήgQιαήgΦi%ι‹ΆSœhοpΏ6ξioKΩξΫ7¦§vξžέ•γ`㝩‘$ΗΠα°zSΟ&V›Ό₯½«ΣΡ’•]ω4΄γΨd΅γ3;'G';#“r­ŒKޞuϊΫ†Ζ6οWχ§₯vα!+RΧ6Ύ}tR[΄ΤA„Gj™³ΧΗ΅ώΔΦLn~hSκ@ί–k£όΙώΪψΘζd—ήΑ15ή’‡™ Cκ%'δ:Ϋtζt£ω΄uΥr^Nη¬6­:uiΰLgsSϋρ|‹t ƒΉ₯Ϊ«ρφbγk_\;n.σͺρ€ZΛ'ΖIζ€Ε¨Φ΅Τ%h‹h@ ΡεθJL!­tzJ ΄νχ‹Ζzεχ­O•Ε=Ϋρ%λε<₯•‡6%‚6‰±ώγ―+ywKKm©Ϊ‘‡‹Φ$2S›žΨςΣ'·jiρŠδΧ-ο“k…j#ΧΓΰΩ$Χ³ή~Σͺ~ωΝ]-œ—.½άιΔJg£ΞJη© 0UΣ.Y7θΞά7AνjΌ2g:―jνΤύ”λξρ™ΡSr=ΤmΧ{‘ Ώ7ΥίYή―ύ~νΌΛ~Ηq„‘Ώq¨zͺ•’Μ|/Όεΰ/ρb}t4‰ςπφέΧkίL?οRuκo„ο€ΎG|wτP7Moo„ΦυαηKσP-ιάθ;₯γλ‹©ΛΚ}Nιάy-ι¦ψ — tlΜ}λι%|uXaX‡Αής–·Όζ5―™Ÿν€l§œrJ£Ν7ήxΞ9ηό8Ο=χ\|bΒΒΒΒΒnΏώί>G{€utX«ΥΪ’­――ογΩ΄ώνoϋ}ο{ŸΫ\zι₯Ϊώόƒ΅σ¦owξΌ6ωέΧO=pKϊοjυ»ŸΈ3ύw•=­σoVώ+ύ‘΅~yΚUΫ)Χοͺφxϊ?iΟπΦτΥΐzœ­φ>Σ³―΅|ο3λφ ¬Σ9μάΈoλSϋZk«Άψ+KΛ‘!ύ8Σo5~₯ΙG';“τ'ž^nt8­Œοi?“ώχΫ©6ιόοΧdώχKηÏΒΝωΧ|ώ G?ωγG_½©–Θ?1υPυΣ“f쫟ϋΪwυΐ¨h]Ϋω)¬]Τ’ΏψΑʊ\Ώ\ΩBώζ‘ΑΪmc}ωηf{’Ξηœώ‘κδ?ήtΪZn­ώ…βΟ'ώZπόςζ2u-Ίη©tτXυοΧ`ρ ˜Ÿΰ\‹–ά\Ή Όˆƒλ2ωŸƒ?¨œιΟEρ‹7ͺς žΏρΚ;ρ,IUwΆ?°qΘ`ύβ©ι‰6P}E{ω’5όέεŸcρSΉAγΟ-φυΏVφƟU‡liόwε:CώyΊ?…Yς'Vι\‹\Χυp]™?ε?XόM¨›¬OoτΊ™5υζΏ¬ΌύΎς6~ΗωsL;GfΕ-·Τpκcs όƒU^lω'\mκc ωρ½γk¨›°%ίy½;rώbLͺ+›wπ~­NΓQ½}|ςύΏ/υj{ϋLώK_RΠίkώγVσέΞ`έπίίnΎ°8_φ—ζ¬ΥοyΗ=θ$λθ3‡»»»O:ι€ώπ‡’Ÿώτ§'Ÿ|ςͺU«Ύο~³†―ϊτΔ‚Λδ“?FgΡΥS‹―έ³bQJΓzπfœ”,2±˜T˜δέz‘°B‚œ™ƒ)έjΛSxJΊθM 7ονλή·₯;VkνΎ ΛSεΑΎn΅IϊXYΙ}χށc“­œ>…·s2SΡ…gFa*q˜%ΰ™ΙΘ\Βρ¬‰…:C…₯Ÿή6Ϊ7<ŽŒ–œΙ€ΪώXίͺ΄ρ C4c_υΪ}φξVΚί"B»₯D9B­―¨§†1ορΩAb ‚έˆ‘ƒ₯ޜξdzfM2+p$+Ξ“VUN)hΘΡC.s4U%ˆ…˜Ι[N"±ξ—–₯Ό;*bΤ=δ’P ·Ί7‰AΊœ‹ͺŜͺj£‡bΉξ^£[¬•Ε,B-­νξ$'§^y^IΓRΛFι@{97Π ΚΉ„.;ΈίΩ‚εΖΩνεόΑ²Vt† uϋ²\c™’e™{1–.Αό‘( 2+“™§mοn”Σ υ E\­«υΪW½Ϋ§3·όŽσξ.¦Ηrp―”σIHJeŽώ¬ςb|f58Ο¬tή›Εδψ’}O5R'ΙιΓ―·χ‹ΈΜ΄Υ§€:"yΕ½c₯Žwt’άJ§W’eEκ'_^Φωb’qΕ·Ϋ5U]μα₯ΙΑ ΐ ;ΛIξ²»οΎϋο|§ΠκœsΞyΑ χηœδήώή%ςΡλ.Έε«Sw^“ςΩ³ΈhJxΏοκηh=%Βkύ‘…Οφ>"H’Z3‰ν©NΪ”Μž 9#"*fJŒΥΧ­φ §†Ά<LΓφM©σΆ Ό6―₯jΝ“"'’Ω…5IT5ž3W‰¨Ζ³`^ΡΰEr½6Žd½υJtO8‘ήSέTŽƒVΤύ©—€μŒΆ΄n‘ S`…šμT2Δ«\j§πΡ>€· XωΏ’$01UΤ¨-Cυ™γξˆιθyQŸήDwhΧdFk™ΞΝ•φΧuΨ‹zΥtΩά(5ξ-W]ZμVsp]²Ί‹'₯#Όι™κ)³–Φ-βΰ΄qל±—©ξjLζ]9u½©FαΆXS΄‘nκshΤΓq~#Ι½, KΒU]n7`™/Ϊ4μŠΞV₯¦Έ?i.νμZΞΌ#|πΒΖ)+‘ϊ˜&6ΤJμ(Σή Η`ωςν₯X‰η ΐΞ·£Ώ])Όπ½Π}ΰΑΟ*½01ίh«³ϊ+γ>^‚§΄ώΜΞτΦφΡIkΎ¬― uλY΄cψΚσέδ\₯~¨p~vΎdIξkήΞ9zΠIΦρeXXXXXXXaXXXXXXX/5`­ύΰΝΡcΐ ΐ ΐJ9X;―ύ¬<ιΉ/ΌΌ³θκͺώ`†ͺXχ\/WU‰YύlΟΚ;φmZ‰ΜU*,(¨2] τV ³ΐU‚!α”žκ_»gx«Ξ!%Βχu'9x-7,OͺZ5cY·‡•ž»Aj2ΡV|νHf"L+Ϊβ<δ:Ψι[Ι²BΫ])ŠY £° κYκ1IT’“νͺA‡D%jΖU–•*³—pλ`miO€ŒΎŽZκ’¬X‡²†ϊXΊ%Χ™««E M,J%–Υϋλ½ΏΘπ Χ¦γΆvΌ—(oΉX!΁’ˆx‘H$x* Γͺ帚9-IΜ„Φβžν’ €IKρ“«ψA$¦«FV O%]•EΛ ƒe•C”΄΄œ-p…W&Z5$ζνΙΑZXdΔ—ξ¬ςΩϋ7€G6ο­Hb“#τ―λ»]’­n©?cέ/”€ε!₯ώΖΡJ`έ$η$<4·Tφξ(¬RH ”1`ω·„ε¦Γγ‡ΦΠhՁ@WΊωzC&’±θΠ²ςέv[wξRΐ8Ž %€5˜ x"ϋSž”ΩG'œLΙw“,Rγ”ύ₯―E€€€€€€€€u˜νι ϝ£Η€€€5ΆγκΟT³qEη—WM-Ύ–8 §’πυ}O/KΑΑυ2Κƒ"'¦υΑXUpPŒ%ZΚθ0•c|‰&S°;γ‘ˆjߚϋu¨t@…}έ‰Γ2ΑRL¬Ž#€UΔ0?«N§€«XΥ¬œ’Qόί;• ™@ΔΤ*"nΪ¨‡Κƒκ˜4€‡t…D AΗη΅>Ε8S˜ω θY½;k’Γfςκ!ωžˆσSΝΗ$"ί“ιJΗAht°’{ 6’¨v]Zt°R›•’£/x½Gt°XχΡ?ž£Η€€€€€€€€€€€uΈ« ζψ kκΞkͺr„5`₯J…‹―Υ::XV ωε$χ”αN’{Ξg―’ύk΅…x_JKCLŒνžΥ9$ΠΗͺΠjσ*Ϊ“τh)ΡΤאπ^Ε 3opΙ‡[3Κζxώηwy―‘\δ:Ώ•ξ’X‰F;§Ας?J6½Y œ*Σ‡–ŒVΦΧιΓ (jjΣ›{Cυ³$οWΧ2‘Υ+:μTμ₯Kv\Œu]Ž.|„`4fk½ŠfΖΞΚ  tΜX–ωαψ¬X6ŒΚZΦ°+’ –£u‰dm;Α_xA|PδDŒμΑΓZGψŠμo‹`Ιi<;J(ξ1ε”zWf,ŠΒXeΑ²Φ‘ъ—†~8>Έ_ΐβuA4=KcB„f,k€™±x10’ύ©EψdΞ2„ŠœRc&AyζγΓvπ±&@ ΆN›aΛσ0οvΒ{™~4VY“/xo‘tUŠ`•˜E*Ί§€π­αΫ‘z Ό{wW…Ψ 
δλΐ{Τ›uζϊΫΣΎ%ΧΪz>—Κ™ύSžcΆ °ΥΉzy«χczŽnVVVVVVVVVXΦα,Τδ`!B–¨K[%LΎieYΘ)νκD΄ΘΰKXλ?qή=ά¬¬¬¬¬¬¬¬Άα/ί=G7+klμǟcΙΗoψβΔMΣYt΅ε`,‘•Ά'όΚYπΪ²·ϋn PFϊ”Ÿžƒƒ0VŠf&zItRΩΧ]SS¦%Jΐδ(VεΆηέ|dP£%U “ ι„q„ s]B^‘x’:/ώ{ηhb,uvH)z·cyj]#œ\ωP£UY²9ΨeR­η~χδ ήD\ς ΐMθ^,λ5€Y¨QŒδ`1 g"Λ«ϊκΨ>”YΝ’¨<μ­Κž-§[Ψ Vs,©±tO₯ €u,φΛθNšφŠ\(šΛGž±B©“ܝπ^Φ%$)ήUΓnQG ΅β˜ !?Rμy$ŸΖr}Ym°¬Fš|£x’1«Œ ’ξΪ”φΥ£"rΑΤ‡°Ίg­;]]£ψAπΒoMOσBύ’Xm#Γέ)ν…2Ό![ŠE€ΕϏ2Ι½¬žb{ΟφižeΎΛP%δ7Ÿs¨—Θ Eί:έRzGσΝO(ˆ’δ><Φœ)J2;ZΚςφ‹αͺ¬°¬¬¬¬¬¬¬£°6^tώ=ά¬¬±‰—°R@πΞkΠhHιν‹―έuΟυdΎΛEWς€2Ίa9iμ)3=Ηυΰ‘ rΚ6B+<ΰZ7{΅A„iͺƒ†U2;h•³ιI¨―D"DuΒ,­‹±t&“ΥΜg‹ΐg:f{ά±pJ#"!PUVΐΤeΓ^ΐ-Λ³˜ΐΥ)#[@LZύ¦αq‚DΤ|X=› 3VΎ- Ίi¦„±&xεͺ;Zš©[HWNšH1!άισΦh@šΑ…w}’7ΝM+c Ž±ΒU±ΚQΝ*&0«¬·lhCΡ€Όx#‘αΙswμ―ΜF/#†ϊ6瑍ΞΡJάsJ•ΡFœqΏωςœ‘uJΉ Q”1KΧ `i© GkΤΥ28h*EA.ΠPƒU \ΈΙSΚ‰e€ΫνK΄ς›·ϋ•X+ΕGΤ?l¬šK ”λj„ΏuνΕ― 1Ύ/ΤΞβsΛ]%I ό5Ε·ήiς„έ­Ά`Ν‘±!ΒΑ™t…θŒ|.Χ€€€€€€€€u4Φ¦ΟΌwŽnVVJrϋρηε)(–TέΏ UzΞrδz )RέS!VO L<–H˜ΚβŸ#™ .ۜ˜)‡Η;Sz¨AVM%'«π”ΣέIIξ~• ­2T₯όϊV)φ]ι©%„9΄;ό‘²Θ3€ξ‚ι5tiθeΌ$+™`VΞIOEv ήν3«„ wΩΩ½<3πΜθ ΥYλΪλkλυ,"θDθ :·ΩΒ©Z|‘d#:e’‡•Τj§cΊmO': aέBb¦zΈEGΩEΛώ°ΆπζA—ΫΒ Ε¨c‘˞Z)ΐE`΄\»m Βp! 5άJθ58!2XΑl<-MW0[–E1W•ρA§Υ%t„‘ψ œ}]YνH$XV™Yέ %”)νφEτ>Κ,ͺI’»½δ6‹2”΄†`©cm‡…±^™Ÿ-—οlt_£!¨™>νj)ƒ‰ός·C{q»zκi+|ώ{j΄rP’O>_₯Αϊ{η7#`ΉϊM#8Ζ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ:š«ο’χΟΡcΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐša[ώέζθ1ΰ`ο€%rJhεZ„‹&οjjΙuZЍ*ΊϊΕ)=kΙu»Y(ΎΡp.> %_"Τ>›΄ΊŠ™ƒ‡XžΧI²?ΊZΣ9X,ΦIΓ*S )_‘4θΚ)G―|ΐBŒœ΅ούε-Λ’χ€εZΡήbξ0oPOρ}χο W Υ^‘0X€X±Τ #wωXaXXXXXXXG`mύό‡ζθ1ΰ`ο€Υώή%β§ρΎ('XΡΥ}7Lέy͎«?#O‘Γ<»0Υ"μλή=΄5Ι‘TzT(°g*²δ\e‘ρŠς,B-'jσC#υD€L΄„όŒα`_Ϊ¨§ϊΧ6«Vxg"EύδĐw’DΝΝ‡ΙZYη]©;b YφfbL‚₯ΰ―°—)M!–½,†^ώƒ2Ρ²š!ύ•§ΛC˜ŠΰΰP-Ήύ€e³ΞŒ–/Ρ©OίΙw†£ς`l ¨ΚΠ_M³ΐΤΎΥξξeΙ^ŽXyGlCβ\ p¬Yx`€eq,Oύ39Ι Ο•‘:W<τlD„Χi£έK–2fy"!+ =wO!,c—¦7ΟΤFKΨςΣe:DHμOΧH&Zz ‘εΕˈ^)O΅_Ά(E°¬νd‘2׎<`υq΄Ζ»ι €―dΐ’r(§]‚fw-ΒޘY^£λ18PξΉ~|ϋΜaώψϋŽk;Οj;»σ†―^ϊut$/?+,++++++λh¬ώ/~xŽnΦρXβ§α«>-Μ’Wξ°:‹oM,Έ όJςξχέ°οιe{Z="€* έ₯IΝΞjL•π:υυhuΫK%wƒTEΙς–“Ϋ>ά2ΐUρΑ,…U©·gΥ«τA,žΝ9μD£$LV•©p){ϋCσ‰.²Μa/k™•zΠ iξ^ρ²R4‹Α’ C¬ϊύ_=½- sNL_l>αN­ΖnwλAΣγsΆΌt¨––•λ"HaΡ¬NgFL–r‡\5c0?ά ]"[­™ŒΕΥι!QΘV-3ζ IοΜ1›Ž j`mΞ1Ωr$2λΜwΒj–’Bσu.lP!€N?tΐΞ¦‡: ‚[T!΄–‘­Tu/Ε΄ΚDxB“:1σ+,=$Υέ9ζΊLπZŸ@Š+…xJ),WΚs|πΰtεB„ώτϊγg‰φ’΄Œώ=…°™½‘Ϋ^κŸ₯μο#Xϋ™ρBhrό!τ}ΰΪKŽ$Θ/ ΎΒ–‚󏓑:|±•/Α}FCΞ?uόέ¬Ψψηε‘ζΛ#:X΄.ϋθ=ά¬¬¬¬¬¬¬¬—°vοή}Ξ9η<φΨc<|ςΙ'Ο;οΌωσηΏυ­o½ε–[bΘΐ:šλςΏψκ_°i˜Ίσ1–@Jh•²ΪqΕΔM#ΐ½ξ‰[Ύ*κΪϋΤ½©a˜u42"Lk+Έn`ˆτIξ»:ˆ•»e§Ζ ΫL‚³z*λΆ§ !Υϋqς.dΌΦ‰ ³)J=ϊJΐ….άPvΦfθέ±²₯μΎŠΚQΠΥyθ4ω†(<Τ…C40‘οΰb…e ²±ι‡Ωte%zτ>tζe&;οώκΡς2Kΐκσ²Ή9.Sψ‚€UŠ(΄>{YλNϊž=MΑ“S*ς#Xό60Q•žΑFΧψΣgΟΑΎώβ#κŸ³λ+˜\{κ*’|€ύύ¬εΧ-Sbbσ·ž(!σBόΥΕX©ιj|²σDHθ`ρΜίόωύ_bΧ]ϋΨΗζΝ›·|ως<< žvΪi—_~y__ίν·ίώšΧΌfι₯1j````````*`υφφώa6Φ‚ ήώφ·»Αη>χΉ‹.Ί(Fν¬£°ϊΏx‘ΛήYtυδΟΏ!–’O.Ό\t΅νŸΨqυg¦_+δΪ»jρΎM+…8iΐN‘’”3-4ŠΌ‚ Κ)η9©ΑžφΆiΐ"8˜I«”{¨bΉ=AƊՈς*57¨;K·oJΊ£b,PŒΖ9υΎͺ“θ,ς:σ½,zΜώ"Ρ•ώš΄nχ°ξXΛΠ@Y•¬\׎DdΣ΄LƒZ œδqΣ’K`Z@;sΟ~ΛΞxSΡm{ρΐB 4˜1· Φq˜*"†ŽΕθψΦKd )ΉΘ·XΛ6b-‹ά9ΟΪ7οH€EFpοΜ:q0N~7βœδe“φNΈš“$Ε Ι©pC(’qΚωο^)Eπ§Jˆgλ‹‚tzQ½§hΰ XEΘEΩ Rέc!•iѐCkύ3 ·±—₯3Λ  YdφC3 ŸŠ#X#5< ΞψœΫ­Οιώ’–q=³W«Žh—tUzoύCΒAΏΊ'”8ζhŒsH±Φ·’3,ικΨ¬m_ϋ‹9ϊΑ/œϊΚWΎ255eΐκοοοκκ*λ#ωHŒΪXXXXXXXX‡ X6ViΓΓΓ§žzκόγ΅°ŽVΐj]φΡ‘+>%½ξRβ€V fΐJΛ—ιaͺ™sί {ϋΊΕ1Ϊ5Κ&%…B¦!Q©λ9BWN–Mž©kΟπΦ”Υ8ΦΦSc±Kζ*τR<1“ϊ±ξ1L$5>§±κJθ6Π»wλyΒ,rή }‡2ΧΎS«w"ά€πΑl”iΥA@ΠͺU«q€ΆJN—Ρ(EG)i‘΄Ήqh\λD…h©Α―g pJNfwΘaNtυι95qξWΈuZ†Μͺ1(D•ΤWTήΏ(Ωτγυ΄sξ›ΕH‘©²K“Ϊ’‡™ώvU₯7ŠΑ8iΰpIYHΗς –£NC`Ξ©ρμJs\Οκ%0YΞΤhU*—ΡμJDSi£•£Ÿe©b ~Φ€Ε΅—#`¬CkG½E{C·‘PΠSDΐΛ½σ6ψl!ΐhJrςw­Μ:/ρ«Τ3l9Ύ_Ζ7h΅>O pΨΉ]Ρψ}ΒΡΠw(V"μ/JβTΕpΖ§ιjb&]Υ€΅ύŸ˜£Ζ€555υξwΏϋώΰvνΪ£vVVVVVVΦ±Xƒ—εύ7¬‰‰‰χΎχ½gœqF___ ΩXG+`‰«v^ϋΩ\pB£)Ι=Wtb… .Σ2 7άwΓξGξΫ΄’*ΛΘ4¨C™†˜’#«ΊΚRοtRξy‘ͺυ=νg,G§Λ9£ šΞJ‘C‚}ω©κP1ΤΊ4ΦΙPBGηfνέςT‚­VOrνΫκ!jιssFΉ5<q k7τ·§‹Ι€>TΡqΤ€™‡±ώ:w}='μ D(Μ2RΈ: %fΫυɘω UN€Υ:Τ\;@ΉΣ2­EάvZΌ4{§3CΈΑΙΕ™N•χΑiΏDΥ€TzΒ(eΕά†p₯·s+ϊσ0ΣΪ9ΑψWsιM’$ΘΆ …ϊœ€ΐCT,Y1cX†0ΓΘU²ΐδ Σh:ΰ%“5"ƒδΆ7ͺβ Ώΐ 
—ug'Ήϋ©ςΒωΐ”΅™ε‡XΖ‹žν3Šή45\Βω@oŸχ-ΏGP¦at’ :—)δΒδ|5Κ™Z§ΦMOVι™@Λ©pνΓα‹Ζ§½,>νϊR¨%? ε¬κbE†vœͺΏ’Xs¬ρρρσΞ;Ot΅~ύϊ―°°°°°°°Ž5ΐΊςSsτ XΟ>ϋμϋήχΎ3Ο'WC³«φ0Y>)Yi˜ΠZ\5\ΞΟ,β$-2?ϊkΝ'χψ.gΥ'Έ ΖΝΠ K­έ6 Xlρ¬6K½'xψqb–ΣSt’ ”N•~ί2ξΣQ«g_–“ αTφb>”ηΆkbδζ’a;'―”‚CAΪΉ/ψφΡXƒ£“άΖ φ δςzOMZvӘάYN‡ςΔC },kΏ^&i•πloΜ@,SΈh£ƒ3Π³ Id\X«F-χQ•hUΞ:4]1—νEΦl΄β½(°BηΎσΦm·vΏ'χ9,ςωψy*.ί/^š¬²ςĘΔJ&Ÿ‰Ό”S{νHͺ(Ӎ­~ΗΧJŸv}mgΰPPNsK:ςϋXώV©*XδŒRͺ9«}υΕsτp°°°°°°°°fΨΞkώjŽnΦq X;―ύμψ _DυJξΘΰԝΧL-ΉNh•δ―ξ_b…+οΨ·aΉ0E“Χ\:°, beΖ’ί©€Υ3~₯>¨ΣI!ΒΡaFwj VΣύϊΧΙ"8h¨Ϊ»ε©ς#Ψc!Ÿ'o $i.Ÿc‹T*D …w˜£Œ£eU-KLΉtdΛ»;ΪeQwΔΦΎ‚“±υ5RΌ`ˆfu·fΘ>yNYoM:TΆ0`ΡΏsyH}@-u&Ό:€5’ίSΠ°~³˜’™‚ΉυΤΒRΛ7³»b$C]©¬Γθ‘·‘ΦΤM’Ccι­ΧZ%ΤΠ/Ušš΄˜€ΩPΨ"6„ξΉi¦,hθ•r‚aι₯Θ† ;a>α턬2Ϊ’YΐαώξYͻ߈ BZl΄Ά»ΐ‘‡8Φϊž—r°74[·}zߜ!ΰ@„Αǘπ4tεω€,V»λ­'’fPx¦ 6tπΥΨlδ20εG­ξV*ZY―ΏώE3ΪαϋHxqjf•O~Μ4θ*+,+++++++kΪF~ποζθ1ΰ`ο€%κ,Ίš‘pJ€΅·ϋξ$ˆυ‹+Z °²ο~dαή§ξ%“‘*$¬P]― "4evΙͺξ)˜˜ ¦΄χ‘‘ηs’ϋP†WY`Πp «}λΕχn^•nZ™Κ ͺ±Θ%œ¨εg΄(”΄Ž<ΗL ο[ž,|ΆΣ%ΝXΉ—€ ’ώ –~–ŽΥ“‘/ τμΘ—SeΟIΩ½ƒ3B„ Μb@eGBl.zh‘žΑYbΦ σiθ₯ ―0P΅ΠzcΥ΄]#ή™.28;κjΐ²’·Uζ‡κ ΎSrUIWœήp¬φxΗ‰σ–Ζ.χ5ΈΆ£Έ‰ΟΊ‚‘λ:l䉃YΞ1Χ: UΞ‡@"’Š,μγaC@« :ΌθC5Š ϊΦω ^=0j₯+gdƒb–ϋjΦ‘Χ"4ωƚntΥΰ­rІcpώΨ ΐςΜOέO8‘ˆB­ V1·?!ZΧΎ&Βξϊ6–εϋσ·ς―+Ώsxσ― ”LW₯Ϊ{™Ox‘.b:O=Φ‰±c&Ι=+,++++++λ0ΫθuŸ›£Η€€u\Φ؏?Ÿ’rΫο_Β‚Y·=)2¬X΄oγ ρVŠnZiΊ"ځ'rΟ‘ΰ&±KU­υ‘\k/ν29`G¬…™tœΔU–“_―υ.́ByI ιd€Ό,ϊΰά”Joνxb Ϋ68¬9 [9^Vqa{[‡dΥ:+@‘#vΞ/A gv%dψgΰ$;?;ώ|EH£{&Z‘¬MPΜ<Ρ¨ƒζςˆ& Υ‘ ΆΈ=*#%'9]5’κuπu)Cηυ“Ϋ^"΄ς5#όΗ«sΚ’uγŸ’ρ'`ii,cœλŸεΞ‹/3θ-ϊ@Ύ³nT©OΈΠqCηwΧ*.δdM‡†\Fδj0V™O ‘’ΙŒΘ )κNTο.ήYΧ"l¨΄[― }λ;xίCΡh(‰‡P]8―՞&WίΫΩ‘C‹qT˜  $ τP†κ/ΡP‘ΞΧ‡8`)zb1”²(θ`1ήS‹ƒ”υC[υ'Š―O©@‘gωUΦ©K;ΠΜQH1‹y'lLΏΗθpς—«Μ—8fkμϊ/ΜΡcΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ ΐ:|€•2άs|0Eο_β€έwkejΙuBœ•ϊτu DD9βD>+0Κ‰δΙs;˜Eψ©β•άŒjp{v΄t{·mT7”ΤD9š(3CΎ`(1֚ϋm^΅οιe‰½υίς1t˜‡EeΓDTYš‘ςΎnœω*ΫέΔ7‡[d»3φ8ΣJ»žΒ]Ζ5ˆ5@Ρ&ι=ΨχnŸH‰ηΉŸU{ηδšli4υpRzΜκlAΙΥΥ’Χ*ƒΘ3βI!BT[3Z±β(jεVf—΄Η Ε|Ο'²p‘ιj¨#zZ{™žοΩώ㝩ΤeΧ!Χ2ζΨ₯Œj9ηέYπDˆΚΌlK9¦ζ-WχλjMKP:hXJ!”±Ež5*Ήβ‘έDe>sf}C8Τrr*Q«ΛSξ°8&[8%SγA†ΨV¦ϋ…ΤF”°ρ›ϝI&@^xW'}ƒψ0@-–ΥεCB”Π‘αrgξ™%Ύ’2‚\}Λv̈2;c½Ό-ΖJ-“ΚΙdUί“ίQCY—€”΅\‹Λ•’Η[υE9© J-`Nώc°Ζoόλ9z ΈXXXXXXXXXaX‡°’Φθ-_Ek4…οΉ>i4hύΞkD6‚A•ΖέcBAT9o½ ΖΥ²Uˆ0'PW)ŸΉ¦ qυSt΅{F†ͺνu–h•“ž…@υ*ϋ6HQB!Ρ–§ͺΈεwHE/ ςTž™―’ξΙ¦]ΉZŽV2]M§ΙΧ 8c©sμΣΎe€ΜΥc,Ώ R#ϊΰ!Σ²‡ΞUοέ>Ά°ϊΫ)9ΧaΚ<螺4G‘ήή?³\‰γ)Τκ§ˆΑY}t¨Žε92Β₯U€UλΎ¦@!o(οW¦α-&bX)(2ωΌ«γ„_FCEκ½£–ƒE 3λ#)D¨eYŒ¨lΰL«ˆŠ­<άΆ Uˆ†H¦SΆί€°.¨β;oΘ&"œa±PΠΗ‘ΐ2ΞθP (Ζξε‘½e9Ε‘>ΏŠ©‹σδ”t–bϋ ΰ0|XSΐZΈeΠmΏ:ώΐΜ±0ŽίNι“35εθπP‘‰ΰΚTˆΩRΚ†7·>]_ηοϋτΚt~Μ6U‘.o|6Τή¨gi*φ@ΒPΜ©‰P-«²T5`UtUχόhα&€i¬}”V*€677+++++++++,λ°ŒEͺϋδΟΏ‘2άϊY >xsŠ©υ―UGYU³ι_+FΡF’„WΥ lδΠ[5fΧΉη)-t|dχΔh LMMδ‘:F­ γ ,šsSMžLH•”€ΟλάσŠ₯LWωυ9Τ•τ(Β“‘M˜a$=ˆΊ~Ξtˆ‰|(υ›τΪκˆ8w’‹θ!B§Φ’•L^*gζΎ>‡ΫŒ ˜Ϋ΅΄ΐ#/9ΓΩβf([kŽi€π‡§P¬ΦΡ@ξ6χ‘.(”nΒp U†άΖtA‘\ϋˆ[AΡvμ~θ菇R ™†ΖΩ–uv[3±sζ ULgƒΚ»έ[θ‘6Λ^†y¬TYB’Κh ΡΚλe΄Ρ$W3Iξ†ΖΚj?[šΑŸ— –­?0]•ρΎ° UˈG°,ΊΡ˜EΡ8‚ƒΣΥΔ눳η7xΎH_.O€_Y…Ϊ³ΕχhΌ·ˆ·ΪΣν½—QNβ:‰l©Ιή€Ει΅Η']gΪ€V!vωι!3HάβΉW©"€Ή>ΨΡX“7ύΝ=ά¬¬¬¬¬¬¬¬™€uΛΧζθ1ΰ`γκΟL,Έ,₯Ί/Ί:Υ~ώω7φZLΒϋΎ5χ§hݚϋ!­”rΎq€•FbFkβkEIœιΌι¬‘@γ$ ‡κšΤωͺσšΚS£Σv £‚™¦(³“Α«TBžΤ§5]υ(~LKzΐ6’;‘ δΐ/ρ"αBvAͺ ͺ€QΓD»ˆ€Lαf½,ΫΘjgTp„’΅3]{+οΞZ2ψip…H!η€πdxθ―ƒ#žΈξβ9Θ$:l7X'ΰki9Œ*œΑΛβvΧjΚXδ[]J`τ™˜œŽ’–KΩG ›9Κ6Λteš.ΥRW•Άfc€ρΛΕU,8ι(‘A wθΘ ΣΙ­_κ™eΕ›K5"}eΐqφ¬Cγ%φ5ζ:,ρΒ€₯‹:4Γ~gE”tސumΆ@§9r•εjω„ψ­›œž?Qj4τΧ“6ŒΘ0–η.”"4pνηR–‚+-ΨπεκoWΠ[3εCύ ,ΛC΅λ‰,νz^ ΑNέ!tŠΞ‘”ip§7έΦ€…ΊrVXVVVVVVVΦσ ζθ1ΰ`````````…`φ,qΥΒΛ“βŠΞ/―ͺ&ήyM£Z±(qΥ¦•IN½―;)Tm\Q‰W‘”ΛVσ Q₯Το0³/ M©·Jmς|’]‰―’όLU@prˆΡ^κβ«Βyδͺ|δ *Ξ€«iI§ΜX.+ζώ}WF¬J J$W'`Uρ›Wi…d#$ 2u49αPίMל&vε±_ΟΒ½Z9Η2pΦΐΘ$ƒY©Ξΐΐ`Π?3υΚ 4ζ0R|4π0₯‘JRκ°“‚C ld΅HXU2η`Uχ°žSΩ@ΨκMΙ€Υ©Λ–3°Κ;o―ΣV&³ΦxgΚΙ.³σΖ<©°δœK4[Ϋ©!qԐG"³­LŠςzO/–ooOΘΈ["«kΦ$AU©$ξγ8•JO ­€UN3d£O¦wϋhY˜―œ4gžhΐ„Λ8ͺΜρεLΜR «1yΠ•ιχ%w/Xް,a9’§άŽΜΔkK •_L>₯|€U)KδΜύ΅β†p«IΆγζXA­ΜEσΟO­žiΛ•σψ¬|!u?¦rΕfwΞ₯ijΛΥ=žƒG)`un½|ŽnVVVVVVVΦLΐΊνΚ9z 
ΈXXc;―ύ,θŽίπEaVgΡΥ»ϊ™ΘC°ΘC’κφ―ΥC­μΫ°\Γs'CΞ,Όͺ,`&ͺjχœΎΊV Ϊ'²Ι:X#Y=™ξL;\ bΘq¦η ‘PrŸœ¨žΛ ϊ΅¦ νeevuΎDχπt±…Ξ3Η+Σμ9Q…ηΠΙσδG8́°*Φ™Žz¨GVgΝ 1‘'ΊH™η²i°d&":.…–{ωΙηλiteΕ@’ZŒeΘΓ“ζΣp‚ή’\ƒ1A1Λ_(χŒΤΡ:‚˜ιZ˜ƒ ͺfŠͺξ€–Ί|B½₯P€e]~ή‘:D853΄_ŸœΣJCTfλtƒλ-—5c²aλ4•ΪβΔ°Z3΅Α\rΏ(ΓdτnŸAKφήBό½1–{Χ²QκΞΣg—ΐλ)&―9ΌΥ[ˆŒoή‘kΣπxy(ΗMu–O3dψ¬Κxh5sˆ°)₯~γ‘i¬!…UFfΛ‰~ϋ)/¨O~^)γδ Iσ“P°uCdŽ—38ς‘&"μ‹uωΛRΝΡCͺρwχ—‹‡C…T›?fΦΈςΟ€‰"Θόειψ \EΈ<‹ΓM§ΚԈb.αΡδ€€€€€€€€uΈλοώvŽnVVrkjρ΅“ΏΈbΟςΫvέwΓήξ»cm^•€Υsqΐ}O/Σθ+ήRG†`R"Ή!*‡π¬\•υ₯ žσ=5ΦsSιΟφƒ:fu¨άΎ ΪgώO~}UΔPx—£όiŸϊ΅Ρ ThΙΏϊ οΨE—`…χ\²Ϊ|ΣR[΅μt¦³wu’ΠWͺy΄ΣIΐ’μΠ L­”θΪQZ/u¨K}νV=TτΧθΠ*t‘€»Z#ˆYx 6e̎aoZH¬V½Ÿ Μ¦ΗΚςξ†Šͺσό†JOΏΙ¬…R¨,‡+Z"Μο|yi7Υ³ΚΈ’eΎ=ξΊδ`½½‘ίjO‹Χ«e«Vkdy›¨Ji¨r<Ά˜VΙO=EΦyo±₯¬šgν₯R_ž6[2`mήQ…/μ™ΩlvB·UΎ—ΰ0tLΚ,Ι©d―ώ™šjεzΙ―ž6ΡP«²ͺY9‘’* ŽdαƒΑΧDK’άΗ Ά)Κ8rηsΠΓ²g{|6ς°§Έ™|<+B‡E+Ž#8τܞ™Bΰ“d₯ fΙ7λ+κBŽΔφ"Ύ¨’Ό*uk1χ¬°¬¬¬¬¬¬¬¬Κ¦nΏzŽnVΦΨθu—Š«ΘpŸZrέξoή³ςyβͺ ΛS•ΐή}W€„χ4Τ–„&­k W%βwωaυGz݁SΦπX¦«ϊw„:θ2ΤΐDZϊtˆpΦίοzΉ*φD€rΈ•vΙX¦.5QB–c ƒ[LP±T-eN~wU―.―²ς-bNυΓΞ4(06Π»Χ. ŽNgj# ή€^Έ\ •΅ΈΪkP±FςΦi΄₯ŒΡpά₯*ϊiHy3§΅έ™ΰ @+t&Ο½–wgΰadjH]OΞؘƧ©€ε8 ghΡόRVΫ€Υš)pο‘·QΠΠιΌA„t=0agΗΘ&³:ΣS(†;nθ8`ΉnωŒζJZͺtόwŒμ½‹³UΥ•οWόξ―~U·κW]u«0Ρ¨1&&鴦;j§μΔ²r[[£±bٚŸz£’΄1ZΓmB‚!ΠAmD/ŠQ ”·ˆ€Θ8œΓyqžϋΐβο;ζwΞ1ǚ{ƒΖƒςΘX5Ο΅Χ^{­΅ΧZ{ŽΟΩί1ΏC·cIeΎ‚Ÿ¬Ν‡…ͺž,b,₯‘e)»Z!ΏΪqΥdp΄^λΰίmκτιm Ί7–Υ {ΜΥWΣ“Žͺ­?—«ς]$LΝJV\HƒHd5΅MαD―,^β§ΠBďQk1=χKF Œ_œτο"έΫρ]hΨ—ζοT,ι…ΊΫ°|rΐrΐrΐrΐrΐrΐrΐrΐ €꧍€yΐuΐrΐΐβχh54{ @Š™μx<Έc555‘«Υ³‡WΜ”ΠLGs6΄Ϊλ…,iυΩθZΠ©αoΈsŽaM=UšΠΝe;z]2₯:lΑRΖ"oΕDu%@RO'KάΧB!ΒXΗ°ΏGDPΩ0ΊŒ¦΄ξH&"Z±)i1‘;‘DQŒ ½pKΘsΧn:‚ sZΈ0™Δ0#Ε #$Ω,f•ͺ3=U/F> iXΡφN< k`K°Z l5Ύw(œm½ρz1₯#ΙULa΄M.²V₯ΥS€²:YΊ²TͺIξΈτjΟaσΧ›>XΣTM‡Χq֟B₯+=Q*]ιϊΕ0ώzF±\RΤ7΄v― ₯F½vTθjy΄™mξPӍB°³ƒ‚ ]Rο,Ί»šυΏ»ŠPυ:`Ρ,ͺZΜ²C41Ό§P΅o+*]Ε|’ΗΥaί` ›ZEI΄=/’6Ά9υΎ*,ͺ 0―ͺώO’wZ“ν“μΣ²ςi‘ΎDOΙ•ζθ tK„³aσ€λ€ε€ε€ε€ε€ε€ε€ε€ε€ε“Φ±,±iF£ϋη=ŠfjZΕτvβΛ‘ν+oYŠ—°όΠΖER?‡ O›V‰bΨΌNsΟνδy…_ΰΩη²T΁§ι/J|ΙΙΧ!=j‹κΰ·―=Ζώ–Mτ»ήΎ2ΦΨ‘ΥBW+:P©{RڱӘΜςΩ±Zevz:°8“σ>¨έ«ͺ„CΙUkepPΊν―©8Dg>¬ΐ¨P¬F,zΦ’=)mθ8jq‘2N>…ΊcŠˆυΧ·φΪ΅ΜˆgΞ―ΦAη‹tT*m(`QŒ,EDVπRΐb£g,―Zc΄ͺ’·Ζ‘^Œ²XΖε!F»”;<”ΐ«ΘyΧ°Z„Žjψ·•Xμ»T«U8S7N†ηB;«ΗΛdVΛ«η!^ΝϊFΐΪΛ‡Z²φυκ^CWΥϊ¦4y$Šj―š»^―υŽ―ΆͺŒ]Šžή«έ” XΥα('ΒξOw©ΚΑ:ίΡ;Pο‘G«^v( ―£΅>αυν6Έύι2N~ŠΖ‘i`MvΦ΅ΕΈΤB9tžN^ΐΪ?ηρ6ΈXXXXXXXXX>9`kΐκœπƒgΗ Xόό‘υ ε΅q"«θkm۝+²ΰΊ? 
Σ,~^²ΰWΜΔB%-αžέΘXΒd‘œ0)J][в’XQ&PšQgΛhϋΙXn¬JΡΠKŠ·ΒΞ΅¬ή#yχ8*ΐ@A+‡-ΰ-<*iaΌ]0eAY9ˆZ6G-R³Ph©‹ΰ₯ΌΕΤאΗo'Ί"aΠΫ“ DxBcˆ ς ³°UΫbl`ŒQ&ΐωΡPg‡Ν«ž²±­wi“X²r½1§)\Ζ!ειdΪz8Φh”g/Φ)@™ϊ 9!υ•Ό•±l‘μ~£«j½,o€lΘT%œ’€€’Ž΅ͺ₯ͺL©ήd‹φ₯{Ϊ{K“έu3υŒUHr»λ|,ci₯ΛF\ήΪ3hΛrU1s€Vv΄ΤΑV½ΫBC­oνZ‡ρν¨‚W½zhMbυœ«8” dΕ›$U ¬D1”θ¬Η―ο΅9ς֘—o׌{ήj{«ω¨ΥBΞ0ΈtlMΓφ—2ΠI Xσžaσ€λ€ε€ε€ε€ε€ε€ε€ε€U¬ωO°yΐuΐrΐrΐrΐrΐrΐrΐrΐrΐςΙλ˜VΗψ»Ί&ή34{ ΪώyOI΅ΑδΫ~hΫ ™©³Z6ΙΣ­Λd΅ωSεΰ…ΟΚ«2Ϊ&…D¨ͺΡΓ8>¦Τ°§“¨8”,FSJ€«`I%{iZΕƒšύ009Œγ IQΐ& W#fά΅^²ΑB E~ &`1ŒF\\MΦLƒζlΖX ŠZθ0 ¦‹ωL©X!‡ŒA9Ϋ¦ΰ·ϊΑγ{­=[BoΨΡWkO€ΕΨLΫ@bc‘†U†7– ΄%Ϋ€nΨ…Φ­ΓΣ΅-=xυέ==rΊΐL€%mj|•ψ’§~`Ωδ]‰3%δQ„Ί„Ά@&š ™d΅θ$–ΰκ“­ τjΪ–ΝΩ°eΰψ^32Ρ/uΣ,λ hΕPΚυωTΗ¬‹’5ŽΒi¬Η<¨ωRš°U–E±–#€F±εήήAξHݞ¬ΛWΓά)eš†£Zκόΐte…’z²VΕY΅|o!ΖnΚ2™b…Z;ZPr›Bζε~½= `©±œ¦aρVH‘ ά—f@γMΝλy₯¬ΣχΥoμΩ”bΊT2ΥΦ3=V\u ց7ώk„Ν–––––––––OXΗV"wΗΐ³c„™ζO₯pJΘ 3b†W½vpݟ€,˜T‰ηϋŒq΄}^1SZΛ&qi›•₯KQ9+AΥΦήOfHyP[πaα~Ρ“ΛαQΚR™Bl“c ElΩ$ ΄EϊΛG鐇ΤΣ‰—Cη"rνή c ιZNώHHqDΐ υ ej…8˜pό8ŒPg0vλ΄bΏ¬CΊϊ­ή#({‚?»υ[Rχξπvεο5j2֚° λΞlŒRΡΊ=αT€¨€Ζζω­₯*ŒŒC`™π™――ͺ«@Ϊ+0]H„ύΦ],¦” ’©ά‘:fΥ“A‘r΅ώp§)`QκIfKέUw1e©vγ!nΟp½ΧR7šΟR±P ‰WΏ³/š³#υΪλ8½Φ ©κω©½}κIέ±μΞΓ-•Γ؊ν[JΣ~VŠ­ˆƒΌ ϋΘΦZΘRA\'ιι%)qήμ±QΫ₯Γ–v#΅0€·#yχΣμŠo/ξ%Λλqœ ϊΐin@υžW§«‘@Υ)X‹žaσ€λ€ε€ε€ε€ε€ε€ε€ε€ε€ε“Φ±¬Ξ ?θŸϊ@χ€Ρ”%}ύBΡךV­UΧΞ¬υ ΑR€ͺΑAλ{ςώiX:M2ί5IB€0_¦Bχχ “ΐŒε½΄"SΡ©E:‹œx²w΅ΖόCθEΗ·,Ϊ£YWΰ3A«@"ΉB-B9€Αμzއ©ξ’˜ΟΦ–"Eϊ, ΅Ρβ±ρσΆ‡B„Φˆœq‚¬ƒG*G‰Ίϋkτ›ΆΥθ(QνVά£,bσ²­,ΕΈ‚χ‚₯h8ζLxΊ²Ή{iSΧς]XH_{ω°ZgΠV!¬*€•HSE+5*‹ΆXαTdΊTχDW1•8eθΫψr_Ό%’h+'ΦR 8ͺZ3N£―•’ £Ak)_1β,Λ穌₯X£ιΥυEύvΧ%›[iOYG5ΎέuFσκγί;uΜ’_½HW$› m±K C…ϋ‡±U+0¨° +ŽΗ˜Νv/RΪν™,Ι&Έšρ*°π]ΡΚ“ν-±‡‘—>›WΥ*ƒ!Xh·7Ξ9ELœυ₯«hΝzsβΎJχp΄«»ν?:‰kΙ #lp°°°°°°°°*Σπ²GΨ<ΰ:`ύU芹ν¬‹Ÿ•0 •pΫVΉ°|hΞγBWΑπ€Υϋψ}hX‚†ΥXP»3ηLR‡τ}ές½νιΰ«1½:€ΣΟωTRΧ™ 8iμμm1ƒ^ž‰ύ)+ ’“θ!ΕCb>εΛ[jL€έ%­0ϊJ$§† `…`1±»-ζΪS$c ΰ¨:·4Q—~Σ»MΞςήήNW—ͺ°°°°°°°°°°SlGΠ>X…P ͺΥ `%3Ϊ’7/¨ΦΐΤ’n{‹Jρ χ —~Ζό.ŠΤJW:>Cι*5¬γX «―}νkkΧ]²dΙη>χΉiΣ¦9Π8`Οι•W^5jλΟώσ•W^yχέwoΫΆmςδΙΐ¬={φ8`9`9`9`9`9`p:Ές•Ά£oΏ§§ηŒ3Ξxλ­·ψ!μG?ϊ‘Φq›φνΫwΙ%—|λ[ί"`ϊU¬Dι†n˜0aΒ_Xh ₯hšΪΰKγ₯NΕ3€Ÿο_ψ¬Φΐτ±Σ¦Όˆ6Όκ΅CΫVHž;:Σα"]ΡCπMw›DΩ–˜dJ—K:0ό³οS»?[xoN²ΖK!ξΚξ‚ItΫn*$ΆΣn”Αžβ#]'Έ0+’XYΣαC |°‚Θ>ZM³)7C΄ΠΈBΉJe φd*bS(•Σά= Ήν* kTSͺ1ίέ_pJλpS ν»Γhv*ΌρϋΕo\πCΡλ5P'Ž΅ΒXΑ%$*Τ:>Cοyϊ ‡π°ŽX«f°}ϋσζΝ»πΒ `°N” \5nάΈΡaΒΣίώφ·Χ^{­Ύ ΊΊρΖ°°°°°°NpΐzςΙ'ιŸώιΕ_όΪΧΎφΥ―~υ7Ώω͟όg¬γ3-Y²δ²Λ.ΫΏ½χήKΐϊιOzηwκ Ο<σΜε—_ώ—[핉hͺbώΐΐ¬CM«ΨY°h%€5νa‡ΣΡ—ΖG§ΔΏZ. 
ΩΎroSL―¦‡BG³Dَf‘σάDwΠδ³k6‡2;άcjφΙT§χ€0Φκ ©λLAΤM‰ϋ= B‘h€,IM΄Eϊ5ΨͺΟIΞ°ωέδ… Ί R%‘δA9ANΓκοΩ7Θ²6¬lC²v`&΅ C5Φy―UœK—6uQΐ’YƒnS7‹u–ομ‡Υpz+€₯3!ΐT`+πPE"$NUσίsŽY[`KΛe!Bu―ψ>ΰ½hϊφΙue¬ΩΥ‚CBγ 6~°u«;ΒρjΫvi²Ϊ¦xX!l<Άΰ/ { ζχΙγ@―Μ τ›z€ˆ5&tμb&‰p5ζϋΠιΧ†€Θο‡bΟ‘ήσ Zk΄έέRG…kv†WET’€T“9lY\m¨Ή{`MKΟ[Νέh‘–³@ΆŒwΫ°¬Π²o`[Gߎ~΄½Ψhm)§hοφΨΒ©Žg;œΊxΞγjΣ+‘ρ<θ™©,±—o η$œœΚΫε±_ŒFρ7Φ Ÿ1΄A)¨’ίΫ>œYS·Ζ7βrpΧι2Ιrl © πbιΚy΅x™lγ%۟N2#υYΑΚbΌ œοή•+;˜^κ —[D@³_’%CrυρhWˆW96„+kΓ6ϋσ½TΏχŒ6=˜’ρΥ~sόCιƒΗ §(Μ§«°/-HN/―Q8Ϋy^οΥ°ZΎx{αα>.δNyφτΛ2˜B8ΉθΌ΅πv{Et2Ηwm―šύρ+ΟnΑ|ψ-8Jgx¬Zooπ˜ν­}XƒΞ‘w现}ϋ3fΜ8묳τι‚ μSŸ°>Ήιoφoqσ¦3Β„™I“&Ω¬_ύκW7έtΣΡ·γ"·O>ωδ“OΗ°ΆmΫ6jΤ¨¦¦&>}όρΗΏώυ―ϋiwΐ:SKKKs˜vξάy{˜0ΏtιsΟ=—?`ϊ—ω—EθΏ`ω/Xώ –‚εΏ`ω/X XοΝaϋΐ]|ο{ί»ζšk6lΨπΖo\tΡES§Nu qΐ:Ξ“J„‡ΎόςΛοΌσΞΝ›7Ož<°ΥΪΪzτχ]Ρ˜φp핉ƒ3ΖΙpΒωS™•upν\ΊaΙΓ”€Υ?υ4fb L˜-©]hΖ2γAΗ”‰u;͐觼·ILΖ9v/€LΕ”,“$o±£ή’νxά`ΘΌαš4ΈβψΑhιφεmΫΔg+ΊxT’ƒΕL¬PΠ0μKΗMΌBHLγHΖ]ςφ4X’£Ÿ˜πΑ4)kώΝP((GŠρz8lŠ+wW“~Ί“-P­–s°H΄M{{ηoν`Vw=§6Zx ρ3«vνϋΓ{­‹ΆΛ±ΥBΊ?# >ζδ³δe―ωXš‚–MΏtΐ`a|₯iXΕψAΝ‹JXy;ΖΙέζEFΫ•ι$kau–ΧAΩˆΛnAsΌμ@E[ŸΞψΘ3OΞfφδuμ‹κf4ƒ*:ΛW*¦,ΝΠβ%ΞΩf&«ή(KΣͺ6μ©&~ΩΑtΌ©tΩ‰n&•0'N™q •l~ψμa€ϊ k±±ͺ`{ΘσΥ@((FY'β"KAΫ―%ΏrΒp t΅¦₯g}kοΚζξW7΄-mκ’;<9Œ@{AWθυ’©½©Ζh«@Vδm5 Ϋw ^υt₯Α|ΚtV„*I₯ +³fΉ°J 6’›‚.«ζ©)ΉΦ=,€ΐ*Hεγ¬>‹PβE^«EΪ³Žη„Ώ‘ŠDΨ;PΚιν*fCφj}Γ|VvT‹v«ΖΒ|ϊJXZ€Aa…θlIΪBvxͺίάόύU^g‰–‘μι ¬O!•ξσ4pB|JŠίŸR’Φ―ͺ4S΄Ξiψ/ΘΦ$Θ^ν֝Gα+dAΆO†NnΐΪΊ|„Ν–––––––V°Ά½5ΒζΧΛ«”ΑXέ“Fχ=y?ZΤφϟzΰΝ睋J„Δ©ά°B ΌdΗcύΥ³m\$ΌzΙ(’™ Α&j μΩ’PUvΣAkˆbœf7‡ςdŒίŒ|JœŠωυ βΠq&Θ‡ήp(Ω”J&’KΟ»s-;b‘ΰς`‹‘k2ŞzX‡[‹²cCΒ>¦ϊβ³k†rη!|FrͺŽEΜJ˜‰ΡXFϋ" ‚΄Xk[zΘyΝ]‚_X™Ÿ‚v"Žh˜Iϊi6§Π°Κ˜—Z½τœX†’’•ΰ™δ²šeκx{’Œ‡_ΛW~²ZUςοΘu$•$ΜT`Ν*^Κ[ΟR`:€ Φ1_QH‘²].Y΅ύ5q›μ7IξV(,‹!’"‚(`ι–«wNf/“Ρo3ξγ­E•-œ«29χjΊΦ%I[κ2·„]Y•Έ(:s†_ηΞ]X]-‚c³5:γ΅γ7%y΅Δ±ΙεC²†)Αs!ξ+΅lGΒ‘+Ι‘Γt˜·πI•–OXXXXXXXXy:΄ύν6ΈXX•ΦφΘχX~ΐ4φΑ—Ζ͚,>’o>'F£/oXUκβ[$Ϋ}ΕΜCΫVˆ9BΘXίj­P&£ξF΅.Ζxφθ1)W±»gζ¬ ™Lϊfx#]ΕxdΑˆYΥbšmRu 7#… ˜Œ΄^;ElP@Mƒ\.*dΘ—Ν΄tχ£I,ΔΑ€]D±ƒΏ§“Ÿύΐ@/γ₯|aΠͺVΝ³¦DHCQ쨩S΄?΄5!±½%ΜƒΊΦ·φβΥ-{{ίΨΦΡΦ#£ίωROP0εh“¦ΙΔ‰:;VΗ`¦V‰ ³bx$ΐͺ’p)‘ڐ¬š#%ήΜL °f΅—V ½%-:Μθ ’°9Ι ΐΚΐW¨“UOΛΚΣ—ΘXP‰xW˜2€•qαΠ¬ύlv|¨’_}">€ͺΕ€VΟα I5[LT§Ψ§v»ι~Θ߈#αTυfΘGΒ=Ή§£Y«{‘ΠΟΕθ@Nςυ0ύV–σP5i]Gi$€¨ΒLc§; ζΡ(އ%Ÿ$]9`ωδ€ε€ε€ε€ε€ε€ε€ε€e«ι6ΈXXu=Β€Ρϋ¦όHςά9‰ΏθŒqΔ¬ΪΛ°04ΤV ±ή΅α³"¬!:Lw•r°¬ άιOύΪ½\ψR₯BNŠ—θš Μ™£ξCS ±y²q\wˆ ΄u` Ž~§‚@AAν (fΥΓ&Μξ\«α4C}P£Ύ†l-F.ΔΪ26`“Ι’bE5›T½ΧΪ³Ά₯‡9μTAQhx AW -¬†™φΰDΚΡοTaΠHrŠk1—?ΔΒJ°Τ4aύΌfΜA%ˆj1O‘β¬e2“ƒz,)Π[ΝsϜd-C-c%V(+yd‹δQqΒTYΉa~z:YθƒΩ„σ}Pλ3χλ+”XΞ…tT%TοPZuτ˜J[Z{Ί΄|1ωδΧ‘¦@f­SωYX@F ·hPκI3ΌˆYDVœo~[ iE+„dco$J„zΐΑ‘Uέd_¬fΓν^{›t@FόΗ€η^’?p\B ₯+cMB_†OžNnΐΒωYσ€λ€ε€ε€ε€ε€ε€ε€ε€ε€ε“ΦΗ X­coλœπΠ•θƒLZe"θJλε h²πΩ1RQΗ@UΙXΘΌω+η ϋcΤ‘εΰPΩ&–― ±9φι-’iž΄BνάΕL•Ωώΐ1τ %‹0œDλQ“3Λ¦C»ΉΣ,&ςhιξGN—ΡΘ !Hkχ…;ήGU¨™€˜ϊ4fΟF-°ƒ@Bΐ ƒΥ5έ>>ολfLUUfτ7wυσγD­»­LΡΐX,³΅½h…ω•ΝέX+hαl3όψ°γ‰=ϊ€vΣ2H₯JD™Π‡²,RRjβϊ ΟI"Œd%B+SΣLB<jγ™ ΪT42Λdu•ylΉžΧΥ@AΝ6mc5§ϊ΄΅ΈδέEυSΛ ˆΥbΙ’†$w`–΅Z°°ULd/=’h£ZαΦR0)ΑΏRB»Θ‹O΅™σΑΆ aΠIv ΅P₯]sΨΓΥ·ζ&ꃇP Š#WΊχΌŸ dΕkNBόJeβΎ–Μ"±i’:oHE(έKAW €„«6.bq0Όε“§«“°šΧްyΐuΐrΐrΐrΐrΐrΐrΐrΐrΐςΙΛΛΛΛΛΛλγœξZ7ΒζΧΛ«l{Ί•ŒΕt+°”œχ…B–Uƒ4,2VZ(c gOaiBa,@ BΞPτΑŠh28;ξΦ-θy₯S}ύ1–0„ΎAtΑτ=Η»ˆyΐšz2YΖΪΧΌ­iΘ±KΨZτmη{Σ™˜U`8ήJX°u β–ƒ0˜’½ τ8–ͺ«E‹jς}Όp$1u,|RΝ|Š£·Ϊwβc,yW,Aΐj VXx«ΡI“o˜mΖ±iΰ0ΛͺπHMg±ƒ³K,ΞXTv‹‘Nγ_QάΠΨ»GΆξνΜ9R&S­:h!ΌϋσΰΚ ]i.”f™ςˆΐRC¬b,žΎΛ8‰gfμΐ"ρξ 7‰­JΩ ΄Ÿ¦'Ή†½δΛ\β°0΅θkμh―›JC2σG+ΌΧI¨Φ',U>h<ⲄŸG[ΈμhUm±‚grΣ…}ύΔ{†γgϋφaΧʎq˜0O¬ύšγΰRΛγ“‘[œ©O½ ΩW¬…whύΒƒλώ„v\ΠΚΛ–––––––V°v―aσ€λ€ε€uΔ’„ΒXΣ`΅»£οΙϋk―L€h˜ 6¬°\Κ ,;ΈvΨ£·οD€άΫ;ˆcθ·*pIΤγ@Z΅u@Κv —šΉSzS„[Y- *δΓH9!0GŸΠψT(J5Ί~'e„q+Ί΄‡·PΩ¬ Η- Ÿ4ƒΓhξκ§τΖcPΥ&z±CKƒφιΨ4ήpxcŒ| α΅ €PiRΟ-ϊ`­oνmκμ£JΈ%(†²>ž=WD „7aΗ*ƒ°l+†t΄ ϊf©–€£Ί aΡZvήZ†R=ί5ΔͺWΈ:Z)©l—tΊFΰ‹œD?'Β™βι™ŠžUϊ <)0ε•ΓmSΊ™λΡ&Œ‹o―R]naw εJλf±, TέY)„clcUΑΒxΜ°WΖ2ME4²”±1“ωt•νύ½©ΐ@΅ͺΚχOΠσ“p'ΏYT‡{;ωΝ»8p?˜›BgΗ.•˜+: +πSEδ8AΥ-Wm\$΅PAWθ|VΟΖ£ΦG¬–#lp°°°°°°°°°|rΐϊD ­kβ= 
+`«{θΑγ„ŸBE*Λ0]ΰ°VΟFχ‡NSBBHυνξ―νμ”duB³zcŠq­¦5ςXb ƒP#`Dpu’r€ςœ­‘—=Κ“/|Τ,Τe‡α* @„9Ϊπ„x†c–=ο*1S­ »ϋעȁ³b’}O#›― …#‘DΨΣAŽΔ–ωΑc½9VZμλŽiώκS$•>μ’'ω}μpοξι‘'žβQμιƒΏ<ΦgΚ<>]ΜφՈΕr„ŒLTXBPŒœT œN²˜U Γυ€U‰ΓΗαΗoίa‰9°&[ΆΠ!8‹ ͺ Z‘Pi‰H‘θͺ">2ZiΆΈa΄yί†+€₯εΉ}σ’Τ¨b%ώsΐ|,ΕΈΟz}U4Κ‚5uƒΆψ£΄υ"υ~VώK”™Y9Xω“Œγ–4lβ8―.SFυ£ΊZ₯W0]½―p«œ\…Aα€ύ6―Ÿ•:‰Ϊ$E~–Ξ)™€η°Έ;ή½€(α§udφΈ$AΪπͺΧЎ/cΔ€΅gσ›\,,,,,,,,,Ÿ°>)ΐ’P΄κœπ΄Ύ'οBαQ²έ[UχvζΏK Ζ,ψΪΛθη.ΥύΔΟΊF‰ˆ°±­<‘4Ρ;Jύ=’m—sΝ\Vɏoa6·ζΆΗ’{ O!8Ș6[x”k,ΙRΙ,§ΨιεBzφm+p΄«v‰ΚƒγΣ°Š₯·šΪ4¨ΰΒlq5LƒΥh£_|Η.~F'μ…,3Ά.#ΑdΔ~Xηͺ©³ 8΅½CfhμŽƒaΙBœΑ¬ΰΑ₯“υ q φϟ«C_˜Ηo[‘‘,{Χΐ©6 ŒMμΜΓγU"<’ΘHDkί!€ΥΡ\©`¨ϊ”V’4W§b… ²`Ί7ΤΙ]¦¬¦₯r„ωΥͺΉ|ζͺ„)™ͺtUV»Σάv›]^ΤjTεK%H•Ι¬m½ς’F™ιœθzώλ½R}Ι8€@ J&ͺγ‚D@πO‹ύ,*ͺbqδlΫ΄ΊŸ!ž =Jηix„~ω_«7|S‚{ιΟ2aΘγ}kKυΑ8俈S€'4|eB>;‰ͺhXΑ%0 ·naσ€λ€ε€ε€ε€ε€ε€ε€ε€U¬Άν#lp°°> ΅=ς}H‹e »&ή“Υ@›πΞ™΄@64{ ϊAQ “N„°ioο &―°[”όθ YWLŠDf†¨2τtR[ŒγηMΥ£!Εo1ƒήQ}K%Ο$“]cFΠμ΄jaΤ/tψzΚΊνθ.J>8ψθδ©‚cdΤβNƒ'λ0β `wVκ’³₯ˆV-ΩοΗΦ΄J£5³ƒœZχ €«˜ΖΒ $Zν 38±tqΔΑdΥT"́₯3†WΎ‚Fa%2–€―†Oΰ¬Z%QΕ ]XθD…kΪ^ιmρ˜₯Cs–*ΆΆ&Sέ ‹δyE%‰ŒUΚjdψ(@*ρYV“•κX)υ;ΣƒςŸϊMΩΡΨΘΡ¦DοΚέbΈ5γοZ9-VΒ㫚N¬Š³Ρ–sγ"\tvV„ν…3ŠpΎpι₯»σ|ρvλλaFoπ_&!ͺXςΩ“ν‹xV%„3†]δ{²ΎΡ‚aΫ ¬“₯@όσF0ΝK 7yœΑ«žδξ€ε“––––––Φ XΈ!GΦ<ΰ:`9`}(Ζ"]³ΪΗέ!€₯ ₯-,‰€‰ΊΘX΅W&Xό<ΊEŽΥο― Ρ*s}«h[  ©…YδTτ˜τMڈͺPΘΪ–—B‚0H’Ό4uœΌΦZ‰O9Bžte$ MάΆ‚teέ)i“¨q”Nβqˆ;"β`πΠδhSmŸ9ƒCΐaΠU5‘œΊd# ŠJAβ‰Μr’i4 bV»šŽ’΅υΘΰΨ0/ΠΩΧMα5z6ξZjύBκ)q(;[Jx―€3«F“(­^!*Φ¬ΰΒΔ-dΌJκ«Κ^Ι­΄’PŸ2»γ™Wš± ΰzŠΤΡ@mΤδBEFS}%–u‚­¦“ηw \ # ` +Ξ€εUlf©;Σ¦Α–ζγ—v ΆUM>³2«—ΐ’« £ƒͺ?ΏώTΡΤ¨²ΥW• M―UcKΔSΒt•§υζgκz-ωάς³5¬”)ωiŒ‚ΔpœάnΛΠ’σ‚±`°hAJΡjΕL`%>8«u9`9`ωδ€ε€ε€ε€ε€ε€ε€ubnΞ‘5ΈXXΠv?pσž‡nWυ>~_Ηψ»hΩrΚ,•–°ρ)Φ§ΛΓΰŒqθιδ‡ύe‡»€K±bqS§€f,ˆ₯vS‹Φ£;ΧΖx2εY2VΐG*χκΙj3€«ΘYR:vYΌ–f€FdŒa>X$p§κ‘Ž‘;fΠXV™t¨Η•½c΅< p)F£¬¨“Fιkν”œmΡ‘§³?@'`%mh7J}Kp$DU9Θΐ£ΰ0j 4’;*‡Δ4α”,Μ¦Τ†7ς[))ιњ=*i…σ¬Έ&›έ³)Φ¦¬΅™Β,ΆΐŽš5Δͺ>% 6€)W]­8+–ͺ–«ͺ°€`ŠbΖιN°χF™ΌŸΐ’"qV5²œΏ―`p`Δk›ΥžτΑlcΫπ“2¨jlq˜(Υ₯ΓΛεbΆ―”―[Hϊg88r"§Ε-^DζtΉ©<#τSΘΗFU¬ M*’υΣy$Ι£ΐ¬¬«ͺUGϊοEφθ?‰ς_ΈcγΘ Φ½‘ωB)iœΧ§€«7Ÿ“πλ8•–OXXXXXXXXy*<α>Bσ€λ€ε€ε€ε€ε€ε€ε€ε€U¬–6ΈXXΦ‹€ΥφΘχ130},k’5ΘΗJΌEΐ’š†ΣΗξ_ψμπΊ8†CΫίKͺΆm@“ν}σ6·wˆ`bΝΠG ―„4–-‹γι")ŽFΓ‘φ‚)dN˜¬€4ŠAQ³L*q4ΈΜ«ΙVŽm)+ˆ#Ξ€/!l€fxΐB0ΙΏJwŠg»νŽ]Œ…²)ϊ` τ ± τΙ¦ΒGζ‘Gkο0ƊΙg΅ZΜHΓ:Ψ#Π TGήbHοΪ{cQΑm d†q`¦5sθa6ΫRcŽ fK©Zš “#±B˜Ζγ’™xs°ΪΆΗƒ1XSο5eΣ‘b<ΆΕ «γ΅b`Ύ –NΈ#›₯FeaŒ€ήKΌ³teR޲!Sαf¬Ώ*υ “ Χp6Έ LŒ€₯^Vζγηfm« ΐ"Ώ¦Q„ςg{γ"€Ύn?Θ™˜ή€NΚUΌ|&αI—™O9!―Θ»²t₯₯™bΘrŠΑΕ~˜clυ$eζ\dqE„t0¨\U •PΤ U–±ψίu|ιΚΛ',,,,,,,,Xέ­#lΗ1­Y³¦~α3Ο<γ ΰ€u"[λΨΫXΐ,μšxžΚhΑF*‘ …XΉφΚΔαU³%Κn^.#C/ΌeoοΫ:dΘ[ ͺξ0œέ7xΠΐ#8#»³Ό ΠJ ­Ν€«¨uΑ.(†’„θ$œW δ9H§Šƒ”ηδHϊ(Γ_ΐ4rDiΐmά±e``cδ`φ΅γ³ο Υ #Z₯„Ϊθο%_#ΩŠςΜτΟ-@Cωns<νkάxXAΚjΕhθ”F`Ε±WΑ.+“ L‹ΎΕœBoΌ6Š‘nΖpϋŽΒψκhE sδΑƒ–]’ηY₯ώc•*ΰb+ήa€§ΪχΆςŐ½(€ΪŽ…eυ²b!ΗΚ°ΊZ*ώυ–γΥΊ]ASeJ­ύWxPUΚ”A‰\Κ ‘ ]½ωœH„°xιcmJ₯dΎ+1t&ͺΐβB<„­ άιiQU·ΑαLς?"uΊ’8~pΟ&~ 3UΣ‹ŽwH؝Šƒθ$ύKι*ρ“ά¨α^˜ξX–|κπq§+¬γ5qΖ“'O6―ύ»ίύξ§>υ)g,,,,,,¬γ XϋφްΗH7mΪ΄sΞ9η†nZ͚5λ‚ .ΈόςΛώ¬ε“Φ X–±XhΝ?Ύ ₯tήb<[d¬ιcksŸΘX6S:ϊΰΦΨήΡΗΊ~쎁 ~λ[{©ώbm΅-N(‰QxDCa,:`±daθΔmqSho4;FΔXpXd¦ψ.&M·nΑnΪΫKwͺ{Μ:'βΘ‘ζ’’lλ25Ž’’$ΗΒ ή'LΖνkI;κ\ΤVX€5ΠGΏxΪ²…r‡b΅Εΐ" ή;΄BΚ‚ ­Žήθ F+|πΞ‰„^VΘ#ŽzŠϊU%f£%Οχθ…­ςΊgi3Ε +Όe(‘7E‰ΠšcΝTΗ³U󒐧τ¬ΙμI+€δΗ4π¬6*1+`Yπ25ώ*…mF9›’Uί}ΉŒ7•έWE"μάU€qΊͺ$Τu‹β€…™I(hj’Ξ{jhφΥ³ζ3ώš‹˜ua–φ ΤΒ‹(oΑuLΚ |L{ƒRΟ² Δ±Θ`ϊμ|» dkΒ{’‡DκεΈ uk£ρUapΥΟp―jγB|p<žturVOΗΫρ vMMMW_}50λŒ3ΞψΥ―~5<<μΰ€ε€ε€ε€ε€ε€ε€ε€5’ιν·ίΎβŠ+.Έΰ‚ΣO?}τθΡ}}}X'4`νyθΦ–1· ³0Ο…Mw_·νŽoο}=–(]°πΘΥ7ΩxλŽBŽπTp6νBΜΊ"{…0#6 fd~TˆΤ¬<©*Ψ2…Ρ˜Α 2Κ~“«;£W RΖ3μ·'X½Σ_~ 49Ÿ]­Ρy!R”Zꜯ+γΫΩ‚#Λ))štψ‰«vfUΤΓέοαγjΩPΤ:,kαΩ†&P6ΚΈ¬ ”₯U.ίkΥΖϊ–Π*+ƒΖ?½Β:Gΰ›Œ5υ€UΏ―€ΧΓν;¬3{E σΦ<Ϋ±… r^ψΘLΤΤ‘¨Ά+€d_;ΠA=b.yH η(Rš vΊU¬y‡fϊG%= ς°’.3άε|†2”b⟎S$Z~œp΄Ω+„_œ@WQΒΖρs†ϊuϊ`ό©Rx$fΉΈ<,qΐα΄Ώ·k„ν8FΊ1cΖ|κSŸϊΧύΧwήyηK/ύΒΎ0wξ\g,,,,,,,¬8}ζ3Ÿyα…τιΰΰΰόγQ£F98`Έ€EίQVσoBQ‘vώΫwπ3νγξ /ΈJιŠMkφοD&›ύ»ΪΛΌ%ϋ{ε+” ύΑzT‚J0HO`‘ΧnΩΔΚ„Λξ\c$“ˆιFj‚ΉΣΓ0䘳§~₯9Θ?.D¨8 
ρ›‘ν½ΨΕΠΰ€Ί3P$fiε;ςY²ΞIq0Ω3Ζπί&af8”STΠ@~Ή_Wk”MΥ@•Ιz E™ 0aDμκκdζ»yΔ3ΕQMo―z'Teρ%δG³š[,Φ.lXužš+-Νk°vΏW)~W[zfˆ) ;Ώ”d;[Σ°bΒΩHy,3Η cUŒ94 ή&Ή€Uη•P_’ΡVcŒϋmίR[«±~Ί}Ϊ.gΡϊέU GͺG(Ϙϋ¬Φ¦WJ²kρK ‚4¬·„ςŽΰ·)ͺ™f( ž½^ X­[σβΩ£μ¨h₯NΆΑ\΄rZ †PZQΤΟ9³E¨Rޚ?λœ turVίΎΆγιš››λΟ?ίΐΛΛΛΛΛΛλ¬ύϋχπ‡?όμg?{ρΕ?ώψγΗφψ<ΨΦΦΦ¦έ»woίΎ}ζΜ™ΞX'4`5έ}έφ»ώΉsΒUlΰ*Π–³¨Xͺkβ=ΚUhέ“F£a¦6kŠόZϋκ$ρq˜>cΔ¬ ² 6ννmλ‘4mRBϊθ7Ÿ³Q‡¨A]ƒΎŽ’rΟιή)<Α˜ͺU8BXBο̐#~@.ΖiιΝW̌*Iσ:ZBΠΡ}} ΄λ HN―fΆuΗ.[ #GΪoΜή₯Σ&ΓL#€ P¨>œ4 ˆ; Ό•Ω+τ΄ {g&π_O,ϊ5;CŠ| ‡‘½­Jh’άsqu\~WŠύ€½ψ*žΆοd¨SSaΠp&£—\Fc31¬~I.B^~orUQ©RFΊ°ΆΏΈyMVυ\ΩΓAT΅5†΅†t•’Τι–ξhζg―\Σ”YŸ-JkΎΐƒ‰ΉνS©Ε% «X']9`ezϋν·O?ύτƒςι²eΛΐ[ΗκΰΟ<σΜ 6`ζΫίώφ’%K0σΒ /|ϋίw`:λόσΟς—ΏΜΛμ€ε€ε€ε€ε€ε€u‚–Ψ鍬}ϋόγ/Ύψb}ΊuλΦQ£Fuww“ƒ?ηœsvοލ™Ρ£G?ωδ“˜Ω΅kΧyηηΐt V{{ϋm·έvΪi§3fώύ'5`‘mΈρ›o½zέu—+lmΉύΜΠnԊƒ [°πΘ,x΄Α—Ζ£C”.~χ†‘ώžŽήΆžM{{»ϋяGΙoχMγEg*ŠUι;θν+΅ΦoLΧeL q‹9³‚VΫVpwƒ3ΖqPw핉藱œ›ͺ½<Kdαμ)Ρn1eςΚ¦’φ½Cρ°²ΩBCό`βΌ¬€”!ΝY«aΩ+lΩ4η₯θS`Γ°MjVm‹Ί hΣ@}PELϊLF±/lSdΦS+ΩΔ!Ό©qC [Ζε‘RNG}ͺβTάγ&ωοβPΣ;cI~ŠϊΤψϊ2 ½ XΚ7Šκ[}‰žΒS΅BΥ9ΆκΘPΩ£- dŽ‘@ι³Jΐ*ΤΖ’κN½οCέ:κͺ’ΞGhΣΫΥΈτΜkGπ ―J>{Πΐy~;@6ρ]ͺσVΗW TO/G $ΠWr•σͺ$­σ‘¨DbρΘύι s%œδΏ )ν)ω’β«Ύ­δ-~mO(΄rΐ:ϊφ_|ρΕ―|ε+ϊ΄ΉΉ€ΥΦΦvLώ[ίϊΦSO‰―υδΙ“oΏύvΜ¬\Ής³Ÿύ¬Σ)›ƒE`ϋΏϋγXΙΛΛΛΛΛλƒ+d£Ž€}ϋ―½φZύ/X½½½ΗδΰηΜ™σ©O}κΏώλΏZZZ>ύιOί|σΝ_όβIZ>²Iξύύύώο~ΦYgέqΗ£Νδ€ε€ε€ε€ε€ε€uβVmhh„νΓδ`>|˜O—.]zφΩgΓγχέw™†΅|ωςο}ο{>ψΰΎ}ϋ˜NeΐκμμΌχή{ΑιίϊΦ·5ΣΙXl,ΪϊάtΛ77ή|ڞ‡nmwGΫ#ίοœπƒ°ΠX=ΣΗwŒΏKx‹5 §= ά‘,ξΆύ=@«-{{% ‘½u‹ψZ-~>–Qk–&IšσΈ`zωΰ—#uHy‰CY@-Œ˜‹Ξ=Α“KΨ)£ƒΖN™z5πμL|αtχΆ_΄›Oƒ³β0CF…”?ΔΐVΑΎ€I?ZP/8ΉjΩ `ΤΐάΘ`AŒO6—HΝ―5œ§\%± £ΓΦΞ΅@a¬ΑhF;ΖΠ—YΫ‘<“L½*ͺ†α„e>VCΜ2‰Y6‹‘txΓ"!ŒΝΛυUuXΈ4eσχ#€KΕͺšνdΆ,•Cόͺƒ5 +ŽΪ«Ί”ρGšΧ3GXa‘ŒΕ<­– °6URΗx< ΝΦΡͺH΄*P¬₯Ρ©?gƒ₯±„Ό.ϋ~γ/₯@=ίΓ―x-(ξ,ΐ,=^_ΠyJ\KΉj Η8€7%±E ΄ΐΦΩƒ5.—λ!5LΊ’,+¦X­^ž€‡κaΛλ$¬Z­vΦYg­\Ή’O}τΡO2ϊtͺΦsΟ=GG΅γXΙΛΛΛΛΛλ§ΑΪΠΫξβώϋοΏβŠ+Φ];gΜσΞ;Ηκΰ»ΊΊΖŽ{γ7Ϊ g˜NAΐjjjΒΥ5jΤέwί}¬4ζγXΪX@«νwύ30  +0H 3ŠV¬ZΈoΪ/q xδSŽ4c‘gD·+ρ>X΄·χllλΕι[ϋΊ›ξΎŽ–ρΒX$λξ}=‰ώU”ό€/žχ‡AΙΐ’ωS9NP† jβ^˜>M0Ξ’¦‰GŽ:ΤΘG‹ωlΔΪpHΨ)ΛΖΡӁ1ΓXTk˜ΜCόΘIκ"Ÿ d“˜)΄>ьVVJα»œκλ–ένή@T•‘ƒ¬½MJrŠBX#(D5[Ι8c­$-;ΐΠV>uŒρA"Δcϋiάδ­Βˆ*Έ˜ψ©`‘R³«žœR=,HΛ:ͺW¬ΤJ^ΗΈι±Εю gτM-Υ—1‹σ,Ε¨±λ:£φϊ‚ƒGb¬r} £,JHπ%γ*WY‡t+ρ„5ŒΒ@έHή€13φPfx‡³6h†]Δ’!V8o+[g– ;U’γήθkf8B >ΉŠMpjΖ8Ϋδ^x3X'` έ{ο½ηœsΞ%—\œτc5έtΣM\pΑwάq―™>Ιl¬On:묳ΎψΕ/ž…°°°°°°>8cx°6Βv# mΩ²eŽG€pξλλ;ŽδΨ~½7ίz5Ϊ†Ώ±ξΊΛ1Σόγ›Άά~ΝΞϋΠͺuμm-cn!HuŒΏ KΠ:ž+‰hOύ2흏θ‰voZ5uφ-ίΩ΅¦₯G œή|ŽaŒ:#ϊVž΅sYχD‹Y{X5―B΄ΐ|Τ½IθjΕKθ»žΓύ­Τ(†“¦ƒdFΉΚ ²pϋJρ—oΩΔ4v΅κŽ?κΔΝbyΙ(HK‹ήξ’ΙL‰ β™ή\γYI0&ƒΞ¨IΝλΊϋ£&ήBΏ{A ΰ0ΠJ²0Nˌq€¨ ρΙXG¬"νΔ–+Yme³%z/Q"`.\φκ_'ΓU>Έ>Mͺ\&­gW’Y|1NWy ΑΖέ Α¬kY_±’ž―€Υ²!λ•ϊAUΖͺOu/˞³Z”₯ Q …<8ν[—Ea=Πd”±Ν ͺ’εxή3X3ƒΦΦΤbŽΡ".Ώ‘+εΛΧ\iZ=λ`ζRƒ)₯=«€ͺ‘–fψ/ΎΘtΧCsΐrΐΊμ²Λή}χ]Η£Ώ$w,,,,,¬°ϊk#lΗ1Ν™3ηκ«―^²dΙ]»ZΜδ ΰ€u“άW^ρUΊ6lΊε›D՞‡nέύΐΝd,ςΦήIcδ¦<€>οΡΠ9Š3Βϊ…`Φ¦½½Ϋ;ϊΔu}ρσϋS• ΊΦιŸϊzglΌιξλDP˜5K°ζͺ‹τCŽάGtά}έR­oρσX«αp`„6 $+fŠ[DΛ&- ¨Σ’Ϋuΐζέώb)ΓžcŠ΄f.§Rzβζΐ…»’“;+ιq1Χ›L¦ ΰφ•9b₯ͺy$ΉŠώUgηέΥ7ˆsΖ1ΰ¬}'¨‹ŒΕ *κ›Ο‘„D—Ξˆ ϋόŒͺά€ΕσΥκ„y­RWMΟž| €…GMŠ/΅EΛXκΜnqJΟ@ 5ιΘζš#oΙFΣΟrΟ€Υv τ …•;2‘0‘V6G`γN X¬ΗWο__G~`jŒV.Lφ•qᨲ5ΛZiTmάγsΕιζ…<£εΩΥδ›Β%ΌΟΣωŒT½ ήα[Wθ₯§0 βͺl K ―v©d– 8•[§β€ul§žΪΫρ¬sΟ=wTu:ν΄Σœ°°°°°°°°>βτ•―|εΦ[o?ώ²κδ ΰ€uΦ–Ϋ―af§ήύŸ_§eÎΡΧƒ±€Mxrα€%S’DΘB„xdώ»2b 3άΫzΔq(Γώ]‹¬‰&ΈπYΖt©€9rΜMfβφΞ΅T Υ-"Oe˜δ–1¬β Y9Z6$l²*O$6“P¬’oλˆV₯κ?Y/†ZρK[΅Φη#`₯«`ΩΛjyΆ”f ΄εxvž;U­Π¬Y?_ρSΕ^hU*Q–θ‘ΰΟ:q(_r\E*FΙϋ3“Ÿ©?˜ΫΝψΥϋTΊΝW9\Ω|}υ>!@§…ρ“§Z·Θ1`wψ―€…2qϋιK)σί0Όnξ‹΅>5₯]A[XP_Je%·ΙμΣΗ fM{˜8Ε¦ζΖ€+¬c5νλa;Ž‘ξμ³Οnnnφˆο€ε€ε€ε€ε€ε€ε€ε€uΜ¦›oΎω₯—^ςˆο€uΦΊλ.GΫύΐΝΐ#fΈƒ±4α9μΰ'Μ·»4Σ3}όϋ‘TŽ,e,ΦΜιι¬Υ€Μ Kδ­ΕΟΫΈˆ^›eN:ζ±GτΉbΎ0νaΙXg;},zvl¬†&H4*φ³Ϊ±~ΐ)τΡ`>Μc8<ΐY’!ΥW-9>OεCρQ »Ϋ$ήT '+Ωλ!`G10XMοέΞ<ύ¨‘„!ύ£œ„]W« D Ηΰυ`3££§@ΐ¬Š΅Aι8 ΔyP,ΓG°©ΔVνΠ}[Ÿ$7} Γ-KΡ*i:Σj X\mxΝ<ωψqς’Ν‚/<0σΙQf-Θ#‰t<α•Ί+UpQ½¬lΊzΨΈΊΘfMMΙ/Νsγ%QΩ,ςTο₯,ΩxΣ;ο³\ŒΆ!Εβiω’B‘ ‘:.ΑH™ϊ13ίTGx+Ή-hφ:'ή Iσ-²Ξmz<ΫV¨xΝ;Ό2pΑZxΰBσ―™ΗΗΝxR„’ƒ€΄“΄ `eΊJj J 
«r±9`|κκa;Ž‘nςδΙηœsΝwήωΛ_ώr‚™œ°°°°°°°°>zVΓΙΐλ€,Ά7_Ek@¦[ΎΉζšXΓ•@Ÿ–1·\ZΗήΐΒ<‹₯rϊf<ΚίσΩ'‚ˆ_¬ύ,Ά™AθΨ•Έ@ !P­B'‹·°ΔrΣέΧ‰ό·ψωΪ+E]….²Τ½ιΨE3tθθ”±μΗƒ}a508‚YέYCJΜ퍼b9^b„ΛsS«Δ6υ₯d°ά΅ξ}¦9[%+‰Y’­Pσ²6•ͺ¦œχh=ͺyρ­xZbμΧƒ †¨C΅š “‹ή4ƒ8Δ0ώ"-1ΡXΥ™ηrKΑΈΑΚ”xRͺreψύΒg‡WΟΙ€•tCž¨Β³”fj›ύŠ”p«¬%E¬rb«V/όiμω0δɘ+_*oΑψ@Χ,INw¬ €\L•‡Δ /? ΠyEτ΄»(˜―¨YdΜ]ELx½*BpΈβΥΧΔs‰HŠ*]y£ I”­si€4jΚ¬Υsςύn'α'΅ Φ•·raœ  ς{Νa1όOIν‹u Œ.‘.ΕkΔSgίΰΫρ¬ξξn»€­­νΒ /tpΐrΐrΐrΐrΐrΐrΐ:ž€Υή;8ΒφΙΈW_}ut˜FuηwŽ6Σ΅Χ^λ€ε€uςΐΌ²αΖo€±Xh,qŽ`α%@Lχ3Ώ D¨?ι3₯VΠkD€kΫF«CM¦r„~¨Δ:6t4σ…ΰ:(ώ /G_ŒύuRΎ}₯˜‹B15>V”ΑΩSπή£―Η±‰aιΛ(oiΈŠedln5cXςe\ ΥΆVΜΜu`l'„4g‰μ~βT(i"› ωΒ£K­*rή#`©Σchβ/ΪΩΒνϋπκΠ@Ÿ$ϋS”Q₯ό€,-h@JγV LΡο1΅˜•̍0 +Ψρφ:€΅j6£lΕǁσ&)>*P©b±„QνUΐ ”h:Ή’F‘^,υƒ°Klb[Α:ΫIh‰˜:9¬žΚlwΦφ·ΛtψFŸέ‚Υ@Λ$ϊBδa«XI.$wͺcBX§bιiRΙ)Ju*ψŠ ˜όβπ‹`(σβΓ Fύ`εΕxιWΝΆυ›£>ά€m=œ\Ξ9•Α±š fΠΎύKΛ³ρinX}€Υrm˜XΧ\sΝ΅fϊήχΎ·`Αg,,,,,,¬γ X{{F؎c€QυττxΔwΐrΐrΐrΐrΐrΐrΐrΐςΙΛλu [Ζά²ρζ«€V,PˆGp²ο`΅?φ3ΓΎiΏd"EΐzαΊ1ΕJ!αFϊY0Σ³cΠ‹kΤ›ΟɈΏ`αƒώ»ΐΫiεŒ‰VςΛaΘZ13–3›1Ž=5WΖvKH$΄Λb“SQ˜ΖEbCΔjZ%¬C§+ `Κh‰#ΰꂨ„IuB* Ρ+δHΕα]Ι|+5ΫΆ"ύβ頍]sμ[SFίv‰Δ‘bL$ LΚOβ'dΣ\ΘX\Αδ$‡!™ΐβΐ. XŒ»α]ωUό•π+n$EΝΛ€ΗŠcVΓ‡hr:Β?;ž1`%4± Uζ'%_(ža¬˜eΕ·§έρήΘΉP)+Λ—Σ7Zn«ΠRp3Ηc\M³»κάΆŠ–Σϋͺ†ašσW¬06SΣ­"`ΩΓ©f<ω™L‚^;½LxJϋ+ήωρ¨ΒΥ‘T­pSεqˆˆ ΐ²©xoϊ—žT§ξV,΄ΐ‚‘a V̎νώΡαΜ»²DΕ―lΣε‚_X#›Z{FΨ<ΰ:`9`9`9`9`9`9`9`U¦=ϋFΨ<ΰ:`9`³ΆcτυMw_·εφk€YJZXˆŽε;ž+}ΝΣ?'QρG~ύy_*°Bœxˆ εC"„Nύ²X±Ο‡…΄ΖAžwΟC·β[ΰ«θΦΛΠ§3Μ£Q…€Φ€½γ½€΄8΄jι €φψ«‚l«§1&…8”#za£sk5Β 4―ΐj^k½²8/‘)DkY™r€ιΐO”£Ÿ;‹žX)ό‹™ϋΞ΅:ΕS²ΑρgE/ ¬8FΔΑ—'Θ(Ξ Ζu’j£ Α’ƒ7_’HN―*†ρ‘YKfΰγο_6³ˆί–΄lMΓ<3Έ=΄THfŽ”Όbt[kpΗZͺ)—š])‘Ψ’ˆtηNiΌpΆT_e †€΅yy H,³.φυ\Ϊ¨TΪ·θͺλX«ύŠ !2²ρ6.Λ’V”φ^γˆBΠε>4ς(ΟUxX©4Β4‹—>ήWΌύΒmΏqα€Π―–W:rPέ­πW¨|χ΅YΜŒ––OXXXXXXX'`νξaσ€λ€ε€u,Ϋφ»ώŒ…G`– …Ϋξψ6ˆ!0K³SΩ-°H?θ ρˆυΡΥ2ς‘ŸUΐboK«ΡΓO™GO;.ϊ`Y&ΐšŒ¬BH°S‘Ž\bιNi#Dqΐ’6΄~!Jψ j šΫdα\ϊ-΅¨ΐΒcΞΧVOφd°d³Τ­ΗREπJnοςΖ΅s™ϋOω2kF[—1μ‘s‰@#ΚhF²iK nCˆpT―‰-ΜΗΈHKEβψφ°ΘXΩδ}ΩL‰²KfΨ tΪ²ΖZ/r©»X•™ƒˆ…•κ+΄i.Άd|σ‚’t׊GA€VΆK7Ži`‘^-kΣ’J‚Ό-Xo"o?]aΈ•Ξ€zί[/~Bj>N5rΣρ)ΣάhΰU@`λs(IK˜F!Ψ37«njκΰŸΌΰ©σηLvnͺ@T‘«ΒπE+Mo·cbτ_2₯«–1·μ~ΰf6Μ£)i9`9`ωδ€ε€ε€ε€ε€ε€ε€uΦώ6ΈXXΗΈ­θδŽbcΆ{Λψϋp {'‘ΎΟ–φ•ΒC_Ψ€^X`‹ͺ“ά_ž@Ι@‡m `…ΎX`kΞγ4j©λx{?ΒU3Ζ1ˆ,ΠMγ`ˆV)πFl„t•ΙƒΆH~Š#ΟI¬νβ.“፠Ϋj™Νθ«Υθ8P?ŠƒjMnΤ%ΡeΒΖ‹‘ψ‘œR°'Δς!Έ’’t >™AoC`T ΑO3Ζi0“™δ¬X£Α/Ίi‡&βl0Ν·ϊ ,ŽνO.πY=-œ£l2‘·†ς6':’’šΌ56g—§δτΘ7&3½υΥ ©Ζy!RT₯Rsν5i=0J”ω–4”Aω†7C=ΐ΄4Όa>>+t₯υΜΤΠGή’ZHg|»Ωx–(‡υU„Uχθ˜0{JΦdΑxz9–"ΎŸέ[vd νB΅Μeό―άfK²Ϊ λɟ=ή~Ι«Ν–­3Θ11* r\Εδ<ςΏ,–ސQ5XX>9`9`9`9`9`9`9`8€΅³«„Ν–Φ±oλ»όέωυΝ·^^ύ₯7|τ',τ}”νpkτ•θ=νΔb ΔFh΅i@Μ–Δvꦬ΄BνFŒνˆπgl±B6Λώέ4އiς$3φοDΒ™ΖUO$,±9†jέΆΤ©ΌdυΠ|ZNœ“_₯<_DP›˜έG)ά4G—U΅’@I3O£‰šΣΌŽ ’Β =$³8Θπ–+Ά€Prr§¨!’’¬”CφR%‘Ž‘Ι’pŒ¬Νθι•Ϊs”#Sκ½Β+₯¨R€SfͺΦΤσ\nO±#Ή†FΚ¬s…ˆυ΅&£"WXσ΅©|±Έ^hΌϊqpCB.λΪ  ’αί{Ckέ‚JαΕjƊާΙώ Σή‹•MEV̌Š7.RO-F%BuMͺ—Œ† ΜaNρ4ςTh‰Ιp«[ίڊ[‡-g™Τα( …!άBρN n’JThzϋΡ¦bX΄_p©’{aZ{e,<:`dΪΡΩ?ΒζΧΛΛΛΛΛΛΛΛΛ§ΏVΐΪ±cǍ7ήxΞ9η|ωΛ_~μ±ΗΈp̘1£ΜττΣOŸ€…ΐfmΌω*φ}μ™δή:ρAt—μ%m ¬€nQ ‰κ₯@υ$Dη‹ξX‹Hβ=C`©6Š;Γό© Ζ ΚΑ“ΑCβk’‚π'ΓψUέΣY’œ„WU$*bdUίžΨΆbxχΦ‡‰›ΫWeT¬+’(Ι£Β Dͺ gΠ»AάPΧΞΥtf›V+ΜXP3ίΉˆ§ FͺΖεJQ΄‹ WΚ6j‘΅ΧŸ `iš|Εύ!y–Z)RN@„ˆMz’’ΉGAΔ ΕΎHfΊ²’’fύ›Z1tΌΒŠ™Ί°^OT»Š(Ξ¦@TΝr! 
ΒܚyrυΧΜΛ„—D[ή3Ε‡ŠmRΎF7ϊΣo‹œήΦ¬€+σΑs…œΤμΗTΐ₯J(E« Q’&9Uύ>2R›Λ―2n‰ΐΦx$‚gΡ„P§BιŸ-ή™ΙhT«βhJ;“Ω™ΨŽNcΗθλUdγS¬‘LΫ;ϊF؜N°NŽιπαΓ—]vΩέwί½sηΞ œwήyψƒd6|η;ίωΟόΟΞ4 9`9`9`9`9`9`9`ωδ€υ‘¦½{χήy睃ƒƒ|zΫm·=ψΰƒ˜Ήδ’K/^|’I„lkω`ΫΆ;Ύ~pο€1” RbάϊMVΛi‘oe?N.‘ϋ%%+"Da+Qι €DˆžWςά“ΨΗdyf¬γ½θvY`GœBΟ.9ςΑΎε’ ±μ1Sγ^\ψςSΤP Θ1 ‡P―T?dxέ ±ο½a"£Y€FΗ€‘±ΔV4dX³³” J }>ηΔXteΌ)ά΅žυL°rςE&ΔΆJe₯.ƒYΡ}ΤΪ7„·0Ο][άEjΆ¨δ&ΐΪΏψkLZ2…έ’x‹΅Q0$QJf/σR^Ή`‹jŠ· ΚάNΆβdξΆY9?Ϊ ^F>«< f›Γ«η¨B—!ΟΦ χCΌΚi΄Dfwu©"šeG2₯ŠzbδahR•Π|6Ωζaά£Λϋ™όΗΣ*ηΨRKEΨ\V—;!\ϊΪμίEAΩ¦·§›ί;ώ+E’‹xVGbBε?όη†6.T―ΠΥ R'瀬m}#lN'X'Ωτη?yεΚ•^xα¬Y³ϊϋϋG΅{χn,,,,,¬c8miοas:qΐ:Ι¦/ωΛ€ͺ[nΉεπαΓ«W―>ν΄Σξ»οΎK.Ήδρ_|ρE,,,,,,,Ÿ°ώβiέΊuσηΟ—Ύ4fΜΥ駟ώΔOlΪ΄iκΤ©Ÿώτ§ηΜ™sβΪκo}mύ W‚±Ϊ§<„cΨσθOθ€Ε…BΘsσ­W +,ΦΞE―δdkνsY8K°5ΘaŠFΜ― π$^Y!$ ―'~1!ŒΥeG¬ήη=Ε„ &91=…  cC‡γ1M§Rr.Ε€rL’Τκ9ΜΒΙ C6 ¦^Ω·)²Η4,fέδΓhE&`©?V·HO#Ό7½1&EΡW̎T+mcˆMή ZJtΕ+™ΛδrΕ<­΄)ΔW‰²σžΦ ζQ‡6`Σ"KηGf«wurͺX9™6ΡJ‡jΎTšΧjŒ‘νŠδ€°rΔJR…uΤρqΚX ³J:L(vΰ­WεꯚG}6J{*n…›ΚSιΟRz³Εœ3&ŠqγΥΥϋŠFC/Ό€ΖW,o‡|Wͺ!`%΄²ξhΈθrι_ΒBΌ΅Ό²_m|ΥΊ]°μψA&`ͺΨθ‰EΖ:qΈκ¬Ν{ϋF؜N°NΚιό#pκΰΑƒύύy(,λΖo<ϊ;;ϋρUΔΪ¦»•mϋOώΏ]?aχ όΨ>mRηSΡώΨΟφMϋeΗc1/Εs¦<΄w˜?» r°uλΑΝΛΡ/Όό[ΆΪ¬)hƒ―NκŸω΄ή~G‰Φ―NΒFπ΄gϊψξg~mb;ΨζΡΉςώe3₯ΠμΒiX /νώΕΏu?ύσ=ΏΎσΨrߌGΡjσŸ9°όΨ…ΜΟώ΄ΧŸΨΏdƁ·g!4".βν΅ΉOν_ό–c+c^^Z·@ΌCC;Έu{xΓ"PϊΦ«|―@–lZ&!+Όχ†”LΩ΄3π*Ϋκ9x”εαρPΣ;RΊyM^ Ϋ\ώm8ωAhα449i›—ce9ΌΗή~0μNί(9ζoLΗ‘σ>cnXˆG.Τy΄ωΟ Ι^π^œ<χ΄Όˆ¬ΙWωžς]h˜g ›Zς>ώΠςWuƒςψΖtœdΉ@xd[6S>šΞΏυͺ\…吓ΏœI4=ilXbO&WΖ™7rx9<Ϟμg’ΧΪΆ°2_β-Δ&—GΘω°Ψ»ς5—pόο½)WΓω‹«ιΑΌ=+΄κΝ Kp;ρsُύΰa ±a…pηΘ-ξ@Ι©ηΖ‹ΥΈ_=ΌΤδ.ΪΊ/‰™Θ&9T|FlDNTύ•*ZX'6½OΠp•— \-ο4άα»ΜV|΅ρδ£6|mρ— ρ©—ΐcλΔ1Γ&ίϊΗ~&φΕό$;·Ωz{kψψxό86ξ€ε“Φ1˜:;;ν―S[Άl5jTww·]η駟ΎόςΛ?0…ΛοŸ|ςΙ'ŸŽ>mάΫ;ΒζηΠλδ˜ήyηΣN;­­­O_|ρΕ‹.ΊhόψρΧ_½sί}χέ~ϋν'Τ/X›ο½mΓΊ•Ώ`νόΩ=όk`ή4όΊηџΰΡΚ?ΩλȏC³¦`ώpσό3-ϋς7’YS^ώmόg7όο«?;ρ§¬žιγρόΚυϊ˜η[ψ3ρ]σžΖΣύƒΗζ}/ŽdΟ―οΗ»XoĚX»ΖΌw™ύ;ώ”ΒίδπψΓΟ¬)X-ώ6ω«y-[ό]ŠΏ`₯ίBδ7,ΩΊR~ΓXΏX~?ΰ/.ό5B’Y·@~~ΨΌόΰΦό Š?GΙΟ›—s›ςBψα*ώvυΖtώ„½Θo±ΙšήαZ²Δό‚%?-„ δΰ_"ώ€?8Ν†ηOMό"ώΑΖ₯Έώά₯/₯K¦ΏΖ™YS†ϐŸ1–ΌTόT&Ώ!Ω?ψKIϊέ(ώV€?hΩίlψ‹NύOYϊ ύ= σζW%n*Ÿτ«€ώ₯?z僩-=΅o¬ΌjΦ‰Ώ`­_¬?_•ΏuιοXφGΈτKUόΰu υ—§xαZσ½ιη«β79Ω—›ήκO<ώ$,/Sρ3•\,»0ݐ•ηφ­Χ⏗ιή°_aΜσ'+~­Π8―Ώ`ρΗιψ Vψ‰š?bΕφΨΟΈόόνκdΛΛ§χz|°ΎωΝoήxγ[·n]°`ΑΕ_όϋί~νΪ΅gœqΖο~χ»;w>σΜ3gžyζκΥ«ΎO>kΝ5ΐlχ7_ΥωΤΰΠkΣLœΕ%‡·uˁ½M‡Φ/<°ψyq₯²9ΧΣΞYν)7V½ž™²½oʏΊ&ήΓW[Ζά’e ™₯„W›|“:rmΉύfbmΊε›XήφΘχ%a+ψfα]ΜΠBγ6₯ˆαό©κηΛ¨Ν‡υiβΥ=iτМΗcmAΊΟW-š²ΩχͺΩψμ½Τδ©>‘(™kΦΌu?Šy3)Χ‡ΙLτ΅ΗSImV'-E—ŠFΫξΩS’•ΆfΈΏπ3ΝmM¬EΨ°ΩΜ*ΫBM%gšjΆF45i7곕sπCB½&JkJV>Άp΄Ρl]›I`/’Ν΅Δ!O†ς †`DΞL,›ίΖυ³ΟSΚΑΗc—¨ }2ˆ*σχiŸκρ Ϋir½f‹σ½)«βΧ•rσ+^\EΚκSŽ7¨i•l°”…VIΣ:›ζδΔΛ‘uιy–lΟrΣϊ•ΕΎΏ1ύ\•’Ω;'ό ί/6>εH.Χ§ό22ÝwΜΪδjšό~¦^9XΪzG؜N°Nšiοή½·έvΫyηχ…/|a€I\8wξά+―Όhuωε—`†»–––––Φ‡™ήkνas:qΐϊ뚎Λ(BυkΨρΠeeΈ\p'žγΫVά±Z<Φ/”Ρ|‘―G|‘j`­YΖp :4[gˆEǍ—ΠαΚ`’DHh !)CΊΒ<xƒ₯°VX ahτš§ƒC΄7Γ£°SΌ‘oΗr²Q—g¬r\dEΆe3Υϋ»2Ί>ΡU–ΆΜ‡ w:ΰKΥ (ΨΎΰ©²ΪΚW΄β‘ £ν8ΗΝC|KHd¬2jVƒ« ΖŒ5νaπΕΰgKΛ Οό%/όښnD³&ς¬o˜v*θc|βΨCς:υ§±{y쑎 ζυŠ2yτŸέ™±•„‘ §βEnά*0ZηGΟ3©ƒ.υψχ/™ρ:‘n4Ό`ωΛP °2Ž‚¬ΦI¬xL$°Λ£D_ž`Η’Z;‰ψΩλ½*ρδ ’u<»¨ήωzY¨JΜ‡šΆ–4φ@Wόbς>Q—”bι©V)΅εJωΰΖsήΛΛ',,,,,,¬“°Φνιaσ€λ€ε€υρzΏαJ4–&lŽaΧΟΘT‹ΈQήΒSϊ…Rΰ“ΑΛJAΩS'€PqP&qE„˜σ8ήΎωΦ«YqΣ-ίDΓS`·°αΖoμ}=ψ‰˜ήB_M ±>ήΘxb™3κ C₯F‰FλQθXΣ-ΥΤ"ƒZ·N’c¬(Ϊrx:oεBϋΤ )X±Zb.θΖ‚Œ3Ζ‰wQ)5…b]ΒXΙ6)‡Ο„GQiMΖ‘υ-.o$f3TΑeγ ωΤ+RυείΦK„$0ε° rι~i—•|,³/WpbγiαRkPq±š=EόΜfMn ―ўΚΪA©lGγ4’’’'ρΒ⦱TUC²#΅‘7¦Ώ―>«ΑSMO&ίΥΊz­“7F‘λGKΌ Η¬₯ԚγΩH•4΅Ϊ£ήœJ’ΊBΕΕ*9ΣΦγTvJ ΅&­"¬hΕyVߌGyo¨,h@+κSΫ2ŠΨ8ε€ε€ε€ε€ε€ε€ε€ε€ε€ε€uΔi힞6ΈXXŸ„PΘ™ λV)n»γΫΐ’ Β0k\ ±šJf±ΜΖE˜‘Α¨©Z;χξI£Y놛<§Π€wάϊnπCΌJΓ#x GΒ·0 ‡WρvϊΒ‹Γϋ³cΤ±ύhμl /eρeήS"κ…Μtm.₯’iΞΥ\μμδ‚βƒ…±rN@%ΚF¨+₯Β>Œp ΟΡ΄έΤη‰οM±™²T.‰ͺ ±X7•EΊzŠ2’bcΐš>€C)ιJK2Σήi£om»KΜJGRιLMM„Χ,ψΒuπQVΌ R©’Ο­%έZ₯Ύ?o¨#TgΑau ΰςζ>υ>‡wXΥUΟ31«κ_,s³•ΚΑW)3ΧόΑ-΄bf6^7ιφ1Οέ…v;Y,tα[•2J‰’pΥΤ„]Ea½ΎT‡Xͺ kSΊRYP3Ω-xΕω“ ­NvΐZΣ3ΒζΧΛΛΛΛΛΛΛλD¬_ϊΧ_|ρωηŸί}χ8pΐ·–ΦGιkΐ^«Ίlε_Ε#¨ LΣ2ζ€ψ=ώ‘¦U‡Ά.cΚΆTIΫΫ€£ξ%υ»yέΑ]λξ\ 
φ1θΩ1θ²ρvŠSθΝ7ίz5CΠ±XŠΊαξnΖμ+­ˆ}τ•`n>^Ε:θά0°MDτώΨSΰΡϋ7ψ&jx5rΨ+YPJνjΪr(χ+ςόXtt₯eζRΕkΙUOΫQ‰ANrπgŒ‹QS+ξ₯Β½1ƒΫxdJH­pU°υw­ξΓHiΉ§BWι]AU ͺ½°^‚GΒ]σŒι:ΐR”©B^=fEeυςL‘„ΩΛΰW%pŠάvMό/2ρ‹†j1 6k ω²’±κΩ Jw₯ξ΅aΓ¬v₯’”–³ιÝΐΊŠ•ΌυT«›ψ₯ οCσνQΩ%]@ηuΤ2‚ΚXό?‡uš{¦Ηg—J8SPo…zPΛ’ͺCΦON΄:ΩλέϋFΨ>Ϊ~{μ±Ο}ξsΛ–-[³fΝ₯—^ϊ‹_όΒ·–––––––ΦG¬Γ‡_tΡE/Ύψ"ŸΎόςΛX`Χ',¬#φ5@+6kύ WRΪΓ<»WbCzp ΊiDIίξΨ5ΤίR ί§·θ‡ͺσΦ±›ΒS<€˜ω8…%τ’ΐΰKψτνo\Š†Γ ŽΉη‘[±S ύQ^Ψ 6Ž£ΕI˜Ρ}”‘ ρŒL ΠC‰pωβ(ϊ3ΥΉQL/8ZΗQ‘΄°&‘‹˜Y}vŒ[$‹H₯470 [PC‡ΡM‚y°Œ_hΡTύ)]Rn»ΝXg ³šFͺodο Ώ~?ŒΥη‘ΣƒTͺh₯±9»ΘZ9σͺΗΩ4σ†YπΖaΑjyΕΚΦL‘Pά*΄"EV6 BHM‰ήTHγ6•tΉ~:λSg¬ξ™όNUψ«ΈEΤ{LΫ΅„Θ«¦Œ‡T@m8ηz* _Y^qUx΅ρVΡαέΟΘ/ΐ,ά δ*₯¨ΣNjΊ:©ke󾢏°Σ7žvΪiύύύ¬°°°°°°°°Ž `Ν™3ηΒ /\Έpα7Ύρ/}ιK=τη`9`9`9`9`9`9`9`:€΅bgχΫΡ·Ώώuӌ3Ξ>ϋlΠΥ²eΛ/^όwχwcƌρΐν€ε€uΔ¦4³ωήΫp ΫΈ€4ΑB`&ca,d:S ψˆ…L₯λlΉύ4ΐ Ίi&iή²3θλ5Χ„©QθšA†{δ 3X9[ά5‹λp |ΛβKΎΨtχu/,Η Œ:xϋ"1-p$8N’ΜΠΒi§,«*ЏdVΗvχW%Μ£Ϋμ0ΓΩS˜Ρ%΅ƒ©}6abq½TJ‰3ΤΑzΦfέ&W±ς£Š{ΥεΘ²9Œi76KSmeϋf<ͺ#ΈbjΣΟL―μڟr°4κkαΒJωBγφžs§lMCM₯ͺ¦U,Ύ’ΓYhΦw­ͺμΜ秚Δλυ'*9L"+ι\υ&[šCΠ*•^ρρ ²«eΏž Νξʌ₯ƒuli]–•žI …vδ`Γ†K/]¬§ŽΟήωΤhΎ›x΅hWιr¬γXKwt°}ϋ@¨QΥι΄ΣN{υΥW1σΦ[oιZgu–n,,,,,,,¬X §εΛ—°ΊΊβ{·nέjŸϊδ€ε€ΥΈVΆήχύΩΧƒ(ΥzXtfY”‘,Hη*τμˆθϊΡ/ΣfVμx‹ΰ]9ΘA…hΨ²ω¨Š‘βψ WόΓίΌβ«ψΛ/ϋΚŸϋ<ž1ЀΓΐŽ8τ@Τ ‡ΡΝFͺZ6ΪΎ’C#ιςEΐͺ±xΆ; &[XS+β‰ψ8νaŽjΤz‚q€Η‹…­i,TΫ!2’•R‚Ž£θc݌>d³Ψ€ςΖT«ΒΨ¨(±α±¦Kh1¨°έRΙ§ {UθΚΰZQe―€³ƒζͺκ*?/οΐΒ©`PkΦϋvε!|φ½“Ζΰ«aΉŠ_4₯«S§N%ΐzc[ηΫGΫοπππC=tΑœώωcƌΑSάXXXXXXX§`-ΨΪ1ΒζΧΛλ„ξkΘXDΰϊtτΪ@+4šΆ£χ| χuw=˜f!©$ήDExŠa› "6IZ΄fΐ © ΛρΜ`Υ‚σ/ΒγμQηώρ|ΐ,,!x‘ΙΘ|oΎ ΫΗ‘nΉύC‰²‘"›T£ DιΖΞDcz²G‰ΰͺTiα™(£°‚νΛv˜ͺ4˜κ20[ƒuˆšΟžσ²S(΅PΈu“¨ψH΄βΖ΅lS›ΌLΐκ~ζ<υy·D…0Ο%ά‘ξ]Ÿͺ}$B X‘Τ¦Α²‰±ͺΥχ ΐ*ΞL=cΩͺ·Ξψ XxδϊVa΄iγ•LωjŽ?½-τͺ©―}v„WSψ”³_ωΰ©€£[…₯ EεTwσ’ž^}mτΎR°ΆR ֞_ίOq_λθ+œμ^νXXX>9`9`9`9`9`9`}¨iή–φ6ΈXX'z_£ζŸhθ Ρ#γqσ­W3Ο]-x"ΐψΔƒNb"<υDtύθΝ™Nb#]€d@( ‘˜ΑSmηŸ{αœΣΟTU―ώΏg‘α)³T+DΓΦ°:°0"%p°^Bƒ·Vε“Η εΕϊƒ«gγQτ>€ Β_4ޜ1N‡ΑΣ₯“¨ΔXˆ(Θ‚w±!δhλύ?Užcd΅Φ A›Ÿš£fΠ«0D1Θ2Vϋc?{?d:s‘ˆ†ΙƒT#47ΒΣΕεό€ά]ŒύΖ%ΥΪ19έ•Dο†Κ Ρ­”VΤ δ©¨w¦P$Rœ’Q…¬\†+Kΐ|u―IEαR·¦mό°Ζ7·M<£'Z-R=cyB¬Ηlδ*MW7† ϊΘ‹xgΑK_εαY€ζ2δ*Ν^ηΥgΫ;IŒ"χ<ϊ|=ρ½ΰGψš¨bx Σ•–OXXXXXXXXyš³Ή}„Ν–Φ ΧΧΠ"‘‘wΓϊDο T`‘§l„XBՌElœ؈v ΄x@oΞ$wl\₯*!žj>»ζ© ‚Ÿ€S―όχ3§_Ÿϊ?ντ—Ÿ3X―ύΝΩ–±HuΨ6’’‚Π‚η >΅yO3.
ai:ΐ\uhۊƒkηb†…q€JͺŠšΉ―δ‘Ϊβl` Χύ‰– ¨j|ͺμ’v#jZ¨ΆYΐRΫΤψͺωΛœ:ΨΛ Xˆ΅Œ©LΧ,xΚ ΥΦεΑjšΚ3ΟΒΰ ΚIχ›n½λhΜ"HY ¨Ž΄Κ 9I’Sš±6 ϊFλο`‹Ÿ΄:)^+ {Ρ["ή TNΝgΧr=jΣZ0™ώTΝ0lD3άνE'`©%ŠM`ηSΎJΐjψ Ώ¬ΏΊrΐςΙΛΛΛΛΛΛΛ+OάΈw„Ν–ΦqλkGa¬†Λ΅B3Pΰ“£C€T7Q ε<Π‡‘x «Ί PE{6΅iΰr*ƒ˜Y|Ι™Υ’zι>€E­ ΅aXΒ,x-θ€ŠθδI/– ―2G™iϊX„7)LGEέ½4>Φχ5YE%kp@™LΔιcγ@χqw`MlExΠΘpŒΑͺΔV4XͺϋhPΦΝ‘!]Y©¨Pˆ4½ eΫ§<€’!3m,κ-(΅Π Λ ‘ΨW¬(°¦d‘‚’”–Œα‚V₯t΅Ž™ψL«Η~Y&¨ζo˜‡ŽΠBYŠ Aϊfš>–UnΤ’εMX YΥ9N^J\‹ eΜ`΅ύσ§°ΐj4/ΕΙ±iιvP=žn­§Ά¬Β[’ά£*‘Νm'E!¬ΆN|Ώγ‰±Ί&³lυDΝƒ¦‡2„­ ΄D΄U΅lŽΉΥμ,©”ϊ`5ΎήY΄’δnAΗځšŠ7v9ΠVCϋΦΒω"{–ζŸi4ƒΦ«±(\$έΫ"ά…ΤkΡJύ5¬2«h+β 5%<ιε£/=ΥaE1Φ)OW'5`ύa}Ϋ›\,,,,,,,,,Ÿ°°°°°°°>Ξiζ{­#lp°°N¬ΎζΓPeύ W*c©Ϋ"€|TZpώEΚI@,N~Φ\σΨ=ί™}δΒSDcBι ,’ϊ?νt4Ά ’RΖβ@BΫθ‰… ;<φ…γάvǷňλgχΰ³7ο{9ψMSUdθ_ψŒ[τ΅Š…κ‚Ε‚7έΨuH™ κδ‚&“Ρˆa !VΠZ„ά#¨Žΰc4ΥθkZΜ:WiHΆtΕ#ΡΠˏΙ(Λ,λσ^–²Σ³8­πΚ*’Γ˜T)ό—2]%ξ‰(¨±{–HXέlήΎΙΗͺ·Χ*-Vs°Φv¬Oq++S(u’=Ί½šv‘s‘‡—ΤȊΕ«‰…šΗlBŽύdΣ±ŸΌΎΌθψ&⟠6άσjvΕKΏηџ·ώͺθΚΛ',,,,,,,¬<ΝxwΟ›\,¬―ih‚U_ Œ…yD5‚Α–Ϋ―Αr4–TTχ€@Q:`3D.4¬Lr²cO―ύΝΩ`©Wώϋ™@+f°ήEΨ"fΡK‡R‹€ˆ‰-sΜ#RXα†ΟΎύ;iΚEKzΊ#)%ˆeT¨`Θjzb—ͺb ‘ˆ!YM­ΘXV"DΐjZΐΒ{’-]iΕ@ [vYaψnω¦hZy»¦ζ¨βρ 3ˆ―ο3$>ΖρƒzδΚ[: c λ₯+-„§ˆ`GαUΚω%ΜRΦαΩr % ­«;mΙκ« ‘8h±(X˜Ε§Αƒ$$urΗ£Ž΄τ£€₯γCιh₯€e΅KEa5U·˜E²f7n‰WŠυυΪ)vΗ ’a}zτ+ς’i1AˆyBξg| q“γ)WΫσλϋρΩ[ΖίΗkϊΧƒV';`½°Άe„Ν–––––––––OX§:`ΕK œŽΔHžϋKγA!ˆ θΩρ^`ζͺ³}HTEV;¨ ―\₯€₯š ΦGc†ϋίήΉΐWU_ω~ξ£Σ:3ν΄vz{oο΅­&„W‚@Š x#O(D«€ A©( Wˆ(T cUE°Z’"‘GDεU€X[ή(ή°ΔHK!9X^Τ¬Oω1‰επδL¦?TΎOεκav匾*W7Dξ °δo$œ{/x―. ΖGΏΟ[y¦RP"ΘπžXΗΜzυΊ§ωΎΪyt/ψπ!―kΙΕgag¬ΔφW!ν™qŸΉιΜ Žƒ‘DgdnGε1UV―l¦ΊωϋΓϋΪGnJπ)&#υμ~ΕtΝ=™Αb ‡g{}\<Υλeμa=»›FΝVΦS{b [ixΤs|^Βο}AŠ<‡«7@˜Ίu*MεcŽuΈίΉY#¨Ξ nφΦJψΦuΖςŽ!eχ%<<¦ςΐ°€`X€`XIX¬x?¦p,+EΎkΎΎ)?ψR£&Κϊ©­ƒΊŒŠŸ°”TβΟΆ#Εgβ*-•τ™sμƒ*‹mFQš’Η"‘•ωώώ]ƒ,YdςRwοΪ ˆeρL±ΦγhΨΑk̝0l§‚nXa·;›—NOXœvrrFQari=rπ\!Hyς.l²ΰ9 πQEYsΗοδ—…χκ{ϋε˜μHΣa+lR*ΖRΨφ^—’:/ΔVNΣ»‘ϊΜ?ΞL_e #›G ΙΓ– ž†‹΄ZMl¬™τΖ³“B½£ϊ&,οκΔ¦?EyξOOν΅πޚΑvšΏe <_ ‘₯<Μρ©bέo#πδo8d>”>θNW"]”χU3%΅΄BΜο–Φιτ©°,`X€`XVΦο_ΫΣΈ€Uν+4ƒ!5 fij,3us0³”I„E&ΙQŠΠŽWαΌ-νœEΫi™•φ=œ"ά4κ·Βe e‚ U ΫcgΨΉ@ΞEγω5Ez/+VH³pFš²wχWρu$ρηΝΌΫ§ΰΔΪs/iwή +δ'oAιFν"8yΐφ³9]ω“ŠΤ²2|Ί ΌTί›―j§]Η—Δω•ΕC‰Υλ‰ W#5γδ »!x7ˆ–½ͺΏ>>τ?ν.ΰWμ‡ρ.Μ?†uλaΡηωφa ΣΈΞR‘ΫϋΈF€X+*ΝΆτD­ή–ΞRn‚ͺΘN½½€ΐ°€`X€`XV©ZΎ9¦p, ΐ°, ΐ°Žδe›bΐ°ͺ5`ΉfiϊBυΑšσ³Ϊb,•0K…VΊQ}ΫCΨˆΆTγψ%­š‹ ‘ °ήΏkωώ^ΑMΆSΑΜQΓφhΕ _BΘP,TεΟζ¦(«^S ·šΚP7ŽVŽb₯ΚŸgA]XzΖ04:Ο9N…7ϊωmeή§Ϋ©(ΌgPΡW„ά7ΎΕo@σ^JaΧ«πuNΝ,B`a]š£ƒϊ0)Ί+„·FξτMΗ©Θ”‹~Ώ^€y}ΨJήi,²ιΥ`:IiέΥτ±^ƒή`‰Ιτ2Β†XήζJBU’EΠ*$loŸΞ*θcκŒε ΜΌε•Yxσ RI–fιŸ³dωΤX°, ΐ°,λΤΦ€₯›bΐ°¬cΨμŸΦ4ΐRk+&£%c&΅ΏςΫu’χ…])Ο¨[j4―© ·ŽlΎΏ;,}οŽ"*` „ήΫ#’O§hgf‘OΗψ­sN`Ά_l‘ΐ© ”χISΩζώΩγΜ›’œšrpΊ₯Ρ'SΔ g—K,ΧΘ­‚ ΙήΦKΧϋΤoΉs€2€ΆίSKΐ IΛ»‡ϋja&Ρ―ίQθ„η³"ΪΗ¨Ω}δ@ŸΠϋH9Ηh΄Σ[η{ς°υy„Ι"]¬΄.>Ϋ5ν°œΓ<™Ι†·j=―wΜ<`$!ή θΓ'φυKξΧXkΏ~bο+ΏP¦·½ŸΝt?¬²Ιϋ©°,`X€`XV%κώ%οΕ4.€`Xε™wΓRB0μΫ^πκefK™©½–χj·P΄ϊβΆΖΚ‘)Y(RVθΐσ“-&Y0σTNΨ€ΚB©ΒžΟϋζzμ΄ύ˜½²XszΠ ™O_x؞4°μUy²G\ΟFϊ`©qWxŒΜƒ±Έ*μυeŽ °lΏiK¦žT²š§ύ‰"•υaλ&O&:jCUm+](²q$ςΉΌ<ά[FEΣϋλΡ pΫ’… D‡­°…zXώb<¨;tΆ° έ{YιUy“t?ΐ³₯‘2φ°uY˜οσ ΝΥC»p(EZš@ΣΤLh΅η…fώo€•2€5~ρ»1€ `X€`X€`! 
ΐͺ c…έΫUΜΎ²c›Χr[™ωΊ­ΌqEŽ’w’„fξ7}yx2>–E>έ’ΰO…ΚC…a2œNaή›ž{\ ³?Mm)zΠδt‰σͺτ[Ψ‘|V€₯DaHNᓆΡa1„‘HΥsUγΏuμ`• =΅δΓh"¨ΒAΨ΄"Β5˜'ΓnζaSƒΔ^‘³%ΎΏΞ!ξ$›ϋΞ²šoϊΨHα|8)€Ο!θΩ=OϋΫ τ+Ό}αxΣA†@و…ΦΦ5p”°ƒgC¨ςuuI½O}u¬q‹ή‰i\ ΐ°, ΐ°, XV•šwjpΐz΅]Kي [kΕ‚₯‡4‹Uή¦Α– H>…œEJ;F $½~Ή,τ~“Wε%κjβ Ω ܚ6- ±'X!c)(„ λ 5 ŠίλάS₯Β[ξΓΘޟΙ!†·τ{ΨφxμI₯w‡ε›ϋo軦Λζ~δΡπyΡ΄τ#yώtU‘RnΟ|9%Ξπ™(ΣU0'©ΡQ/ΜΥFΊDΪ₯ΪϊΗ“ Μ}[zμTŒοwxΝώ1 ΓύΗδέDs΄ 3zž–υΌ°N«ϋ"μ›XzλT"γκσ©―&€5fα†˜Fΐ°, ΐ°, ΐ:J£ΌΣΈ€`UŒ­μΨF³;‹·”`²₯*”Υ¦aλΨΑ™μ0A†™mΪa*‹φ»τΛͺžΏωk»(ύJ«Όέ^ΜIΥS  "Έ¦ωB ³fα΄Νφ¨’―Q”ƒ”σΖ!78`ylVΐI+Βvͺ…·γXήγTέJ½Ήƒw% _•SN€^ζ3;9²¨?| ΗDΐKgΉŽ€ΡGχ7χ?œp{xGBa‘’υH’°μ1ιΚ'±q7½n=gCwΌ_†ή“‘qQJW\ε¦ρυ΄2€`X€`X€`Xεiδό·bZœg/))ιΡ£ΗΜ™3}ΟƍmO½zυZΆl9i$;€€`XΥξ«Φ’Ξκ‹ΫΪR—qΐ‘α)xŠyΚN ­Ττrχδ[XžCtΐJd©Π<7d+Β©Ά|J`ΫTθ]qakεLm]D–N姍ςΔ₯φ [τDNzaί˜·–pφr’ΠΈ„Β‰₯½iE€£¦ΒΉΣmN3aΧΟ`jεΓϋ†šϋŒβ$ΌέαΜ‘Θ*,<ŸθxΤε―Α§!rΐςq”kLΪμj8Œώ•μΦΐ…&nvψκ*XVVqqρˆ#f͚₯=ϋχο74hΠ{ο½·pαΒƍO›6€`X€`XV’Φπ?―i'χΌΫΆmΛΛΛ;οΌσ4hΰ€υβ‹/Ϊζ‘C‡΄YXXxι₯—Π, ΐ°, ΐJ2ΐϊάΊ˜vrΟ»`Α‚μΪ΅ΛΛkϋφν―Ύϊͺ3i€άά\:€`ρU[ΎλΦ?Ώ1MΆ4ŸΒΉ­μ™2DυajΦ巘%šΓΕTίάx1°FaΫΒ³Ει00oΈυz–νcω ½ΛIKA ₯ξl5’2rΐrΖς€aΔόfΨR”ηS`}CΐZΊtiΪΡJOOŸ?~9€υωηŸίx㍙™™«V­"šX_΅€`XΙXηΌΣ*6ExθΠ‘λ».++kωςε„r ΐβ«ΆŠ}·«ΒmΝ{¨ΆX>ŸνωΫγwμ›yχΞϋoρu#Ο …+žcαtužΡsτ‘θΟ ­Μύ·nΎ.’{Rπvκς”’ΧbGs:‘Ώ'§ $–¨‡ω5Ο†ΙΝ°ΫS9–ˆ_N? i潂›ΜύM£~λ„!°ΘŠ―«Έ^υΊαCξW„¨ŽΩΟ였%_μbΪ(W9Z9`9uiEΗ¨Σ›­ˆ±JηΦδS`U2` 8°^½z+V¬ ŽX_΅€`XΙ X7Ο~#¦U `-Z΄(--ν±ΗϋδˆvξάI@°,Ύj«Ψw ozμΨφq7`•ΪCmίνQ;Μƒβn˜*R@Š9o…©4οδΦM—NΡxΣ΅¬0ίδέSN:Ώ™νΤω#}ΓυΒ<ΙθΣαω H€RˆΟ3θTτ­|YΨ²άϋD²₯ak‰H› Ώ ΐ·υwoοgξf9%NΧ›W»‡η‰^€ΙE„qΓƒCά 3žay»ΖΒ,LŠ₯^mΧRM7lsY›σάlΏf/ΰSŸ’€•Τš˜V€5tθΠτττ°`«eΛ–t ΐβ«ΐ°, ΐB€`₯¨οq½αԎρΏύΫμύΓ0%͌Ί‘=aηΙ#Ολi§(ͺ”œŽNαiέβ³g—XΆΤΩ”`ςˆξ©(ΠOξΟ₯u±‚3G^‘9I8 hŽΘ°^ϋ•­]%φt: θ°D—#·θυλeΫCοΛ7χmιi»HΥ|HEaκ3ω³ΐr²t@Œ”δ‡ψεΟuΜξ ZWΠ@jq‹fKZ5·Ρ΄u[q³MŸϊ”¬fΎΣΈ€`X€`X°,ΎgOΦT™ώAΑΥκP`Pe˜υα¨kE]οήxΉΟ #PP\χ,‘z~†ό!ζˆδΕμxΕce—,Z―ΝΏΖά³_Ϋc±Yh叆%ΥN3‘z"―š?^ΞΞyΒ1)R}ο3½„ν$όιŽΧ›ΰ˜”^D+miq8EhKŸ &’Rτΰβh(Z΅§φ™mδέρζ5 a1LSzΙό1A0μΞ 8?™…Pe°%ގ±ΏβSŸ’€u݌’˜Fΐ°, ΐ°, ΐ:JW?±:¦p, ΐͺFΎ[(έp}ηχίbŒυΡ}±ΤΈΑ֍<λdψ₯žœ’Γ‰S"Ψ‘g ™™’²Kfheξf)Ν€ΰ­‡ŝ΄Β'ςΤ€ο9&Ύ8ρ$֘Σϋ…zΉwˆz^ξ-σ^ž©t*K 3‰'¬HWο•Β₯ίg ΪHϋ†Δ©<9’’χ³PΆ1ςςBΊςͺvWškζPeλB+·pΓ§ΐ°, Θ°, ΐ°N ή­Ši\+i΄iΣ¦ž={fffΆhΡβΑΤΞ-[ΆtλΦΝv^pΑ‹-°¬š…Ψwϊ_fŒ΅cόo«4э1–&Q’ΠB―GάHΫΙ>"wυ+3¨$ '˜”lZsνUζ~QŸ+U4m;Γ“„€F$[ηFΪ“zR,nΪ©UgϊΓH9Ώ6Υ9"’_s”‰4³ˆ–f `ιIΥζ4¬0©gvšπ*uY971$Ύ0wGΩFa§ ‡]α― ΠΛΩΩ©w΍_jΤΔΝ‘ΚVμA˜­Ψ>υ€`%«Š‹‹Ϋ΄isσΝ7oήΌyαΒ…υλΧζ™gJJJΪ·oo;ίyηΒΒBΓ¬?όΐ°, ΐ°bͺη£+ct`%‡>ώψγόόό}ϋφi³oίΎΓ† [Όx±AՁ΄³{χξγǏ°, ΐ°, XίL%%%―½φΪ9ηœσ§?ύiβΔ‰yyyώΡUϞ=,λ„f1X―Œ¨4 ‘A•…|MΥVδ˜yeRβόtf*΄ˆˆ«Ό[’Rΐ.κέΛά_έ«—β±ŸY'aΞ§ KΎB†σ^M^ctΜΚ'g&Dra…Sx«πώ[vŒŠΙDΒ»Θ™#ui‰ιOXΆtΐ2Υ•―€ρ ο@4σ ²l8ˆaY•°IPeŒ%3ήrω¦γ—Ο§>υ«Ϋτ1 :°’L-Z΄HKK»ϊκ«‹‹‹GŒ‘ŸŸοMŸ>=''ΐ°, ΐ°b*οΛct`%™ήx㍿όε/Ν›7/((ΈυΦ[ ΰ=ρΔ­[·°¬―€?Ή»ŽρΏΥΤ„ς57Ÿ²Q!]…a8r{gΫ|[Vά΅u%’ ­Μύ]»[TΆGΓ™μE}΄εšk―²•2λέΛXΚ ΚΦmiφΦπαζώΪΫn-―›=j۟ΨIήΠΧΞζOgb¦z ΪΤٞΪ_‘Ώ*™Ή­˜ι$fϊ[3[χG΅bOjΧΔ7m]/C%Oυ\Ϊ“ψΌ‰'ρσ›m™p§ΉoK;ζέΫϋΩEήrη»ΪΆ΄α°λ_ClΟεΓ€+`+~ευΒtpι φξ΅²G –™­Ϋ.ουΥK―-λ”·τ’.2ΫԊν΄u;ŒO}•Έ_©Q沇_iΠ €•Ϊ±cΗΌyσ|sΓ† iii&Lk°ξ½χή^‡'(Ώ„‹w B! X₯Z΅jUzzϊΆmΫ΄9kΦ¬&Mš,Y²€^½zΤΞ]»žπ.B~ΑΒw³΅ωΧμš>φ“G<©`˝6Ήα½‚›τγŠ-υ[‘ό£ΝΘ/XώƒF₯Ÿ=d―εu[Ρ΅»­υ.Ϋ―_°ή<Τ2Σ/Iϊq%bαOVϊΡE;υ³~3“EŽΧΑα―nώR#OώΎ₯_žΜq»2]ύeλξΎϋξ―'ρ—*5.ς³ί»w6χm© «3ϋW₯?(VΞ@‡ΏF~r‹Ž Ρς.]υ«ΥβŽ]όχ*7νΡCϊ‹O}Šύ‚Υi겘XΙ‘βββ‹.Ί¨gϞ7n\Έpavvφ#ϊ¨όσP‡„οfKZ5_Ω±Ν{7_aΆΎwG•u‡•C‘:χ°K%;*z9;Ϋ«‘UΑ£u/ˆΦfΆΝ} ΫzΘ{˜ΩΊ7sWΑ“*„Όw€Μ«ˆΌX[Uᣉ}αύΰΠ…HΊw9Χ‘ͺςΆMVθ½Tο]ΧΓΚzοΤ`gP-šΏ ½`ƒ›/7²χβ-•σUWi-9 Ŏw°·cP »Χ]=ŸVΟΜΦ½ K{žϋU–™­¨‹O}ŠΥ`XθΛκSƒυρΗχνΫ·~ύϊM›64i’vnήΌ9//―vνΪΉΉΉ‹/>αI€ |°, ΐ:‘ώύ‘₯1 :°ͺ—€ |—Cθ=‹»šΙNΑXχΑyƒMοZώΩΔRX~3ΏΜΛ–ΎS‡‰Μ„J!QiOΔΔOφ C–}‹‰θ˜ήF¬°±η1ύŠΜτGZBx'ΏΓ1D=C+,ί―§8ᡍ X+ψ#€%ΖΚ³Ώ¨kh5χΜ:s~VΫΰSŸb€uαδ%1€ `X€`X€`! 
ΐβ{Ά‚«΄hηvj})pρ\’ˆD©ΊςηBρΨμ Α°5₯r…+Ίv`yW$mͺΏ₯ΟΣβ¦sΪŠΪ–zκ-œέEβ=N½W˜.τŒ‘χ&-ͺŽg>OJ£t[€qW€Ή—žέΛ_φ) «“³γ₯Γ΄ –θΚlφOkΪΊ‹O}*Vnαβ˜Fΐ°, ΐ°, ΐ°€`ρ=[qf°αϊΚ]ΨθΔ)D`TQώD( KZΝΎ—wιϊεαNξΉm™KXλΔΓMG.Χ;Zi3|4œ₯Gζ•ζFBžγ«πΛ™AΘοΠ³ΏΩ―ΉoKObκvΣΣΔXή=¬j7]™Z=ω―ΈΖS?Ι0²=|κS °ΪM|%¦p, Θ°, ΐ°ŽR›·(¦p, Θ°, ΐ°,`X|ΟV¨YΌLjrŠ:·{ϋΪKDzθ›žΠ‹x|JΰW/½Βά·₯WφΨRεY^ͺ₯Νπ•hι¦u;Lΰ%¨ςω‰΅Η‹±ό¦B1–JΝ*‰X‰tuΜ,©Hλ4¬ΑrPΆKν½―„V’¨XΆT%–ΐ§>e«Υψ—cΐ°€ ΐ°, ΐB€Εχl₯ε† _”ν²ΨζɝΚS2Φ²NyΪTΚIΉ'οnΗϋ­…Ž\ή.ΛοnΣΞHQYΕ0Q(ΊΝ‰F<•Χ-=΄Δδ`Ψ+ύπΗΣσΝ K*JφΫ €ΜlΕ κΡՐy–ΐJ%ΐjyοK1€ `X@ΎX€`Xΐ°ψž­L3θ±°*j‰s1–0kιE]Μ}[zΕ΄ΧJϋ΅S2SQ"—­ΫaaΖ0,ΟFΪb©ΒύTή4ΪρFί^€Ψλ43Ψeχ¦ν’+3Γ)ƒͺi’nf€e›2% ωΤ§`΅ΈηŘFΐ°, ί, ΐ°ŽRσ»Ζ4.€`ψ^)¦Œžj₯w,¬πn[±8­Τ’™­«VΪφ+c¨΄ ’‰Ξ[ΪΌ>ΪτN §I.qτcΒλ©4Ρ•σShΚz³ο| ΐ°, ί, ΐ°Ž‘fwύ%¦p, ΘΐχΚmYi„τJ‡ΛΜ}[Š«ά|>;3―•Άc”¦TNΠ+βέ|CΟ†]FO7ΊJφΡΧ%–²„Ά4i©ώέ–Όσ, ΐ°3ψ`1ϊ€UΣ1 bΐ°3ψ^Ή€eH$ΐzω‚N!`)]¨ ”’P’+=€RwŸͺEEρž=Τl<ήΈAν܌ύŠ,Ÿ§S†WmHωΤ';`eί± ¦p,‹0ƒο£`X°,Ύg“ΗΤ^AmεvvΐŠΜlf€₯N žTΛ†p²a/xWωΌZ6Όμ\oΪyΊ‘Uj–g ₯"ΉB™FPmHyη';`5ύBL#ΰXaί,Fΐ°,`X|Ο&…Fwμβ3δ­<χ$sΐ1"ͺΠ|φh―s]unw*»‰VΓ‘*ΩΓιq|Ϊϊœ9*~睟μ€ΥhΤό˜Fΐ°,Β ΎXŒ>€`F€URR£G™3gϊžΥ«WwκΤ)33³mΫΆsζΜ!šX`X€•|€uNΑΌ˜vO]\\.ΆE9NϋξΏ™ύ§2½_σΞ°* °zθ‘φνΫϋC}ϊτ! XA˜°,+Ι+σΦgcZΦΝ;Ο>ϋμίύξw›7oΆuκΤY΄hΐ°€ ΒL™ϋ/ύ¦“ΆpJt,uxψ‡ιJ?ΩΊ·o08[Ψ ρΪξνύSf6X"*Οz1{α‘œ`αaΊWωΗ_<㬑ίώ₯­ˆ±xηXρΛ΄vνΪ.]ΊdeeεζζΙΐ°€ |°,ήωI Xu͍i\ ΐ"Μΰϋ©s_μfΆ¦„P>ϋŠΛΜVΔX:μ₯FMŠ:·cτO™iDΌ­¨Χ³‹Œ«FΓ―Œ₯ΜDWfΆΗΛ*<\ηΞ;ΐB€E˜Αw‹Ρ°¬RΥ07¦p,‹0ƒο§Ϊ}Ν¬bŒ₯t‘fzV§aqΣ,ΒΖX/gg3ϊ§Ό­¨r‚nFWγΏWX*i`Ω¦Μ²cxη'/`ΥΊyNL#ΰXaί,Fΐ°,`X|Ο&?`yβIUνf‰“ψ¦²„Ο§ΥcτO½i˜Ό¨ͺڝ±” {4(Q¨=fΌσ“°jήτLL#ΰXaί,Fΐ°,`X|ΟXŒ>€`U¦2ςgΗ4.€`fπύT»οΕUΌΥiIU>Ua–™–6ΏvFΏJLX†S,ˆ-ŸTw₯z,ŸˆΠ6RC,ήωI XιύŸŽi\ ΐ"Μΰ;€ΕθX€…,‹οΩδwίοGΣν„f‰\₯©ξ&ΏŒΓζžY‡Ρ―›|€λ•L£&ΐ2„Ί—P|lΗx] όΦ/&4lΟ;?+톧bΐ°3ψ`1ϊ€`! ΐβ{6Ιέχ‚h[QΎIέΫ#hεΝ–lέlΞΟj3ϊUežυ3Q)?¨M/u·ρΩžαί)MN<ΐJJΐϊuίY1€ `X„|°} ΐ°€`ρ=›όξkΖ: Γ“ΏTkO ͺͺZ·ύi=ώγV•ΦΰΏ₯™a“HKeζΘvjP‘{a£Όσ“°~uν̘Fΐ°,Β ΎXŒ>€`₯³y2¦p,‹0ƒοUγΎΕ`C(,έα?9aΒ;•N?υ“ŒgQ—Ρ―B3~`Ή όΦ/nώο₯f€ενFEW†\Β/Φο›tΰ`! ΐ"Μΰ;€ΕθXΥ°~ΩgFL#ΰXaί«Ζ}5ΥL,žtΊς\‘3Ίš—QŸΡ―BσανηύώλΟmih₯₯φ+Q(Ί23Ί²*r°1€`! ΐ"Μΰ;€ΕθXΥ°~ήϋ‰˜Fΐ°,Β ΎW™ϋή ΑK™ΑΒ#he;qηΣκ`%ι|Ο©1ϊ#Ο8K eDΥχΏœιŒ5π[e¦jw™*άνΰϋδšογδςΞ°€`fπΐbτ¬j XgφϊcL#ΰXaί,Fΐ°Žι1=¦p,‹0ƒοUζ~ΨϋJ+FW‰€e6ηg΅ŸϋU–£_…6ό;₯½―lE•XnΒ,΅Θ²‹w>€…,‹0ƒο£`Uwΐϊίέ‰i\ ΐ"Μΰϋiα~ap ‘ίHθΩΓGTΪΙ}ξ™uύΣ']¨Ϋ eΞXŽYj”5φμήω°,Β ΎXŒ>€`}ω³Ό©1€ `X„|?άχξZNωAš·Λzς_3¬Σ*]hPeh₯‚w―y—y«χq€“°ώW—cΐ°3ψ`1ϊ€`! ΐβ{6ΥέχNξΚͺΫ»Jέ“qFΒ”ύ‘gœ₯Ή•(4ΜΊζοΞ΄₯xΛkή'4lΟ;?λ^ZΣΈ€E˜Αw‹Ρ°, Xί³ΥΟ}ρΦS?Ι`τOSCΡ1Xj†Y}ώξL31–ΛθΚ¬°QήωΙX?νtL#ΰXaί,Fΐ°,`X|Οβ>ξŸ66ξ»6ςŒ³9 XaŠPŒ5₯ €•”€υ?.ΣΈ€E˜ΑwάΗ} ΐ:J?ωχq1€ `X„|Η}܏›.T{b‘;C`! ΐ"Μΰ;ξγ>€`•κΗwlL#ΰXaίqχcΩπο”ΆlπisΤtΤ‹‘°€`fπχqΐ°Κτ£cbΐ°3ψŽϋΈ`XGι_rGΗ4.€`fπχq?N©₯»θΚ–š‘‘°Ύ‘φμΩsλ­·6i€Q£FότΣ£άόόσΟ;tθ0~<= ¬ ΥΆmΫΏώϊ† ž{ξΉ£Gώμ³ΟlgAAAZ iΣ¦X„XάΗ} ΐŠ©ΆΣNξyϋχορΕΏyX—\rΙ 7ά>ZXXh‘ΐ°*R%%%:uΊκͺ«6nάΈ|ως6mΪάy睢Ώ{χξ<πΐŽ#:pΰ€EˆΕ}ά?υζσΚl]}°zλλkίΎ}EEEΪ\΅j•mκΧΣ¦M›š6mš““`X)γ*Γφ;wjsΜ9ηž{­4kΦμ•W^!EHˆΕ}ά°¬ Τڏi'ρ€|ω嗝¨V\ioχξέΪμΪ΅λγ?ž——`X©O?ύtΡ’EΎωΜ3Οdeeνέ»Χή|[·n°±ΈϋUXb,qΥποœeΖΠXqΤΏ /ΌPλ3fΜΈμ²Λlΐ°*QΕΕŝ;wΎφΪkW―^žž>dȐf͚uθΠaΦ¬Y!χqΐ°βλϋ­‡Ζ΄ώX΅)Aϋχοχώπ‡?X€SŠfǎΩΩΩ6l°¬ΚΥwάQ·n]{«Νœ9³f͚S§N}λ­·μ½X«V­yσζX„XάΗύͺb,[]}rΈ·;CŸΤ€υΟ­Η΄ςΟΏtι΄£e85ώ|=:mΪ4Ϋ|δ‘G΄™ŸŸ?vlYηR ΐͺ,3&##γΟώ³6χξέλτμΩ³ό?ί±c―}Τ«Κφμ)ύοΔ–UψπχqΏ’μΎΉΆœxN{[Ni‘―lχ“°ΚΡƒ>hΌ5eΚίc›uλΦΝ<¬5jΤ¬Y377ϊ°*R#FŒ0Ίš;wξ15δΟΙΙ) %%%ΌcB•―ο7(¦άσΜ9ΣpΚ»’ΆΡζΝ›/Ήδ’‘#G~ψα‡Œ€Ua?~ΌaϋσΟ?ο{ξΉηžnέΊωζ!CΏώz~Αβ7 άΗύ*΄ΒFl܁‘Oφ_°ͺ°vοޝ™™9pΰΐνΫ·rDΕΕΕα1€¬ ΦƍkΤ¨qο½χ†o»Χ_ݐ둇2¨Ÿ>}zνΪ΅W―^]ώy¨DΑwάΗύŠ$gθ·©Ί+-GžqCŸ5Xίm~KL;‰'3gNbaΦ|`X•(΅―ΌνlόωσΫ·ooh•““sΒ w ίqχ,λλθŸΞ½)¦A'VυaίqχΏ¦ή―Λ?ΐXJν„Y =€`Xaίqχ,λΈϊ‡μώ1€ `X„|Η}άZfDεΐp‘°, ΐ"Μΰ;ξγ>€`@g4Ί>¦p,‹0ƒοΈϋ€`! ΐβ{χqί¬ΚΤw^ΣΈ€ΕW-Ύγ>ξγ;€u”Ύ}vŸ˜Fΐ°,Ύjρχqί, Xί³Έϋψ`U¦ώΎή•1€ `X|Υβ;ξγ>ΎX°,ΎgqχρΐͺL}+³GL#ΰX!„BΐB!„°B!„,„B! !„BX!„BB!„€…B!„,„B! 
!„BΐB§V%%%=zτ˜9s¦οΩΈq£ν©W―^Λ–-'Mšdh›oΎyρΕΧ­[χ’‹.zγ7RΥ}Σξέ»7nόΑψžjβϋ]»ϊφν›••eCτΣO§Άϋίώφ·Αƒ7jΤ¨Y³f<πΐ /E*ι“O>1Ο>ϋμsΟ=wμΨ±ώI―Ύ/]Ί4-A}τQ5qX¨U\\©αΎEYγΛ:y8`Uί7oήz=xπΰ¦€ϋ;wšΛΛ–-Σζœ9sš6mZΞ₯H1effΪ?QZ=z΄Eυρ=Τ3ΟvμXh̘1W]uUͺΊώϋUM|葇:uκδ-\Έ0333΅έ7–²ή΄i“»l›kΦ¬9ή₯H1WΩ FζuϞ=‹‹‹Λy€ͺž~ϊι† ~φΩgε °Π1d oJPXFsLΐ²oΫ~ύϊ5nάxǎΆΩ­[·ργΗϋ£γƍλή½{ͺΊ¬jβϋ„ ςςςό‘Ε‹gddΨJΧ]“Τύ^Š}ϋφuξάΩxqϞ=Ÿ|ς‰­ΫΈ―\Ήςx—"•ΎΜχΌΓ***š7o^vvφΔ‰Λy€κΰε—_~Χ]wωΑ©η>°P%*ρ~™τττωση—CŸώω7ήhΊ­Z΅J{,…_CcƌΉϊκ«SΥύ`Uί'Ožωί½^½zIνώΧΉ›7oΎπΒ m½aΓ†Σ§O·ύoΏύφρ.E*}Lš4©FΫ·oΧ1³gΟ6Ώψβ‹σ½όOύχhλ6β~p깏,T•ŠΖ‘C‡»ξΊ¬¬¬εΛ—ϋΞΫn»-,φΌε–[†š’ξ'V5ρέB¬mϊC3fΜψΝo~“Ϊξ»vξάioϋuλΦYΈέ»woβ₯H½BœΉsηfggϋζ† μ=oΧ‘:ψξ²7λΦ­Γ=ΥΚ}`‘SMMퟢ+V„ΗΨM›6m΄^RRrώωη§Μ­d'¬jβϋ–-[ΜλmΫΆisΠ AβͺvΏΈΈΈgϞo½υ–6 ;vμXΞ₯H%½ώϊλFTΪ|ώωηνͺjβ»kψπαα ΥΝ}`‘Se-Zdί/=φΨ'G€―`ϋ·ΎqγΖ#GŽ΄t š6mzΰΐjXΥΗχ^½z]qΕλΧ―όρΗλΤ©c18΅έ7]}υΥW]uΥ¦M›ζΝ›—™™ωάsΟ•s)RL—^z©ΉiΓΊlΩ2γζ»οΎ»ϊψ.εεε…έe«›ϋΐB§4Κ:4===¬WhΩ²₯***²ον§S§NλΦ­KIχ°ΒF£ΥΗwƒisΣΒνάΉsSήύ/χ%ιΣ§OVVVλΦ­g̘qΒK‘J2σσσ6lΨ’E‹{ξΉη‹/Ύ¨>ΎKνΪ΅³&/K5qX!„BΐB!„°B!„,„B! !„BX!„BB!„€…B!„,„B! !„BΐB₯œΆlΩ’••uΛ-·„;Χ¬YS«V­G}”λƒBBθd4sζΜ΄΄΄gŸ}V›Ÿ~ϊi«V­ϊχοΟ•A! !tςκΧ―_Γ† ·mΫfλ}ϋφmέΊυή½{Ή,!`!„N^{φμiήΌyϞ={챚5kY³†k‚BB(–.]Z£F £«©S§r5BΐBU€φοίί¬Y3c¬uλΦq5BΐBU€ ”έΎ}ϋœœœƒrABΐBΕμΩ³Σ^xα…uλΦΥͺUkψπα\„°B'―M›6eee 6L›“'O6ΨZΈp!W!„,„ΠΙθ³Ο>λΨ±cnn­hOIII^^^γƍwμΨΑυA! !τUPPP§NυλΧ‡;·nέZΏ~ύή½{s}BΐB!„°B!„,„B!`!„BX!„BB!„°B!„,„B! !„BΐB!„BB!„€…B!`!„B‘o¦F9HœŒβ·IENDB`‚xarray-2025.09.0/doc/_static/style.css000066400000000000000000000032741505620616400174170ustar00rootroot00000000000000/* Override some aspects of the pydata-sphinx-theme */ /* Xarray Branding Guide: Primary Color palette (Hex): #17afb4 #e28126 #59c7d6 #0e4666 #4a4a4a Secondary Color Palette (Hex): #f58154 #e7b72d #b3dfe5 #8e8d99 #767985 Primary Typeface: Acumin Variable Concept - Semicondensed Medium */ /* Increase Xarray logo size in upper left corner */ .navbar-brand img { height: 75px; } .navbar-brand { height: 75px; } /* Adjust index page overview cards, borrowed from Pandas & Numpy */ /* Override SVG icon color */ html[data-theme="dark"] .sd-card img[src*=".svg"] { filter: invert(0.82) brightness(0.8) contrast(1.2); } /* https://github.com/executablebooks/sphinx-design/blob/main/style/_cards.scss */ /* More space around image */ .intro-card { padding: 30px 1px 1px 1px; } /* More prominent card borders */ .intro-card .sd-card { border: 2px solid var(--pst-color-border); overflow: hidden; } /* Shrink SVG icons */ .intro-card .sd-card-img-top { margin: 1px; height: 100px; background-color: transparent !important; } /* Color titles like links */ .intro-card .sd-card-title { color: var(--pst-color-primary); font-size: var(--pst-font-size-h5); } /* Don't have 'raised' color background for card interiors in dark mode */ .bd-content .sd-card .sd-card-body { background-color: unset !important; } /* workaround Pydata Sphinx theme using light colors for widget cell outputs in dark-mode */ /* works for many widgets but not for Xarray html reprs */ /* https://github.com/pydata/pydata-sphinx-theme/issues/2189 */ html[data-theme="dark"] div.cell_output .text_html:has(div.xr-wrap) { background-color: var(--pst-color-on-background) !important; color: var(--pst-color-text-base) !important; } xarray-2025.09.0/doc/_static/thumbnails/000077500000000000000000000000001505620616400177055ustar00rootroot00000000000000xarray-2025.09.0/doc/_static/thumbnails/ERA5-GRIB-example.png000066400000000000000000000666461505620616400233030ustar00rootroot00000000000000‰PNG  IHDRˆ2ΰG59tEXtSoftwareMatplotlib version3.3.3, https://matplotlib.org/Θ—·œ pHYs  šœmIDATxΪν}Ό\U΅ΎDΕΚSQ_€*ι Š PAPπYD€ˆXώ’i‚TŠ‚(R!‘%@! 
C Ν$€P%…ωοoξώnVVΦ>ΣgΞΜ]ίο·’{οΜ½sζΜ9ϋΫ«}λM…BαMnnnnnnΪό$ΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈυ”‹νMoΪ4ΎXΧΟ‡[ƒ―5όσ?NyΎHη{-ΨΛΒ~ϋn°Uρg/›μΛΖί8-^μŸT?ί+ΨκoQΓ±ώ%Ψ™νHο vc°[lL°OΥροίμθΉ&qέ=Π‚Εzόάρu°ηίT\‚ZKΈo‚=οΓgƒ§ŽυέΑn φJ°§‚}K<Ά^°[⽎μ₯ώφ;ƒ]ί+μ4'7I_(u£Ό9Ψβ"Nρœ^Αf[μ2ƒ ž­γ±Ά3Alμ'Α6 ΆN°c‚- ΆaO"ˆrΞk­QΝg?σιΑΎ"~v@όY‘ΗQ!Aό0ΨgγbΑH§ˆΗ±ωΈ ΧS°έγ&€ ˆǟΟ3βš` Ά~Όώq?ιαV6AΔο׏υ.βg{Dδ;‘$ΦkA|?Ψ¬`K‚έμκF<6ΨΜ`/€Θ@lρ1,ΦΔΕϊΙ`ΗK‚ΐί‰oIόϋίWžΣΝΑ{)ΨγΑvΰύ`7Έ“8Χπ*~li<–Oǟ?wvG$ώΞYΡΣϋς?ldލΎjΑ›)Ύί6ΗϋΔΟ@ΐΫ—IίΠ»ΐψωόZœΛ?‹ΗNΔΞX½ώRuΘσRœΏΎ<Ψκ΅ΰ±μ)~χ{%Ξ δΐ βsβϋΜcJ…{‚]C©Ψ8ό9ώ¬Pb‘>Imˆή¦^σΐŒΧ4―ΛηκΘΈΠoΏGθiΎαQίS&A\μΈΞγϋ…·σΊ„[%Δ†1Ζy©xόΫ1|±ž7αωοΔίΫ‹\β±o‹DφΠ a’γΥΟΰβƊυΚΏ½‡/‰Η>*»Ο…κοbα)βϊJςqWy―\œ ήZ SΌΉw/“ ώ_ό,$ργά^nKό\Pβλ―TΧ‰Ο}b(‰vqβ†ιo΄‰‹Ψ55Δoc̜† ΟΗgϋx_\bρρØ$ύP°w»[ε ξG7ΎΦ'" ~±R‚ˆ9š;β‚²nd\)Aό7Άψ~£XξxX|mΨ.HΧ@Sβyzwρkjl7λΔχ΄Hώ„„ξƒΈ5ƒ >wύIκρΔέϋ6±βΉx=“ΗΥHeˆΠΟμθϊ#ρχ‘2 bݘ<_“΄ΊŠιCρο-‰XUΕT.Aμ{UλΟΦ‰ v 6#VΓ\"Βeƒ#y.Žω–νk V1-uσλ«ένψψΨΌX6ΉQjό%q1Ί0†έŽe™ƒωxόΩΖΡc})†€NΛ"ˆRΗTnΙ©b:>n–ΖJ‘Ώ– ˆubuԜxμγymVH£#‰Ύl…b#i3&ϋŸ–}β3Πχ=μCcΞ«Ρ“ϋ_οƒpss«‹§ιζζαζζααζαζζζαζαζζζζζαζζζζζαζζζζζΡnφžχΌ§°ΣN;ΉΉΉΉΉ•iZΡ c oΦαp8ε#Δ#N‡Γαp‚p8‡„„Γαp8A8A8GGDΐΥqκΤTρ³›’`ΥDNΒ*€;K§Δη=βαp8E{ΔΡ„S€jiAlμ„Γαpt AVK4O5~ή+JToιαp8]˜ΉΰΕŠ•«zo°I©IOξA8ŽNΑ˜Y ‹ρ΅?ŒιΉ§Œ-SΙJό L·ϊ™„Γαθd ™όο"AzΕΨMUxoΖοl Ζ)βλ±ψ'‡ΓΡΙΈζ'‹ρ­??Ψω‡|Ο‹3”1οχ¨ΒκΩ°Η֞΅<$~½y +ΑΦ―ΰIj‡ΓΡαψΝν βμ!DΑ凣,|ι’ϋбχω£ œ ‡c5ΆώΥΠ"AlyκΒͺUo8A8A8G‘πΖo6ο;Έ°έo†Ibήל œ ‡£Px}Εͺ"1ό‡1Ε'<΅Δ Β Βαp8 …₯―./Γ>Rόώ  œ GOΐmŸ+|φάQ…ΙΟ,5ŸΏμ΅"1œϊΙΕ‡N™ηααp8zŽϊΛψβΒύCsΝΗη.κ’Ω0bzρ[yΖ Β Βαpτ|ϋΟώ?έ;Ϋ||ΪΌeΕΗŽΣύΏ„„Γαθ8θ²Š οFN7GRŽr=Λ Β Βαpτ ω Ωƒe>N‘>όΏΩ)wΞφ„„„Γαθ `CΏ['›šΆ ψψcOΏPΨ¦°Βi·Ou‚p‚p8=9up‘Nώλcζγ -!ρΙ³F~ώ·‰NN‡£Σ±rΥΕş}nπlρρΩΟΏT,‡ύq‚Hœ œ Gαε¬θ&ˆο\ωωœ›Η?]|όιΕ―>wΑθΒq7<κααp8:‹^ϊO7Aœ˜wΓCOuk0ύοοξ-|ΪρNN‡£Σρμ ―vΔW.½ί|Ξ΅±aa <η»W?μααp8:³ž©› φ»θ>σ9Wήί5Mnι+Λ‹^λœ œ G‡cκsK‹‹¦§άYΨηΒ{Νη\qΟ¬βs―ΐDΉCš8—Ϊ Βαp8Z„GζvuIχι?¬˜€Άpιέ3ŠΟμ7Ω_½μ''‡ΓΡι3³«Kύ {%Ζ‰^Eϊ08θ{׌+Žu‚p‚p8Ž»§Νοš5Ό‡έΟ½Ϋ|ΞyΓ¦'Κθ•ψβ…χt6A\μω`SΕΟN φ\°‰ΡφOόξΎΑ¦›μ'‡ΓΡ`—4ͺ“v;ϋ.σ9ΠhΪͺߐβΧ' šτ4:‰ φΆ£A?+ρ{λ›lσ`λ›lk'‡Γюψϋ£Ο βώψ`a—3GšΟφ4˜€“oz¬πιίήέω!¦€M« ˆέ‚ ίχ…9A8Žv† 04hΗΣG˜Οωε­S Ϋfxρλ_ά2)I$= ζ›CPο2~ηλΑίμχN‡£Α‡“nœPψΔiΓΝηœςχI…#)H²θiρΎBzs°³@ΖοbΔ₯―q ή ¬wοή~5:Ž\αχ£fvΟ›fIγ§7OμΞOœ~ΗγΕ’ΨGε<ζ!&‡ΓΡI0ό‰b“ά™w>^ψΨ/‡šΟw±Ηy£Š_vΘ΄Β–1aέΣ<ˆMΔΧ'ϋ«ρ;λ{2Ψf"IέΗ Βαp΄#Ξό―ΒG9€pΞΠ°πŸj/όΗ]hw =K7›lE°gƒμΊ`SbβvFΐ‚ Ώ»°±š©_ΑΛ\G›βWœRœ(‡1’'jκ­Pq.Ή«««zωΚUνAΌQΞαpτp`:ά§ΞΊkni#Eχ4u™^y}…„„Γαθdœ8hBaΟσFu{+ ΟϊKώΎKιšΊͺž–ΌόΊ„„Γαθd0|tΩθj¦Χ–―\λ9Ppύϊε]Γ„=Όzx„„Γαθ`vΥΓ…‚wπΗ{WKzk€Πi °σzξ’— œ G'³ Φ=θΥεk=η Λθtη€.ν¦'ζ½θααp8:π^s ‹άžOωx—ϊλ€g^p‚p‚p8 䐇ψ`—&Σ‚ΧΞ-|ω’Υs¨ο›ρ|ρyγζ,v‚p‚p8Œ½Ο]”πΞJ>cVυQWόϊα'ŸwŒ…NN‡£“ιnh-έ4ώιβΒΜ’W’^0ριŠΟ»λ_σ œ G'c§3F…ϊ²ͺ“Ύ0ΰžΒ―€ψ5’Σx 9A8A8Ž6Ί’―;'³β ΏΉύρΒ?{ΆΈπΟzώ%3 uό Ώž³πεβσώ1α''G;£Yݎ|bΤ΄έΓ€R€@„ϊξ˜τ\ρΉΣη―M&PrύэŠ_{ι«ΕηέψπSNNŽvT:q# ;HGΟğο›]ΌX€±jΥΕΗ7rzaθ”ώ†ύ{ΩZΟϋΜ9wNώλcΕ―Q‹ηύeLs+'‡£€<3nδŸά4ΡOFΕΩq“pψU6A@Vaτ¬ΒˆΨί0εΩ₯k=Γ‚ΘΠiη‘σΪ Β ΒΡ¦ψδY#‹7ςΧώ0ΖOFΕΙ7=VΌ(΄§±τ•εΕΗ―ΊΙξpΤcOΏ`^Kοo“Š_CΜΟƒΈŸ„„£M±Έ‘·ύυ°ΒV¬τξg\{?Ϊ||Α²ΧŠ_ΠάΒ½Σ»ΰ™»vάNgŒ,œςχΙέίoήwpq~„„„£ ΑΨ2vŽψςŽž4ΈασΗ@ O/~₯ψψίy¦πΐΜ…Ε―š½h­ηνpϊˆBΏ[WΔΗ5΄8’Τ Β Β‘C vl%‰_λ x’Ίgc—3»ΒŒ[%fHΟ\ΠΥΣpϋΔη bΐΧcfέ!ύ‰Σ†~}ΫΤξο·„σΛ[§8A΄;A ‘„κGgΪύ)=Θ%‰αΑΉΕωΑFLχ“Φ½H„‚p€F‰"!Η‡OW?§KBZKμ• 0“θœ ڜ Py€ύρη–ωΣ!˜ύόKΕΟφΫ!Σ2w†·…αާX#~μθXτŠΧΒCψεͺ΅G‰>2wIρ±{¦?_˜πTΧΧ£žX°Φσ>φΛ5CJ²/Β ’ :ο©]£=Α™ΐ0ιφKtίμΣφΉπήΒΡ׎χΧ œςχI…_ά2)wΗEIŒύ/Ύ/9)nΜ¬ΌΒK“ŸYšΜWmΩoHαμ!κώώ‹ήSψΑΐGœ ڝ U h£wtŽΊύ0Φ¦kά=m~wΙ"&υ”RWΘ@X;εFe’°ΌIgtQγe―­=ήΓ†Q|=tΚΌ΅ž·EίΑ…s‡φV₯όwGDΐΥΑž6Uόμό`O›μΦ`οLόξά`S‚MΜ:ψ<Δ1».μ:”$rQΒηkΚœ¨RΑNbkΖΣS€ΩpQ…Aχ(O Ά―ώ9₯ψ?BN † ιq`bœςX _]֊ Η7τ`GΔΑvT±O°uγΧηΒ2bγvπ ΎwΝΈβ‡ŽŽJGWβςΖ7<τTΫΎ‡ΏŽ{ͺ{Qϊ֟훝±xό•ΧWœΠθΤιψΣ½]²H¨6 μ(Žκ|eyΞe6.!IkΞƒθ›Ήΰ₯ξΌ•ˆ?ΏP:`όθΑMςJ[b ΨT„zμ `7΄;A`–,>\YΓά“ρΤ’Wr(Pηδ{ψΚ₯χ›ΟAB‰E~ύΡ_iλΟ­œέωw|¨xN°Ϋ}υυζ4R"†Β€< β»Yx“)O3 
R*­μœΎXtNcγω₯KξλΡqG°ο$›lB°GƒSβ5ŽΑ„υξέ»ιΙήQηΗQh«§ƒ.5Ί‹Ϋ2I½w’C π”€KοžQ|n»vSS/;βw@œ|ηXχί¬ΕzDό,š5‚³\  žc–Œ7nj>βΊ›ζ@(ΈnπσߏšΩύ3Μ†ψ|“Β–Ή#ˆ€~1Ρ+ρ{ˆΏ7Ψ$„«ςκA°Δνϋ^Ε²Ζ ±MΔE#gt/JX-ΰσΖ°β{އ5kΈΐDjκ½/E9„>Rr·ό,ςV)xμu]Ή§!“»TZ§Ν[– C!’ρ†7ΖPW”Ίφ8‚8"ΨƒΑΦ/σoœμgy$ΔΫΡ “«ξiΈςώ'Ϋ>ΔT τR8νφ©Ιx;>oƈΉƒΜ[€]zV’δΗ5ώGW3€Ο€ΧšΝς\1ΜPi…W@ο’ΊLΧ…M”»ς‘γ±<ΤCϋϋW°Ξψ ‚m$Ύ‹ίΛ#A,}uu ^JΝ±§Ι[X³βΤυz }€έ+Β*ŸχaQβyt܁7kWέ¨]:Β‚©\Ζdβ9†?ΡΤigθΰυT―Rr ηT 䧎ΈϊαbŽοΡ§–¬υtΨ³‹uzΞKy±Ή"šYψΠͺ*¦ƒΝ Ά"Ψ³ΑŽ 6+Ψ3±|vCJΑ†Δ―7a%ΨγGrš€– ٞPζXixΖJΪ΅Π”©Θ-γ},_Ήj­η|^Μ–Ms툾˜άύ™-4J5–›’: _σΐ“5Ώ.ΊŒ±«ΞΒI7N(&‚ρš¨.«¬ͺGΩμžη*œ0hBw3œ%Β‡ζ7κ4Iιo ’η3¬Ρ‘ίΡD‘Γε&=σBρƒ…°~8 …σ†­ŽO0vTνΔqσ3~ό’Ρ΅kψΌϋfEiŽ['δ«Y BAƒ'gk…wãݟ™΅Θu-ζ]:Bwύk~χ„΄ZπϊŠΚ ]ΚϊP8w£)α~DβxUF#ίΒΈ‹Oυ-Θs‡χlm,ΰXΘ©±tŒ…¦7Κ‚ ζqd( @[ >‹u‚hc‚xψΙΕέήC³>ΜΌ%ŸΌG΄©6%$ ™p·’ΟHΒS†ca"tΠj°ΣφΒ+ιΩΩH<γύX5ϊΔxΧ<žΫŸ΅)rsU*W…0> NeΛB9 ?σΤΰJΎ]]γ’ς‘SσUψ;)ήΩNgtyrΊœΔ|13‚8ύŽΗ [j¨D;›x Η„Xuή:=[ώ±«vΛ#Ο΄ε{@\ω€Kο/jψγ} ”¨'p Λr£Ž= "mͺ“@½=€πΌk²εΓ¦ΞλNΔ’΄›α΅jqK<·₯6V‡\>Άπ?v靕RΜε{EΏA ¨ *'ιM)•”Ζ= „!§>ΧΥa>Μψ{Θ% \)―”EK<χΒΪΥMMmΩ―9½5N gΜ’ά­λΰλ $YΩum›ΞHΐ‚tH }H"ΰ}LŸΏfuΊ§υN^°λΛΏη"7>£‡α²Εη"‡dα3 ΞO­ΪSμ5I Ϊ!>ςšqΕ¦Δ³2Τ Έ`Γ Š—E8ε$½Ωΰ›m«¬<š1ΏKBγŽIk{_²‰C¦txΞκΐζcσD„D›.|°§Ζ$’MY1Mxθ¨lvuO3=ά;Ζήμ`Ϋ¨HCΧ0w‘U(‚7τMβ†ήU Ο πΙ χ‘‹—γ-Q‹²R LN£–ΔA Ÿ=·Ά}†"q|Yβίwύ£Ε~£¬:όLŠyf-L>ΉBv„gΙΑHο α0 †± JuHΊŸ‘8;dψš£DY!†q$,ύ)'ˆn2K²ͺv€ΎΟΧ/oŽΖ ..ČqΦ£¬―Γ£ΕωΌ2ΣnΨχ’ϋŠςέcf–j– ΖΏΤυ‡δwޚ%QŠ‹j«"™{Ϊ|!ώύ™sξN*°W;g,ξΤ’κ*½›g_x5ω<ΣOnšXά…g0§΅₯B= JψΪYγπ³ηΩΒ}Ν―­sŒ™ˆΗ½(U[9H†έωπŒœ Ϊ” dΙ_ͺ“’@²‹\3šΘ+†>Π¦1άΣ,eH,’X,e·έy ”/>ΛWG«/2/CΘE₯€Οψ&KE€¨yέ₯Τ†Y’‹κ$δ!ŽHHL£:mσ˜gcn¦–Ζ@ξβKνψY)„E‹m R³ιoΉ/ά˜άV*gtδ5γ2s³Eε›u€‚άςυ΅°'CT·‹ "œιΡ¦qUμFεG©F)ϋlAά,b¨j `ΑB#Λ@Ϋ »ΕpQJžΉ»ΊIΤπc&₯7,΄’»œ‹«F#1ͺ»ΙoIqΡN5|BŒrϋ˜/`OD-kx”λPφ€ΡJ!$!`—wάYε°Μ@5•7υΊiρž•‘‚UNΩ@…ΎΎΪx]΄ΌχϊτΆV KόΎ,G–NmJμFύ3ώGŽ!Ή+’~ΝZ(€ΘlΧ&υiΐS…l$k7`aΕ‚ΘςC«F  ˆ ± ’κ…‘ΝYΝ^“ChΠG€j Μ£ α5ν™Π‘‘ l-IyμͺjΝ*fY(ςxn–Œ‘―( Υ’w,.€7…κ©_%JuΩ{ql”΄Š-diλΛ±ΏχœŠ ·5wf–vββX—ΧΑeeω ŒmKπύηUΘOw΅κ:v$|­δϋΚUo˜έγψ]$νAˆΣτMμn덫XO`ί‚ξηΰϋcŠ’ϊ³<Œ΅ΑyZ‹,>σ¬p¦L ρΒ{ ?h'‹™;Γ'u=άO d§C8Δ`1τ{ΒK°Β€ΗΗ¦<½ΐlŽe*ˆIζͺ€Η “α©2α-)S2/ΐ…qQ½Š@œ ΔΩX„'bύX8h 3\Y’H½Β=Ό8a)ν{-‡δ]*$5(*\fI” „Ύ†‘Υ1lh²*CšμͺS󣹘²&€LπfυwμrζΪύˆ±SCH.<7aA₯`mŠ‘–Λ»B-Φd3>“uϊΦΉΡ XJ4OΞΟΘςBx]‚PRΉ€} 7Ηκ$ΒΡ^žR’ΞϊsΔ&@i―Uέ}}ǜˆΙς„φRΌQϋ°B{χˆ*) ό=<φΡ_α‘C‚@7˜‡˜Γ₯9 Λ–`%EJwΏπL%=YZ)ε‘‹Ή›lWž‹vͺš€l›³(«¬Λτ(s ‡¦4 <7©yΏ¬}§ΞΞτ€ˆm3f!`GŠΚΔΐrδ‚ΛΠΜ*aΩHhΉθ”χ"«y8cύ%&ǚ„B$ΰ‘S–ΐ¬ζ¬Nkn’'ayͺv¬#|w΅Ϊ±K<&6]ΤΡ²p˜οE„mAv;’ΈΗوϊr$¬”Bh' β°rVΨyΜΜτ”:Κ”μ•8~'ˆœDjwΑv(¬Π1n©ύ^)δΜμ†5^2:;³tvθΎΧΊ«_)δŒu•ργXΝSρ‘Υ€qφTxN‡Ρ‘ΛEsZϟΐB ’ΰίηF‰λ,)‹jςPYςςσI)m„Ί¬²NνΥΰwt‡ρ½‰έl‘„ŒΌΒp±|ΩΨρ˘{Φ9‘a2Ζό!.˜Κ-€άΘπH°)‘ΰ &~©ΈšΚUPν•³"R‘(±ώήpCΫ+kŒ-BKxΜ"g'ˆDκ’ΐN υσ\(€„r©|@98qΠj%L+FΙT)"KΉ€”^U\EμI•Φrχ#§g•ΊIuβ΄ΰζgΡ@JZ»ϋψμΆ‰Υ3©X<π.’šέσΌρE ΅€!•¬Ο •7Rϊ#ΥΈΈ³§0T’cψΊζ_ο’υHV*Θ¦† ςοξlVΏŒ\Π³zyXJŒŠ:³6B»₯Κfy _»»αύοwΡΪΉ =―χ–Eΐ]UZ«=·#άΟττgl ΒKΒc[Υi Dƒͺp€ό°ξeœΎ–άφ=†ώΣlcF2άάTX…»ϊZγγ © —!λπ%(ΡPΙΠ{’ B ΅@Ξ#ή#ΡtΕ›Ή@VΟ<š±«8δ! 
Ή@2#½‘Zΐ8stsUvKεͺtY&@oP—fί–πυϋ€Όe™|Ζf'*‘mfXεα,γEΈ5«k\~–Θ ν`Tρα|θTž…2;ΜZΉ†φδ ޳•°JZ_ΚΠw’Θa½ZBW{>ΨTρ³wlfό]‰ίέ7Ψτ`³‚’W‚Θ¨g­·₯™₯»_p1C±βΌΤv‘2Ε²;Tƒ’ΠGf¨e–΅Σ²ΗΈΉXŠ+΅ω₯h]%9ˆƒ£KmMμͺfη,5ΙL7(q!@ψΜ’ω–@‰+ίία‚Δ δxΰUΤZβΛ|™Ϋ ͺ.!{v²rU–§ΑΧΣ=lπ’›.lhͺϋjlΥ!+vl£ά•?Ι™΅K—^‡ΒAπΪp^WšQπΈ¨5E =Ό++…< ς5YΉή§Œ ox‘·uπKkά+)^x©ΌΆκ‘φΪ*‚Ψ#؎Š Ξわƒkόή:ΑfΫ<ΨzΑ&Ϋ:Ϊg-Ό%‘Ž.묫 mqΡ΄”&?§˜ήYϊςœω‹έW-`ω–LJrd ?KqΣ*Nj‘u (ьΘf§ΨΣΊ€x@]~μbK ¨±9ο;}Ή{fLΝx΅€E Y› ,VrΑΒ⸍j4“c3εΉFι©γϊ}³ΏF’-9ι½b1Ԟ)+¬°πλ\Ι ˆΥSΥXφi•ΟΚΝO*$Δ Ι ―’ΡUlHΔ#L”εdε—tC«₯0 ϋ=²ςέ€ˆ ―Ν„{Ϋ†˜6U―`“ψυ&ψήψέ‚ ίχ…΅‚ &a¨Α‚₯νnν(uiήχ„NR5ΐ†Bj‘eU„$&ΔΝSΌL¬Φš@eh <Η γξ2%l–U?SΙαrΑf5†¬^4v_Vδ"~H”›HI¬Β|V‰'R3“Λe;HA% ό΅©"EN““rζ֎ώͺDΙ2=ΒνΕOι{&ω4*·δ‚έ$”`[`Ίό»VU αΎΐ˜TξŽα,νR‰ΔjupŸ­Ž[φ“Hh­)œ_-½·„ξώžžΑvzυθΜΟA,UΏ`όΞΧƒ])Ύ?,Ψο3^γΌAXοή½λJ₯šΡR]ϊ΅nπT> p υώSa―εξΦJjγ¦Αc΅Šλqπ bͺ:Ω°ƒvnSXθ*ύ ¬ΖΑ‚’]z‚‰W’:kρ±˜f…ιV/γ“žʞKΔ,œ£ερi΅R«€»}¦Δ{ΤR"τ>Y±#‘=.v«<ΜωΘΩΧ[J'Š9­ϋf¬s“ y=Zo4x3|₯Ε™ίΑλ)il ϋˆΞυη bΝ“§υΫΦΜ}pσ\ηήτ¦‘k[XΧp§Δ!A\Ϊl‚ͺž)‚ΰn1₯=ŸΛέ―Έipσ žjΥδΛ#"+ΑŠΔ)ΛRΛ,\QυBA6YS/“Δ•$ιΉ ΦZeΕ8φΐθΩY ~†?ΈpȜD9ϊR (©Z@»ΏZϋM°-EθX0ey΅U&j5mκκ²qφ}WηU(•M2)ΖάUZVΤιη§αΗ2LεkXΉΉΡ C‡ϊuG?±fυ>«-P”Μg' ΙνaIoΩςH¬ϋ•d§&u˜XJlεd<ΔΤ‚`|5EˌψmΉ`l΅ιg2έcAΘͺ|³Κζ°+ΖcYj™ε`ΌJZ"V*t iT*4Ζ²ΎΤT΄rΑNZζhτ€8Ζ¦ε‚0Z”½¦–•₯‡Τ’\δδ4„/RU,τ2 «βˆ»εEΨƚd–Z$έ€œœ\8­3z2X!ΦύΕ#欑\2Ό“•τ– xvBλΟA—τ²2MοSnpmκϋ‡χͺœ}šφBeŸξ>*‘Ξu %ΣΫIqΎJRŸgόΞΊΑž Ά™HRχi6A0ζ™ €tkΚAJ†’Hέ™”Ž”YΠ I]6'=₯Τ4±rΑξOƟυ,IΊ)O {Δ²ΎͺP• ηΐ>kPM5U;W$Iυϊj€ ώήθ'Tυϋlδβ9IεΘtυ ϋR€œƒU•Ε„©œg"{AΦ:§’ΫUQΘgIΟ†Ι\ι}`!εuA•[«― +FΘ©m©€2 {$x-θ‚κ5Ρ LyO(Y–―ƒ°εφ*Ω.Uk₯Η…Π΄•«ΤΗB…ΩI/ͺΠRΆ₯πφX¦Z΄2žc°yΑV{6ΨQΑήμξXζz7Nΐ‚ Ώ»°±š©_‘etΙSn‡|ŒͺβF―e€:KUC!ެœY‘ΛζH8xlΛoFuΟDX—ςOFR…YžO X$ΊΚ:k+Γ••\)Ήt½•ΥXυθ„f˜νͺ*+²X Ζ2Ϊ13f†!Sή Μ8HεWΞ=±€j%ΐ`xJVAi$"sgΦ1X»r9 ΞZψ!οaνμ%€ξӍ‰°CŠœ‰a…άxΘd3Βazδ,σp²)Φ"V·i/Šδ)‘MŒŒͺr£Q7‚ψA°Αζ›νΙB‡7Κa!εbv³qQJ½ύJΑΦϊjͺYδ‚•RΣdG―»Φes”ώΨ-Ζ΅k™x¦w€Zˆ:T0T” άe)ŒjbcGrΦΠϊrο`›88§Kνvrf<ΤY]Ί•xˆAg•GgσEΨܘ}ΤωY@ =*Ω€h ―™§Bkς>`΅τ6tȊ1t€TXΥ€σ –:€ Y±Ϊ§Ζ\m©|@bΣ₯₯Ίη#• ΧRηžΥμƜ‹ r±Χα.V½IΗž K@±έώΖ…ΦI… ’ԌΫTεA9θφ>¦UΞώa‰˜2BΊfΐ°"«’BΖ~ ΞύΕb.ΕΗͺ;nΩͺ»ZG†_―ԜfξΒκQ†‹]7eτM+ž ¬Œ³°ΨjαΕjΟ,«D5 μ59=†,‚΅bίzΞ…\όδmŽ*•M‰ΈžR₯£έΓn‚'σ…Hs‘]Σr“@±Jφ7Θπ,"Ρ%‡$+G©&.μ¦εϋΕη† Μ-ͺΔ‹©”:gό”»ΜrB&Άk-ΓE9/KR‚‰ΜƒU'1ˆ½'°kΜƒPΞ’ΙΘXξD­’κ}Ιc™»"€Υ ƒτ˜δσp=₯fH)px‘X€ε"9DyCdμœ9™qsΦ–΅–2/gνχDΘb «L—ΐŽŸŠΖ£ž°΅ΒθU-ŒχA*‰;’u(Mz{RVΖκ)±~˜eHζ°kœͺυV+Aμlb°?»„Φιέΐ©A Œ%ͺ"$ƒx&>dέ,†’Oσ—₯»#)a0Υ―’;Νx*ΓM)}”βι.Nφΰ=–ςˆ΄Δ0«±θBRί'©št’‘Ά,ΠΓΑωΪ»FycT1΄sθΆ4Ξ‘Ξλp‘«4ΉžBΚΓ+,MεΉh„\¬+Λ²uΙ₯N>sΦ΄¬2CΣ]J†EŠΚm­td̜y)]MυΠμEΙ\Šμ[XnxEr”2ΧUh»ˆρ¦έj*Ι«η<\«C~†{·*­fQ™ΤΧΙh]‘F²–Ÿ#’αθ;±ώv«b\° ƒμZ§‡΄§tή³:@ΛKΌ©’)Π¬8·”>'Φ £”PDjV0τgt"1`„Ρ²feΛ&7. 
άω0―Αx3o¦ΎͺΔ‘‹Αf§¬IYyΊΩ¨©₯ΉΐηuV  f-'ΕΙ8΅ž'€Ε ―]λ’–yέ\Ψ˜ο±†V§ι0―”V‘‰b½i“Σ$SωζM€r1₯\δœέ{CXZYπ\°IcUb=†|ΥJc =LΝ•1\μΆ°Η’Λά²ͺ%ΚnBΟf&%E-LΈζ 3°’…1Ρν"ΉiXMI7Ε1%%RBt¬™—»~,Άψš‹γ΅LΆiεί€Χ#–UO°ͺ7γ.gVί§‘oΆTΗν rι!ήΏjUν³΄)σPΝίb̞ ο%ƎΪΚ7Ίμ8εΙΰΪAUΛΫ3"μz)io©τR 1Ή€σ½KXCwa£2λ¬Αk‹ΧɊ­ bΖBOlΠ”‘Z ξζY΅Δ\γ³jπΘKŠ!rα—ψsΖμVmΙ9.2΅WΖπ1Ο{­ς3υ ˆ³’œΕ&•”ΉΆ3AH œ΅«΅iΚΚ0α—Š‘jט‹:Όœb\Ώ°ξώF¬v1ε;:VEX±`Ή“ωPΣΔ UE5±D3›Ξpή$AX%Žϊ†Βb‘uΓΝ‹pΔ‹Νg:.EΠ’{NU7ΥYΝa₯ RR+,'FOƒΘ€ sRVβŸδ32vΔ£PΓΪΉΛšzYϊ5‚bΏJ=“βV²]+ΛZω@z¬Ψ²’Ε„<_–˜€΅X3―¦s.Ζ ²ά—°Js-εΦT3Γb2@}&ύΎ[Is λθ2Wι¦κφ-ΥΛJpΒ  kI[pw˜——I=VˆΔ`Θ1€κ£©;/₯ ΰ%αB#9έkΘO0Ά‹2>ξ°@ˆγKM */£έn–DRG&₯v+ADr1Eœμ*Νͺθ`Υo™ Υ IΦμξzΑZLΚ…”eНκ:o£εΡu~k†ΒΈ&πΥšK°šTGΓul(Μ:οZ¨š]B Gt¨Θαu4y늭TίΒ*1ν°„ ­ͺεί œYΜ`•λR½@6ΣZΥSϊ5%τΔΏ]…„ zy,ͺ©Q聃€&<1=Οχ0΅ƒ¨V‚»ψ,‚“Ϊdς‰9ΉθZ2ΐLH^wY>πγθ1Ι1T”€ηθ›‹»%ή¨ΰι™8’dW1γ³Έt9A]š¬v½›cΜ\ξβ₯ŒI#‘5›«sŸSΦRήNκ³ΤUlθ O•Ϋr£AΛ‘‹‚Ž­βυ7^y lμc³Κ›₯Ίkχ”>£ΧH˜Φ—²<Γ”Άy³T"ΠUPΦ0@Ο°ϊ’,ς³ΟΞΘgrΞy·Υ΅΅]Fθ―)»§·0~ώ‰N&Ί½ƒΧ¨^YσF¬΅iκbc§Γ|@AΘω΅τ&€B),•€λμ 2OO—£h#CTϊ5‰\BWγY½+²‘`O&Y]ζKΒΠ׌αc³š–4Α&GήΦζKφ,ιηΙΝΑF6‘K’e˜ ]ζ΅κ“UM‡ϋw,q}<Ψ.β± LR‡Ia­y΄_±iͺzιΉ'X”Eτΰ’²‚„1\TA`!LU"Y,z=°O%6 ά¨ΈaC₯77—rπF€ +v Ξ ‚InΌοXi“Ϊb‘όx―;υ“ΛϊL₯'&΅‰δΐ£F"5ΫZTJ$L7ΩAlε²tι¨μ°ΗN£b,MŽ•`Γ]j˜CaέSDX„ pW#hυ=f5ꊭTWς’κβ΅–ŠαkY‘”bΒA*\Η ½Δ9·ς+쑉YκΞ2$ΗR_&XσΓ›I…ςκ'ƒ=μΰψύcLϊΓͺ…παό¨†Gw˜κ-Uϊy›šŒδ`Ή2،»Κέbœˆα“lR‹uΡ₯;ή0¬G˜‚ RΡR'Kωžbbβ;+€ΓYΚΊBJ/Ά°C―[Vr_’—”ͺ°ΊˆJΐ₯„”S+ŠΝœYy* μTΚH‰7Ψα²[»”€Œτ΄-Ωn/ΘΙR@Κ½¬Ε(uΈΕ"ΫT€ε©h½*^Γ²τwΤko$*“ήΝCNυtΊ―U ηZdMΥCE«ν–©VœYtΠl‚˜’ΎGΣ£Α~ΤIβΞΈ€ 9Z‰Ξ±φXΊΝΨ9Ÿ~GυΙLι t‡βπ”¬Y³Yβf₯ΐnTYα΄sl²όHPŠ­ΨɐUM²o±Ϊ2ΑʐB,©Nu™_9λIo:œ‚ψ44ŽR󞡀Kε‚Aω‚Ρu@Λ‚ή τV³BΚ^<5ηΦΓ‘dœώ—…r*Νd‡Ό™h½DΦk²\Σ ξ­z°k§(_*ΟRF}Ξπ­kΉ6YfNUgMΆπή€w#;Κ³rA•' X’θ£a΅έ₯G…2Ϋο ±Ν"ˆ±:°QTa}½S‚! y‘θΈ£vΉϋΊβžYU(VΜRξ؞SuΧέyŠ4 t<˜7 άœ‡¬rF6q§WŸs¨#ƒ©Z2ΖΝ€/+GXΥΔAw—)‰)5T… cΗΗJ¦”– XΑ…cΣ炃ρ‰RίzΒψbR?‹ dijͺΡ-₯3Δσ…Ζ ‰€Ž³”Λκ΄ΐ…Œ› ϊ²†κH/]†P,I‡qRsΓιIi«*κ8%‘‘κΈ–Β©π­Ezζ5€ ZͺΗω†Νγ@&]UρΘZb»`[;Wύ|½N+sΕ‰–a]†Θ9κ²Ο*sχ•+‘ˆ‹•7Zj*Y–~~9ΨVνšδΨT]s- B&{YΦΘΠnHμ¨φUsyϋˆRLΛύΗίΝ7b₯Mͺ₯Θ¬vz6A¬VH‹«L˜κͺ«FΒ’₯`7{V¦”±:γ³ή‡”Ή°Ζ¦άvδΰΚ±9‹Rr~o,t₯¦Ζ=–«j.+η2Θ!#΅L£“‘"+YŠ€›Ό9$P„RZƌ™l–ηΖR)eη'`U"₯š΅V/€]΅ώ ϋιαMΈΉO…ˆΛ Ieμe³β€(±²rε΅ή½“΅}•dŒ;[qY^ζΓO.^Ct°©˜J€E‹1;K6\j$IHqΕβηuύšα+DOSγ’₯λΪ;Υ!<9c…Šή°aΓ$C£V5–UMΖϋ^V°ιΌΗšαΒΥeν¬€βΔF„t«}¬AΌ#Ž ΕdΈ Λ₯ΜF‘Ζ*&ξΡυK­†”δτ8$° ²ͺ5*Υ¨Γ‹ ^ͺ>=₯_.XΦ t+BΖ€.x,Φπ9 a#όBUcΔ\γ―‹½¬³DΙtGj*#uŽ;⬙ŀΥGχƒJ&ΆSς€œa@`7XJŸJόΐΔ,c½ΑΡΉ3vΏPγ|rΑ;ΘΑς» %&— ‡ΚBΪ+b_AͺKIδa"aŒΝV=Φ pV%=@I^VΘcζ7Λrεΰ£#Z$£ -@,ZΉι!¦Bι€f9vYΨ‘I )ilΉΉΥΐšW{pˆEσ+—Ϊ;ωT?@Ήΐ{£[ͺw$I]΅’kΖαM°4‘‰8TΞXuωlΔ,!5œΟύ.Ί/™T§‚'“ͺ²χ‰ΤΟ•˜±a©ο2'Β,vdz‡Ϊ(XηŠΩ²ΖΡJbΉΙΘ«²Ϋ:ΣFβ³εε0– ±½βœ†Ώ½!‘'«Ϊ‡eלΘ E²J–5[I^άƒr““’-Ρ3Ώ-9r«άx‰1#B70JPμžσFlF”#Y LŒ ΒυΣƒaδ°’s +…U½Α‰eΨ5αζ°J6SΐεBNιβΕΟ –ειΈ”€@ΞM…ΛJ PΡ=Εxp¬<ΑNΙw―$ΖFBΟΎζυpHTΘM £—}Φΐ9J ²TZDζJRsΨubΠκ«)ψa6V-P!'‚Ιl&x«Ρa>™*Κƒ„ϋ jΖ§σιιv:gb‹υΎ²ͺ‘ΊεIΒλγš‘αν¬ΟΉ#<ˆ€u‚ΝGςΫ ˆ; M–Ϊ@‰«ˆτN 5„¬¦žj€›]ͺΔ²g`πd»Ρ‰‰U$Œ«…œΡ0:!#@A8†±΄Lv5υΰβ•£,α™θM6.ΑΣ°&©GD&O‡•ΈΓηbƒcυ/gH6p·¨+΄p³βζC➑„›Η?έ”ES7d1€Β†Ώ›Η2eΕOͺrΛςΤt’΄Tσ[=!U‘υ΅– ±X;mkž³nŒ³vν;’‘c$΄B°§αοͺϊ1΄Bνšδ½:―“ςf΄χkσ€DΞ‘G䐑‘οZΖ· AμlŒρσ–ε#¬˜#ͺ(Q/΅O]κH₯ΖŒκ"KΨ«ΘZvάκή]>™—#@XX”¬ω²ƒYŠΕ•s‘³Δω$vY ͺuŒJI…œθ:₯.Ž­Φή–J C^άYr2[κΫZ$n'$jτ³΄}˜LMM0k ΣκΉ„U€eΌzΦ²|W7_¦€Τ-oΛ ;βϋŸ*/Xk@₯ΌUέΨg͈°ς–%άSeF­+ν@W;!A‹ƒM 64XŸfΔ5’ŠGK1c—ΓςΞz©}bΙa ά‰# Ν…q˜1›‘ΦRLYωΒ&7}Σκρ‰© ezηDWX‚’"ˆ[ AΦEΞΊ}ƍΡIΛαH·«ΩΠ)ωζR‰PΚpžAͺz¨ήΠ#kIp<)/U&n-ΩŸΕΞγYρ7R ‘€Wk•οZ„fΕώ™oΧ«–χΆ<ΐ§B¨N*·r·«»‘ ψ"5Iζ₯4Θν’s=+΄Œb…―e”zγ1=2_˜κƒι‚ˆ’‹‚½―`Ο‘Ψ0~½°™γPυξέ»¦Ε‹Θηbχt)mόJ brΜ²Z+Μ€γ¨e6³ ΄vdrηΓ׌ωΩ*§Τι/Ξ|VςΙ²‡Δš’gΆv} ~zWVjF―5lFξΰπ»ΘAΤόX tEŽl΄JMŠcεΟqj†‡EĚL›Ρ ¨sI)―Eš’y/©.lΝfΠδt3!η¨ΘˆΕYσ·΅„ 3τ%έΫ’šF‡*dt␠%b†Ά5ρ]«&6vAlD™ΟlγF{μΆ„ΝWb_Έˆω˜Φe©(™=: 5°,)rŠEύ†uρΪL:ά#θ¨Ϋ$ ­0©ή”±xβ6ζ”2(:œ+Ql ₯ς₯δf°±αύ%V*Dάφ°~Μ1ΌCόμXXόϊ„8Ε9ˆ‡‚}ΊΠ€AΈ¬›“Υυs“Ι)½+I)\ZΥ/•€α"\ά¨έ—ΎΊH)Z²μTߘ2–|€QΞ—u‘³B…ήˆ”e…ΈdRQ'Ά[ ZcO 
:ƒ‘8ηΖ!+.b"YͺηςK`OͺhoT†:ŠΫ¦κ’‰‰ΝŒ•Ώΰ=-ε[63¦ΜιΌ»›΅ό†^θoL”«ο­De$Α‚l4”9ΞΛN)?·΅QΘρD9 rs½ e&tœ_–3ΦswΘςz@»vο©ΡdI”`θΗκκ])κΎε,ƒr.r½ˆΒΣA©ŸNP©y”IUa΅Χ)₯Q"ίbυHΘώΙΡΤ°„+¬Isy…žΥ‘Κ―tk‡ I'ϋS‰{Kα–ς¬’K…)‘/’₯ε–l ₯ΨJΡλp5ξ_©82Ο*ΰ} gΨ§z>œ Z@SΰΒ‘σekΌΖδ)sΑκ)]2'f)ν’rί‡Υθ›‹`‰!CVμΧz~J>šš:ΦΒ•u‘_¬₯ g[H°²E'.[ ½`1^pbΞChΘu†Υ¬ΉΚ₯œΌΑλiΝs^Pθ¦ηΦ.οχE³Κ‘LΉ›Gο BJ,`ˆω²Ρ3ΧΚIO{|Β£Ζθ Qν40‘DΦΣθφ‹;₯ToNS%½N- ˆFqH.ŒήC4θθDχ―ΖαWe7Υ”$Ιpc₯tθq#lήwυb›άn…™,°CάJΖέcLΦ#*ν΅βΗαz'—‡]στX)Kα9nkτΊXCi°³Υx₯†0ε VΣα ΰΌHΟZηδXΆ¬%=R»y,Μ،αšOΝ Χά)ϊ»ͺ3₯ͺ /ϊ0RBΞΙ*Α&Cί7D)‰{'ˆ6'9j9Ε‹$`Ε%λ1E ;\ΜY*’ΫΕωΐŒΞnI©]+bθχ8Π:|0!χaΕ—KΑZ,$ωΆNˆΆ štεhIjιζΑΩFΨŚ₯€0\ίLn›ϋΐ"K' @Iυρb’a9Αš=-7 ΊZŽωx©ώέoς¨uFjv rLr£τ…Δf°τ!'ˆ#,\Τ!’H+€RqdΔ8Ν¨™.”Θ€gΓ‰Y)ΥPΛσ°@έ}Ό–&‘ΤΔ.ΐ’ζΘΒΦ‰Ιd ΪzLήͺ'Ψ<Ζʐ1›Ε(Ι­εΊ­°‹.™Δbr9Ϋ ΚΌBΟZξς¨—˜EX\qπZƒ {τŒ‚ ΊQž;ξΏΉh9‘‰°¨ )Ωulςδ½-f+AκΡaqΆ˜Q kώ­ωΞΐΑ†Lt₯@½?⢐žH%3»€ΗΗ­―ΆΛ˜Cƒτ w€Ή«ΗΗΉk=Vƒoϊrra’““Κδ{eυ‹.ύεΉ’Rμzτ%C}—$ϊAς.κrηN!IέΟ€Πή3ΒB ύβœjQψRυ₯ λnΙψ@,5FΕ“ΔίT?CJR\‡».W]ήk„‰Ε1[ϊOΥ«D„lςΡ]—©nΨTθ© œέ§¬Ω‡ˆŠŒ3ΤeΛΑΡΡυΆ¦¨Ι²[J{>ΰιf8& S2!­BweUΜ'ΘζΆ”†₯½‘;VΔPΰNΎΜ;¬il¨«άTA*DNΖ“cn­Ώ%k]D0ς\h„εŽ\O%d₯?ή«zjήΠ€ϊŒτ¬Τΐ€R π’φ–œ :„ x‘\QΧ‚λ:bΏ‹Φ¬€¨lL‚aυ|ΠƒαE<°Ζ–~Vε`±?I픬zw’™»–LΊbα₯ε)`-drR‘ξd'¬F1Y±ΔfKK~#ΟXiH`§*t€^L{Τ©&Sy―iPB=ΥC‘ )nN4ΐυύǚΪN—$ͺπŠΒjΊ»2ωεΰΕ:υχ8Aδν₯f>aK€;FΦ‚ΫΕT±T΅ rΞ»MIr” NΎ!‘zΚrρu…†&©r`ɍ#ΟR±υF·ΞΜΘ¦IΖίG«ΌŒ΅«–“ιΨΐU…Οfƒ»xΒΧ“δIΣέΛέzUͺ£ž“-Υ`n\,tž ΧͺU₯g›€π¬Χ<;φ 1Ό¬σHεb…1ζΤ ’ƒBVVθ¦°ΤΠ $΄Ž«"‘%ΑΚ!˜^°ε’Ξ€ι‰XjΉ`Ή*ΊQε w`qΖ¨P_.….%ΞΙk―Z ΅Qarη(K€SΙGk~‡Te%\žϊ=ΚΕ6jΨNj–‚τ’`V‰¨Υ3 c#scΦ i+ š*_Υs8RΣΩίΓM—ž S τXV'ˆ"ΩΌ£k­yƒθΩΔV˜¦RPJfuk3OΑ²KKL­šP.fD~)3Š*———_±₯wbΨaιiž Γ…²ΒKη'«©π{BVaͺzH/΄Zf;ΥaλQͺϊ²„Z‹4TτυŒ°ŽxnπΰRύ>τr90‹ΥI/©‘ΐΨΙαW©Χ|HlΠ@φ;(uΧJ §Ψ9AtAH}Nqc¬œΝ8Ί &Υa] ¨ι³Λt“2Δ₯­R@²9υzΛ•B©D)3 =ς” ­–eΞ πήΨy+#S^••ΌΖο|!ζi@[4SΚ»^@8Sn|RRΩzΣbΡ3β­b„uΆκ7$ω9 —‘"%ζ‹8ΌΘ}J/Eή'ZHP‚iΘ½YΗ[‰ŠaWNHR‚“Ό€S ͺΦ@“jβίΨYλΔ Δ@Qϋ}n³oY&k‰ι₯δ"Έ«–ρεRΠ2ΩμΠΥ΅τy*—φŒσ1d%Kjψ‘΅h’€Ή"μ¦k ?Ά :·Vͺ΄šαYμΠυύ`…lτό VΩiυ`yςϊE%Y*·ΐΠσˆYすLΗΖh›ζάο₯„œ :ˆ ¨&‰DvR^azbHO-ξ¨vY#:Ή!•υ+ —₯<…­ϊ 1»RSΰ²v‘²\¬GͺζRRCv?SκzY–ΰ b«u7Ω* Ω|„Ψ ”šu η\λΚ5«*Hί_dΕΌfshπ΅K…Žd³+ύ>JTlΆBbεbί:NNmL²›ΙU9&2‡ή¦1GΊάψϋg%:nYEς˜A^ΥΎΟT!υž*έν‘ŽΈΛ€bjήΐΚ„Ωϊ¨]€uN¬Œ‹H₯Ob];@KœλξεΤζ ¦»ηυθOijT/G΄2ΊΨσηk‘7(΅ab;έKέ7Μ Umž ‘±JΒ°Nm)R&…ϋκΒΘ‘‹\Φjw¨ž@LJ€%8Άϊξκη`³Ύ<ΥΐΕ:τR!£JένQΣ(―1ω‚ΐΆPbΛ:Χ4ΠΠ‚β.+ΜR‘Ίv’Δ²b‹υ’—μή–žΒtŒՐ©7`θ•ΉRβδε¬HŽSJδHp$ηsHοΞ«ΨR2υε@汜 :Œ €ά‚ΦqΡΣΓ΄[Ϊhp7„Δ(Jaw­aΜ©¬šΊΞθϊLA₯ω–Ο©`όn-ΔΦhPΡ• K~CL^‡ΧΊΕίĐzͺ”Ξ‰2ϊzih‘;-§AY kŠ 5‘/λFΘ•5Y―…pŽnˆ#“ΐ†„‹}©64»I•y—­EεΡAAρ5,ΒΊύžΙ[Τ-5:³^σŽe"΄ΘΪu2Λ %₯†&₯€ΌŠΔ"»“σφ-0v-;brΡ’Œ–Ά…)ύ¬ΗΆ dΒ(Υ{ΓέΊ•ΜΧω @{θ$#δ26O”Βς3BΡDΦ\xΩ¬‰’λ]^ €ϋy#­―U N¬Γόq'ˆœ‚»>θΙNZβγJ”’Ψι4=ΓΝSλ˜S9λΫ’€@2ϊϋ†δG₯α4Τ>0Ck* —Ζ\‰ΤΤ9ژίΝ~)έΐa;,™UΈ­UΠΪc©RRΉ[η5₯GZ‚–LD[ΰμhl.²Š1π7ΞΑ’œϊ[πbY\Jl Ž|Ίί©\ ($γсd#λτ‘όsˆ΅ͺό± Α}Η’[λNœ7ΒCšΨΪρrhQ9€«-C XpͺAk8Ϊ%Ξz”y2Ιnυ£Pš}+zΐN»»mΉΠ₯JI­kJηιŠ –ΚϋΚΪδ@I˜ΊNY9ήy²#>%ΈI`σΒχ°ΈΚ9χ΅8Aδ2m .GXGJ9³λΙ΄fuγHΰ)Ίukω[RήZ΅*Υ§aAŽrΔΒ²e’|6/`-C+Υ£ΐΞψZδQΰA"wV ΉΆ<Ĝ`‚=μγρ;ƒν.ΎΏ;ΨΞ‰Ώu ή ¬wοήmwc Δ€Zμ–΄L‚ŽaΚ²Ψfέ¨}YGυkή₯›Μχ ͝rήήδH`6’Τκ…bn&₯\ŠD6I[ΔΦ‘ˆΟZάͺ•do5tY+š³zx‡–‡Uϊ‰0ι‘arβ‚¨Φ KΝo!Ÿ#\ˆfΏ „‹Θ@-ΐύYΛ΅žG‚ψ@ό½Α&ΫC=>Ψ ˆ:ΡƒΐM€?KE_θ3€η778.„.,ι‚z„§gό²:ηφ ή+y£ΧC’€Ρ`'m–0’žΓYΙόr·ΌCΟXΘjF+Λϋ°*ε$†OWς^"ύ€6LrΪςa˜!ίh`#™jlK‚PdpZ°ŸυΤuπ-Ι‡ec•ΥKΠP‘šΥ8€( χͺ™Ώε'†UΠaŠϊsΉXB4οΰŸ¬πς?π4Rσ1Vˆz»‚RΩKb?@©Ρ,θŠ( TΣd9琞.,•πfN›zT•ͺύͺ-•ΝAll#ρυΨ`ϋͺη|I%©Η:0I  Κ¨U©e…1IŒ ͌ #?ς³‡eΑšΑ¬η—,ž’ R;ς<αGqsB8 \YΞh΄Β’]Ρ­ή›‘GͺΆόΉxδθ*g‘B9y΄RΑΩμ°ΤόJr`ƒ“ςψς†ΌΔζ1¬{;`)²– v(ΛFK”’0δŠcΝBΖ(—Ϊ`NN5]t)%Ntl²‚΅ΪΝ’S»δ0›F@Z¨Ρξρrq@Τ#ͺ€ϋΊΥ ’k©U>§͝<ƒ9'zΗ₯ϊ²ΐe&ξHπךΏA)=œ, ΉUXϋfh69A8A”/ζ †'\UΞU¦έ΄yΝ)g€Ή₯Ή_o €QƈKΝ$Ξ"ˆfušΧ ¨Ί*%·Πξ}₯Π]΅6«k“TKx†ή§Τςͺt|m-  ³žpθαQ1δΌζ†gχ`ϋe―Uv©rjΧ οΚQν!kΖ1[―‹Ωάε‚ULƒ›ΤHΨL`pP;τuT φψPςθͺ2―Ci|x¨,Υ–`h žΔ‰mP–μ‘cp&μ9c&ΒJ,wD²Y€"j3FYbς›T*ΖTJ}ύ›Η?]p΄ΞΔθTΕί―RkŒ(`hσΝ‘LΠ,β€–B³„DΕΰŌlŽCr’%‘΅Š{UΚHίή`ύ'vS™³œy¨O/ΞvnΌ 
Gύπό‹Yc.w-ϊ_(βΰ=Ŋ6¨όž~Gs΄Ή`Gu֜ œ κBΦT(Κ^#ΔC½šjηΧV$Ϊ*ΥDͺέ‘΄(΅y L «>†μq%a)G>€Ο Ÿ?B‹ΚR« ΟPθQVίA¦»Yγg)“Α@ŒRu‚p‚¨ AX`šs0šT*^6”―Ω`ύ'jΨ ΄–α"Žntt>8ψEΒ3Υ–₯Κ qlš„”I³zH8‚T p‚θ!!Ά¬ $Ψ°£¦&Ος•ΝΣόg\ΏΡCŠΨ€ζ((a–šΖεθ<Θ<†ςT;H‡ωΕΟα]„ƒ|Ώn40¬ !²­ϊ5Οkq‚θ`‚ΐxMΦ₯ΐnκj·΅βΞI]₯΅Νn¬Ό‡,©Eυ’|©‘ŽΞ„Lκ"Ό˜ΚS ²·δΌaΣΊ«ρ²$2κ J•KΘ Β ’‘aθΚc7‚]I³1Ys’βh„€on$ϋ=Ψΐ›Fή ήDt§Π€kς$ xΰ͚[9gΌtΑœ Ϊά‘”3Θ½έ2"Ahu[Gg‘ExΘP/ξS‡ψ=ͺΰP ‡™2ήh ί#o›!QγαQΌQpΓtUφŒμψ…³Π8εθy@ξγFλΏΗf ›*φ5Z.†ΐD»fIΤ8A8AtλΠCίE6“9”‡rq½°Fa„« Ύτ•εktU7T}mαH'ˆ6Η01P}―*Η0:νL‘«Χξή6Όn@\Ϋ€J΍bNN ͺ–Πtƒ MG§BGͺU *Θ(•]σZΝZ¬/ΉkυD;„K œ ŽC.[Όΰͺ²εp΄ 8£ύΆε]X6K59Ά‘`Ck»( ;At0vάΑBρΤαθDPΆ²j*Π艙"Νqό›˜!ŽQͺNXΐlZ°ΗƒdγΌAXοή½ύξr8ڐƨΥ{φΏΈk&τΤη–6E²ž@Υ!ζ«S™Ψ ’:rΨ0Ψ£Α6{;_οl¦{‡£ Ω}ΔΥjέr΄AΌ%Ψπ`?)σωsƒmμαp8Κ’έΊ|4NeύΔ?)y'ˆ€^Α»(γ9οΗσβןŒa¨^N‡£\ wθΠ+ΖΓU ˆϋgxG;ΔξΑπΕdQƊ0±°ψœb ,rϋ΄‡˜G% L>»³½°MBLo”s8 FξwΡ}EΟ‘}NN‡£Cpά G²qν‘ΉKό€8A8G‘pςM>sΞέΕf5ΔΔ§_π“βαp8]ϊe;Ÿ9²(w‚@?„Γ Βαp8ΊGτBƒ M&‡„Γαp~;dZaΛ~CŠs @˜wξp‚p8ŽβΘRN¦ΓΛ^[ξ'Ε Βαp8 …ΛFwΝr?oΨ΄β+Vς“βαp8…Β•χ?Y$Μ₯ήςΤ!~Bœ œ G{°+χ€™Ÿ8mΈŸ''‡ΓΡŽώόΖΗvυIŒNN‡ƒΐόΔ^η.μ}Αh?!NN‡£ ”ω†}ι’ϋό„8A8A8Ž.<³δ•n‚8μͺ‡ύ„8A8A8Ž.ΌΆ|e7Aό䦉~Bœ œ Ηjμvφ]E‚@W΅Γ Β?m‡Γэ3ο|ΌH§ίρΈŸ ''‡Γ±‹_~½pΘc Sžu%W''‡Γαp‚p‚p8''‡Γαθ)°o°ιΑf;ΕxΌW°Kβ㓃νθαp8Nλ›lσ`λ›lkυœύƒ D±k°‡ ‡£σ b·`ΓΕχ}aκ9 φMρ=ΌMœ ‡£³ βλΑίμχκ9wΫ]|w° ‡£³ βƒ .UΟlΔN‰Ώw ή ¬wοήώi;G„‡˜‡Γ Β$ˆuƒ=l3‘€ξ£žσ%•€Wζί^Ho’ ›[Γο6ΪόΨόάω±ωΉkΤλ.,δ¬ΜUJ3b5SΏψ³ca…Υe—ΕΗ§”“¨Γ1=RΘi‡›Ÿ;?6?w­xݎj”σ ΏΝϝNN~Αω±ωΉσΟΥΝ ’i'χ?ΆΞ:6?wώΉφ€ckΔλ:9ΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AtT€—Ÿ‡ŽϋLίβηΑο 'ˆό_hλδρ‚‹='ϋPŽΟέ–Αή–ΣcϋD° σΈ°;-ΨΟσΊΠω=Ρ^χC+>―ž@ ί φX°Ÿδνf8<Ψ=Α{{oΤc³βΑφξ۷㬐[£vΧz9:ΆοΔΟŠ“όžθŒ{’UχC+?―N'‡A¦#ΨΑώ‰ρηoΞΑ±}&ΨV—xn \όΑξφ©ψύ…ΑΫ*ΗΆ_°ϋpγχOϋZ«Ο]œurT°{ƒ}2ώlˆ–’ρ{’ύξ‰Vέ­ώΌ:‘62N0>ά³ρ‘ζμΨ†šmόϊ”`_ieΈD_ΆΣƒύ&ΨΛΟ.ΰ}ΑΖν‚fŸ;qlο΅vΌqΑϋN«vι/ ²αs=\=Φκ{BΫ‘yΊ'Δ±μb^€αχCήΦ°N"‡ΟΕ°Tbχ6 LΉˆlƒ΅σkΑ±ν;ž.5ΐθFώoΞέVΑ>]iΔψo@¨$Ζ‡7mq½5ΚΑ?s ϋΟy‡Ψαέ!Θ<›H(ώ(Ψ-Έήμ/ΑnΆ}Τ²ϊ`«ο‰Œcϋ°Έ'vjΕ=aΫ!ρΨήΗ$7μ~ΘγΦIρΏΑ.VL‹bν­ρMƒ]‡c?λ%Ξέράm•“c£±g°ηsSΪ(žŸήκά΅κžH[«ο‰2­ξχCΧ°v$‚wΛ]œzμAΈ«β{Δί9Ϊϊy:ΆθRvI#ίΟbŸW΄βά©έψ‚m'vTθƒΈ ;―F»jŽM6ΚΕ°ΐΊJ’—y| ±!ΖΟ―jΥ=‘ul1œ3’‘χDηνφjο/d<Φ5¬m "`ίŸC}τεκΏUœΜϋλG–ίˆξcm½θΦΎ;§ηnΓψύϊ-8Ά7«έ8κβ/E Ξ22γΤ9;6τAΤβ{BίξŒ]ΗPΖ>Ί",GΗΆσ9>oTωΪΈFζΘΌEΦ°Ά$ˆΈCDmω1ΑŠ*½cbh?cΗΡ+V•\]Γ• Μσ±uΰΉ{§ψIίb₯Ηώ=ιΨj9Ύ˜Wϊ[¬z€•09<6T₯}ΆSŽM{1Ή°ΡΡ$…VmKςΔl =~ύήXaΠ[°|Ώ8ϊ³±. 
7Χ6*F—ηcλΐs7/6ΖυŠυΰθN>΅§[ Ηχ%±0=μ$?Άζ›z]ώύƒ}?ζ.Ά"ζšΆN΄-AœK‘Η²‰ψω1A3.Ί]ˆηΏ+ΥΌΤSŽ­ΣΟ] ΕmΠӎ­NΗχeινψ±5φΨΔλ"τψ³υb‰,Νύƒμδ­‚ύ°™λDΫDΜ䏏uΐΧΔΖ”ν„ Φ[Τ/Γ­€zυΔcλπs·nŽΟΫΊ9Ώ'ήβΗΦάcKΌ.ϋž~f°ƒM“Ή–f¬νNθLό^όZ?ΕIN<χOΑ>ί,­–<›Ÿ;\ύάεγΨ―{eό%³χΗNρλ"Ό-o’‰-'#yΓΊl$‘ξR:$Π³9@=—1iτώžtl~ξόsυs—c«πu!Λ±[¬Ύ;W<6Tz.N%\8!™p cC|ξ‘δω@,QΫ€§›Ÿ;\ύάεγΨ*|έγb"Zχ½-―δΠ‚Ψ5Vœo¬#cΉ±"δΘXήEFώΉˆέmΪ(uΜ<›Ÿ;\ύάεγΨjxέ_‹<Γ›σL -!|1yσ͘έ?Ÿ*Žβ9λ#λcsŽ_C©³_O=6?wώΉϊΉΛΗ±εύzjw‚@sΘuμDŒ2Ιw‰!gDΙ‰’›vfldωCΓU s|l~ξόsυs—cΛϋυΤV…Κ€zδvρd}$~똼ωM<Ωƒ΄:a%r{l~ξόsυs—cΛϋυΤ–lI”κ}·¨7>/jό3NhϊΏ¨ΔωζRΒY~l~ξόsυs—cΛϋυΤξρΦΨIΈt±~ „ΘποŒR―&ή¨Ή=6?wώΉϊΉΛΗ±εύzj;‚ΐ8Γ莽SH¬ώ§”NνχλwρζφΨόάωηκη.Η–χλ©ν"–sm…§ξŽ'ρ5Έ}ΛΘΐΏTΏ»SŒεέ݈©Hy>6?wώΉϊΉΛΗ±εύzj[‚΅Ώ˜]|=λ€£ήωί mdρ?Bύω8γuΟέ€Ή=6?wώΉϊΉΛΗ±εύzjK‚ˆ'πμ8gξΨW K«yž>qQW:ωσ!ΉΫ  -·ΗζηΞ?W?wω8ΆΌ_OmKρdNŠΓ3Ύ3ωϋF½τOŠη‘}΄ψώ`―Δ¦‘χ6° .—ΗζηΞ?W?wω8ΆΌ_OνNfq˜ψώρD~7Ψ£ΜΰCτ*βΨLόήgϊFr|l~ξόsυs—cΛϋυΤξ±~,bμξΫΑ~Ώ†|ν‰’μλΖ&wiηφΨόάωηκη.Η–χλ©Σͺ˜ώμ'ρλ"£qdfΛGεεψΨόάωηκΗ–cΛϋυΤ–A5Β¨eΞ–sdχίlχ`lα…–Ϋcσs矫[>Ž-οΧS»D―θ¦a"Α‘q1`ϋν9ΨΑεφΨόάωηκΗ–cΛϋυΤ }ΠD#ά8*Wo*ΗΗζηΞ?W?Ά|[ή―§v'ˆλ ΞፚΫcσs矫[>Ž-οΧSNJυΉΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AψIpsssss‚pΛω¦7-γ9WRM ›UόώΛΖΟΠ uœψώ˜5άΔχšό»’άΓ7Τc‹?,s6 φ-Ώ^ άάςN(/Χγwβ’7΅Ε½χ&;%Ψo*lkδœθ½ΠXζן„›[KόΈaZΧ-ΑžˆSΎzΕΗξ‰bjη[wΧ7¨ίί0Nϊšl ηgΔ_ƒ½Φω’0’'ΤίlNœUό“Έ£H ΄ί"Ψ0¨‚»;γuήΦδψ»˜mόή8g`Y|ύ-Δσχσž£όt|ν©Ρ~,nZΤΒq}ZBΑΖΗΧϊψ›‡ΗŸAφϊΊψ3ΜDx8ώ.<™χşο‰ΜFρΈy¬'ϋuλαζΦ ‚X›™ ™σ 4r$AX‹½ψύu)›€ρ‘qξUaΔ¬Έ8ώwEζ6]Α»ΖηνΗgφŠηR{λl:Gj r{—8?GΏ)~FξΊξA8AΈΉε FŠŸcΐΛw* ˆ·ϋ}ά)OŒήΑϋk ˆ?‹Ηž¦˜[ΐχ‚]ΟΧΔn6ΝxμΒ7ί?μΔIΑNμGρx爟_l8άQ³v–ρΫΙ2L„·Ž―ρ!19AΈΉε… ξ?Ηbέ ‹ϊM Šψ=ΛMk ˆί‹ΗζŠψwγ±½c*Λx ‚x{ργ ‚Η? ΨŒΏ…ηžiόητήSδρ‹`ΟΖ„Ή„„›[[Δ $υϋΨi_Ώή;X‘A`ύSΥ+¨0šR$Š·3^η’`Ώορ± CL;F―hύfš*BL’ φ‰; γχŒΉ„˜fΰύͺ<›βΧא T>Ή“―Ζ0Χ½~½:AΈΉε ΍ΙY€ή8ζ-‰e±Σ²"ώ|P\pΟ―’ 6‹Ij$¬"I}›LRWB%’ΤSΥοCFSβΉΨ"ώόˆψ»8οğμɘ\?_Δ₯βΉ7Ζrά·Δ|Λ$OR;AΈΉΉΉΉ9AΈΉΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉ9AΈΉΉΉΉεΦώ?^oΔ£^l$ΔIENDB`‚xarray-2025.09.0/doc/_static/thumbnails/ROMS_ocean_model.png000066400000000000000000001577661505620616400235460ustar00rootroot00000000000000‰PNG  IHDR*. I989tEXtSoftwareMatplotlib version3.3.3, https://matplotlib.org/Θ—·œ pHYs  šœίcIDATxΪμxTΧ΅ΆcΊzο!Bˆ^Iτή{ǘbŠΑ`lŒ›nά;Ζ5Nά»γήk\γΔ‰Ϋ±ΗΙMξŸάΗχ¦¬|{Ξ–6gF32MbΝσ|Ο̜9sfFšΩϋ=ί*ϋ{Dτ=•J₯R©TͺSQϊGP©T*•J₯ ’R©T*•J₯ ’R©T*•JAE₯R©T*•JAE₯R©T*•‚ŠJ₯R©T*•‚ŠJ₯R©T*•‚ŠJ₯jTƒ‘39j§ •J₯ ςέΣŽ~μθoŽΎvτΈ£ŠFςήεθξ{‡ž ²ογb?θύ$”c9—ΝΦs±ί%»Τzό_ŽφxsέΙk‘΅}£ί9ϊG‡΅%:ΊίΡί}—x¬ΔύίύΙΥΣΨΖg~ΞΡύΕΡϋŽΖZοk₯£ΟέΗμυ½pίŽρ²΅½«£·}γ^wαh‡£―άΟόΌ£N!~ζ|χo(?Χωία;tά^λd‚ŠsiοθAχσύΘQ‡PΏwΗ ψ¬η}ΰu/ςΨ>Φ}νŽγϋ γ±JAEε5ˆ¬uτ_Ž&8ŠrΤΡhG{Ρρ[œPΪΐηbrΌ !Ηr.:z6ΐc˜„?s4ΗڞΰθηŽ>” β\jύ΅»ήΧ.ρψχύΐQ4@ΑT:ΉΕ»“)^³Ή£UŽ>γ3—ρΘΉτqτWGβ>&οξρΟt'½ζΦ1opτ’ηʝτ1Άvίξ·rŸβ跎ںοϋGο„ψ™Z£οΠq{­“ *½-tA Ώλνψώ…ϊ½³ŽτYΟϋ˜ξΒξΦφ{νwo•γ=ލT *§.€ΔΉgˆ“λμ~μžQcP;PΟ1y`Ηω%&°STάχωoGα˝΄?…;ΰρJχοemΏΦΡrwB r§£‹Εύ!8ΣtoGΉ.H{ρψm^Š{fzΞxCύΜλoqνޟκθMρx”ϋΏΝΫΚ½ζhΎ*Υ[r†Ψ†οC­{{££ŠΗ0Y~Κgά‰tŸϋzΏwξφύN―"¨,sτ‰λx]eύM;ϊΘğ9κ~+‰ξϋIͺο{ηρά Οz^7Β…Ώ΄γ»ΦΕΡGtc'ύ=β{ς£uΏίηΊΏ=ώ{Χ±\₯ ΄A₯Φ Q΄²&‘Ωξmœqφ Tnu'}rύ9ˆf„ *ΏwΟτŸΔΐβσ.04δXpέ5:ΐγ°ΠΎf ‚ΛTq?™'Gέf²Ž΅ή+Ή·Ήα¨-‘~fwϋ#;Oΰ=ΊΫc]‹Ώλz τ.OVξΆw\Ηež*8σ~άγuΦΉ·σάηΆwΟφχ8zΐ},θgί1LœΏqt3‡ΰάΗ:zȝ˜cπ<86ώ&ίι΅B•G\η+Χύn1¬MvΫΛ…ίvψ»8ΞA~/W‡ψ^Ζ!΄ΚχΞγΉAŸ!Ό6\·Εύ₯ŽήχrTάίγ{Žrxqf™ξοhͺλψeθxRPiΊ 23Π”Ψ–ώΆPg1°·=AŸ‘Ώ{Ζιh“σŽαyΏΔδڐc9—›lEΊξΣ ±­Ή )ε"ό"AεSyfκNάδώ-Ψ#χ,όωξ›‘‘~fλ5‡cB²œ£ΝξΩ. 
菘T­ΙλχΆ *η;ΊΛz;2‘„ΛάΟω/74Pΰ>τ3»ΐάΣ=ΓNsC?ο“W‘εϊ|ΰs7ψ΅Β• q‡pDξΖκτ;Ιv‘hz(ί;ηύ†πϊRcΰxΕϊ•υχ=;―J₯RP9ύ•"7†Iκ-G£B•–ΗαύΚ„ΥφAΘθΝ€ŽH°cΉ ΰ9³άφ +!υ•'b;*SΔύ$ΛQωΖzu^‰Ίξc8ΣόoG© όΜpTƈ ϋ—λΡΜύΎόή=£Νtα"1¨b³Žύ°pTv:z՝@[ΈΟά½p?sΊϋχ‚”κή–Ž&ΘΏy$Οό.―Ր9!»‘‹Q'ΰwžβΎΦyΦφ€ί»ŽJΐgˆοί₯in^Β.ρ\₯ τ«~~οN<‘"°G8)ξν‘ξδΤ滀Κ1|οΉnΈa„6ŽΞqs’κIμΓ„XՐcΉΉ+±Χ%Γ’2']θUχο'œ­ίΉ₯ƘXž΅ͺ~ξr]­(χ=Κͺ”a+Πάu.w«iΪ„π™‹έu„ϋŸεNΪέE)υΗξπξk}γ>―΅υ™V;z·­*‘ΥξΎ+¬ͺLV/»?ΰbΆ²‰α3#g¦ƒϋΌ$·bη9ρΉ.sC,©ξύ,TΈωN|—Χ‚τ«‚ ς-~-ͺͺζ¨4πχοΓ› ΔΥ€ί»U?ώŸƒΎg†ήzέΦsg²υΨ.€S= RβŽ?άοϋ|χχ¦ ’RP9MrU~μNΈuΤΟ}μv·|ωoξYέΈC?'T:ΉI†wΓΟΰŒάΚ?ψ›G©δ₯’A%&Ό*7uσZ^ ±,zQ`ό‹{viχQyΐ}o_Z}>&»!ͺΏΉ`…3Φ²?sG.ώκ‚Μ[²ŠΒΈgΟί-&7ΔοΒd›Cό4_ ρ»'»_‹bζΞͺπܐαsCΡ½ΐΏ%ϋχξζW=θσ¨T **•κt€|8lυο𽃹V*•JAE₯R©T*•‚ŠJ„ŠΌ’ϊ~ͺ•J₯R©TT*•J₯R)¨¨T*•J₯R@P)//§Ž;ͺT*•JuZ ՞§ϊ„^=(’z”΅[h^ΩhAœψΗ*•J₯RΦj  θψχΧEaλTl **•J₯R5Pιξ€Κ?Ώ. [ **•J₯R)¨œPiEίώΆ l)¨¨T*•J₯ rάΥΝ•o~›ΆTT*•J₯RP9! ς·ίζ†-•J₯R©TŽ»Ί–΅€ω*'l)¨¨T*•J₯ rB@εO_e‡­ϊ>›»x)Vί]pw›»ύBG_Ή‹dB#TT*•J₯RPρTTώψUVΨ T°:|΄{»₯»’|_TΦ«£’R©T*•‚JH ς__e†­p>›s‰tτŽ£> **•J₯R)¨„¬2TΎώMFΨr>Ϋ―πω„–xJs7ΌƒEuw‹Πžϋ£CŽTT*•J₯RP *_9ΰΒtTβ=η¨ΤQš 0Νν¬(¨¨T*•J₯ β©Ξe-θΛί€‡­p?›sΩj‡|œKΎ£TT*•J₯RPρ•Ξ-θσ_§‡­’iSΰ€Έ·#½δh”£ ±ΟGw)¨¨T*•J₯ β©RT>uΐ#\…*eŽήusQ>tt»ύ6G?q·?$ΑEAE₯R©T* TZΗΏΞ[ΪπM₯R©T*•γN¨όόˌ°₯ ’R©T*•‚Κ •Ÿ~™ΆTT*•J₯RPQPQPQ©T*UcΠκΥ«19΅Χ^« r Uβ€Κ_f…-•J₯R©„:d@₯²²RAεƒΚ{_d‡-•J₯R©„ξΉη*₯₯₯ *ΗPPyϋ‹œ°₯ ’R©T*•«§žzŠ:uκD³gΟ¦ΨΨXΊςΚ+ιώϋοWP9*ξ܊ήό"/l)¨¨T*•JεꁠθθhϊΗ?ώA6l RJJ UTTΠΓ?¬ ςAευ_ε‡-•J₯R©\έuΧ]”””d@…/ί~ϋ-mέΊ•ŠŠŠθυΧ_WPi :8 ςΚ― Β–‚ŠJ₯R©T@{ωε—“}ωΟώC}ϋφ₯}ϋφ)¨4TZΣ‹Ÿ†-•J₯R©\%$$ΠΧ_M^—›oΎ™   @΅w@εωϋ–‚ŠJ₯R©TZ΄hAϋΏλ *ϋί Θ<ςΘ# * •g>oΆTT*•J₯rτδ“OšJ„y]–-[FK—.UPi€ŠJΫΠ“Ÿ‡-•J₯R©͟?ŸfΜIΑ.οΌσeffoΌ‘ PyⳎaKAE₯R©T§½n½υVJKK£―ΎϊŠκ»”””Ѝ7ή¨ ¦Ϊ9 ςθgΒ–‚ŠJ₯R©N{­\Ή¬ρΚeΥͺU΄bΕ •°A%‚ϊ΄sΨRPQ©T*Υi―υλΧΣ™gž¨TWWΣ…^¨ ¦ PyΰΣ.aKAE₯R©T§½,X’£ςόσΟSNN½όςΛ * •{Ω5l)¨¨T*•κ΄ΦK/½dΪζώωηυ‚JMM mΩ²E«~ Ά₯‘τΓ_v[ **•J₯:­…Υ’ λ…”?ώρhN–›²kΧ.Ϊ±c½ωζ› * **•J₯:]tΥUWQώύ묬ܫW―“φ>‡ †I›Š‹‹ιάsΟ₯Ε‹Σm·έΦ¨@εOz†-•J₯RΦBTί!’άάά“ζ¨Όψβ‹Τ£G+εεε&899™žxβ‰F*¨άρIο°₯ ’R©TͺΣZ#FŒ Γ‡Χ *θX;yςdΣξd½W$σTΎύο›χ4oήaKAE₯R©T§΅°*2Ϊη‡rACΈ˜˜zύυΧOψϋ|υΥW“2gΞ³ή3BVθιX@επΗεaKAE₯R©T§΅0Ω?φΨcκ%;;›ξΏώϊFUUU4aΒϊβ‹/¨[·nTZZj Σ@%―Sέτ‹ώaKAE₯R©T§΅FŽiZθ‡z,lΨ°‘Ξ1.Χ_ύq{‹-’‘C‡Ÿώτ'ΣΒ‹#ΎυΦ[ͺκ'―S4έπ‹Š°₯ ’R©Tͺ°„ωP&Ϋ»wo?~ΌιϊΠCΥ™8“f̘AϋχοTπ9‘ΔΚ7HrΝΟΟ7Ή#Ηc  ^xβγγι³Ο>3 '"§Ζώ[7PΙu@εڟ [ **•J₯ Iχή{―™01!·oߞξΎϋnΊμ²ΛŒΓ›EΙi T3b(έwί}'ό½:tˆΞ9η“ϋτΣOΣ]wέςs§OŸN p.ψ[$&&š’aό-p=|ψpΊψ⋏ωg»θ’‹ΜρβA€Kcμ£PΉκηƒΒ–‚ŠJ₯R©B<`œ„Φ­[Sll¬ΉF;ωό² *’G%)Τwf[Љ’Ν[6‡ν°ΐ™xπΑΓJ.έ·o 4ˆ’3b¨rJ6Ε&DΡΘ‘# L 0ΐτΛj™@Η5jέrΛ-ξ ΰF;wξ4ŸH ώPΑγυ\ͺ5kΦPJJŠyΏύνo Θ#¨δtŠ‘+>ΆκϋlΞ₯£7½ο觎ΆΉΫ=εθχ:AAE₯R©šΈΰT\pΑ&ά³yσf“+³ύδδ$Jʊ£eό‘ΉχΤPzϋͺΤ?δ‰=DΑ B a§ό’Dšya!]υ~ΉΙg˜ΆΉΠγG?ϊ‘„JΠ{$22@Υ΄iΣL>Š<€ζ‘G‘οrωΝo~cΰν•W^ λο9zτhσ~―½φΪ:Ϋ‘Ηkαo(Λ£Ο:λ¬F *?ΆB•3E»·[:zΓQ_G{λn?ΧΡn•J₯j’ϊΑ~@}τ₯Έ”*َΚFQ—‘…ΤΉΆzLiOCΧ”ΠYO οO€UοL£ε―O¦^³K(15ΌςΚz!eγƍ”W’dͺXΪΆmRωoΕ Ύ΄μςβ£’/χΏΫ\ΛΛΏύoϊπΓΝΔίΉsη:Η)++£Χ^{Ύλe€IΖέAcΈPp|"""())‰nΎωfΏs…Ϋ/ωKϊέο~G'N€)S¦PχξέΝ{·A±4|Λv@εΐΟ†…­p>›s‰tτŽ£>Ž~α(Γݞϋ **•ͺQ gΑ'£/ΖρΤΑƒ©zd -_ΎάτΫhH9,ŽQ;Ί–ςΪεRtBU¬ιA+ޘDλή›b(9η½IF[2Ζθμw§ΦΡδλ+)33Σ4I“nΰΗωηŸOω…Ω”S”HηήΧΓLψ˜―ΈβŠ Ι₯3gO§8η=mϊa— #φ.\yklχΤΤTjΥͺ΅lΩL={φ€Ω³g›‰ ΤΔ©¦ΥkVS\F<υ\ߏ:ŒνHρΙρtωε—‡~ΉγŽ;hꌩ—KY₯T±Ύ'MΎ₯šVΎ9Ω( kσϋγiΛγŒμύΆΏ:ˆΊΥdSTtuθΠΑ¬Uٚbβ#¨ΛΐtZs¨]ωQ₯©π@ςδ’ύ%Պ’Rβ(―0‹ΖŒS'\3a8κ:4Ά>Ψ­ήVϋ‚pB4X„°  ΐ&ϊoΎω†ŽΕα™νΫ·›οœ‘P*¨!³fΝ2ΧX™‘ ΒΘ ΚΚΚ2ΰ „±h*―πόώσŸT²PΩύΣΪ°ε|Ά_ασ - ,ρŽžsTͺ ’R©uR(„ΎόςKψΏϋ?Sώ‰|€mΫΆ™‰ **ΚXωHΨl,Ž ή?&΅ώ[˜»&Π¬ΧΠ«†SlZœ™δlw&ο鳦SzN%8€ΣwQ)Ν}`ΔQξdC»* ),lgΧ…-ό‹_ιO›ολNηάΣ“vΏQα―κPπm.I=ψώΪρ\_ΪxošΊ©­qΰ6τιΧƒrbhίkε!υΰ@ vϋςυΧ_Σ{ο½g’UvAhκX_ΰά \Sί +!/\Έϊτ1­βιΫoΏ5ΰο ξ£Ι[zz:uνΪΥ¬νƒίίώχ£^―±€ΚΕ?ΆΒύlΞe«£υϊQ©TRθσςς̊Ήυ]ώϊΧΏM7έd\δ<ώψγ§όηP™8–:χμL͚7£qχM1°qJ™™ψ 8Aς&rbb(½(•z/.£iwTΣΚ·§š“@p"έ@œ·9όPΑ5Άρ>  " /μ’0€π>ςΊψ4mk{šΉ£˜ώI?φ@έLΉύΊ¨œˆΛηŸn’aͺ©ο†E‘ψαςάsΟ™ά>„ξΰμΥwi  ’YG;>ΆBH¦M“βގpτ’£QŽφZΙ΄{TT*Υ)-„6P၊ІΨω8³EkcωΌ… 
iΔ‘Ρ4γυE”`*F'q‰q”Ρ!ϊθFΣKK<‹VΌ=£^χD†yd¨ΗΎ-οcm?mΔV>JNΉ’ƒ!…KQ₯Λ"αϋ2δ (R'ϊrϋν·›rε`Ή*Tπ '‰ΈpRp»χΡG…υzTψ»ŽB•2Gο:ϊΐΡ‡Ž.p·'9zΖ-OΖu’‚ŠJ₯:₯…’Z8#˜rA#3œ%£·Ε©ώYq {χdšσΖuγpκΆ  υ]Σ“¦ά;ΞΐΙ™oΟ4€ΑE‘NJ°\ˆέ“ ₯»")€ μ|$IβΆW9*ΓƒŠ ΩNJ}€ ¬ΐe***2α–?ωΟAΏkΉΖΐ«}AŸ8u•••Ζ΅CΎζΑwhχξέ&7•=ΈF5w½œΞ ’ ίT*U“&œΉΎσΞ;ίiBψψ㏍%NηΣ-$Ξ¦g§Qη©₯4ϋυω΄π­Ήu ± *²ͺΗ†€ˆ=‘`;ηΐͺ€H…χc—…οcq…@Ί»₯:oχZi7€άό³ήτώϋοGμK/5MΪ6mΪDγƍ3NΆ‘ρUί~ϋmΟϋž={ \ |zμΨ±ζ;—ξ r™”‹uƒ"ψΎ!χeΧp±c‚œ'τrA |T;!iž4}s[ΪφPWΟΗΞΊͺcΌ―<†©¦xi  ’V’pTrv(RPQ©TMRhΈ…€W䧜ˆ‹mχ?υΤS¦I„ˆ°¦ rΠ νe‡έ”Œdšvs•`˜πm™o"γώ²Κ¬ΫQTBŠK‹τC@IIGJΟO0ϋr‡dΠά<~·•ΦΙ/‘ϋΙ„Y†•»>ι釻Όψώ_–Ρα7Jhλݝhωeνιό»KΝcpPΞΌ’˜ΖG•Sr¨b\6υ‘NW½Υ+h­€ΰυ!•“*vOžP€ ’R©š\βμ₯K)??ίδ œΜΛ§Ÿ~j5"BtBΕ{Dε3ΟoΝΘjκ9½C3PΩhY‘#«w!\. !Α•!dά¦bΎη† IΆlpΕ&·’! rhί[¦RGB ξσq!Ξ' Ψa€ÌWŸ»4ΉΎJLl)¨œx₯vLπ̏ͺO **•ͺIiήΌyf5\΄I?/!Tτΰƒ† `θΑ²’AQ›O‹y€HPαό»¬X†lψ6£kuΆ•]oUΦιΛΠ"³J8y–.1ζΗ ††”ώ²»x»νΆΘζoΕ>~0HiаX@E†%C•‚ŠJ₯j2B rTγόχχ);‘ b¨E‹A¬E΄uλΦ:]MQZ xΘΙΟ€μόŒ#•,ΙΡtξΫcλδžΘ0 HPa70Έ`°Έόgƒό•6²s,KBˆέ…AEVο0˜H°°ΑA‚ΓH}νρ½Β<φqCˆTNŽR:&Φ©> U **•ͺIU7HfύώχΏΚO*θΩaμߟzυνJYΉ)¦όuά„QΤ`oŠŒnMqΙ­)1)ήt3=ηœυ“IλκGgέΡ›V|Ώ/m}suί–έ3€Nλz^kΗξuΒ%Ε½[CƒQbfνy―n;{ΉΦŽ (6€0 Θfm²->‡Œ$XHΕ Rlj•xLlyν§ rbAE–Μ‡*•JΥ$Δ+"{΅6?Υ.ΉΉΉΎ•q§€ψ‘Θ˜4e]6u ͺg§Ή‹₯P§²φTPšDη=^8vΧW»Œ˜!Εθ'΅”W–δkR–ARι Ω«Δ+Τ#ΫΪKX±χ“’kτpΈG: Α’`₯«β•"!Δ N8ο%Ό(¨œ8%; b7! E **•ͺI=LΊtιr'Œ―ΎϊŠf͚eͺ|Ύψβ‹£GΫtBtlkκY•μ•šΪ‘Τ©w2]χVw:τa/ͺYP@ύΖeΣβ+Kιΰ•uΦΒ‘eΒ£Άv§ηXη½?Φ€ ‡sμž' )ΘC©œo^³zi[ΊτgU~ΐ°W.Ά ”β ΄ŸΛπαεp°3bL°0„‘>νb$Α€>HiJ°8@%‰Ώ5'l)¨¨Tͺ&!4^J‹―ξB‰Ιq&ι7*:‚Ζo,φL¨υrUψ>_³#sdVΏ·y?λn+ ΨMΦ†@°βΥΔ €`ΘUaXα<Hζ―’‚ΚρS‚*Σ^[ΆTT*U“ΦsΟ=g eοή½΄eΛZ±b5Κ΄ΪGΫϊͺ„π8\™wήyΗ4•pdddΠ AƒL8'”²ιψ„H>&žf/ˆ£½γ(99™vνΪEm"Zγ?og&^Lΐ<Αςd ΩΒͺνιΤ»oρ{μ1Ώ‹1aςxJΛL¦Ϊ³;ω{¦°dΩ1;Qο±y΄ιρώ΄η§Γ<jλΛYa±›Β βψx/m£ύΙΏΑ`%XՏ„Ϋω`7’ mWϋ½ς«ފόΫ²ψo rό@eΚ«KΓ–‚ŠJ₯:-uθΠ!S%”™™i΄‘a΄ύγ ?όΠ@ Ƙ6mژJqq1 0€-ZdJ‰C…€δδ:tWύτΛLϊΰΛ,£Γχ$;π†³’ ¨πY>&UL¦<‘²;πΔ/;О۲hΜ,ηy‰Q&!–_cΕΚ”_–LΆv’QŠiΡ-εώ6ωr‘AYΊΜΫΨ ρκ"¨ HŠ“y9—;δ–ΤδXΉκΗ½ŽΚg±C@^-ξ%¨0 πί‚αΔ–„;G……Ώ«ν@vՐ‚Κq•)4ι•eaKAE₯RΦ%Νχή{― Ÿ IœŽV­Zx™={6έ|σΝ¦|Έ‘ΗίΉs'UV%Ha½χEΆΡ=Οg½oΤ™L1‘Ύώ«|3ρς$zο{ν©KŸ*κKΛ–-5y1ς5FA#Ο)φˆμ±β+\ΊΜΫμζlXdU‘WoΉˆ!―5-όΑ`*}kR<“pνu$¬HHa'…+yΨE‘!ϋΎ 'r?†@™ΧΒ@(…m *Η^ρ¨LxεΜ°₯ ’R©TΗI›7o¦ SŽ€ ;*o~‘gΔ&&Oήφφ9Vxœ²,Υ΄Χ”¬ η§σ μ: ήδΪ>Ό}νC΄μΦ>žα ٞίK/HαΗΨMaP9λρjњZ΄ln–+^@φΊ=2 ζUΩc;+vΈGŠAήW†ŠdΘHAεψ€ΚΈ——‡-•“\JΙ)‘άwν΅Χƒ>¨ΌJΥD„2ΰ‚ΆΡτα~P›‘“+&O†ήΠ‚λ~ ¦‹Χρ8`œ‹!Λ iΞΞ4ψ¬΄ψΊ:+%CηΏι[­Ή]ŸTΏΓΒ "[οΛp „ŸϋφͺΘ *|,€ΚΖχ'mzwΕ%F˜ΧΌδ©ξιžxU7Ιͺ™S"ΕvN€›"]*H‹  )¨[Ε9 2ζ₯³Β–‚ΚI^›€MD4%&gQfn)₯gu Θ¨X*//ڏA₯R5žΠRNn έyoRaΧ䩟;“ε§mΝ (‘!’Ÿ™aT98Ρ‰Χρͺ1ŒΪηӐšA4{Ξ,ŠIˆ’ew¬³jrΕά" χ5ΰ±ύ½š:‘"@† ±dγ8ΉB² ωΘPά†•ŠeΜk^p[ΡQŽJ 2εϊΪάΫξŠt@ΌΒ@ @EŠ—TŽ=¨ŒzqEΨRP9‰š6}ε΅―’cφPΕΈ½F}&ξ’ԜΪ΄i“τ*U~Λ=zΗΏL7ΰyώ­4š½Ψ·aՈ?ΐH@ωψΧ>U M ={φΤ›΄;eΪDΪΎγ"ZΊt)•ΙυC ΪηγuRrciθʎ”Sœbξo|fRxΡB)~άِ‚}9τPYpŸοu_\ΰι¨Θn΅vkύ@’Υ9^₯Θ^ω(TΑŠWb.ίVP9vŠνJ#^XΆTN’PΦM}žk@₯ψ½Fε“χQNi-MŸ1Sy•ͺ‰tΛ­ͺκG‰Im¨[χxκ\GΡΡ­iΚΤρΤ(ŸΞ\ο‡ (ξ—vŽ3I½ŽΜ3Ϙž(#FŒ ˜ψ(ŠŽ‹€ΩΧ—ϋWN.θ•κ/_†bb£Νυζ« \ JGŠ :ψq (²zHκœχ&ω5εΊζuF/Ο5ŽŠL”•λΩ]gƒΉ)²<ΩN~eΈ₯Θ‘Šχ·ΫTŽ-¨ΤΎ°*l)¨œ$ ZCω…•TY³‹*Ζξ‘~χ•OΩG]θ’K.ΡA^₯jBzψα‡ιΖo4zρΕι†n Ά…Ρτ“OΣλΐ λΣ_§Σ[ο§RLLkzώωηƒΊ) X.ΈΰJ΍₯γΫΐ˜~uΉ…Ξ#²ΝuΉE4yoZ|G…dp}ωΪΎ 1ˆ°‹"DdύξTZχή#€Κ΄Ϋ‡šΧ·<ΛaΗΐ@‘°b;*^-ξ₯£"“_½*|Β…•@‚ £ rŒ@₯}*U?Ώ:l)¨œ!i6&*‘*o£A΀ ₯ο΄}“ I΅*UΧwάA‰‰mθ±§’ ”H}ξjΝϊX3fL½ΗΊκͺ«όŽΙ™gžι νΌ1ŠV=UC³nκοwLX2…E‚ Γ ήGŠ ω@ («ή™FgΎ=Σ@ΛΜ»|‘ŸωηeΦYŸΒ}―Υ‘νZ/H‘ΩdχΩ@ύU.2T„€f•c£T†>wvΨͺο³9—GΟ9ϊΘΡO­v·_θθ+GοΉ‘ ‚°(YFV6•tŸC‡ο6y)Mι6ξ|ŠŠŽ­³h™J₯jšΪ±cedDΣ/&ϋα„Aερ'“(!!ΒtΓ 5]wXzMiG‹~8ȟ7"C<ή‘πβ)ςΉό|ُ…Η ¬x{†Ή†¦ά3ΪbΪ—κ €…„―Υ’εz>^λρJ¦•J ‡%°Ψy, (²d\AεΨ€JΥ³kΒV ’ᨻ{;ΖΡǎJ\PY―ŽJ˜ZΊt₯t6Ή(”>3ލhΰ<ͺ¨¨ΠA\₯:M΄eΛy”œAνŒ₯§žM¦G”D[.ˆ1nΛΝΫΓ:c4Υ= š•’q{ΡέUGΉ%Xx`%œxέgwΕn ǎ eιgM»¬yύ]7¦ϋΑB.h·Μη큱έ;‘ΦyυW ΤS%€PX *ί]ΡνΣhΠ3kΓVΈŸΝΉ<θh˜‚J„3#Δ’;O:zΞΫoΤk£9>υž½Ÿ²Κ†ΡΌωσuW©N#!_₯ΆΆ’ςςR¨°0ΖŒ©1-ύΓ=šΒ!ykF^ΨΝΣ‘!<†œ† ‰LŠΕ}ι΄Θ2f™£‚ύΨM¨lΏ&ΝΧFήμ:Α-λyΑE―΅}$¨x΅Έχ―Υ’mg%H$ 
p'aHA廃JεΣλΒV8ŸΝΉδ;ϊQ¬ *Ώrτ£CŽTκQεΰ!”1p$u[z€Ί-ρ©ΗΒύF -i₯fρ4ΌU*UC{·L™>Ω@Βΐ­ύMΎˆLt•αΩχΔήβmΨΧYΚΜΰHYψΦ\£Š*_ωυνOηΤι I—ΐb―υ#aΕnooΛ«Y›¨„ξ±aEBŠ‚Κ±•O―[.lόXhIH‰vτΆ£ ξύ4GΝ5s΄°’ RΟΰK–\@]V0¨t_δSΟωϋ©xδJŠˆŒ>j-•J₯²…Κ@΄θΗκΙr;*’ςiΦ³Σ 0Ψ—„] Ά!·„…„Xω;δ#ΓAz°ίβ·ζMΌ{œ”ΦmZΠ³Ÿ΄=jucΉ ‹μJ+C@v^J0P ”³($Γ=ŽΌ:“>9–Ξ|qΌŽ3ŸCs8΄NXPPαΠ o—α vR8Ρ–>@₯ΣΈbŠŠŠ’걉u@"ЊΕ bwEΊ*^Ή)‘Ί*Α@Ε+iΦ– ) *§&¨8—3έκ蠝d+n―qt—‚J]xα…”VΓ&ΛPYΆ— jPzχ!”Φ‘›Y΅υ¦›nΈ‘[ξΘ+ΈνΆΫόU[X&aό€I΄xΙs|Χ]wiE—κ˜4‘ΛΝΝ₯έ»wSνm)*.Šzθ!σΨ /Ό@₯έJ©ίΊή”R”μv₯M4ΧΝZ4σ‡tΈ”XŠY½ƒϋE‘Ι΅V 88s^˜a^«E‹ζtσ#™uΒ>€ Ω¬-¬ΘΎ*vƒ7/Ι<―E L‚Ή*v₯—£‚ξΑ * •~On[!€J…[νφ,Evt›£ŸΈΫ’ΰ’ β‘±γ'PFεXκΌζεYΰ , Τ‘€Τ΄»Ζφ裏κά¬ψf-ZPLJ*υοߟf͚E‘ρ ”Z5†Rϊ ₯”Nέ(:)™ΚΊχ Χ^{Mfͺ·8θέ»·q,²¨hT užίƒςσσiόψρΤ&ͺΏDΉxT[g¬‰€±γΖΩgŸM‡ψAE:(LdB¬ 1 °Σβ*xής7§™ΧNNmY§TΨ«zΗ«²Η TBύxΑŠtOμn΄2gΕ†–‚JCA%ϊώhcΨ†o'H“'O¦ŒAγ¨dΓJΜmK»vνA·‰iυκΥ”½œ:lήKιU£(΅G?*Z΅:n:`TΌa/,\O‰νŠiΫΆmϊ7S5Θ΅«­­%Œ}3gΞ4ξΙΠΪa”V’IΉC τ\Σ‡¦=?—†\^KγξŸBσI%%%I΅—”Πΰb†Ξ-Α}\sB¬„ޟΓBμͺ0€°K΅+nI;―KχΓ€μ4 ψ°!Ε†˜@ Ê]–lWώ°€s"+yXμ΅VΌͺ~$¨œŠ°X@₯Οη†-•$œΝ€v« [P|Ni£­oӘ8PZ:rτŠˆ‰₯όk©γy˜l>ΰ”’s}J.ιj&’6iY¦ί…ώύTακΊλ£‚‚:όσiξΌΉ4lD5EΗFS›¨j;Κ·Bς”§fΡ¬Χϊ5η4ύΩΩΤcq-}yΚQω( !σޜηˆα…‘†!DζΨZτΖlͺΨΠΣοJȎ²ξD‹όΉ²„ΩδAΕ«αΓI p‘ŽŠW_^Α:«b‡~μ•­TΒW€*½ίΆTN`2mT\eΝYJ)•ΓhΞΌyΗμΨoΌρ†‰O:tΘ¬²ΊaΓZΆl™™ η/˜Ogu–ΨπΨε—_N·ί~;½όςΛζΉΈΖ‚g¨X°`U«’ž]iΐΠA4qΚ$ZΎ|9]qΕζΜM'ŠΊΟE‹QRZ:Ε₯gQZΥ(j·ςB l>Tς符€Τ4³|&uTT‘ΏΟ5k֘ί0‘ίσ‚!ζ7Ϋ₯{ŠŒ€’‘Εζ;U2³3MxxΪQΒ|Ψ ³ - *ΆLΩΥAς6”έΣ·"&yΉΎtΓ/*ŒT /H‘°hqΒ@j½@Ε Bμ¬Ψ?Ά«"έΉxδ©+TΪ₯SΟΗ6…-•άΤ)))‰RFŒ§ΜΌ<᜹?ύτΣ&Qσβ‹/¦E‹˜Θ.Μ£–­[QLj<₯vΜ₯¬ώΕ”5²'eMλGY3ϋSξ\GSϋPvu籎”Ρ³ˆΫfP‹V-)!5…Z8ΟM,Κ¦Μ‘])wv*Ϊ0’J.™DE›FQώ²*J›PNɝ ©UDkjΫ±ˆ¦ΟšI—^z©Y­)O·ήz«3@]YY™84ΣBΣΎςςrŠˆ₯”ςJ*XΈ–:nάο@ ŠίMΩx€Rϋ ₯ šcγˆdHˆUυύζ«ͺ‡RF―*šήƒΪ+£ΎϋGΣ„WΞ€ŠƒΎφτ%SΚhς3s(³g6υXΩ§€Ψ°"ΔN eΐΒ‘„u$Π°»"K›qΚ-ΓΜ{~^Σ° ]@ VQζvωm±oΫςZEΉ>`‘αιœH§„oːCŠνͺΨ ‚…#TΒ•ξm[ *'XhζG­iΒδɞ ΨvυΥWΣ¬9³©|Pε80&² ΅‰Ž Τ’ ΚXLνfχ¦›FP·kfSΏ‡W™&Ή‹bzœ-.―:el’‚+WQ—ϋ7R·GΟ3φšΧ~8F—GΆPω£k¨tί4Κ™3€R»΅sΐ₯.νι¬+B^‹€1©kΧζμ5+7—*SlV6­[·ŽΣΣ)΅f ΅ί²Χβ‘až£@₯{šλα’=ρΔ¦$ε₯ΌVΛόωσiΚ”)4wξ\γΌ°ϋ₯:½tο½χRLr jxa%{yΉ_γŸ]D}w £ϋί4γυED‚A ϋ2¬ΰΎlyΟΒ. γΎ+†ϋ«p§[~lΩ«Sύ ΌxMΰϊω # ―ψh°‘μDkƒ‡ΌΜ]±a%X—Z™<JχY/!τ#EJA%tE8 ‚ω&\)¨œmt&€Φ11™”L«V―3i!—%» ‡ ©pA9•^0’z_7ƒ*^Vg υβ ‚MX ‘φ…Uώ•&' *:°/Γ ΰ£ύ½ΫŒ@¬x?†}ω6ށ¬k> PΣγ‘ Τώβι”1’'E$ΔP^ϋBZ~ΦYτδ“O6‰IbτΈqUά‰RjFQξͺ ώA8cδdjΡκp‘£ Pρω ²ρ° yj·μ<ŠˆŠςMΊ†ΦΤRλˆ(J-,‘΄Ž=(½¬₯χ¨’τΎΓ)»xηLz„σXg³H%Ό›¦^yεΊσΞ;c'·>|˜’ ΣΝo}ΜKgΥωύOze™Ρ”W—šIŠ‘Δ NXx  Β`c»+ !2‡·Ή²GΆά·ΧJ,ˆ3Ώ/«“l{π£!ώυ€) /pUd²,ƒΒAΈΟ#“hC]€0Py²]ε ¨Θ’dΘ†•pA%ΓΜ=αJAε$©ͺͺŠ 1ξ ςIΪuκ`“v#:Ρ +GΣθ—›Α gR,ά‡ν α>\pΒ˜†–\6ΐΑ HμπcΈΝ ΒŽ ί΄p&6Ϋr]ΪDν.™E‰ƒzSkg2R[kΩΖ@Ωg’ˆτLʚΎΠΐ ₯hηm@ ‹”V6ψn#<Τ’UkΪ·oΝ›Ώ€"’c(£Ο0*]r1•­<ΰ74Λ,ΈK-䔏7Λ/θ€ή4Ελς@σζΝσ烑„=&!Žϊ_;Ι*γ_^VRlP±Γ<α°{@™φΪb#v\8YΦvV^dΫ}― ±½φάRσήGο-―ΣzŸ;ΦςΊ@ ,ώΈάˆσUX8L$·{9,vΈΘ^¨0P£7.•–ŠWς¬€ΨnΚ§ξJא‚Jθ Rφπ–°₯ rŒcΚdB鑁€Κ˜ΜL*ά<žΪ$ΔPΩΕγhΒσ‹Ν$Ο|0¨0œ@<8IˆΑ^ΨΞ·-V5ΨΑ]αŒjάΖ1X ? -μΪ@Hl²γ‡οίj”ΫΕ”½w%NEQ)ΙΤΆΨW†‹Ύmςxι₯— LδŸs>垽‘’‡‘δ.ݝU"ε¬XkΖ8+[8+Ε[„£ΒΪpDY5“()Ώ₯υ¨€σΞ£²ΥGΊ3€t9Λ•eGΦƒΚ<›ϊ¬“z“^Ay‹VΗ†P Ζ“ύϋχ›°oBv2EΔDϊχ©½sŠ:0Nx咘ζk–“Βϋ³ΈΊ‡Ετa§Ev¨΅[θgvЧ1yώυ‚ΨQaF tνΟP‘°‚ΫpPx;ί—έi½@Ε–„ *Φώ –<λΥδC?PT•ΝΆTΎ#˜ Μ‰–θSΠ²eKjъš5ofAiii”ž›Nωνσ©ΈK1;œUcφΈ‚ΆfβορπF?€` ၆“Ϊψμq灊ŸΗ’ΠΒα!χYμΈ@ΈΝΒπ#έ™°‘[ŽAΚ»υΚ»v/ε^½›. 
„Ξ):>ΞTΘ<υΤSjς@ŽHDZ:EΖΔΠκjΣ` “Dφ™«©h» *ψBBνQ "`₯Σ9ŽΦ ΅Lγ?,§€k nγΪ)Tr*&ΡΘΡcuBoβB’5Ύ_1Y1”Z”κŒ™4lD M›>Fm`fκT_.Hώˆώί=CÊtR ;€°M&Χ2œ0¨0ΐH§…αƒσR*φJΚ&ΒΎμΎ0€°£‚p€…%‘E‚Œ„vW…ƒμdZ *Έ-ΖkABΫU±ΫζΛЏ¨œ °@₯Ma•>xAΨRPi`gΘsΟ=—ΪΆmKim“©χ‚RšxcMαH¦όμWfӜg¦τG&δ»ΗψΫFπλ†Sξ’!&5ΎΈεl»€ ξ7 Οrdω ίfϋ’₯„|ΖΔ Iw„ΐ‡tf †<ΖϋA )VxΝ€ŠθΔΞ  6]ή-»Œ μ§άwSζω(q@?jMΣ§O§Gy€QLpΖPκ}χέwSDd$M›6ΪnήNE;ψAN `…K’ύŠ*R\PΑuι:ŸpΫΐ B?«\Geω‘k(³Χš5kΆNζ§A[όN:™τΰ$}ϋhκΏΉœΊ/λJ¦w£ΌςBŠˆ€ΤΤTΣ2ྚ£ι¦4°Νy9/ (TΌz© gΙ†χ&Pjϋxͺœ›W'ΐΒ c―Τ XΑγ~6̈mYœΏΒΫXΈJΘ«ω—7Λή*=‘ Ϊ­τΓm›/C?6€| 2¨”<°5l)¨„Ω―kφ έ!gή4€ΦΎ;Ωoub°`‹”CώΘ18π€^pΩ>*ΈtΏbΟ2±M.ΖΗγΕΎψx<ΨΘ³$ Bœh+έ†ιΈΨ ΒΟΑ}ω\q~ \άΖ6 ±ΓΒ\|ί…”w³-Wξ3ΚΪq>%VWRλθ(ΣκU e©2„’GŽςAΚ€ψ“j·ν¦p9Ο)ζϊά# ƒϋpUp͠ފ ™|T2:φ5A:™7-a<ΑΪOL€­ͺ©’’Ϊ"šσꜣ°Νzr euΞ Δ$ίΪ=UkŽrG$`ΰ6C ‹WΏ·δλΚΧηΗ<:ΚΌώςΓ= tΐMaXΘή*Έ,aΑs8 &ό\†–άŽ‹\UΩξl+'΄!·…zΑŠ—£Β°ςΉ 'ΆTκιΐ‡*•C<ΧδΆΝ¦‚nι4ηπŒ–g ^e3ΠΈaœΒlχ χωGΝ+‘ΪkhΘ²Aι¨0 @-αδZΎ/-b VxŒAEζΈ`_;$˞eؐ"!&γŠξέf”wx—O–λφPφΎ )qt΅‰ΕWWW›^%!α1">ž Άογ¨pžŠϊΑνβM{©ύϊ‹λ€ ŒΏΧΚfίs«²ͺn¨ϋ’Τgζ~JΘΘ1Νόδ™w8=xT§žΰΘrΎIvv6Ε;ί­Ύϋšͺ?tŸmί±ˆ:Ž*>ΚΩ€ζΏ<“z.ξFΩ]³©φΚaGA‡|ŽΜcΑ!‘FV±#Γ‘ΫY‘Η”]j9…C@€ Vd~ @…χ„°€##%+†ΨuΓ"A%ΨJΚ ) δ’Hh±Χψ±AEΊ( *αƒ N`Γ•‚J½ψβ‹΄cΗΣπ«΄΄”¦]Υ—Ξ{¬Μ?FŽΏT€b/£n/.αB6Vβ9ŸaˆKΉS€tTδ±$Œ`›,G΄ϋ)π~€ιΆH'Ή-œŒΛω)ά-‘ Š 9!Lď·ϋαveTpηN°\»—ςν¦œ«.’ΔΙ#)"1ή,²†υNεFh=ΛΛ)eμ$“H[xώ%”5weΞ^δ mχΑ‹©rT°ςž Βχ%@ΨνοY2ΏMVϋΨΒσexGV1ΈΘ… ± c…vA!ςΔ ϋρ 'ΰJΈ‘ςΪΞΠΒα"ΞgαΞΆ(τrTl@ ΤZ_VΨkόV9*'V¨΄.ΜτŸΐ†#όΈ'νΔΖΖRMM ΩΣ‡Ξ{o¬ίΆΔ’Ε ΒχνεΟmϋβ^Έ–ϋπW–Κ²?ΩςZfτs?K‚ 4ξŠΠdΨGVό0¬pNΔε\―ΕΟ/Ίϋ"+,ά7°β@ 9,9Χ\LI‹g˜Ž―h¨†₯ΡSζT›\Π„/*%•Rϊ  V”βΌW>+.άr ΅»hœ»2&Φ،,ZνœΓ‡΅ήyηQUu΅)GϝΊ΄n˜hΣ§₯ΤΝWT½‹*k|Κkן’’²Ϊζ¦=Ο‘€„LΣ{C'ύΖ-œ%&&šί.2ΔkέXlΕ†ϋ„F†Š9·ΕvKLZ΅Πην !α1‘OΤμqO]ΔβPϝΫPαœ†―ƒŠμR_Ψ'Ψ?Π§A EA₯~P‘'―‘ͺIJ(eΑbŘ„Ζg,ΨώύϋΣ5Χ\CψΓΜ@ΖW₯EΙNŠ”ό!Κ΍ &,ώ‘ςsXςLBΒƒ /₯Ξω/vJΈδν2s_Λ9 ؈pՐ,oζ€[ *μθp(y*mΏΏ“Ϊή΅ΓζbxΙ»ε£ά›XΉaQήΥ{)cσJPNqqqQUεΥ½χd%;Άmίή4jΓΊ+HFΙrΒ€Α”Τ₯΅lA‘11Τ­OS–νυΎ±ŒB€ΐ9“Υnί$ΩυΉ)ε“χΡΐα» ¨ ½‡ͺ_μ—‘ύΆSFj;³nΣ1ιΒ[֍:uθ€+zŸέqΗTp‚$sEB/7%P.MfχS:)^ᙏbƚW§Ρ7§Χι±"K˜1FΙ<9ŽqθFB‡}Βg;*v“8™·Βn‘h W%Xb­tT"Κρ‚^(€ΘεS VTNqPAYίμΩ³©uλΦΤ«W/U ~όΑΞ:q֎fk8‹ο₯‹iςυλ_ϊ¨/€]‚'!B$ΟΌΆΩ?Π@ϋH؁dζ<Γ ΫΎυΩΑNεD]»u6ΔeΜυI:*²Ό9-•‚;v•{.2BΟŽŠTr•³k;%kzΝ€€€˜ςL„απΏ>&˜ώ•••œBι3η™λ™³η„ά• ΚEΗΕSRΗΞ”7}9u8oΏΙS¨ΐaAθΉ)•#vSΕΨ=Τoβ^/Έ¬΄/E}ϊτ;&Ÿ₯cϋΚώ^!Ε΄‰­“ s: !Ά Ζ4ίρΙχO8 6l@‘ŽG €X†yΝ9* -\’¬ΗŸΈΔγφ;»ϋQΊg0ΘP rP0p›ACvͺ•Ϋδc\ΧόŽΓρJΜ²œ,Uζ―΄φΆ@εΙZꃕ  6Σ?„£F *θQ‚˜}BB-Y²„Ύψβ ³`’ΣΠsγζΥϋdψπαζ¬ύ=ήύ _ *ΆƒμΜ@ΒH {S†Žδvω2[žΟΙ»φ™KfέK(aΕ«²ˆ›5ρ>Ψ†A- °ΛΒ‚‹‚ύy0EΈJώνϋ…ϋ&t‡`³b\Xhkt΅+Wν5Qmχο§œΝ›)iβDJ*+3!δ ‘ ώΗ;§εω矧νΫ·SEΥ`κΤ½;•tλf`Νμ"ο]|Ώ4Αyo Y • IhδKΙ,€vνEiΓΖRΙΚ=Τsή~κ?a―TϊNίgΆ‘―ŠqW\HΡΗ$D6gΞ*hΦ‘ šp?]!ί'Œ5#GΥΞώxkα’…Τi|Η€.Š/b»*všέΎ€CΔκρΚAαΧ‘a Ή†Ο™―ΙΉ³“ϋT >±’c"@EφQ‘Β’n‰ $ά­. oη~,rέ ―–ϊ²ŠtGlαΫ6œΨ%Κ #_ώFA₯A βΞα¨QƒJrr2νΩ³‡>ώψγ:°G}”FŒaΪΤww&ž/paΠδΐτ§?ύ)δ/WΨ†„ςΎΜV·3Ψν€1Ά@% ΨŽtZ°?ί–ΥAœ`Λƒ–\·Γ+Τ#³πmqεΗ—±?‡‚ΌœYυƒmrE jΰΩ=8W…γͺT\Xρ @ε²}TЏj»w?εοΩMi+—QόˆΑ—Ÿkςˆ*++ ¨"·θΎϋξ;&πς£ύˆ&MB­"#)‘K)%Ν™B©+SΪYK)}ω™”Ή|εoΉˆ2ζ,€ΨΔD*lΘ™8ž ’τ½°¨=₯Ռ7₯Ι½ζξ7Ξ ΤcΑ~2π§ΕQZF‘y]―γήsΟ=τμ³ΟΦϋϊW]uE9ΐΣλ{U”Υ&Ÿ6;Px:B €49#ƒ’gO₯˜‚―ΐŠ–K\hŒ3sύ9”Ύf*%ŒDIK):%™š·hA©YYT9t¨ω>`ΉLD‘Vc̜3‡ΪΔDS␔΅‹y?&ωχZΧι9ΈŸ χ8ΪuΐTeΞ\@1iι‚·nέj™kΘduΕWPt\ε/\γλΝrž―O ΊΥ’ @λTŒs`eΤJKkKΧ_ύQοΞ,ηύ·Š €„dzξΉη‚gΥ:šΊo ΄*¦τԌΰ¦)κlδe€Sς‚Y”ΉuEΔΖPn~Ύ™€RFΎ’’g „ΤΜTumυQ "ƒ27Δ+τ#A₯Ύκ»‰ŸΌπIΠτkϋ›Ο~Q²Q‚ˆlΡ e- ,Α\hΉ&ŠΜGαΐD:.Έ/+xm Nͺ• T<Ϋeρ;7ΕvSN€TΌAEΆζUTPBŒd4€ ΞHW―^mΐ€°°Ξ8γ Σ«cέΊuτΩgŸ}§/μ—"]ωc“@"A%dr™ΐΰ‡(ΓG2IM&ΨJρκεβ)²΄Pζ―ΘnΈrPΔ`†AŽέ ~–ιΰ¨p˜Η4~s„π;)Ύ€Ϊ]>PΉaς/w!…o;Χut`ΏιΣΒΝσp WάX.o.ΚΪ°ž’§O₯ΔΚ”ΰ|Z:“MJf& 2€Ό° ‚€F8(11”0 œ2χl’Όλ}y3ζ½]η”ϋ~*€ξ©nΫ>ʘ·˜‹KN9Τ° $pγ;›1s‘Ώ? 
„œΣWe₯/wΰzΟήOzL7“ΞΔ²@CΈή=ϊRλVm(7Ί=υn>”"ZGΝιι^Φƒ:ŸQNm[u€»Θ9]Γ>γƏχ‡8r.έI) gšΫΝZ΅’μΛΆRβδQfœAΘ±‘ ZŸ°:ΞΆΨ–†μ¬ %oΞ¬*½V娙†een Ζ“I—χ§ΙϋzωΓΝΌ?šΞwTwW‚ CΧ˜#Χ’`γ•Γ'96JG…ξV˰#tΔΙ΅ +\ͺΜaΓ C†„―œ»ΡƒH0H9°@₯•*8Y WTΪ·ooΨΧ|6‰I(''Η$ΗήtΣMτΝ7ί“/ƒŠ„Y±#…€v§Ε@έ₯λ‚ηΛ3’ ’5˜γ#œΨerΐ‘ϋpμ™‘ƒ5yφζ•ύ/Ο*ψ’T8ΌWΕ€Λ-»ŽlsbΨ0! ά†‹q΅O³νπ:«BΓΉΑ1AHήπԁžKχSΡ{){=ΰe%VUPB‡BjαΐΛχ˜mΦ’9E₯&QβΨ‘”ΉoCݜ™k}ξ^ΧίMΨ C΅»δ@N΅ζΎ-ωgn€”ή&j€IA«ΡπE.UΘμμρ5’Ϋζ·άηE *pT *μ}‘y?ΕHξσ* =ŠΒρŠvΦm©ΟΒΆ‚σwPbiγ”Κ%AˆL~—ϊŽeΦr+€ΠS /NΨcα~κΎψ€I¬2`Uw½€j³WQMόBͺ‰O5I‹©6uΥΔΜ£-˜πOσfΝ)&*–*ϊ 4!Q Mp!ΟψήTY1θ΄/F΅‰‹J:RBy/Κ½Βωn\»Ϋ€,ςŸV¬XAΓ† 3nξ.'L˜4΄φ]„d^|73Τ¦MΣΏ_S'™Υv>½ͺ‚Ό\Oξ,‹ρaΕσ£iΨΩύ 6jM‘ŒΑ˜d·Hcς=yIΙ FΎ-OΖdŽŠ3e +2Δ Β°‚<ΉZ² +¨ξα.³vˆαΔΞK NX_ύ&ΓHA₯.¨`œWΎκ§[·n”™™iJWϋίΧ/‚ΧYE –χάόΘ«ώίξ ΫHK+SώH%τHΛSXΚ?~18ΙΑƒ!†K eζΎWΛlι–Ψ9+^±n™θ§Pα/C†Kqσ#n ƒΚ‘έώX$w"DIΦxΐ6<Ύ &t@B=¨*B€ 7“3ϋ‚πZ *˜ΰΉ γΪ œƒΗέ>.&όtΨšΒkωa…c9+ώp”λ¬ξφεͺHP1έk‘Γ²}?eΜZ@‘ ‰4jμΨ£reV­ZE UƒL.Š %mχŠΙIYγsPH ‘ΏJΟωϋ*kwΣ°>Ϋ¨6 Υ&/5 RέfUGΝρ]·žAΥ‰‹¨:m)U¦Ν£„‘”_@±Ρq4ώ|zμ±Η(.:žRšeΒ…‹΄§‰«—^z‰z–χ₯Έ²ΚΉl'%τ,£j$‘Œm4ξCHγΎtοd€ύg8„ˆΔ[8ΌόFXξ.ςηΪχeο+V Έ€X*Z@ύV”Ρ’'ՌMž8Θί!WΩ‹ ,PL.Κ…νθζ_τ5!ΉƒŠ\ΚΓώύΫΥ…²‚μJkχ“’α{•e;׏ΗGYΆ,aEζ³pb-;*άΐ"ΧόA9²„ΫEŸώ:<8‘RP±@Ε­ψ GTΠJ ΫύσŸy'–nb€ΊΔο΄0Ό˜γΉy4μόP9ΰsU)H.9+μ°Θν[wRJrJHI5gξ#GŽ€μ‚ΆAiSgωαƟ—²Ζ· !”%C½ζψg₯jΠΕ4΄|»qVp»¦t³ ωP†΅œnΩa©Ν\A΅ygSM—σ©ΡΚ‹)1SZ›ljΫ¬DAΕ£±_ν¨Q–JρƒΛ):3έ,cΐ:ΒΛΈN˜RK ³ΖPLN¦©$D«„€τtŠLI‘Ά:˜0*Ρ"˜I=Β<'Τ€n/‘‰ΫξO»½ϊ¨aώ K·%Ψο™Γ?œ€Κ%ΐθφŠ*™#g7š΄O\dŽœl`iŸΘIχΩ VερΩ'kœD+C@^°"[κΛRe{e^ΩvQ>΅rRΒ/PΙͺΣ"T5‰ͺŸuρ*ρ•y²s­ !MρΪn'Ž‚›ϊŽ'!FΖxν2jΞ±aΛ6Ψ"d<0IH‘Ή,ΆΣΒρ£b—ϋaŁnζƒdXs[ΦΞ;χxΜΈ$nή ΗΗF η©Τ·λ­ *Θ11ΗpΖί·…uh· )ξσoρέφηΎ:R: wΕ€Κ# z†€vϊ`Ε„‰œύ2-£€ŠA”2iε.YCω›ΆϊC>μ¦ /…έ”(@ΧΘQ1 o;UUξ4ξJρ{iΘΐ>'₯Ν,_((a‘T²WQmϋT Π8ϋτ<£ŠΚΏWC₯ίλMε½ϋ) θ«(™3o.uιΥ“ZF΄‘˜Œ4D’κ8ΩWΝ+)aL-₯­;‹r―ΨK‰UMγšαΓ)±f(%N± ρίΉ™œγόžY΄ςν©Gu—φZ§Η^Θ&ΒΈΐ ηvπδΟ+!{φ Šν€HwΩ^V„aΕξGεUŒΐ㘠/² ‡Ιy_N¬eX‘]jZώaw…;ΦrΘ”p”“+TD»ŠP₯ €€ZώΑΙ2ey6ΰςGδ%/`ρjŒ$χΚ{±_Sώΐν6Υ -œηβUI`ŸΙJPμ³88²‘›?όγLψ°σδκ˜ά_E‚ B?eoρ»-νݐP—GΆ!OΑu_|`αsS ]'ςTvsNRυ…„Μϋqή£w•gσγ`'Ε»?f~ΰͺHPα$X * ™Δ[„‚φμχ;,&\δ —:#όƒ’δΞnn Β= 'Q1]kGωΪκTΈs-€Ε8'1σ €VjΣ—SmΞjͺmwŽΈ+p\†6›BCϘDΏ7Š"[Gšκ!…“ϊ{λ ^KQ¬Δ—ϋͺ‚ZΆ4kUωΑΧύε^Ή‡’Ǐ¦˜μ,ΚX·’ZGE™ή6ίυ=,^ΌΨΌnΧρEG­ύε•[&CCς$ƒ‘‚CΤpU)pS0Αc`‚ŸΟω.rqC^m™oWr•€½Όˆ,uΆ^»κQΊΝφͺΚ2gΕβfprM nΗeΛμpΞJ¨J(°ς΅+Tς³ό­*Β‘‚J˜―ζn2Œc¨α*άΘ_°D\†Ή/Γ‰έχΕ« >—]l»*ΈζάΫe±ŸΗΓTγΈTΜ%ΚΘ%p0€ΰΆαBΧeαΫ€€ ‡„^ € Wόπλp> χ<1α“돔dΈSΜ.ηδ-Ώ3ƒή/ξzE&)˜σjπrŽ Tΰ°PΩο™Ο‚Ϋ21ΟΓz?₯λ|>ςρ»)s„}ϊNΫGΖμ‘ΑU—υ›΄ΟτV¨˜0*ϊqΓ>΅7Ρ°^Ϋ¨6k₯q\ ¨ΈjίΌ ΄3aŠSe]₯SYHͺmKY/ άλvΧω^Ι2v£+|Χ έ»ΡάωσMθα΄ΝoΘkcIΤ΄T*ST'>X<ο#ΫΨύž0^ μƒTCΈ€LΚε6όά8Ξ†!~ξPΛ0b/ b‡€δΊh|"(+)ν¦˜v3ƒŠΜ]Α5`Εk1°pˆ“mXΰ¨2Ό@%\@ωϊΒJ£ΏcΊTΒΌHwΒ R‚Ή&^δ Rx―\Y²η₯`]oΩf•ŸΫx€°E&ΜA<¨ΨΛΎΛή άΰvΘ¦=  )Έ6‰³’S- /`ΘpΒPhaPcg…ΘΈ(WIz­摐‚P56,ωλφ­8Ώ£8ARνν.x9―PaH#αΆι­β ΣUΧMΒ5ΥG’ΜΩδ₯œλƒτL1nΚΒ#€bhηω’i±·ϋΜΨGεSφ™N΅Xi™‘ Ά¨ͺM;Σ@‰ ϋ΄;‡†υ½ˆj Φωͺ„ΦbͺT†œ1‰JZτ‘˜ˆ8SE§0RO―“ΤdJY3§NRxε<(ό?Ο>?†LΓχ ΉŒ¨žΆϋ–`Ό`ηαŒΌ„Τθ›„kvTΈ‹Γ&—νπ*KΆΓ@vƒI†+^•™Η;α–Η0™»'AΕ‡‚*άΉ–N΄ε2fξ£"‘€ΎpΟΧAEAΕ•]aKA%ΜK G#h„λ¦H˜‘ δ Θν:έΪ₯RX)β³{ΙwiΩrΫ~―eα9‘-υλ„tμΕ¦άνb\HπC{$ځ(| PΑu<w’πƒŠλ¬pΎ JD₯άVβΧχ―AδNB&Μγ&ϋK•m γ¦Έy) ) *Rά.ΊώIλ Q-΄ί)8)\εSΆκu[ζΊ)T―— ‚ΠΈ'Υ=/4β|l«->Χξqΐ€¦d³—‘ύwψœ€Jμ|?¨ΰB΅Pn\νή½[$ˆ¦ΟšIρ“kެSŐr½ψώ]~€:Μ_!vΩ>Κή°‘²Φ£δ~iΚ›όρ°^έ9k©uDkjΩΊ%•v+‘yσ皎Ζ?ό0>|˜zχλE ) Τ¬y³:y4λ<ή*²Q₯l\‰qϋpηi 7x„°H)`‘eΟ2/οK‘Ι΅RμžM²Œl‡ ‹x$¬ΘjΙ@°Β Βα „Ήδš@ *,„BΙK PN¬4PqCμαHA₯―$Ψ†BŠW"˜―ΔX―†qΑZρƒ‹lbΗ–¬]$­[Ϋn–λ 1ΐ ΕΎq+€Š?οD΄Τ—ŽŠ$ΥB€ \4ZxΤYΠΠuTό=WςΩρˆϋΰΌ²3WψψΛ§έА™|=T.³Κ’χμχ ά”­ΨQΩιΛ?ρ7©sΖq/“ί²Ϋ·/άΘtž]αλ™β‡”yGJ“α’ Μ3`τ% )Υ=Άšk$ΣΒ]4l—yπ@1ϋ:ϋPΙ\a—: rΊqY ’KihΥ°γΦy΅)hΙ²₯[֞ ξΌθH8φ4‡έ5ͺό‘vМLQγƍ3«Ύ7€έ~}‹&.ZΌβ3β¨t\1άΣΟοpx%±r~ΕoΧψν’όΒ *λψ>Γ \ζŸ9Ζ«ι;±μΊςb‰Ό‡Μαγβ˜^ΐ"pΩ²C@άzŸo#όΓέkαΘΆ +ΈFΩ2 ¨„+ *Y~<ΥχٜKŽ£η}δ觎V»Ϋ=εθχ:α΄•`Λ’‡(^ ζΊΚM±[π[‡Hή–χeΒ›]^hƒ W KήΓv 
ΙWμχ+|ΝΆ]*qκ’…ΪηΕyζ·}ηkCœΟn‘E£­³oΌ­ΏΖ­{ΐΦύ‰H’b:Φͺηx7tνΆ―ηΫΌωη)ηKΉνΧ9.μφ܁@ΰ/Ξ―1Ϋ§mqή¦]ϊpa‹βμΦ­αψrmσΚϋσ=m^ρΛόwΧζG Ηλ.ΐvŽο<β<Π’ό„yŽρX'q˜R²άίpœ[·₯­ΫΑ­›ΟBΙ΅P \Ξ`ΩCΉ‘ϋ­jν„΅oΫ¦8_Γφ%Όόχ.dτ3>'XΎηΫΆHΒΩ½¬Φ‘ξRӎ±έΩFΎώ[bͺνsˆο•q^l qL†d9ΞΉκqΜl!δΜσ[Vk'5ζZΫξϊ½«s›Qω²u<~q ΕωάΖ=gλoWzRΑΕΧ[¨εζ]―ηχγ¬έ6ŠΔ}νˆs[>žΕ έTko%Ξ»=w ψΐqκΛf^mά9S/ΕyΫv©K]άΟκ\Όd|³S›β\εs£l¨ZϋjVΤς9³SαόρBœ¦U2žΡ„π:ΣΡ5_ΪΙXλ@ΰCΗs-;Ϋ¦ΣϋωFO”Δs@ qώA4T7΄<ΚΩΜ ς„―¨9_ϋ*ζΑ™'ΎΛ@ πaη–ΏΉ¦u¦©toΒΞτs@ qώa0Tj!Ž/[hηjΣΙucαJΫΕχ>DβœQ"n΅Β6L˜υƒ~ξ@ !΁@ @ q@ @ΰ}η -΄P΅J+MUŒ[zj₯AιέoΩ*Hχ½εΓqK5ΎλόwαΒΕ-ήu^|Xr‘ψΠs‘q^ώΝx.dκψP\Ψΰ”A“Ήpά@Ηi >,ΉP|(.δwΞoR|XraW>œΜ…[œΰΉώiΕ…βΐ’9Ξd.δρ‘ηΒ‘-ΈpX\8¬δ½’‹νβ*ραŠφ`΄ύc\Ψό>Έζwq‘γ&Η…βΓΆΈΠσ‘ηΒ& ξ:?ƝΣs!ϋy>,ίmγΑΆΈ0σY 0>,Έ0·Ψ• ϋŠUd…~ 03{pΰ{hŽHοσ Γͺq£F¦ws„[^!½ΛΓ«qiέΈαƒσΆq£Wl΄cύ°e«qC6Ϊ²~ΜθΙ;ΆΪ²}ψ Fϋ΄MΗfŸG­ΖŽύGͺF¦ε!+ŒNΏΝ±yΫ°‘£«A#FU†―X MσcϊFΙΗQ›FΙSΦ±ΏξcάΨ1Άι\ωcFεύG€uC±4"wΎΓ«9ϊ/WΝΊπ ŒΩœ—ηZrh5ο€aΥΓGV -»|ϊkάΓ°AωώΗ.·tΊΥΈεΣοsΕεΟNΰω€g3zιΕͺ1ϊWΛΟ?o5|žΉ« Μ[­Ψ‘jμ ₯ͺ1―Ζ.“°μΥΨΑKεφ㖐—G/΅h5r‘ωsϋεη›§Z~ήyͺasΝ] cΞjΘlsTC眫1?λΥr ƒgž=O—MΣ%gœ­Z|†Y«Εϊ',3Σμƒfn`ΐL³5Ϋτwν˜.―1OsΉYζΘϋΡ~α΄}ΡΞΓuZb‘|\'`9ίSΊξ΄zοΈޱΒΒσηύΖ μŸŸ•ΐsΘΟHΣAK¦γ.Α³jΜ/eίΙdψs47°Z>δ;LίιΈε—ΛοζJ#—oΌŸι=gίgž·χšwhԘƻ§)ϋΚ6ήoξjΣ<Ί^pΰϋ*ΞyI¦φ獃?W½ύΧ/Uο\ςκΛΏWMΊζG“ροέ«I·οSMΊqΟju»5–oέ»šτЁՀΗ&=όΫΖόƒΏͺ&έΛΙΣ;χmLŸ>,ακνIVΥkgV“&SMzφ¨j3GTΥΔΏW“&]RM|λοΥ+oώ­Ρζν «—ί<΅z፫η^?Ύzζ΅£«§&Q=ρκ_r–xιΠκΊ§¬yκΘ꬏­ΞxΰΈκό‡©.β¨άφρWœΫ°Μ~ΟΎvL>ζ«o^½φφ™ΥoŸ“ΧΣ0/<Ζ ΉύKoώ΅zύν³3^|γδκ– ‡gάωόaΥ“«ή|ηΌ|ŽΗqul¦άΧ~Sjα#ΗTχ½ψ—|ŸΥ[ηηϋβ8lγzς¨κŸέΔe]]όhcΚ2Ϋχ"°MσΪΞ³Z.Χύ'=οž="Ÿχφη«ξ~α/ωΊΈ§_;²šπϊqωΪΈožΥ;“.ΚΧΛwV½zZ/ŸΪσ¬gϋ;ηο¬zα„Ό/ηΰψ<‹Iχώ"£ξ“ί-ή•ηΛοA>ξλg7ζΗtNΞχζΉ]ΪLzβOω}ηoΫeΌ}›ηxλθ/δe¦ΗmSϋξη}ΟΫ±±ϊd°_jζa[Uoτ™κΝ?Ύz눭σ:Ά½=~Ϋζς;g|΅zη¬―εi"ΰχ1:ΗR‰€φ›‡…Εƒxί;>|γ7[To²eƒ―όA“ίΉz—Ι|蹐yρaΙ…βCΟ…Ό·Ζ‡ž ™z.„cΔ‡pΏUxŠyρaΙ…βCq!ΏsxK|Xr!S8­δBx>,Ήώ©γBΆ‹=²>䚸q χΚ}•\Υq‘ψŽ©ηKΝk»x―δCΟ‹œO\Ε}p]½|h~<7‹{εήΉ‡ό}ΐ?|ž =7fN3>,Ήw₯;>μΒ…pόηωs½xδszΎLϋe>Lο­ΈΜš<ΨΒgβCΗ…ό6ΰΓ’ s›΄Ξsa_ραΊύ–ͺ†φ›ΏϊBΏ‘aΎ‡\ψφ-WοάuEυζ“χWo?xc^~ϋŽUo=z{υΞ=WWo=rkυζ5§ηmo>ύPnΗϊ7ώubυϊEG6φMλίxώ©^x¦zνΥWͺΧ&N¬ή|βή|¬7:5·g»ŽΝ>>ϋrυς««7žy€zcΒcΥ=OΏTηSΥ³/½Z=2αεκ_χ=[|ΣcΥ!W?P]rΟΣΥ ―L¬n{βΕκυ'TRφΏσ©s[Ž}χS/5ΞωπΝΥλ/ΏΫr|ŽηžΘϋίϊψ‹ΥEw?]νxʍωΈ›vu5κGηTΏzRΖΠoŸ^ήνœκ#ϋ_Rmό§+«ƒ―ΊΏϊΚ‰­&ΎφZγ89ίΔ³ώP½vή‘Υ[=―zϋς9σσKSΆΏvΑαΥ3}―zώΠέͺΆΨ Ίf½΅«?·QυΠΫU―žϊ«κΕ#R½tΜ^ΥΛΗο]M<ύ7ωYΎvΞ!Υ+'μS=υ«ͺ»wόLuσVWέ|ύκϊ?Q]±ζG«KV\₯:ΘΨ겕V―.³juή 1ΥΉGUgτ_!OΗ/Ό|uΐ\Cͺ=fTν2Λ²Υn³ͺŽZpxuμBΓ«“YΎ:eΡΥ!σ kΆa»Ϊ1ύζ σό‘σkσμ₯VΜϋ~‡~«gZ&Ÿηί›­W=ΆΟŽΥ³ ίΧΚ2χΔύp€gPχα~Έ·;ΏφιΌί ‡ο^=wȏ2xf<žΟ‡ι«ύe5ρŒƒ2xVy>''<[}8ΰΨ ξΓwΘwϊΦΏΟΞος;χ]_½υ؝ω=βέη;}λρ»›ο5οΠγΟΏ’ίΉ'^x%ΏƒΌ·₯εGŸ{₯OΈπSύϊW‹υ›­κŸŽ7Cπΰt*Ξώτ&Υ“_Ψ¨zf» ςτΉνΧΟ(?όΣ}ηœͺwώωέΙ)Ζ'†ηΝ{5ŒΛ»χkˆt-#̟:΄šτψσ4Ω@•‘ΚŽxΓΠΐxH†K6D“ρRMΊ4u oΗΰHSŒNŒ9 Q/Ξ11ž0¦0z}₯aHad!>δTμaΘqίzηόlCBƒγσΠ–ν:žΐ5«S€}$€1t―¬γzu|ύןžΜ‡ž βpπ\GŠ“0κΒ…^°‹ QΖ‡]ΈχΫ8Βs!Ώ1ρ‘ηBΟ‡ž ™Š="0Ε‡όNYοΉP\幐v𑸐σHHΓ_œΎr=βAΟ…πϋ•\ΘύpΎ’ =z.Τ±ΌpχmΌx/y³μΘήRtVrΝοΑ…΄z.€³ΗψPŒεžl‹ ι¨η7ΰΉ0‰wΈ”\H»,Ξφβ5_ΆίΌΥϋ}Ό’Γ2Όη}Γ…ˆ€H½ο™—ͺW'Ύ–Ρ₯3Qrο΅ Ž IB€9Ÿ¦,#6ίΎ?Υ›O=E5τΝλΞ¨ήΎν²†POβΑόϊ+/eƒE3ŸEkΪΑƒ°A4#tΈ‹rΟύ© bώηkΜσˆtDβaΔqΨη₯t>M97β‘τ‹―ζφ΄»ι±ͺώqOuΨ΅V'ήψh΅οΕw5Εωjϋ\T­πύ³ςόΚ?½ ϊθ—ζmο­/Θ"œ{F"؞upΝάω~MpJ€"8‚ ‹q„!Η£C$y:’σρύΎY=°Λ«[ώw“,ξD1B\»α:Υ?W]# u„;bŽ˜>m±y™υg->2/#΄κˆopΰάCͺ½g\ν•πύ™—Νβ[@Œ#μ9mύzΦ]ωρUχξτω,žοώ6Υ_έ< σ§³sΎOuNξΏξΓ½°/}φGdΏz~Y”g0zžΫλ—ŸŸ!S:˜Οϋ8qΞ‡οqίJœK ³?Ώ…άY•zξp‘#‰uι=—@ηη7ΔwΝ{E‡οΐ͏ΏίΟΎ° —Nτχ™~‹WΓϊΝUmήσž8a‡υήΥ£ήΕΕ•Η`|λ/£oFΐΣ‡Mθ妨νˆ/υξcDΠλoFΖbνΟ•ΫΘ€Δ0’§γcCΗ{|1τ0eδ ©¬“Η ƒUϋΛΕ8“†!Ζqδωa?Ξλ=ΞtrΜcxβU’‘*Θψ£ miƒqΗ±ΉΞ-oŽ Gy~dlβύΒΓ$£Sžr’ητ\θωN’8‡Δ‡βBE y>ΤoŽ–\(+ΉPήpΞλΉƒ)Λ%²ΎŽ αψΠs!ηΥqJ.T'dΙ…~λΕ…Μϋ}Kρ^r‘λž ι¬εΊθόΟJ «³’δBAΟ+O3―grdD|θΉ0s`ͺγΖΔy“ yxΟϋYΗ…Yœ·Λ…β΄.T‡ΎηΒάY©(’>ηxΝΏΥoduδ ŸHFι πžOeqŽΐhra6Ω[ˆ7ۼΈmyšZΞρ΄L;}φ'H#dΰγ§^hx9ŸΆα]δzπ8"Ό™Gάόϋαη³ΈίG²ηzO½ω±κΊ‡žΛ‚o:λwΕ}Υήή™…5λΉ'²ŽsξOζm₯άϋLޟιΧ?TvΛγΥΡ~8 )u ψ7;τͺjƒ?^Q­ώσ‹3>ώλΛͺuΎ< 
σ΅ϋΟ|―tSΔ?ׁG6xΨ™?γΆ'²ΐFζNŽ$κyΎxΩ{Yτ%Α‡”ΨΟi}~žΆ‘ˆΗχџ~={Πoίξςτžo}.O%†ρXγώΧκΙύΒαγ² Η£Ž‡aŽxgΘ Ž@Gxnž‘ΥΎs,—Ί<γ¬W­c±O$’ρΝύκϊ˜§ƒAp—½Υ‡{CδγηžΧtNΰΧq€„{Ž4HΫΉρϊ%ΗdξΫϊ#^τw'Ξ9Q"ς’σ]495}?Όt.=ς«ωγ}¦S QΞ;ΗϋΦ)β5_¦ίωYoέo‰πžΏβ<}ΎΞ  Πqxή ώΙΎ²σzΩ/kέaŽ'Θ ryΙTXσœsB2™Œ›Ο"\BƒB=ώΙΨΐˆΑ˜Μ‚Μ₯‘*/˜¦2Βoq«Β=ΥΙΑ}aϋˆήِδϋ„xςŽ€τu'Π³ρν ›ή'u”δpR»…6½ωtθΰ)B%„G΄ Ρ“Α™Π«ίΠ~›7ŒO [ OD<ηΔfπvJΐςšcŒ‚πžχ-Š žί•βz.„χΔ‡ηZ'>τ\(±Š Εƒ‚q!’N‘ΰκ "κ$Μα?q!ΏEΟ‡βBΟ‡ž %<‡ήd‘AϊΝ—\Xφ\Θy9NΙ…βΓ’ α ‰]Ο…ΊξvΉγΡΖsά±w58N–ž ٟΆ΄Q;–u\ΞU¦ωŽ-λ9ιήyφ@iά[Ι‡|o|§ž ³ΰ†³@ >τ\¨(.Π… εΉ‡=ςξρ.N-.Μϊ%βCγΒNωP^s:)αΒπžχ½mˆ·δπ$Μb’qNX:ηε؜‘Ν΅pOώςΫYH \C«aνqΞύ*¬½ χ—G=§€g¦ƒC  Υ‡ΠΫβόe‹Κ £‰N'Ύ;ή':–ψmρ»θΤ6”Χ\!α=δœc„ςƒτ₯o4„ω»Δ9Ήζτκ#Πρaβ⟽y'30D 2Bύτ\θψ°Ω1ιΑ:Η…ΚΝ.ΉP|<ς[τ|Θ„&ΏiΟ…^ˆ{.T'œ’}T―C’Ξπ\θ;Κ¨!Ϊ3_r‘:HK.”€―γBΟ‡β+¦΄ΣΰAq‘ΔΉ„9Η`?ΦΓ}΄βBΦ³]?|XΌlRΗ«οΤΧσŒΤI’ΊβBειgqNg‹κfXzO|ΨδBε’ϋzΚCχ|(.TGΊρα{Κ…π―ΌςΚ;Χ\οyίΪ†gέήπ(# Ωs‰s9x s^y‡D*‘γˆΔ!ΌηδlK`3ΕcΧ›s t™β­Ζ»ΌΣi7gQhFθΡ Ψ†ΧzΛ£Ν"Μ>ˆs–Τw„6‘ζΓw>£Zξ›§U#Ύ{f?·Ηyy=žk9b™eΒΣi‹'œmxΒΩΖup]\B›kBTs-ˆ+ξWaς„ΦσάΈΔ=ϋI Σω@Ηϋ³ŽmΚ•ΟωΚιζ”ςφρˆίvY#ώΕ YƒΧ_z>G#δο"΅Ν^uBͺI!HSε«#F%Μωž˜x’ΕxΜO:β˜Px„.m1G#ΠΠ΄!LNH<‚œ©–ή„Ί#Ξ%ΠΙOgΒύ’+ηpvΞΗΎ„ΦsLD?Syω5ο―‘;qΞ‡vη>ί{ΞYΞ’ύŒƒς³PŠΟΑZ}ίQ§ϊώ”6ΑwMžs₯5tκ9χ^s!Όηqώτ7Μ…\ήΉμrΑ£WΏ·~F™g–s+1:F)F'&SεΛsξ½C„m²Œ!ŠΧ\Ζ(F„Ο›K†' † a‚9ΌLξ(/ FΖƝro1Ζ”ΌΐW2¦”SΝΤPσ’Σ“μO[Ξ§όBζ½—γLΖ›Ξ#Ο·rΏe˜bτaϊόGχ3Še@ΚϋΓ>2"εᖐφžp ΡCoo@β\^&Ϊ{CΤ2Jε-β˜2dε‘ςEζΚuο5’HWΌχ’ΛE`δΌKΊxkz‹πu'ΠρRςn•)ΎΣG©δXRH q”ήΫ\/!‘₯·¨Eζ” Λ)Ζ§ηζ…κ΄Rι5ŸRο9₯„λnJΈ-ao[Ώ`ΒE χΨt«A ζΒWWο…»ˆsρ‘ηBήMďετ6ΉP‚]|(.τ|¨: *ξe|ˆ ϊzόŽΰ"~Wx›U¬MB[υ2”f#.TTŒ~Γκ„TžvΩηS{Թ鹐)ΛόζΥΙΙρ=Ηω( xE!εž =·ˆ u,xΘ{Β=–\(OΈηBνΤz>d›ψΠ‡ΞϋB›­rΤΥΙΰCΰ}Z/’ΗχΨδBWά­m.TΡUu‹ -j­‘&.δs|ψžr‘ψΠqa'|XzΝ…v½ηΑ…ν}žRΔ&;{w$Ξ89Ό!™D€ς© έUώ8β‚ΐΗ+ŽηψΨ<’…*ΗΖσ-ο8p<ΩΧx°w=ϋΆά†}΅rDχJ{žŸηρN#Φε9§=ϋ6>ζΗηVƒv8΅ZϊKΗVΎ<ΎZpγ}σtπ7ώ–E8ήkD9’6 mΆ5ΟZ»δvy£ 9εuΞ… ηπΐλœ‘Ξ½!ΆΩF[Δ·:Έ‹vά'σ<BέρŒηηgaλ9<š\υ$ͺρθζΞΆτŒε*"'aή› *8§ΞB¬9B/5‚ο5B™\t„9AŒΈ%œο5b]œv*ή¦Bs쇄­«Xžs„99η„ΙγiηœΚ{Gˆb―Όq ο=tp]œ°O«QtDx3Ώœ"p§&ΟK˜ηuιΩρΡΜίOσ=Šσ"½W‘DίS6π˜“ΆΑχNGŽ2:υœ—^σήxΟΣg£„»ξMΨ΅fϋπ„«ήHψA;ϋNK|:έVδ$·RU†•?6qM&χ‚ΣγM('F'F( c“ψς±M(†F….7…Ήͺ{KœΫγΣW7'Η’u: =RN€„ΉŒRUiΗp’a'ΟΉχtΠF!Ž2\ΥFω‰μhΛq1¬δ­ηܐςωπG t_± Dy θ\ΰΊ}˜€ QŒGy$žεΝδύ‘ΗCτO·5¦¬“§I"’2ZΥV†iιΊοhπF©χ”)šΐ‡Υ*Μ―+„βͺπT3SŒΫ0HkT’9TpKΰύ“‘Š—aN£„f$ύΒ+)§*μ₯YΈιψx‹œ—¨OQΧ|J½ητ &Μmσ³$\›°zΒ"T¦ Ώό0€p!Ε«<Š =vαBΔ’ψΠs‘ηCq‘ͺ―{>Š<βCΟ… ΚΗ°N\'qξsͺ%Ύ­£°qύΖ}Τ‘ΈcŠ ΕŜΧs‘’vJ.T|Ι…ŠΈwλά>μΌϞΕ…Š ‚Ϋ<ͺS³Ž }ΫΫύΚάφ²:ΌρBΈΕ’<Š 5ς|(.l¦γψŠξSΚ…*>Θ{XΗ…LvαBρa_s‘BΫ-rΙsa'|Xη5ŸοypaϋΉηδΔ"0ŸΚEG*ηοcφπޟf^-BD^BΔ:žA<ηr,b[Ήά aG #t² CΗ« FtΣO6’ΡΌζ/.m†œ#†θ,ƎhΖϋ·|±-~—«¨³ί’{x΅ΔV‡ζeyΝρ€³~αOPΝύ±οW³Œωrσ³[΅ΜΧNi y:΄η@ΨΣ‰ΐΉεœ0Ώυ1Χ5ο‰νκ@ΐ›NΎ:Ο4Ή£(Y‰Ήx˜ε,gαM2ωΡ–Ν|N#Έηκ¦—ρχΉΦ΄ΝaΫΊ­uΒέΨdΔ5σˆcBΨΙΫF˜#bΊ**‡ˆVa92‘ηΜ#Πρ˜γ 'lq|tΒΫε¬Η£Ž‡žsΈD7"œσp t°žkd=Χ@σ*b7₯ž‹„7ΘœPχ΄>…KΟ5Wn·°υΌήŠνi¨+Χ›~DX¨ Ώ ~D[t`^zΝ§Τ{ž>3%ά—0(aVλ΄Q΄Y4a•„Ÿ{qήέΎΣŸNΧΓeΰ)Bμs†1κ œ‡†·/‘ώρΛP΅aυΰ+|Sωε2>}Θ¦rKαw“m%Θ1N}Υs…o*Μ\!‘ςšK4{oΆŒQy‡T`ƒK˜ΟΟ–‡Dβάa¬χžy…€{ρ*Α*―‘<χω­ζς(Ι¨-½:ή“#―‘ΓSζnm€y€ς8yρΆή(•['Πeϊ’J οτα― uχ!ξ2Jρ)οά τβŽ@ŸtιδκΖΌ/½1Jm’ζ<ο!"ˆε<‘ΝχΌFœ{£΄#ΡAŸ™μ1βΈ>Œ³ƒΛτωΪΙθ¬3FΑŽύV Ρ/¦ΰxs&ό7a5λρ\άΦ/Ξς‡Ω Ν½εί[ΏΙ‡%Š»p‘ΒΧΕ‹βB‰£Δ‡M.TρΛV\hπ\¨œsψΠs‘ηCΟ…7J8{Q.>¬γB~ϋjγ‡,+©–ωύ—\(ώT§ΈPaβ%*η½δBuTz·ΈΝσ‘OλΛ<Jh{^χ•|θ½ν qχ\XF ΄ͺς..T籊jŠ U€OyηκxΙ^tρ!yγx½{+ΠΕ‡ž y >¬ηS… Ϊ^pooψο ΓH–^σ{\Ψ7\ˆ·pt‰ssk†μ&A“ΓΫ“˞u„"―9απˆ ¬αΗsŒ '„‘Ξ}YΔ3₯’~ύ`„6ΐS­0vBΒ³ΧΩBΐU%/6m%ʁΌθζδkH6ͺ½K S<ŽόrŠΝΡ πu„Έ<ςL9^z uD9*ΒΖΌ<μμ3EΓΣsΙΕΪ,χ\’Ε9Ő0:ρ Mάkӌ.½ΰδbςΟέ†ύi7’*oΉŠΟ§ά7ε{cT‘œηͺ<;©1Ξ―\^_NΨ€ "…Άϋ°ΚR +ΓKΒ\ωΩήΣ#8λ1ΠδaƐϋνΝ“­ H–Ω§,,Η΅©j²*(« ²L]žsŸ)―ΌG2Ve\tKΜ+΄]νh£νΎ7b½Aͺg#cΊ•ΧΘ t? 
›ς^U…ί±ΑβCߌς\Ο°_‚’ ΕΝ%κϊΫεΒ2W\\¨Θ’ ΥΙي =z/ΊοhU“£μ΄¨Kχ–3αC?¦|S Γ‡V$°Ι…Sΐ‡Ν:%ς~|Ψ"Υψpͺq‘rΟϋFœZΉί"-Ή lD}½;> .lƒˆ$Ÿœ‚dxΘAq~έMqŽΈ$€]•©#δ˜+ŒΟ1B―9σx“%fρ8#\½ I˜Gp xΔΖxΞ 9G,#°Y gžu„³#Έρœ“cΞΎ„―D9  BΟωl+οπ.a>λΈνs¨;ΫϊφΰΗ€ƒαΝ}0Uψ>ΐKJΑ6sM4αΜzˆtBzΞ#ΧψΨδœίw}ξτpVσ’}~Ν‘ΎζΩKŒΈΌδ˜Ιcn““nEεκ΄Eό*ŸΟ4β—cεp;>ν$ŠΥζu¦ iG˜“Ojο„―kθ5rΜΩN;ށΠWΨΊ ΐ)„qΞυΠ) ±Ϋyςζ@Χ§bq=}δ—χažsΞznγŸηχαNΈ»mσχN Βω΅ ψ=ΰ=Wh;œw’Ϋπͺνϋ h)ΞWλ7?ώ~6αp·ΌmΒΪη-χ–ψtΊ&ΰάΫτΓ «Χφή¬Φ ΝΌ)‚$ƒTγ¦β-βΏB“X e―«Θ^cˆJœ+ΤΟͺ M†B81θ4ΟMVψΘ{uδΙφE0ͺήX’¬χaŸ*.δ½.q’ΏΌρψjv»v2Ύ{ΥψκW―φΈn|3η[Υ”ο./ηΑS€Όv_ΐNη*‹ΆΙ-=C2.}SγΪΛή3€λφ(ΊΌηewoΆςI€+΄]‘΅ ι, "yžΏoήˍμbJτπ΅#Ξ•©bήS{ΊΛ{|ΙwaΘΤPΐp΅bI9'2‘γRž%Ξ1Deœ&t"ΞwšqTuτLλΦbίW£Ρ1SpΌω.Kiχ|XΗ…ω+ΉP|Xp‘ψ° ϊΡ*<–|θΈP|(.TΉ*³‹ Υ ©ί§ηBΟ‡*°&Θσ]r‘jw”\Έί γ3—ˆ wΉ¦1ύΖε㫝¬ηB_@M\ΘΌͺΐ·βB‰s_ψ²δBuR–\θ#…J€m)%Π}x»:+K>T‡…/ησΟoqωηβBu4Γ‡ž ›‘DΌ–ςΠqή,η#:œXπaζB N-.,Εy|Ψη‹ΆδB°LηΑ…}Λ…yξgy—8ϝ0Œιόΐ ρœ“8G”RKγγy—χ/3aκx‘™*$.ο·ΒίxΛιiΆ#ͺ5Ύ8λ–Ϊ樦`F³Œˆ&L} χΞΐσMh:Β\^w<αδ™³½Ξcπ²γUGœ/ς©›\ƒͺΐSΠMΉΓtFδ κxΓU<|q<§ ©υ²Uμ$ΰŸ{"η‘ηάsΫ=§ $Α¨ δ^ #ΜεiVρ3Δl3τΠxΎ'„~:’ŸmΩΣnyΩΉΑZΗ±πδ‘ϋΌoD6aξ>眐uD8 „ο9žt–Ι1§ϋ8&s„?ΉβxξUΠΝWYΧu αλυβœω;Ωm˜3ίϊ^rΊΐ%“S)sρ<ςσΣφf.ΊyάA_‰s@ͺ3Ό7ˆsjt"ΞwHβό›3 ¬Ε qΎUΗψ\ΐ>ΈMqήrίη}ό) Β5‘§ͺyL_ ή!yŠ00Θ““ΐBœ›aΡ^>|SF¨ Qrν&]Ϊ^UB…Fœ+TZΥ‚ε)Β°S’γΦy —”q%ŒΌ42ΐ”[¨\mŒ4„ψžί4@εΒHάχΏγ›Fβό+—œX}ρ‚“«νqB^f;  Lyp0~ΉfŸ*γWF§χdk_Φ{γSσ€λZk£v~›¦ε9υLΌAκΓWΛ!†d”ϊpNyΕΞιΊ†Vk†³cPšŠQͺBIΝNžžŒQŒP‰$³Ό›·Ώ{|JηߝeTuόμλΥbYWŸ"qnΗά ’PΞω°φσ\(Qάrχ.7ωPό¨B]ςš‹ ½0w|ς›Ρο¦,Ί¨‘ΛTi]o€‘ $ξσ•Ο}8ΉRZ4dP§€Έ~“hφ\_Β%Š=r\…‚KδŠ }ΥuίQ)*ΉPb»Ž %ΒΛu΄«λ΄T4‘ηΒR ΧyΛΞJο=χ£Yˆύ8ιtΒtIοQM‚τ.4ω°ΝΊM>,Έ°δΓΜ…ΌΗtnΒ‡S‹ 5”šγΒNΔωͺ3.Ϊ’ Α²3΄/Ξƒ §¬°xΧzDM€ iGtŠΤͺΔNΘ7…δxΚΙΟF”#t}XΊ ΅!†UYuΚεF˜γ!G(#Ίσ―Ώg.δFA7rΕ™G γρVxϋβ[’9`[+QN(;ωκ«ξ}aηSÞα%η~U„ϊ#¬”7ŒΗœ¨\Eύž«³ΈV(z‹ά*²#Τ³Xζ‘m *νY˜S­έΖ:οβ-·πuΦ!ZΉͺ¨Žχ»BήΪN‡IΞ_Ob]Γ†!49Vυ&ΤΩWΒ―9Β™γz/·„:βœPu πv†fcΈ4<ηςš³qŽΗ[EޘJ”#Έ›c”sφœ”GΟ6ΐ‡λ“Χ;η؟υ‡fuφy(=œ£Ÿž­Ζ‹ΟB=‘K4ίOznΉΐž…Ύ7zBΗΏGoΟΏ Β!Π5„ο WˆσoΟ8°Ϊy¦ejρΡhGœGXϋτ@ΐ-sΙ#Sŏ$́σ ec@ž$'Ψ»ζ2FΣ:BΪ5–9FŒ P9εζ*XTζG{ΓhYFσή;’ͺε¬ΓpΓŽ|λŠΖΤ‡Wͺ=ηγxz΄Α(έζΌΏf±8+ž&φWΘ¨ .)Ό”λΐ0,½ίε2>½AΚ1eψjž©7N™gΦ{γUF©ŒΡ2|ΤWpχiζ.£ΤμΊ“@κRΫ@’ρ„α‰wˆβq Ω ΕxUJ%ΌFΧνΦΉwACmaR„N•‰m¬ίNΔωf]<Οϊ΅ψ՜kτ(ΞΣgΌD*0—pyΒf Ώ*Švi›5:<н—\|θΕΉΆ)œέ‹rύl4_Ξ‡±Γ…JΡЌͺͺξσΎ5―Jε>zˆ©ΒΓΕ‘š‡ΨkpEΙ…pˆjbx.„/ΰ’’ κLK.Τθή£―(δ χ\¨h Ο…ŠhzNτ|θΉ°άζy΅Lυ)Ίoέσa™ςSΛTώΉψ.l1YΉΪŽ σψηŽ»γΒ₯a©]†*Š="ΞΕ‡S‹ γβC{ށ8_mζE[r!4γζ~Όs˜C”{aŽp'?ύt<<ςΎψœDΉŠ΅e―Ή iΟήtBΣνVάNc—{ρ^ϋ™Ξ‰$΄3ˆ `,xΎ*ήΣ™dUμ³ § ‘΅•—έη§χΙo$‰sή ΥbθKqώ½™—icWb­Ϋη3'ܟ°¬+κΆB›βΌεΎΣŸ~  8Ώπιk:ΎΨ‘1\(^—aΣJ#˜αTGΥΩ•—¨0N UV~ŸŒ:/Με-—Q%o΅ŒM…ˆcˆ•9Ϊ2Μ$’ Ν”h«3O­Ά»θ€f(;Ɲφ“Χ Ο(ϋbΘb”Β=Y§’m\ŸrήΉ~ ?οχb»4&™Κ‹₯s1•—ί·W;‘<&Ηγ~}n¦DΊχ¬•Bέ‡vΦ…Ή+΄S•τερ“η‘At„Z¨9ž³ŠΔ©²έ‡wMJya=ω½b;E‘4΄F)ο/Ί’ρ›EΣ5?Κθ“ΠΏίlΡ0HΙ©$4oFir,w"Ξw™stuΚ|λΧβΐΉΫη£nHΈ9αVŽiλJΈΔ†»`Ί`€SΘ…Κ5Š ½0/‹Ώ‰Ε…φ‹αBΌ©„>σ[QJΏq‘xPEΙ|evrνSU$ΌΛΌmyΕ‘^³ΎδB€Ψ† αq! 
}Ι…„½Γ‡μ#.ΤωT―Γs‘Όδž λψu:Ο”raΙ‡κeΎz]ʏFΔ(=θβΒr,τ²ΓR–βBŠƒ=Ύ‹»αΒό?Χ†%-ΉPUΰ3z.4ažωpjq‘Ζ9w\Ψ[>Dœ―žΔy+.ƒfκQœφ5&ˆΧό *JΥ¨5Ά9ήfŠ ©‚ΉŠΏα-§z9S ΅!Βρ˜#ΥxΎΩδ†#ΚξxΜi#ο6βέWZGœγ9Wˆ;‚]ΫXW'Θφƒcql‰xΔ={O9χ‚§œΌqΔyτDςΘιdΐΛΞuΡ‘@'Ώ<ό\~ΕYpZnx+žΌs›Ο‘ιi…e7EdφYdS/² m…£#x5”Zώn]cg‘οΒΪ™—Pηxε9σx·Υ*Θ¦!Ο€ŠΌκξσYV~9β^αμˆ~ΑΞΉrey†5KΟƒη²@Ά{Μs!·Αsn…πΤ‘P+„‰\Hϋς4R .=,£’2H1,Ώώ―ρ]Β5Ω—φ*ŽΔ: Zφ§-)ΗP¨ŒWΐuιϊ1ϊJq^gLzQξ!CΣ£lWΧΖ‹σrΌtoΆ*Žδ+άλ{ρ…‘|X'ί£οp‘Qͺ\tD:9˜Ω“$Ρš>\½ή―τώΠPPΙ{ šΓ΅Ήω½΅4ŒlŒ¦wΆ,Χ1 3²ŠΞaˆRpιςοεωNΔωσŒ©N_xƒZόnώ5¦8¬ύƒˆχ\œ‹ Uw\ΨδΓ²ή†₯B\¨NΚ’ ω}π›Aάy.τή|Ρ7Ÿ·ΝoU(AΞoΨs!œγEΊηB‰Φ’ ·όϋߚqΟ…pŒΌΠž ΩW”ž ε!/ΉPάJ.΅Λ…uΎ–βΌ,§βœ­:,λFϋπΓN–έs‘ηCΟ…MΟzΙ…ž .z.$tΎ κ½ε}5>œj\ˆηΨp!€{Ι‡ˆσ5f]¬%‚Α3Ο[ΎΗΆaQŒ#Z„^dΌΝZD¬Šΐ‘k!Η4lšBΪΆx­Ι' \oΌήδΚ*ν„»#ΞYGΨ;m%΄ CG”κ^ŠπΊs<κδ₯snu¨β;ή}„7<χxιt@\¦Œ”*νΉσΓ₯q,ΞEGb,‡M“ΟL¨» θf1=(ž^‹9 έ —Ρž}›p+^q7œZΜ„w.gVEΠ²·"_aμςπ7 Ω₯λΙyή>ΞΔxΓέΖs§£¨Νλ“€f]―/ 'qχ#Η~Βc9ǟˆrήM’:ς³Dμ§γ²-g—|N+`:>ΠΎ/ΠWŸΧ_œΠ,˜H'‘%€E0ζ}'β|χΩU{Ν>ΈλΝܞ8 γOΐΝ"2δ§α%β;ž#…vϊάσKTs’2$€†Ψ->gƒTE„dόΘS„aδ…€ ?εqkκΓ}5` 0 3Ό.2βGǘΔύΤΙ§W›F6L•K‰θ–―ηΗ¨ΓP%€γTž%Ϊ¨Ί±Ξ«q•gΙώ2F’ι½AΎΛςΥy”J―Ίΰ½Ešzqξ‡e«σ΅2HU4ͺUa$Ύ;B:ΛaΦΌAŠΧ‚ηHΥ©»€ˆ'nxwxg¨tμΗ„fžm€|ΞlΦΙ»™ήΩfΘ(₯τώφUρ£f―­U(ΖE5ψ,Μ™&£΄qώγωΖVg,Άa-~ΏΠGBœΏ©ΈPόz.τQD₯Η\DΥdoΉ:)α?Ο…*w“…³‹ ωŠC$ 5Fxς»ΦΌ†S₯uνSr‘x€δΒϞφ·Z.„ΰΓ’ επaρΊΈ\^ϋ:.τ|θΉP|XzΪλΈPχWr‘ΔΉ: κΌθήƒξωΠsaY(ΞwVz>ϊΤΟ…ΚIoς‘ΈyγΓ’ Ε‡%Š5Ί@³‘ραΤβœ"̍ {Λ‡ˆσΜΆXK.!ΞίΫπυW^Κ9΅ˆs…νβA'¬]"qλΗ4—@ΗγŒηQ‹χqŽΧΘΕŽ'tœπw «Fh»Όν=…«+ŸΌλ„Ζs΍Ÿλΰs•v–ΰx:Ή?ŠΈε<ϋ‰sGžuΒΫι„ΐJ.=ΧΔufΟ9ήΰ$ώrwΞxΒ³WN4ρ$YΞωΞVˆ,{ΜεΩEX"do»¬‹W9{yνXͺB.‘­puΌΰΜgQN•rΖψ&―Ϊς«³Hgψ0«/O:"›άvDΆrΖ&χ‚\Ήδڟy别ΈχΘuKη\{rς σOΟ6 4q ΘΟχaι* —Γυ­€žΔ9΅8FΓιδ;R!> 3‡πW h…\ܐηKΕ{ ο«瑃€)yηˆsή:w:η?Iβ|ο$Δλ°~ˆσ8ΟF)CP©,Γ΅(΄‘ηήS€ΠΝΒC„ 4ΆΉ†•Α{€±"aΞC§,Μ£n~8RΎjΉχƒ”œZDΊͺcŒJ΄δbIΙ UΫϊΙϋ’a¦x‡0dρ8iLθlΤςώYely%sgzos¨eB_}r%# ]yŠ:0F%Ξ²ΰΨκά%7ͺΕ!‹βόύκ¬Όz—FΪ„ψP⼌$ͺ g·cq‘D9ΰ½ΦΨ o'Ϊs‘†6σ\Θ²:#τ βώ·.nτ|蹁 •\(^r‘ψΚs!λπ§δBρaΙ…ͺ³Ρ.J {Ο•<θΫ—\θ=ηuC²•Uά½8χ\θ+γ—EβΔ‡eg₯―ζ..TT‘ψP\Θϋ£w£Ž <(.€M“z>œj\(οyyΞ?:ϋb-Ή,7Kˆσχƒ O U€¦b;ωΩxΟρ’#Π"x–ρD tΌˆbΔ7ΉζxΚρš#Ξ)π†7:ΫΌR :ήsΦ3τY;έƒUrΧζ„²γ5G¨Σ‰@‡yΒxΚ³§•κκ&¦€ά+B ακΫD δ#:zV„pή? 
I Σ\‡†dhBή:θΠιDœο3Ηΰj9—«ΕΖ³,βόC%Ξι G”γ5ςy—f6‹ωv'Μ1dX¨—_‚ΛΟΣӏ‘‚!£aΙd|Κk‘Πu’1ε N‰qUΓ“ηƒL’že<<[œrZ{φ)Υ'Oό{΅ρ±gvρœKΐӞγΙ„‘¦ΚΖŽxΰYΖθTΡ$qu_ΚΪΟ“Yΰx‚χ΅ς²—bή‡r–ž"ίyQ'ΞλBΫ}‘8u8΄Κ·”°P1+₯~x(ΎoŸ{©χD’Ϋƒuz_δidͺν‡νͺg χ.ηqΦnE’ϊϊσΖΑŸk>R%Ζh2z;η{-<Ά:ΐF΅ψsηο›87―`“ .lςaEd57$Κ=z₯w›χ˜PgΟ…όΖψ-jdεjϋα%ΆΛB“ΪwΑͺŠ ΒβB<ηβBΦΧq!ΌΔ1K.ΤyJ.τ΅B<ͺ£²δBŽ_Η…l+ωP”έ‰yΟ…u|蟴η譊fz.,ΓΫΕ…eD‘ΈΠη’·βB–=κρ\ΘRρaΙ…žϋ\°²e£˜,|θΈ°·|˜Εω‹΅δB0dΦηο .œ„V°Ο?•‘Βpδi#\"x˜5ήΉςΟUμ0pBΜΝx΅εs¬ώ&_Η«Ž˜Ζۍ8'œυ³―ϊΝ.Β›}}Ny)Μ9ηbx5AEεΨ—σBO9Β1{W5†8^nͺ~§{DΰΡ!πBtiμjξ―:βœ‚x9€™βgIg―,žb«άΌκœƒΝπk·]Φ’ˆz<Γpͺ›w8οg’6“³1³@wωθιˆk…Ί7ΗώNΣ<ά›εsηqΎρά[a6<άς /Α¦Κ™Gˆ³MΓΉI„η}ϋωœδΪ’OŽy:>ηB`7=ες'!­jΟ:ΰm/™χ›v* Ηϊg­ : \œŽΎζή 'Έ'DžŽ‡oΞžοŒvYγ1Gμ[ή?θΛίΧ¦š ϊMπnt"Ξχ›spuΐ\Cj±Ι, ‡80‰syŒ²Χ¨¦@\q.OΡ€Ι9ζ£>όΞΜc„ΚΨ`#FΓ‘i(3εώ1•‘)F–ς UΰuŠ2ζΛjΓΌ†EΓψΔΕc(‡ |ΥbŒN…ΚKδkΘ ŽΛ9Έ :ε†r>\Υ‚½0Χυωͺρ‚7NKΓΣ«ε²/’δσλύsσc—aν­ŒRyŽ|w_°Κ+δΗBχF©7L5욌RΌHšΚΗταμθΚΥ₯†"ΆΌL¨;‚κΦ½§ΞοƒpN₯ˆτ„NΔωή‹«.΄q-[κ£!ΞίΟΞJRΔ‡5Ε23ΦΥή°τβ½ςκ¬,G±(”cŸΧUp―+η‡Xσ\X tuXJ¨Χ₯ύˆK.–\(>τ\Hρ‘ηBή=Ο‡SγΣRΝqaoωqώρΉϊ·δB04ΔωϋΖ… ½ΞyΣIxQΡ\AB―Ζ«ͺOη)tšπ=εtrΩΣsTa6„Όž]³ͺϋTηΚ;ηχΐ;B§ θ΄ ά―η^ϊέYœγTXΒ8[xΞρšΛ»)#TΕo„봞eΒύπD`a˜*dέ qŸ7­Πp„ͺͺc„)—\ō0*%X½pΥΎe΅uBBΈk˜΄2€œγ”C 1±†χƒΝ‡ΨϋjΔήΛγ QΓ£Ξ8-α=LΚ•§ΘοΛω}a(ξΪͺZ±G™sY τct‹ιήsδ S_ΑXα2H½pQ^&οΖ&F§ή#Šc{.ŒDucB­:q—aΟόΨδiJX¦B35ί–`Γ₯R1γ'Γ΄qΎΟβ+U—Ω€G ψXˆσi…=βAֈs…)λ½δ]φ|蹐yΦ!Ψψω( ).T‘†…·ψΚκκ€?ˆWJ.Τώ%"ΠαCΈuβBΞ+>σ\(O4σpΈΈώ¨₯B•u’Ό–|؊½8ͺS@£iΤ…ώ‹[q‘Oω©ΝΒΧβPώΉΈ°;OΊηΓ’ ιΐτ\¨«L=κέž sαLΗ‡ž 3Α…ˆjΗ…žΫΞ=‡φ–³8Ÿ»K.Cg q>-paΞλΨ1φ‘ί„}γF΄*mΒΘηΒ ‘LευRTΎŽp&πs<ΰ₯wœ6nΔΉ―Φ^‚σЎΚπΜϋΒr„ΨSaο&‚q€Μή[Ds—ͺP€πyυδŸsŸŽγb !Κώxΐ›ΥΤ QO"‘ΝqŸ0‘―Όύ 6,ΟP œ;/+d;MA.lΖι —·i‰'ŒήΒ΄³PΆψo!ρ¬ΗÍW\UΥsΑ8 ±—ΐΟyΩ&ζUaέ ό`ςά†sY…τ †Œ£ŠzΊφ—ΜSŽ\^o3ά?Ο•yΔ΅Ηύ6”ΠΡ‹`ηΈOΩ¨ς:ιΊ0Ϊd¨)|Σ{ˆT^^¬ΈT΅wƒ©:΄ΎΞP­κη2Έ}˜«Ζ3Φ½ϋBHη=yΠ}A€Ί1ΠK―Q+Ο𑝠ο,s™iΓzΦι=FŠΐP覆lΛyΑ’Œ~ ήΎηGC u(Ξχ[z₯κς›Φβ˜Aq> ρa“ U ›B„~\sΦ^r!0/½€¬σ\¨Ž1~›ž %Ξε©VŠ \Pς!|!~ρβU\(^(ΉPωη%ru\¨Ϊ%ͺP]Ι…ΊΖ:.τπ\Ψ–žσ’ ΕwΊοžΌη₯]ί‰:N|ξΉ†Ό+sΠΛ¨’²Γ²τ’—|θΉnz.τiž sν ρaΙ…Μχ8Ο‘νκ¬4.μDœ―=O–\†Ν>_ˆσiΘ6DΚ3Š—Ρ ³ˆUq'¬AL‘7ςΎʈπλπfΉβ@β!ŽΧAOX<ήχžrΞΙg§―9yξtrΫΉΌέt"0/'"1Η½ τ˜"½Π"―žς‰YFa–½΄xΚ­B{“O‚Q)Š0Gΐq>φΕ;ŒΨ”HηΌ ΧFά Κ‡ΟBΣΒε…Κ­!Κ˜žΟϊ,ήΝΛNN89δyXΆ$ΜλήΛތŒ¨·cε{ŽO;7vx^Ο0qιru‘{Qā<ΰnž―Δ΄ ανE8Ο‰)Ρ΄Gμ"Φ•ŸΞqΥQ!ΛxΆz†t°η˜―Λ’”F 4έC_ˆs:&Έ.[ΧΤiXϋζR:°Zl1ϋ"!Ξ?μβΌύΈ§ ›½ϋεζ¦! 
γc’N!νƒW9Ϊ2ϊ0Ά0ΞΌw…„χΰ@ 1uJρ›Ρo­ χQ<*Β&―ΈΈP“κœTu?$Z«!Λ< qχ\¨%‰eq!<z.”0Wˆ½ηB ςvΉ°zΑξΉP9κ>’Θ‹ρ:.¬V­ŽΛ‚™ž =Φu\ήMΈ»/ηλsx.”§υž K>τ\(>τ\θω0 sq!Γωd|Ψfq.>4.μ-"ΞΧ™·K.!Ξ§O.D€#Ξ)τFΘΊΌηex;‘νlC˜#¨ΩŽΗϋM9똯6Ν |φΣpk* G;‘ς λFΈύοΈ/wΘ#N‡BΑ‡ DlγυEΰ!Ί΄žΞφQ•zξ ќiΔ ‚‘ψ¬ ΙΖ>μ4σ€ˆwyŠ%P›tpŒ,ΠΝƒžCέκ„ΑςξŠΟ)_=_ƒy‹%Hρzηρ+Nnμ$ήsŽEώ;ωφ@ωςκ ρohζΖs.Ξc9εά+ΧΘ³B«p ξK‘tj,σΜΕ‡ηκ ς;Qύ ~Ύ€Δ9Ώq…‘‹οΰ-…€Γ‡βε‘Γ… Ž U›Γs‘φ-ΉP%Κ£\r‘*Γ—\(aξΉ°Ž΅άŠ[yΤΥ™ΠŠ ½X/zϊΡ,Κ‚™%ΦΞ,ΊΈ°Γ²μ¬τ\θωΠs!σβΓ’ ›βάsαu»uαΓ.\ˆ@Ώ}ŸΆΉπΝ?~r4’zžσOΜΏxK.Γηq>=r!DΔ9™‚oν%Ά:4{ΐK/8Λ„Ώkϋσ(’g€άΎE#(ς€{£³€)πβ\aξJπaξΩ[nC―I˜Χ‰sφΛC»ΩσγώΉ&ϋδέθDœ±ΰΠκΨ…†Χβσs†8qήvE„Fρ#£δΌ)άNγ΅bpxοβM9ηv*€Š»,c˜ΙΐTnΈΖ&LjcΚ6 ‡F8&Ϋ7<ϊ¬ „ΔΣWvφΓΥDς©P'Τ}ώe]€²`œ'έ5€Πΰς‘†SΈ±ψP…3Vή[z‹γB~£β>ΑΌ ΉItΓYπSq‡ηBΈΟσ‘ηB fέ4ΌZΙ…*Wr‘:*K.„[ΰœ’ %Μ%ΔΕƒεΌΠ*ͺ¨UΌΒχ»γBρ`)K.ΆΛ…βΓΊh"nαsΡK>”]|θΉξz.τΓς•\θω°Ι…¬oΑ‡pλκΈ°Nœη‚p.œ]©C½ηλ&qފ Αςs†8ŸΉΖxητuΎμΪσ3ηYp΄SQAyΠ™g›·ΞΓzφκPQΥ|Ϊσέ!ΰŸuχ`Φ³ι’#ξ"4¬]፠7^'ΞU]_βΟΎ:]Έ>j.t*Ώ^hX5~αεk±υœ‹†8Ÿ^ΕωΔoœρž}ή<·YŒ&‡ΦQ ) Ήzq‚ΖdΥ[¦$2rΎΠΒ1Ζd€~κδΣ›F§ςΓε5—`§ Συ<»Λ˜½s2ne`Κ£Ν:φγLςHΛŽ‘©‘ήδ•‘±η+&ϋ‚L e—ιOΎ‚7Pe”Φ…ujΩ₯Ύš±Œy?NΊς>}Q©²R«πvU*ƒθήƒ^—‹.ƒ΄4Lρ)„]Ε“40†¨/€εσ-}Α-Ϊς~ε ΪΞsΞΨ»xˆ²1Šqšπ0u£·•8Οω”žC”φ.€½SqώΫ‘«T7ώΙZœ:jνη}ΐ‡ο9šη\οkζBΗ‡ͺ΄ν‡δw .T:ΖΒ βCq‘„7ΌΕvΈK\¨νπaΙ…βΓ’ 9xP\Θz‰ω’ 5<[Ι…κ , `ŠΓJ.€S‘z>lQ$.,ΓΫK.τ#Zˆ Ε‡u\¨αακΈPeΈ{9²…OχΉ₯.δ]z.”0z.–\(>μΒ…¬ο[‰σwq‘Wα읊συ\Ό%‚s…8ο ΫQρ^~MTmgάsΌΕ„uK€<Ϊ>΄9žn„4"/;ωβuyκ݁yνΗ±π˜s\?Ÿσ|ΥύM8β‘λαρΐ"μxΜ#‹π1 tylUŏ·ͺ‡s ‰Mrέ9‡rΨUTŽyΆ3εάz’πΝαΩV„OC‹Ρΰ ‡Μ{«yΆk?΅ςΑ9¦BΓ™8–†8S><Ο@ήhζΩ¦ϋΥΨήκ„ΰϊ%Κ}Ύ{ΦΡΩΑσD\+Š@C‘ιy Ύ'ؐv΄‘Θ :5δη‹yφ§­:5Έ>Uάo s’LœηΠ ι—€‘π]θ­Δ9Ο†ο€οJ\ο)τNΔω ‹ ―NYtD-Ά™+Δyˆσ)πœΛ[”Ε9’ό‹ž Œ RŒŒΔΒ#ΞBσUƒ1*U c‘ι:‡ž›:Ζ¦7H5n/Ζ*Ϋ™WNΊΔ²Œ9zμ‹˜gφεΈ~(!ŒRŒ9o{2ώ|εc_9^η24ex*ΐCΫZyŽΚ0O…²–iβξ‡jeͺ*t«|K‡S«ζ[yΠ1Fλl‡ y'Δ‡ž ™–|4„$όχ..4>μΒ…σψΠ‡΅g.€X<).δxp‘ψΠqaoωqΎΑΒK΄δB0bξηΣ£8GΌ Κ’ˆWΰ?xΉU•]Γ©αέFDγρΖc^Wέ½' κ iGœ―ώσ‹sΥx¦ ±π€“ŽXD4#ͺTτk~ΘΖ8Gθ">59"›‘βΈ'εN HŠ#—[Γz!8” νζ\œ‘Κ:ςΧ™r…ΎsN…ysLΔ4ZαίΦχ™@fY…Χδ…f›ΒσΉUBWΗ‚­gΒΕέ#ξYp?ςs­Sε«`χΓ½!PyN<³½/Ό3?7€xe=ΟB uWAAžηΰΪΉΞ!qNŽΝqχ<Ž|L–ΩΖ΅©cƒη‘{AL7ΕΉ αo%Ξs…z tΌθV _αύtX¨£π,όϋΑχΫ‰8?u±ε«3ϊ―P‹/Ν³XˆσιVœο±I{mϊžzΞ ΅SQš²@††( BCƒ#Ιη!JpcPJdcˆ–?φΗσσ:…nb” μ‡QI[αΛρT΅X’Οq”1 π2qLyδΩ—ύT-YšδyRή¦„ΉBDΛϋρ9 \W‰‹^ξ©ε2Μ³nŒtyΌa*£ΤW,.Γ9}ΞeY©Ψ‹tŸYV/n•s)ΓΤ¨xŒdŒb "VT4°λ—χ `ŒjΈ* Μ© Υ̞’Ϋχix‹=¨1fuB[Ζ‹‰σζΎs7ζu6Nρ"±=M;η―°juϋZ›ΧβŒ•Cœχ‰8―ΉΠ<η]ΈΠρ!λΔ…βC~CβB~Ώ³κ€z>„·ΦϊΣyM­ΊβAνΛ>πŽ8DΎ(ΉPβΎδBqrΙ…βΓ’ Ε‡u\(>τ’Ό––^¬+Ǿ쬬+η;,=ŠΫεΒV|θΊηΒξ}J‘zq^r‘ηCuTjX5ψP\H Cύ‰»p!Β| ψ°‹8/Έ0W‚‡αB‡NΔy+.+„8οqΞXήίόžq!ΒΡ‚h“PςŸ+ŽΧo7ΒZΦ§ΤcBpΎˆάΪΏύg΅Σi7ηzςΞΙυ£sr˜=λŠˆIεKkμm ‡G "ƒKUoχΓ­!€Ω—°σ &.• έυμۚ"U{φG,+χ‘Ν³cͺ‚uΓ΄SGΐFά#pΥ†φˆv€hε*Κ†ε^„5ΧΞ}–γ"Ž„rΌi―ϋΤΑ o9Ο ©ΗsθαπάΥIΑsQΈ»ΐύΠβ<}ΎΞ  Π7b2Fίƒtb#Œ]π•βœυx0B0X0d0x0„0δφ-CM†¨ΌζςααΖSDŒQyd¬Ι»Δz…½ΣVUεe‘AΚΌB8}ΘΌςΧeœ*Χ¨hΔudDϊΒLΚ›/!ôΣ^η]χ$€uy—‚ίΈΜΉΔSδ«Χε\b€zγ΄NœΧεŸ{£΄4H‰GHaνLύψΟͺH¬qΟyΗδ-¨fˆ’G †yε…‘(~džžv?Να‡π8QDιζ½ΐΨΕ@5aΞΏˆσ?ŽZ₯ΊsέOΦβΜΥ>Όaν}Ν‡οΎ‹ Š Ε‡όVψ]ρ»“·ΩwRΒMκ<τ^sΔ3\¨πs₯ζψZJωͺX¦8£δBΆ΅βBρ¬ηB ή’ •k^ζ˜—”­Έ°Ut‘Ÿz.τέs‘ςλύΘ%–‘ν%––₯'έ‹s_ Υxθ­†[z.%"ΠyΏθ˜,Ή!ώ<6Ήq>…|ˆ=σaΙ…·ξέΰCΈ±Ξ ½η.ΊxK.+Μσαη}Ν…σχJœZŒHB0!ρhήίBœγ5§€ΫB›νŸΕ5`žaΡ¦$”1ŽηqŽžv…Υiόͺ΅~σμAg,u€Έ“PD`"Uu\‘γD„)β1+!.ΟΉΔͺ<ΏLΖͺ,.Ο9ϋ κΉGu‡°Vξ΅±ςΉ6 &Ž Ψ¦pΐ΅jlvΆ© .ηeͺbw\ƒŽΛ9t^έΗ`™ΞG›Ž εZ³ŒHεΉ~χο·δηΟ=osάυΜ#¬w?χφάΡ‘k‘Hη|κxΚΖ›g;Eό¨_@'Λ'ϊWΐγΡ ΐρΈožϐgιΕΉθδΡkœφΆμΒϋŸ\©ήjΠ £Κρœ—οSχΤ‰8?cΐΘκ܁£jρ•ωϋχ(ΞΣg¦„ϋΰΦ„YnJQ΄Ω$α<ι«'\Ϋβ8O& tβόα9οΰσΪή›eΌρ›-:>^»ηο\άΘ―$―’±~θΑ*G>b 1‡‘£qΝ1π|‘£2—{rΦόύΩ[δ "Ι Ε8“©wolz"]†šχΚΠN^' ‹χήφ-+§+χΡη—Λΐφ†¨Ό\ςt•^―:C΅ ϋτ‘οΎ ’ΔΊν,=G*^ηΓ:5Φ―ΰCάΛαΥκ R/ΠKΊz«q2D5/cΤη›{£o‘ R¦ͺ„Ε„&‘&έ¬LŒΠΙΘl‹„ΝK”Γ?ρ:™Θ§¨†„y_ˆσ»ΦϋŸZœ΅z„λ+>ΎώλOwΜ‡ωκ3NšΜ…Ό«βCq!Ώρ‘ηBeσ‘μηt*\\Hρ‘Έc°Ξw^EIΘj*.DԊ·<Š=Jδj?_5]|Ψ.–ΌXΆ«γBE”…4Ε‡%ϊPχ2ε§δΒVωη­Όθ>Ό½‚{έkηeήΉηž }‡₯R&Ϊάsq!εž›\ψόq]ψ°-17~ΫF5χ’ 9ޝϋvαΒNΕy+.VqήΧ\(qŽPͺ; ‘ο.La‹°B΄Θƒλ‡C β!G˜γ%GŒ#¨)Φ¦ρΙη }ο Tsg_@Υw 
Ι‰…Yή'Μwφ`ažtL,Μ“Œ‰…'μڏ‡}X¨ˆ―±0+³ι\―±°6s4e–δ'ΦsΣ Ξ’ηθΙΆ—»ΖBρ° ΕCW9I³ &fφ<±0q/±0E{baΞυba³`αLβόΖΏ< ‰ƒε?β|•°pRvuΉ₯νΈΆηlpnΞ47knΉzŠσzvΉFo™QN!Kδ>…sΊ²s=ΛνΣ=_k_οx]¦^gΗ}³βž€Θ“ τμ³χΆ|~έΥ"Ηΰωu–η„Ÿ·Ξν—D訟sΠΣόol|^VM\eςχ#9ϋύΠΕίηι2ς•Θoο:N­'ƒŽ nηž›A/ΩσnkPŽ·O݌9NνŠrϊΛ›-Yσn\νR_•«Δ9—WKœωq}σ‰ιgήξζƒ8ίμβΌ5Ё„φˆsΗ,™”6ψ;ςQϊλZRJο›„΄" m6©ΈΩvd2ΣgžΫC)5ΛΑegοζ|]ƒλPHdR™ύێΚ?YΆΩΧ#™Y dφZ²zόΉG·„Τ“΅ 'άn½œδT‚{ά9GwُY»»+Π%§τ9”sάωμό,³χά tΦμω‚ξ΅IRϋŸώ„yd΄ϋϋηˆ5Λ4ν›”ŒΪwξ˜*…;㫘-HwKθf\Žm 8–LΩN²£ed–3­ύώ·σ­'v/&ŠM\ωœ{΄1³8π!έcΥρΕΏΊγ ΞW +q> ζ䱑> &Ža!Ώ§‚‡‰…‰‡‰…ˆσŝϋ°¨±0Ψk,Lχž³‰σ?έΦυjΦρΕΗίyη«]Φ^c‘x8 κΌξh΄‚…ςH,D°‰‡‰…ά‡ίΫh ±°nS knMb‘FpYu“"ΎΖBΕy…ΩΏžX(^ΥXˆ@ηz&ζeρ3…Ίeς5j8g/|…Dba_½ΖΓΔB·υόσy%ξ‰…‰‡…&ζόσΔΒ1™X˜x˜Xφ‰‡‰…œlk,,',{±°‰yXX‰s°p&q~Σ_™ˆ…ΔΑΧOƒ8_ε²φVΣφ2Ε9‚q«˜΅Τ[qΪ—5G § Ms·4IΛ^r…°BρjΙ7‘¨6KΞΎz{fΟwUΩtKς³Ό=ΏB½―Ÿ<³ηfό³Œ½žγξg‘ŸI~}βœΰ}˜=W ο)ΒΌλ)Ωφ©σ8γύΪςΛVΟ<Ο>tΓlΊΡo„pWb(V,+žƈsDwφ g {τ–·ύκŠryˆrέΪΗfœ³ίqjΝ>Η»΅·y|y]@/’]ηχ>qΞm³ˆσ―ώνΓF»žφπήxφžJœ7λξM|½‰s'9š?~ςΥεv¦\·}«‰eκΕι±:M|ΌLΏ`ϋ‹ƒ8_a󣜍jRœ[)ukbΦ՚Ϊhh.΅m&šn΅φT¦λ·3c£”SBšN»Ίν²…\YΊ(…΄AπΜΤhzd‰&Η₯C{f “Œfι¦ΔPrΘγC>!žΔΡί؟!"ήυΝ£»μΉΔ2ο“χγ2DΤλ’ZΙ©Ω'MημKΟ>ΜΜΥ'!²€ΣΡBι`lYg–sΦξνfΟηυŸ'!M“€-TŸi·–υjŽ•άBφSZi¦Θ€€F¦¨7 Ω€_Έ‰ά“ΗΦpΙ,R#ΈΈ­ύ^6ϋΫο‡ώΊ+uOB e?Δ”ρAΡYΕωq»UϋX}qϊqΎκx˜XˆY<μF§5x8†…`^…‡c™ς ‹Θγ·ΗοR,¬ΗΕB+‚ˆ νο±Π*› ΕC±¦ΫΔ5…x5*βk,k,τ~KΕB"±03θ‰…\ -qΟJ’‰ΥDρ?¬ΖΒnξyb’m βabထjμΔC±0ρ0°ly‡‡5ς}灅m…Qƒ{σ°I/ΝώΔB\Ϋ—/Ξ―? ‰C~u竁…fNk§οΜx.w!pSœΫƒm9wfг‡άύŽIC”¦(―ΛΦ Ε«π]!Έ9Y`Y;—3R οͺ’/“n|fζ=Qξλ™9Oƒ·z|œο?+η9[έχ”=…ΉβѝeοξgΛ}ΏφΗΪSŒζœUލC³o»o‹hqkšΖuΞνΝm)Μ[«8‘ۊξpo KΩKΖ|¬Η\qžsΝS`§9βΌTtΒqnv_βΌ+™Ηω=ζ¦Ο*ΞΏφΤ9:μgώ!‹Šσfύlη‚­Mό|Ϋ›Έiu £&O."ύΆM|‘η₯ηq_ΠgΫΔQ›Zœ7λ±ό!‰ƒ:huJ8Δ?dKΫ(sγŸq3‹sJτ ˜’BDΗsΩ’?Λ$2ΧξŸ'λθȐβάΜ†ύf‹μ7y΄Ηρ­_;Ί½qƒ°AδμνΞ±@9{W’—.ΒfΝ3+D@<ίπ•ύρζ―Ξ]& ¨άώŽ―‹nφ³/Γcέ’] ©Y%ΛεΣεΨμQΞΞώK{ςcμLxH>Ÿoš"IJu…Ξyσσ2θώ-%₯Žr€žβ#Η ALu'Ά2 j§bΒ,[¬ζq%›Ž8ο²ηfΞΉcJV…™σFˆ΅ΟΟλiŽCœ΅^ <Δ•rxΔAfυ˜GtAEœΏηα·ύθΘϋυΖOΎΛ–ηk†‡‰…+„‡νχ[λΟGί9μQ½ρœ?Έυ4βόvM|4JTΗΌ‘‰‡Εu²μΧ_Dœη1Χηϊ9_ι’vˆgΥωηΌβά QΞ½N[Λ΅ŸœλΩ‹ή=HŽ’<ΫΣIΙu•dΛRNȚeε9gωBΦ n–š›%Κφμ§ΜΡ>:KΝ~C%‘―?ϋθΡ«v=ϊϋνϋ·μg·eφ+yυ2ΫZ¬KPΝ@Υεž–ˆ²’˜&!΅’@7c³j)Φ!ωiˆ”€ΤlQΞVΛiˆΤ–έŽζΔyΞ»οηdŒζΊΆλTlΆ(³Bdƒ$žφ–!ίfΜωσf ιdkφΌάv?ίyΎλτW6³}^·KR!΅φ}Ϊ£Ιm”8;f«Ή<“8‹ίύθοΠg<ν‡Μωjγab!d%Δy)]ΓB)Ξ―ήεΡψe°ίžΫΔΒΔΓ ΕΓ ΅Ίœ'*^§ΕΒ¬θI,³ΐ² σδeVy|…‰‡Σ`aβaž°d‰‡5²4Α -sΕΒΔΓΔΒyβ<±P< ,¬ρplδ¨x˜Xhi{ž¬L,$ [Ό+,μπ0±LγδTƒ‡5Ά•$ΰa…ά‡ίG`αrρ°η7Υ‰XHςλyȜ―ќσV„°Yρ¨0―Η§)̍–₯νfΞ³_Ϋ¬y:§+ŽΉ ažβ™-―!j§Ή½°©§7«ž™τΪΕ=«κu³ώιΪξρ–χ›9Oqξηυ>–•Ξc竎‡‰…~GfΑCΏχβ!ί}§θ±Αο#°ίюλ ,δ6ρ0±P,τDeœΘZΆ8ΏΕ―MΔBbηk8JΝδ²η<„’βά¬qν0MQš³Λ-g·€έo3γΩΟΞυsC@σόˆO…x=nΜΘΜz–‚gΩ{fΠηΩΨΞ,ΉέΧoΖ\qn%A9―ρκκMθ8a…ΒΉapηώΦ™sMκŒ‘γΣJΙz…¨Άd4 ©αΌj…»d²QgZΫΛιννsΨΫIω§™sݍιOn~?3‰σΗά~/gOœyθβ|΅Εy`a+pfΕCΝέΔΓΔB«HΜ¦_³7δο«ύ=₯Ωbσ;k,k,k,΄|½ΖBρ°ΞœkWc!™o«† Ή žΥXψΒ3φγa…ή§ ΕΓ ΕΓ\–ξΧXX‹σ YυΙJρ0±POπpž0υ`a‡b‘ΥD‰‡φš‹‡‰…:ηθ4zβaœΐlt1€λΒΚρ0±d[ca9? 䁅ΛΕΓVœίςΧ'b!qΘoόβ ΞW λ™Ψ³–Ά#ς²δΫ¬rfΜkq~μφ ΖJΩλ¬yφ›§›ϋΜ"+T³|½ξ·ήζh—ττg[n‰x–ΈΧ.τŠν,aW »―.Ι· ΏηD=ηέΗτsΠ.ExφΨg(Ζ}o¬Ό¬xχΔCšΜ)κ=‘ъsLα4o ΧuKΩύ±φ”qnΞ>οΚΪkΧtKδΓ1=ϋ»³Μ}^&=„zΞ6Ÿ'Ξ£l=O&d‰ύ˜x/―3OtχΙΧ')fηί>βρ£‹ŽzRo<?ΰeνΥq/hβιCYϋj‹s݈±B²˜ )f–Όμ-gtš„Aig–rZΪήτ$P2Z2Ff,ιΜ¬ΒΌ6>‚\₯‰‘e›lupwv―䬙–™’tξλO$K }Ι™ϋγπӏ=ύ_=χ΄cZιLa.%Έ/‘δ4³H™=Κ•JiT·Ψ"Γ–ξφR{XέBϊ³Œ3O”dlk‡X‡Œjd₯©†p–n*ΜΝΩc™εœ₯ί²- Nn?eŠσ_>/s.%ϋc€%œE`· c‹Y"rͺϋ;Α>‰,#†JOΊ†q³ˆσγϊsγŠͺ8σΉwΔωZT‰…Vd. 
-[ [.VX8†‡fΟK»x˜X¨Qx˜XhΦΌ ΰš`&Φ#ΣκΜy…šΎυa!O,ΑΓ mš σD@½τυX *Ξs‡Fq‰…Ά€‡‰…nύΫ΅X˜½ηivͺ!\β‘X¨I[vx8){žX¨0―±Πt+D ΕΑ [<Τύ=±°ά/±pΉx؊σƒ}"‡4ˆσΥΔB]»Ν&λ½άyηXΔ¨εμιPžεμYΒœ}ιfΞ3γlφ<ΛΪug·΄;…Ήαάoίcf„3³ž"~w”ƒ+Ξy³ζΩKOδLφtkOA­ψεσρ>΅k}_Ζ<ΛϊyΏ>N}BDγΊZΌ/΄ψ¬κΩο<–ύχ~.s{ɎΫ‘Ξοfάύ<χC9«1ϊzΞs€ΩXΉxγφΌgιŸ$ΞΗ\Ϋ'eΞ}LθRœ—φnœZ8ΎίsraVqώ—>a΄ϋOξΓοu‡iΔωΟ5±«‰…!άΝͺcξUVφMό‡Έ|*Ξοεϊ+*CΈ—βό@ˆsϊΙ,s+ek]IηrΕ9„RiΆΤμ9βά́YUΘΚ5‘1$φ₯8‡ˆ:Ξ†μnΊQ| TJ3ΰšΊIFπ9Ηw{e~$΅t3G€Aόj"*Ήd?$B =τ njžό/njϋ™ύΑun³χRj@X-­‰)[‰iί²΄Τ~iΕΉžΟΕ1kŽR¨;NΘ“"Dk’UΖ eΟe'Ξ5΅ ‰0{¨;±#ƒjq^b¬o7Η«Ud΄%’†„Τ,ΊίeMŒ$£fŠBl··•Ηλσ„μJFΛ¨-'Π 9“8β»QEuœω‚»β|΅Εyb!BI<œ Ε4Y`a+Ξ vYΨ0Η ΕΓΔΒΔΓΔB0―ΖΓΕ°0G¦%š5―±0«ˆΔDΕ3ϋk,|βηϊ±Π– i±0ΕzίZ*ΆΩ’ΐB³ηιήξOρ0±PO€4‡ηi”i‰»x˜XX‹σΕ°°:Y9†…βabaŠσ,Tl··‹…ΞJ―±°ψuˆ…–Ÿ/[œoϋ‰XHrƒAœ―&"°ζ—DΖ³›a½Dqn9{ŽΛΡaš‘)=FkΖ8ΛΈθ‘η†˜Τ.ϋΚ½μΈ°z„˜ο[α^‹σ̜Ÿ[Lξ>MΰΤyRAQžΞκlyOωΉδLσΜΒ+ΜyNίοΉχ οsΛΙ«¦ηΌΏt«Os9Odμ ‘νgΉ§τμοΎlξsΊ0zΥ»¬yρ1η–·λφή3ξ¬^mωΉ₯ξu&½G χfΠ³G^±νsG–½»o–΄W™ώ•ηίύ_O]όͺ§φΖσο{§iG©αΖώβΪ~XΩχ8b47JνΚν;£ίό7‹˜'ΞφΎεΆλ–ςχo–νuqΎΒ‹³έ­8Η•:f“wύd₯?mY₯œJGE† s)Ά”3f™§R:΅›)rζ6„Hγ#”ΞĐK2έΊ²K,Ι™a›Y"G Υ3ΝΙε˜ˆ¨8ΛΡ%‰μΣ±,’§œΊŸˆ>ώ³ϋƒΛfΠΉŸεžudίz€tP‰m]ΞΉΠβ3LΗbΙ¨„ΤyΏš IHδ}eσfŸ+‹!R’Ρ–l¦8/Ρk¬•ξΞοΝμyŸXGTAF {+K¦ˆοΨ‰*œŒy2U`Β2NξWfλNfΧ ŽMA¨°Πί\ba‡b!8&*ΠΕB…y…9Z-±0{Μ δ5κ‘Qcα_|όέ½Xh_z}8˜b}Z ,L< 9"ΦXθίfζ¨I±0Μ2Η°0Εω΄X(φ`α&šρ^ ΕΓ1,D”‹‡‰…₯$±p–Μωƒq> ‰CnpAœ―ζάλͺ «εΜ9G Ϊ‡­ΨV+Ξ₯lk‡σμ±V˜ZΎnIΉσœYŽ@ΜςυKzζ{ο»b|›—ν)Ξ³'»Oœ§Ω]Šs_·'ρf/»Žνδ s3ςyBb!anφά¬ωΉO? χšύζ σl ΘΟowΟώό,ΙΧ σ4„‹²ψœ•žl ΰήέ!²ϋΚέ³χ|^ƼΜGφ<LΨ{|-Π½\Ο"Ξ/xεߍφΌζι½ρόϋέy*qΎΩcΣπ•ΟΏWmΦσUκΔy+r Ρ”‘UέΨ•ζτRV›΅$) ©3E1BΘAϋΛΣlΗ’v³d‰Θb@˜pΥ…κ0lά^k§8—Œš%)ϋΞΉ?„TaqΊg&;ϋ&Ή‘$[!…ˆBBŸωωΉl‘ΩΚί φΧΑγHP-ν[<ž―ηΧišΕηΗη˜Ωrηύš-rΦo=ο<Λά%₯iŠ4FR!₯όΝ³’Β b9)΄€οX–u–žΛn„Z–wΪΩGH›˜ΧβΡΣ>žδΧ‘mόxœ’i'SdFg&qώwΏί ͺγ¬#ο=ˆσ„‡‰…‰‡cXΘχ³ΰα,Xˆθξπ0±Π“”ž¨,XΘ±ώŒόξ ΙζŠ‡`!X'φ%&¦8O,k,΄ΟΌΖBKΠλ’sΫoj,΄Ε§ΖB½:–‚… αα,XXŸ L,Lq^d¦{{‡…£4ˆ ,΄•h"‡άhηŒσ7{ΝW)Nέζx5_JY»½ζ)ΞΝ gΛ Qg„ΧεμfΟua'²”έi΅ι[^ΞlyBr_%Φ9>ΕΉ%νηVeνŠsϋΛϋΜΫΌNΣ:^{:Χηg€+{ sΩΣ‘}RLΜ‘ ,ΫμDIšDF%ŽZκ‚xζ|_Ι©³~ωμ-‘΅œΦΩηŽw2γ—e·šΘuί"K<ιEΜΡ²ΔΉ£„B€·ε˜f4=rτΟ£±:ηvD9χ•ΠƒΙh!Θθ}XΫ© ›EœŸπ”167=γK/ΔωͺgΞ $βα XΨ ρζ{ί™(Š…–²‹‡ ω ‰‡fΕΑ='&‰…‰‡‰…Žc;-š5―±Π±i5Ϊχ]·ωxL…)¬υΩ“žx(zί>,\gΕBEy…Δ‰‡ΩZ Φc&ΕΓ1,ύ»,L±Ύ,q^a‘^¦ ά΄XXJθ»Ql ΫΗnπ0±pΉx؊σΫ4 ‰mƒ8_5,άfpσϊΞK_1l© 1™ΩeDgφS{™ύ)BuA·ŒΫ}YʞύΫߎRκΪψν’ΘμςϊBQ©HχδD-ΞuO‡φzyŠsoKρž%μŠρ,ηηxί«™φs‹Aa™ώ$q‰[mϊ6mφ\1^ΟA―ΫΗDΉΩρ˜gή rKΕΣράώς"ΠΝ€wΎ‰e‹sΕ4³ΚˈΆ:ϋ]gΐ'­VάσxιΘ¨/&xcο£Ή<‹8ίύϊg.{σa½ρ’Α Ξ7½8η_#ΠΫ³ΰΜ{¦άrΧQsΖZ₯­»Y(ΗχΔΒ,βόβF„ο}ϋσ{γΕϊGƒ8ίμβΌsh₯—–Y¦τ»aBΓ™ςBFω§Ώ,2zυ\Ή_Χ‹§ϋ°&9εΊ£€ ’’Ÿz¬Δ2ͺ«8Β ρΤMXQN@,Ι9Z­›–’ܐΨZΦ8ΟμPνμ|ήdζΗ,‘ϋ žΤ ά“wφsœ€”XhρΈφn J,giGXβξe{-ύ[($¨υΘ΅œΕ\›ψy_bΏ™'—\ΖiiεαŽ”N9«%ŸŒ-βqt:–ΰ¦8§Ηr™H8Φμ φΔφ}ΏAœ―6&ς*xΨaaσ][Ά8ΏΊΒΒτf,δφ4y Α<ƒ5Ї‰…ΞόξΓΒ…y"ΞyΌ ϋ²εΆφˆ‡‰…ŽX«±0ρΠ ±0ρp1,gΕB?[MαΔB³η“Xθeρ0±0ρP,TΨ{±ŠŠ™ρPqή‡…βXΑΓ©°~sNPΥXΘeΕyΑΒY αt»LΔBbΫo β|΅ΕyŠΪ4†3ΓΌœ…ΈΜώμ4N3kΞώ,έΦm\·—ZΑjF93Ί–]ϋζυ@Gι΅b²θ›fgŠsG¨ΥFp)²-ΕΟύs9γr[‹rΛΨ³* …ςB’:Mπ–#Μ}œΜ”_™σlπsΝ6ό|[‘Mζ:KΚ묳b·ΉάŠΰΈmΪεstYξž’υœC^;΄/Θ4‚‹“ccγ,Ε·R ‰YΔωž·>o΄οθφΖ‹zΧAœov·vϊΕΪΎ3Jά™RΫ=[ήύΓ/sO‰©HQ’iφtŒ”rΉΩg&H#Θ§c,-ΤMΧ-δ‘žp dK¦ΘqA9hϋ„Ly^Ο²xΘh₯(―GAF!…HŠΉμ¬_‘$Ÿ s ©±Ψ²ςΪ7|) ‚!5R¨g6½ξO·œ–Ώd³ύ#$ώφR ¨ζrΧ s ²(nbId43η^Nχ˜οۍY+½–‹“·ϋο\uΟ‘lΝmm%γ}V`”Ϊ Οω£ΉyλUlυqΎΪnν‰…|ŸΔC°ο1ZΩΣ "ζΕB—μO,dβ‘X˜xXc!‚Ό  Ιΐƒ‡Σb‘π ΕΓ₯bαbx(Ϊt °°.ͺ½:j,l³α ΉOšmΦX8-v%ΔΐzώyšΑ•H<œ y|~5–μ{bαL†p·ΏαD,$ΆύΦΔωŽR#œχ];›YŸv!6³\αjo5‚Τ^rΜ23žay:³§ϋΊβ|’pμν‹Ζ‘~‚8ί}ΩόΜy_Φ<…yφΪs›sΜ3ηΩ[ΟηΓI …Ή=ΰΣόνς„ΚRώ6u{CŸ8ΟRχ=Μφrτ^›5ΗU½κωξf™λŠnΉy–»O)ΞuzΟ²φyΩy3τΕM½žmΎ 87χ±,yŠςbtηwhqώƒwΎhtΕ»μ—όΟ»β|³‹σ|€ΐΠKΖ?|ώc†ΔYs`t―ήuΤtL]C:ΝXΦΧ7&’‚Q‘¦D\†xB(ΙI’œΝ α4«d)ΥθΘΏΩ[ž=•Rϋ+ΝAtη–±K@3σM8*ˆ¬‘¦pP“$Ζ ξΣ0i12ΚγCH!± ΝΊΜ9NΟέΏŸύχαX2Hφ_"*ZQζψ²…œjΰGΦ¨svΡIϋK†ΙL.E˜;ͺa[θ Ά”³#©efο’„τΔGν―α»Οσ~X'ΜΫμϋ{bο|Vq~Ψ]»mulνƒηΝzKίoβΛ±ο:M|ΌŒΊ`ϋ‹! 1Β 
uζ%.Wœ_ωΎ1,$ΪߌxX°ίHβab‘x˜Xn‰‡‰…fΟΉ­ΖΒ³Œ]LL,Τ©}Z,TxΧXhkO…‰‡Ή΅&rŸ•ΐCΛΪύœΕΒΔΓϊ•x8†…TD&ΜKιΉύλΞ0ŸδΞ>)+—^Χyͺ-Κχ%OψΜ$Ώ9btΕqGυΖK~ΟAœoq>φBκxJ{)fŽŠ0oƒΦ”ίαΜ~Ιsd―ϊ@›!’”]v΅ηMΘNΞθ…8B,ΉΝŒ·ADΞτAJ"!–HΧsΙθφͺ”S2jo₯d”LQŠσΊ„3ϋΛ!›ui:Υ²Noƒ|PG¬M"₯}ΛΗ‚²enπ¬YσξρίΥLι‚Οηξάδ$‘Ξό₯\³ΛBDίΛw‚1RQΞΫΡ"ή[gbΘhKϊ~*Ξ zΥo™ύΑέμσi3η!ρΨdMyl2MφjΗv³η3—΅~·χ³Žν―“iΔω›8€"£/oβΩες³›8jηΛΘ’“e Ήœx(ς^ rrŒx˜X˜x˜X扇‰…‰‡5&ΦXX sΪη5Zͺ^c‘' -gO,$ΐ±IXΈΦXΈUDmYo#’ΕΒΔΓΔBΫ~ΐΓ1,dLžxXΨ s*ŒfΑΒΔΓΔΒ<τ;x8 Ά‚ή‰… g*kΏγoNΔBbΫo4β|SγαZaαΎͺ\s±KΒpMў₯ΟΔBενφkηάςzΈq£v·Δ:Λ·3cl~-Μ;!εhΈ:sέ2~ί{Ξολ97ϊΔ9%νˆrΔ9aΏ9ϋ9>{νύ ²Ο|"Վe™ύ_κΘ»ΎμΉενY&Ώ'2δcξμ~†σkήφ`λb^2άmι9bό{ίΨ?|ΧϋǞ•ŒχRV+Ό³Όκ#οJΟ³'<ǞMq" υΦρ¬¨πύZ]‘•³ˆσK{ωθΚ_ΩG<ςήƒ8ίjβΌθ„ ¬3X!©ΣŠσΆδΓm₯%}HŒ%dRΉ …e›A’3Λ!€ft!”ΞΪ%3δX¦Ys³Niϊ–3|λL‘βœϋCF“πζ<_³F–oΪ[ … ZŽ™fFvI'’ΏΔ£NyΧθ―>ύO1%$œ}‹Ηψr2LθJ,ϋ\59‚œΪχo₯½”λpŽ"ό‘ *ΖFν|{ά©Λ¬σφΆ%–²·©(‚r9ǝ±ε;[ʎΗΔΉHSdŠΊηr[GΤ>βaΞh²E”rΞjχΒ{ŽΟaΨώΖ‡MUΦή¬VdτλM\Ώ\Ύ>Χq>#&‚{βabαβ<±Π\ό¦Ϊ\Uζάg±Ps7°*±ύβab‘ύβ9Ώ\,ά^ό7j,k,t¬d…β!YρΔB³βšΌιΌξmb‘Έ‚‡}XΨ‡‡bj4&&&vcΣX8†‡`!"]‡vπp,Ω&Ϊ{ήΰamdΈT뜯}a<_^ηφZ”ο‰,z-Μ{ΛΪ{ΚΫSΈsΌiFφβ<Ί¦w–΄c‡GœšΑιΤα݈ϊδΔΌ‚¬«;©P#[,Œ“ι`―(ζX΄ͺμ»Λμ‡k{;‹όΌ³ZqξΘ3³ήΛηQ’>―όΌŠ~)zD~Ι’g5FνΟΰ ‘™Δω {tε_ΫGόεύqΎΕyϋΟί³ο–tω”²…„’`nοB€ΒrΝΗΪR?Νέ,3λΚe͏4@"CΉΤmΨ²BH&DB a$«Γ1W²Y‚­ 7{TχXJLλLQ:΅›-Oqξ8 Λ9λ>IϋΜλLω#?zlQD:ΔςvEψ$qnv ’Λs―$‘β μθ&ώvΞρνH(1ΝwΙ™Ρl—1Γ·}ŒbJΤnωώeΖHΑn–“3€eeM5>ʌy)gοœ‰g(γμΔω‹ξΥε:ΆΏωΟ8θT?β±SΡΛͺΫ/Δω ΰ‘XˆΠ{°P<œ$ΞΕBEŸcΑ;ρP,ΤόœK,³ΔΓΔB3θV$%*Ξϋ°ΠΩζuΦ<k ³Š(±Πφ Պp±π/>ώξNœΧXΨ‡‡>Ξjc!b\<œ‡…όm—‚…|–3Σ<±JŽΔC±ͺρpV,ϋ4ƒK,,') g*kΏσˆ…ΔΆv=|μVΖΓυ€…Θ€ώγzB"«λ:eά9:Ν~σo—lΉgί{žΠθ΅ άήJ gιϊˆσ‰…f+ΉXΨ ςΐΓΕ°1Φiψ–Xˆ ςβab‘YmρP,τ„$xΨ‡…ŠσΊΌ<¬±°―ηάήnπ°ΖBρ°ΖBO0‚…` X(N‹…[ Ηώχ&rY<μΑBρp¦1€§ΑΒεβa+Ξ·'b!±νΖΧ[nζ|η`ν»βΚysΓΝ*k=I »¬₯»‘@΅ΟΌv!Ϟg3ζ“SŽόΚQju6}lFwτ+Π}.ΛΫyV€@Wx#Μηφ—λL― œχΥξΫ—,^Κή ΤΜ’―χείΓ“)]΅@mΠΖεόξ»lμσšεωyήiϊωgη|ύθͺΎΉ7Žψ«β|η ¬ΜIFν3¦d―οLΩOOξ²AγX2-ω$,!̞?J9Ι A"!’–`J$%™PΕyfŒrΖyŽ"Cd¦Ιΰz=JΝήJ…Ή£{ €9(gυRv™ΒœxψΙΗ΅€2š%œ–»ηΎ-A< Ρ\Pœ[R¬8―͏zΔω¬«%£Η<’&ΏΌο˜“rΖφ·?r(kί(xhΆ\L¬±°)… βfΥΕΒΔΓΔBpJ<¬±P<œ ug―±P·φτΰ 5z›„…fΡΑB„7Wc‘βΌΖBΦVΒΓNtWx8&Ξ Ή,φ—\I<œ gηγ·'b!±νΖΏΌ\q>”΅―βR°›IOq>Ilš…¦L|WdwUΒάΩΪ»/ι}βΌθ΅XO‘nι{—…¦τ:ϊ•-ΣΆ·Ψ“fΠ«†°Ξ Ίγαη tϊΟs\χKΈ4Αc96mΟLχ6ΌΧlν‡{ηc%ύ•3m«gΦΧ6M?,βός½aτ£Ώ₯7Ž|ΜCq>ˆσ β %SD?±•brTŸΏφγmfˆ yŠt2ι’ΤW#iEœCFΙA4!‹υ843?M α&Q‰*d፧|“ K$%[DΉ¨ενu―9Ω!ϋ)s^―=—dŠ $τ‘8~LœΫwΎUΕy’ΡΞpΛLΉž”€ζνfˆ²Η’ν “ΡiΧLβό¨tY­:Άύ—Λη―¨ ^>ˆσUΐBϊŒ ΑΓ ΕΓΔΒΔΓΔBρͺΖBCΌK,ά9 Α;π°ΖBρp*Μ³·Ό—–Yσ ηΔVηcXH$"ΒΕΓΔBφ'Š…@œHOFdu€βό’-’-ηuV<{Ι{Dω€‘xό}7Š8ίϋ‰wŒ~ό™wυΖ‘³©Δy³ξ^*‡ΞρeuϋΏiβΥεφLΊ(ϋ£‰O5ρΥ&Ξnβoγ>/hβ‚&ΎTβžƒ8_OD‚2v²E ωμΜqΕΑ»8±C%¦:„&;τUBJι·4£is„Ήα¬_ϋΑ)Α΄oœ’Ν₯ŒΣγ2c$1•¨BjΙA 5Jwaˆ(AV=3FάζΨ ϋ+-εΤ)Λ8ν―4S”†p™-ZΛ%Y_σοDTΣ7Θ¨J”q²?ζK·‘•Ϋ$€«˜1šIœύƒΗϊC3vΌϋ1SeΞ7{¬w<찐,yΑΓyXXα!Ηθ³Ρ‡…βab!Β<ρ01N,Dœs{φšgY{…fΞk,L<°° &&ΦXxΈ!Δωήd"Ϋ~gqq>`αΪ―==s±μfΑsnΉ™βœ[žBžνωU½ηfλΣ°­Ύ½.uΟΩά{£½Ξίmζœλ1―›m[ζ^z}OΊΟ“―gž#ʝ[Ξ{φ}ζɊ4½[YςυΤΛ>ζD_χ—‡@3τ»rΎ―ΐj τ™Δω§!~κρ½q䱨8oΦΟ6q.ΨΪΔΟ7±½‰›VΗά³‰“‹HΏm_Ν΅(ΤCίπΎEœ?}Ȝo€ŒQ7ηJY'1š›Ω+ ­ΕΉδ“€”κ^lί9 %;Dδ"3Q$›cŸ₯}—f„$€i‚$Q…ΐB0͚kζ¦Σ0D’ύdˆxΗͺA`ν«ΤπHκŒsMα ₯ΎADδ}ονJ8%¦’}‘ΕνYβιΚYΐ›‰ŽSΘ¨Σ$Υ”Λ}9λWBwΪ‘λ_œΏς!cŒΗ=vη¨’¨5ΘΗ°pΤαaN?HqNˆ…šb: έ@œχα!‚]’ωβΌήW_ίνD>w>Ώ'φT½θ—T½κŠσΦωΫ^sΔ8[³ζΝu„yWζ@ΏbξdΒω—ŒgΟ₯F?9’<Λσ³oέχεϋρ},(VKYw½ΪϋΤλΝhNa>oT[fΝ³ύΚ+獁[M>‹8ίχ™γF?ωό‰½ρ'=rq~»&>Χ%ͺcήΠΔΓF=ώΥqοoβƒ8ί(βœω$;dΏeEF  ₯tSwβœ₯βœ}šΩwIω¦’\Tˆ¨½‘φERšiΉ{–Ή§0gSB‰·LSχaφ“M'CΞ~n—ZΆio₯ε›\†Œ,ށ8*Μ-ᄐ¦K±δ΄/s4 !5Γ΅™V7*Θ~Λ:3”ΩσμΉδ2% έβόU:gnWŎχ>~ηνd₯x8†…γβ\,4S^c!ΧݟX8-Š…φŒƒ‡5Ї5‚}}XθΗ Ή=ϋΜ³΅G-ΩT¬Ή ΄Ί΅kl$ …JR“.TΪΙmάGγΈηΣRή€{CR2γfΗ mχK< ͺ#„$sEœΏϊaγfN;N|Β–η+…‡«Š…dЇ±ΐBρpŒ`π7Cζ|F<\U,€η<ρp ,k,L<¬±p!<¬±P<μΓB{Υ ΕΓΔBηšƒ!5j„Ω‡…fΝk, Ι ƒ‡}X(φ™Δυa‘xXc‘­H5Їλ §ΑΓE±°ΒΓYΦRfœΟ$Ξοv³‰XHl»ΩΦη+Ι WSœkhfΙφBΛής +wχμS―MζκΙFΤ"ΔrzΎΥύΘ­ \”J§θλΜΘο–ΌWcΦφE―»ύγtO@τeΎ5)ΛΏI'¨Λ,mg«ΫŸβΌ݊σ13»&ςΈ«/ψΪώψξWFΧ|λK 
OqnxμZ¬μο•SτgΆ|_.t‚c±΅TηχYΔωg|xtΝΞOτΖ˞ϊ˜^Φή¬Λύ›xκhΚΙƒ8_OλκμЉ) ©%œυΚωηΈγHLLZVH)™ „7b‚•T*Ύ5x³$“Ϋέrd”ΫΌςFΦΘ‘AφVΦγΜΨΤYσΕ€3K9!¦Χr*vόš‚Ώξ΅δ΅X^ο‰^›¦MΌήο™X³lΠ"β|-Χͺ‰σΧ=|ΌD?bΗIOΔωFΔC„ω°P<μ[‰…`έbx˜XHοΉxXc!XΧ‡…^±}ŠΩΔB[|ϊ°pΉxXcαBxΨ‡…‰‡‰…βab‘'%ΔΓυ€…λ WMœίύw'b!±νwmη ΝT/”εΕΉύδ»/λ΅Ά»2K[Θ0ΝRυZ`w†bŠν,Uqiή§νiDζυRZž™ϊμ…gŸ"|)Λ¬Ό‚›ηPXςœοwΡφψ  ‰±OίϋF+Μ[qŽ oβ§η~±‹kΞ;«νWŸε6Φj-&ΞΧj­ͺ8?λ££kΞώToΌμi=8Ή&v5q£0„»YuΜ½*CΈΣFs.ξοhβτ<ξυγςSšxχ ΞΧ+…ˆβNάDλTΌΚK£$L| €Ρz―DL±Ν6K8λqAμ3£Δ±9Ϋ\’§@Χ/S΄”EφB:Ν‘ΎήΛz₯Sr,η'Φr΅Nμ»ί°uΕωΉΏ§΄'vœό”Aœo`<\k,T +ΞΑ=²Ζ g…ib‘55²<¬±P#Ν•ΖBϊσDεBx8-Ї‰…Ύ^ρpΐΒ5ηχψﱐΔωΖη霞₯ν«΅ζ sEyφΫWž½εD·­Xχ~₯Ÿ;Ηx!œ“9šΛ–^ΟΊZσΉxΞ…}τE•ŒψDqή„~­Φzζ«-Ξ―άωΙΡOΏώΉή8κO˜v”Ϊ=‹Σ:퇕}#B„CΉ}§ύζ”Ί71*}εc#ӚυΞr,·} Ο@nηλβ?7’9a ρj/2K”{βNL©¦½’QKΠ!c\—Œ2ξ‰…”šQ‚ŒB^!n9ΟWσ€:Vb9Vh₯–}σ”z*Τ!£Ό―΅,γάLk&qώ¦Ώœ›g\Ŏ=}ηΧRΆ.ΦXhΉw…\k,TœχaaG, ύ,ΐCήο°Φ[q~Ο›OΔBbΫυAœoPq^χ―ͺ ͺ„y{ώρ[>Οd¬ŒRkΩνα ΰΖ2ι•!Y[ͺγΌVb-&Έ—•€(ΒΌιˆς"Φε?Ήπœ6†΅fάπΤ+Οώμθ§η|‘7ŽzΦ“¦η›=ž†“Γ#G­φ’τ“²O2FK%„‹ΜfA0K;ομ‡dZΒΞνφg*ΞΉMBjΆh£,KN!€Q> φY²:¬5ηo~ΤώQq=±γγΟΔωΖΓ΅ΖBΚίqcϋ°ΠώςΔB{²k,D°sln4<΄/έ“–μ£:€ΦZŠσ[LΔBbη )σΆ{-VΫO^2βc³±›ΛfhνύnΛΨ‹ΩΫX\΄«- Wœ™­₯cψ dΙWνT#ςΪΎσRΚnφ|­³ζ7,βό+2VՐqΤ³ž<ˆσAœ/αLά΅λ2F«ώΰͺ··ύ–”q’!ΣcΩ"džJ Ž –fQ,kΤάͺ0·W“}Rb#-I7ο‘Ξϋpέ°ΦXœΏε1ϋgχĎSΔωηk…sρ°ΖBM/ ιΩΆG»ΖBρp£c!+±Πq™q~―[NΔBbΫΝcη”:Λ{݈σbΈ6OœΫ‡†j]½θν~'fkOSnΎ.9;™ςσΏ<—9§ί| ΰ,Œžσ―}‘k3¨γeΟώ»Aœβ|ik-ιξ+o[Κ‰3{ΏKy%„ͺAœε™P…σvΘ¦}ι6φ™1Κ2N½Ρfκς†¬ωzη]ϋ½θŸv 7 Ξ&Ξo= ‰Aœo|,Tœ_ΈJύη– “ωF3Ϊν}WŒ‹σ(Oο’pMγΊyι9B­ηΕDAŽ€jΗ‘™A/1¬~qώ™sχ΄—ιΔ7vοέ²X¨8ίχ­³»‘vuΌτyΟΔωβόΪ2ύ“%>ρΙ­.Ξ|Ν‡8ύώUoi ©™’μ·SΑ9e?·C>!’τefY{Ξυ5sn¦(Ε9!%sNp}Xσ"²ζΊH]λΓήγΆ΄8ί1O.ϋ§ήΨyκQƒ8πpI+±0+€j,tE…ˆsρ0±Pa? ηdΜΕΒ'cαe?~W‡‡› gηχ»ΝD,$ΆέςFƒ8ίXˆ0'΄A\WrŽ8oΔ5ύηfΞχqžσΙνI―·]ΖάRχb7&ΞερF8ιvή τ²o˜ήΏζˆςSΎωύ6ζƒ8oΔωwΎ6Χ"QΕKφ Ξ§ηOiβsM|¨‰G4ροήΏ ’Ɓ琜ώήΫƌŽ ”f~Θ¦£ΤΘ q<ωΗi η}|,Νε²€έ!%œ;¬~BJφόSλ"M@P[2J@ΎΆͺ8§§F—Ώ»7vώλ+Φ\œ7λgΑΈhq.&†ΥX¨ω[…sq0±Πž ‚ΰ!ή› gηΏ7 ‰m·όΝ5ηk‡› wqΈ΅.Ξ/έέ mΚΣɚσœ™=ˌηx΄’uWˆ{y,JŸΊ’)Wkή5Œ%›Jœϊœ‹ΫνΧv_>ΪuρήΡ·φμk·[ZœŸΉ‰*^zψ‘^œ―–NϋD7jβ9M|‘‰γšΈεV`œŠ΄@‡ˆ~οŠl$Sσ"ˆ$םyΙόΨωoo³Cd•fs,‘d4{,}¬4=Κ2Mˆ¨&HΔ°&/ι%?zηθΒ+ή8Ίψͺ·ν'€?ύΔ\fd«Šσw=s4Ϊw|oμόΒ+ΧEζΌYŸΔωΚβαZ‰…‰a5‚yΰa…΄χˆ‡‰…|[ƒ&aaβα°ΖBπO<άLX8“8Ώm'b!±νΰ΅ηk‡› Θƞ½F §¨F˜#Ζy.N °­ϋΟ3cή^ξδŒT£·œm‚)W;ϊ;;Z>¬Ιλ_ΏuI[N ΤΏωύ½m Μ·Ί8ίϋ½]σ#_πάM‘9ŸK—ςD7kβΕM|«‰?Ωκ\1χχΌ½ohϋ+!˜J]Φ)Α$3!…dr›Βόσ»ί’Q2EφWf'™’4‚#S€ρΡB™’PΚωΰή»€ΜŸ'.fYdŒΘΩg AmŠΝmUq~μ‘£Ρ•ο덝_|ΥzηG4ρΪ&ξ4«ŸΖVΗΓ‰…¬ ΕΓ Ή φΥXˆ`O38±Η ¦ΕB.―w< μ€χL‡:άϊ‡`=ηβ!=θ-š%ήͺβό·Ÿˆ…ΔΆƒk½ˆσ5ΓΓΝ„…dƒLφŠc-£ΟŠγΊΒœ“–Œ=Οi(Πρ]‰{UΖήFι)ΧΰΝλuΩ:✌91eŽΐZ·<ύτ“FΧ|υ3£Ÿ~σ_ΫXτψο~eΞπ:1±”EΆNœυέKG_ώήεmIϋ Ξq~ΡwΊ)uωΒΓ7‹8Ÿ Kύ§σγ›xpnΰωKBΊ’€”Μωφ†2Σ—¬’œμαx Θ&ρ$ΨGpΩΛ$€φ››5wvo’QΦγ?{Lf‹6š8‡d;›˜>RI7[k%υs$fΙ!Ξ―ΉφcsΔkλπαο;ξ9£ΡU荝§Ώf½ˆσOυΔ'Gƒ8_,\I,k,tz_9{nDq.ZΆ/&¦8Ÿ 7Ξ,Ξ'`!±νΊ^ΔωšααfΔΒΘ+9›Œv+Έ·}\3υ»{Δ9τ1qn™»nξ–°+Ζ)c/[΄ΗΔω3:aΎΕy_άώ(3ΠΫχΐ(―²Mqޞ¨¨f½O³(g'cc;Nνη6’όά‹‡žσ½»ΏΫίJΡΔ‘/zώfη3aι4†p_jβ…M<­‰§f πӏΜ#₯}ΔTχZηKdφώδΈ6ra~DφœΡAd€ ’φK@έz9 ͺδt’8—¬MZ’R3Gλuέχ]ο=τǏψžF?ωΈφ5O#Ξωψ,©,Hqξ>3qdλDί¬_ώn:·sΚ87AΦ|fq~όσζFΛU±σΜΧM%Ξ›uχ&ΎήΔ9M<{£φVΐΓεb‘xΨ‡…¬ΔBMάΐΓγ q³δ‰…3Iœo,ό“χ½·ΕB.‹‡‹‰s±°ΖC…Ίx2cž˜„…N° 6Ξ$ΞxΗ‰XHL#Ξ,άxΛrάŠδ+ζΖ±―Ι–—=Β°›[ވkϋΝ-k'.)₯νϋΚsŽΐΕΨ΄N˜—ςυ΁½§Οοuτϋ;qN¦ˆλχ?φΔ6s„³2Δ”Μ‘ŽΞ|n|rΘ)D•Ο‚Ϋœ—L@HέςςωBFϋ)3ώmωζ0η|NœΏχωRϋHoμ<ληΕTγάRΑσσMloβ¦+tVσαeϋΤΎιΚαa…βαbβ<±Pq.’Ek,Dœ[Κ^c!Qc!Ώ}ρp#/pPq‚‰βab‘‚œχXHU€xhΎ(ΦXˆ@…όνΖπp­e‹σέq"ΫωνΡZaαzΑΓ͎…f¬»ΎοjΆψRΔyWš^Ϋ;wυζ1]ή>ŸοH4ŽU•φyYσFœχeΜ7ϊΡ§ŽύψΤγ[qNdφ~υ™'·ΫΎhE;ώœ/ΜυΧ_π΅Ή,zσY)Π ϋϋknΦά@ 7lΔωžέsSͺ8ςΕ/ZTœOƒ‡ΝΊg'‘~[*ΐ»o³^XgΫΔQk…₯³€ω­^˜ζˆ!ΕΉ€“q3χ”Fχ-Θ„Σ½™Eg?€)KΩΝmvqN@H ˆ(Αef“9‚¨R‚ͺΡD’ΚάbH+.Ν”Ι¦Λ3Ÿ•.Ο v²E5ΝlαθΪOβ<Εω /ܟ9뉝Ϋί48Ώ]λ‡+DFΊl{O>β|³Ї‰…Fφα‘σΞΑ½ 5ǜ ωνoqή‡…Db‘γΰj,δ>βab!=ωβa…}xΘΙ–1<Δy#Ξο< ‰mΫώΫh­°p½ΰαVΐΒ±Μu%ΞsΖψ˜A(,mσ3„wfΤσqΗn«2•Ωoލ΄Ϊΰ#η?ώά±£ŸœφώNŒ·‚[q^2κ­(GΈ›MWœ›Y,zνκNšψwμη tKΪΟΎπς6nψ3§^φƒKΊ“Tuρ’—L#ΞΕΓf½‘‰‡Εu2εΧ_θΎS.sμΧΧ 
K—ϊ€7mβE%εϊΐ“δΔ™―NΆ8yΣGny&σ’+ίΤ’Ong‹‘[ϊυϊχ—šA‡8Υ₯νΡ…Δωbeνλ}έγύα›>Τ†"ύNπ‘6Έ Β ₯ΔS²z··}°=žΫΉa… BHΙ ρ™r9[4Žβ3Δ-šw[eΝ$ΞO|ρ~‚ή;wό_:•Ǐxlυx\Ό93Ξρ΅£‘”sCc‘x˜X˜xXcαbxXc‘Υ.Σb‘™σŒ…‰‡b!b]<¬±P<ΌΛ>άa‘eπΰ!XθxΉΔΓ <œRœ?ψΞ±@œƒ“πpΐΒΝ³κΩγσ²‡)¦{Δ9·χ-³gΌμUδ σην}θ]œŸςφя>ώ–V€·εμ qΚΫO?©έg™{'Μ‹ Ϋ‡˜§Η^zλ©(θ2θΞ…/;ώN!Ξ/½tξwPΕGΑAoœ•6λ€&ξΧOiβV έ·Y—Uqιh=–΅—wƒ’ή'υF{šΈαΐ‹/2DRζσB` žPKOϋώ~7aζΑr Ќzqχƒεœ^³FΩ[IdζHB ιΒ©½΄Ρ–Β‚ ₯”"z»σ±Ιmw}ΛIνmΏχʏΆ½τ£[Ώβν}!€VJ:ω,(λ$ψ\ΙιŒYε3γο8ΡιΔω‰'Ύ¨-mν‹;ήθ ƒF[iQβž™q2=RfΐrYB 9e_fŠ$§.Δ=ΩwM‘$₯'J !Ÿš!)Τ9Fb ±‚’ωΨθ%νIF!’”m"°Νefθχ_ςθ―ώh+Μ!’-!=be*ΔΤΗ ¬Bͺ΄&Iμγ6ϋ^^\œŸpβ G?½φγ½±}ΗΧ΄¬=σ=eD$=HήΔǚxΥ=#ΊEρ,€΅Gγ\ου΄Θ˜·₯νM΄s2η”³Ϋkξh΅žμxwaβάr­«ϋ_›+oχδH©Bθsβ°p\œοΎδ1#½ŒM'ΞΕΓfέ«2„;m±ϋ6λ•!άΛΧ K§y’ΤΔ£Š­όy€ω›ΈΝΐΛαkNj‰ιKΆP‚ςφΟο~K»΅Σ¬Α~GΨp;—δφRζΨ4©Ε„4³E›"¦!φPZͺIf‚ ρΌύαi‰'[C@Ή~—gœΤFKH’Κ– ›ΔγΠ‡I "JIƒΉν%«7ΡΕΕωρ'ή;R‹8sϋλ§₯†γζ7ΚΩΗΓVŒžYΆφgšψέ&ώΛ¬' <œn%‚m‰‡œ° κβab!'(ΕΓ Α>ΛΧ· Ї`Vb‘xXc‘x˜XΨ τ‚‡b!"] ‰ΕΔωΔΒυ‚‡[ '΅u½αu?z)WoΕω‘=&Τ53C g©|υ]©φϘwŸη©Ηw}η˜ΒΩgή s³ζ!ΜΫmqg§ΗΌη^/Ζp:Ω;fΝv‚Ά΄½˜š XΈ°8ΏhΟΪρ}ρΒΏdΪQjσπ°Y#Fs£Τώ‘άΎ“~σΕ°΄YΧ-εοί,Ϋλ¬–.υIΉ‰'—rχσ^Ϊ²„ςNϊ/!žJˆ(€I‰ ™ ˆ©σ΅%ˆuncχa?Η@<-Yw¬„TΗb3η”$’1ί,„”LΔ“,"š^ΙΆς¨S:βωϋΟϊΠθžφΑ–|š!bΛ~"I+ϋ!₯mι{σSΛ­Δ0η|•±²uO‰…”Ή‹‡5Φx(‚w‰‡b!βάκ’ 7 &rY<Σΐ@°°ΖCρ/ρ° ‰ΔBΗ«%nf,œEœ?ΰA·Ÿˆ…Δ:ηkއŽΖϊΟ»yθYςž#ΨBΠw₯ναΆήŠmDwήc₯ξλϊψΡύςΈ6Έϋ8Žϋ@p!Ίd₯μEη³·ΜvΰωβόΨχ>»5λ‹ΣΞϊ?k-Ξ/δ5N˜Eyψ ΞΧn%&ΦX˜x˜Xˆk,ΕBέ7²8·Œ]&ΚζΝΦςν )Θɐ7"Όγτœ—’vz;λΌτœ[ήb½unΧ$.Ezη|6mUƒσγΛ ’V Χ=ώ₯aί Ξ'ŠσoνΎdtαe?μηm|qΎ"XΊΨ“\LŠΎ‰g”Τόοg ΌrΛ3θΉ.ρ±)Y$Κ7ν7'Θ¬[ς !5cDΥpΌΖ=ω@J!¨f³‘–3zηl1;"Όmΰ^ϊ‰–H*ΐ!ž–CH-ο„ˆšQ‚„BJΩΊŸϋq<₯žRKG1Hβdˆ"a³ŽšEœΏλψgΞZΖΞ|εΊ(k_λπp:,d%ς›K<¬±¨±P'χ>,άhx˜X¨8½lίyλΒXΆ‰‡‰…ήΖ6±ltb!-EβαVΐΒYΔωύxΫ‰XH|Θo΅8?sΐΒu*ΠK†|LˆF?zgπύ㝠/eλέx΄Έ½+yW€Ÿε„qΊ‘„9’<Δ9σΝ۞σfۊσˆV gd?:cΤ’½shGœΗXΊœίU+hΈW²κ퉔M‹8?η{{Fη_²―7{Α‹7Ί8_,]ΚώBQ²ι3πJ¬O޽φ”6[dŒ,αΤ½8CBšŽΔRΘ's|! (ϋF#£”iBFΖdΕu ¦„aŽXζ:€Βͺ8—ˆΆσ|‹Ή‘σΞ!¦φ[fί₯½ζΊ»S"οΦΡmœπΰ3vτ1πΈ8?ζ=OλJ’λ8υΜ—―΅8ΏΞhηΑΒΊ€=ΓΡ”5ΪφSc‘'/7 &‚Id±ΕΓΔB„:xˆ@WœwFoeΆΉfo}X˜b<§]ˆ…ΊΉs2`+`α,βό~ό½‰XHάς­΅8ΏΞ€…λg₯ά’Β4η”Σk^²ΰφχŠs„8εΪa·QΕy'Ό‹ΨF`+Θɜ·B½l͞§`ο’Ή>–Y·όέRχ>SΈ’1;ιαόσžŠ‡ξη_Ώ`Οθ[{φυΖ‘Οίπβ|E°tZQώΐ2Pύ‹M<―‰_xuΕ9 Bš"J:έBF1DΪ^.SΦ a’Œ !₯΄3Kέ‰$ΞΙ9›I@F!¦RŽ#“ΣΞ.v2<τ§JΆKn3“ξ!gK^Χ¦ ‡γTLi,ζR|ώφΒ\‹σ§Ž.ύρ?υΖΏœyԚŠσυ.ɞ‹‡5 ΦX˜βΌ 7 ΦXž‰‡5Їb‘—βab‘'!α‰…β!XΘΆΕΧW΄ΓΓ­€…³‰σΫLΔBb­Εω€…XœλΠ@D@1­›ήggΉ;Χ)m/fhMœ“GH#ͺκWό†ύρ‘Χνθφ€—2xΗ­΅QnοϊΣλΉθΕέ½5‡γσ΅_ίφ€ψά9Y²ΩΫgη_ύξΕ£]ονgopqΎb"‘‘rφ3J9ϋοΌΆβ܌™s„Έ£Θ9‚Dp;„ *!΅―2Š~ϋΖ €ΩSήfy^rgΠφΨΟ3zϊΏΣ’R²8”xΆ#„ ΉδXέu4†TΪ“©πfΫφgΖ|`=—ΩΗcσ|T#u£Ηu(kοηο|Οί΅"ͺ/>wΖ‘ƒ8πpEπ°ΖBZ|ΐΓ ν=―±p£TK,lǞ¬ 5†³ΔBŽχΐB']ˆ…bœΥE‰…‰‡§'ΟYc!›ηΔω}xλ‰XHάbηΦβ| ™Χ4λF§εœσ"ΗϊΡ#ϋ›’Ύ3BΫ Ζp ρV 7ΒΌλ#oΆ oΔωU}σ\_zˆqφ΅·!ΰ›ΛY_Ι΅ύθi—Žξαμnζ\q~εUW XX‰σ/Ÿρθ›ίίΫΟ<|ηӈσk›ΨWboD{}ΰΧ΅ŸάS.KΫαPz&!›”lš)‚ˆBL Hͺ#ˆ©½ζr²Dl Κ: \w‘³ή–ΖGB36MΘ!$”ΰ΅“ΑρXD8„“-ϋνl!«fΫΉMb ±MͺWΤ”‡r<χ?τ Η΄džΟ–μfU–)ό3‡ΏγΈΏν2iu|φτ#q>ΰα²ρί˜x˜XΘο‘λVΥXθŒσ ΕΓ Χ#*ΜΕBg™‹‡Όv³εοšΓ‰…μk,7k,ΕAC<μΓBρpη8ΐ­'b!q‹Cn8ˆσ »΅”¬+eνcβά²kϋ 5…«ΛάΝ’›-wlXf΄‚΄8›―·•ύγm9{Ɍw"ήnψ•|ν~χ"ΠζμοzɍΉΏηœtE:Ÿ‰ΩtΕzΉΖ \έζΞνΞρΈa#ΞΏsρθ»χφΖ Ξgœs>πκ‹sL ?ˆsz&!˜Ξπ΅Ώκ<`2μ3kDvυ’3ιz,Χ»8‡8¦ΰnΗ5"R „„ΌnΔ9ǐΝ!#D†‰ςNœΥŸrκ1νφ™ŸίŸYr°ζI–„r°$Τ,™Ξπ<§#ΥψΜι³LqŽοVηo?ξoΊρJuόσι/Δω€‡3‰sρpZ,δ:ϋΕB9XΘοX<\οβΌ Ϋ²υΐC±k, ΣL -§ τά/±ΏIβαfΑΒYΔω}p«‰XHά␠β|ΐΒe‹σ,mwdZgTfi{]^ϊΞ•­ΐD\*ΞKυzηΌ ΰΪΜyι1o…:βΌ”¦›Ώςύ―Ϊ/ΔΰdΣ›m'̍r¬YτΦυ]‘N6½œιΊΊ+ΞKΏ;z­ŒΏKq~Ε•WmY,TœoφχG_½θςήxϊσ^4ˆσAœoΜEΆHWβS/zkGDΉlίωφ{©8?aΧ;;s8ˆ"]"Κb?Du=’QΕΆ™Κ5!†E3η‡Ÿ~L[βŽψ&« ©„Œςώθ+εύ“ΩIc(3fž¬`¬Ι·δίΟ…Ηδyx~ +€”ΧΒ~ξ0@0ΤcrΆͺ8Ϋ±OκΚ\λψτ_8ˆσWdΥX(ΦXΘun«±ίΉxXcαzΔΓ Κβ‘XF‰‡‰…ΰ›x(β‘φ±ε3‘Τ_,LΘ'%›Rϋ(-±ddBMˆχ$sΔ– Α’”r²‚ώT‚ΛμG΄η >Oˆ+χ…Πj¬—ύœfυ%¦bσΒ+ήΨf$€}3›·‚8λ±Oh?‹ΎψΤiΟΔω€‡ ΰό~k,d ΦX¨8ί(x("Œ-]ΑBpœBd‹‡5Ї‰…ΰŸx(ςy± 9qAuξda7‚M1―`W”§x/o&½·VJΪθΞDοζ’po[βnύ’[QœŸ~ήξюο]ΦOyξ ΞqΎ Ψ)#D\BJ·"ͺS1$‹²Oϊ1!fdC6’;±Y ˆ,―„όαœ}η,H(€Π2K’oA4νA%  |Vˆw>/g(σ™r,ϋ 
­]ί,:―ΝRS2Iœ$€όCf ’эJHgηo9φq­aW_|ς΄η β|ΐΓ‚…Jπ°ΖB„;Ώρ Ιo<΄ §ΖBφ‰‡‰…Ό?ρp!,Ϋΐ@Ίx˜XΘI]ρ Ι‹‡5Ї‰…Μσފx¨8Ÿ„…ΔΝ>hηθrΜW+Ξ'ΜAοΆΝΎζλ]œ·ΩκF;ξΜˊλΦΞςσ&4Œko+θύάJ‰ό<σ8Λα-{/ΩχV Sβ@Χ±½dΞ zΠ™{~ιΎqqΎχŠ+· *ΞΏΈkχhϋ—υΖ ΞqΎiά!V™-bk™§`ΘYH„”Xο ς Ιƒμa@„qd”ήJM‰(£$$€Ύ?³< -H<Ÿd”-€B ™„μγŒΩη6‚Ο‘ΗΆΌ“ =Δ،―‹ΧΒΙž›19™Ξ"ΞίτΗ·'9ϊβc_8|η,ϋΐΓ ωύ‚‡5’1ίxh_x…cρ0±“βα4X˜x˜Xώ‰‡~ŽNJ‡‰…žP΅¬^,€r!ρp+‰σ{ސ‰XHόχAœXΈβœLyι=οΔΉ=θ1σΌ3“3›Ύ‘ΔΉσΘSœ[vŽ8oφ–ΐ+ζ‰…VšΝub½φΦdN9zσ|@?ο¬ύ}ηœΙϊχΆύηΔވΛ~xε–ΐBΕω©ίΌhtϊw.ν'?η…3‰sζŒ7ρρ&ΎYΆΏ8αΈ»7ρυ&ΞiβΩ±M|­‰MœΨΔ.ϋoΨΔUM|©Δ?β|X‹.²B'έ‰!Tš AT!Zd†$jΆυΌμ£€Tς ρC€λ ρ£―ˆ±‘1Λ"cα$3Δg!u<ΑeŽα³t<™r¨ύ—ly­”|r;b!3F[IœΏ‘η|f}ρ‘Οβ|ΐΓ³ψ­¦S»XhuLg<σ5!žX¨;xΈXHφά²wπ0±LΊxΘ ±0ϋΡ yΌΔΓ­$ΞοшσIXH β|ΐΒ±,iΟ2wE{›1ΟYέa·…yqHoMΨθ7Ο 9ζp%:ΆΟ|’£θ1™χζρ5•Σα},ƒΎλŒύ¦pωωSή^fΧ·Α¨Ό½—ΆΒ|«‰σΟ}γΒΡiίώAoόΝ‘/˜UœΏ\±ΝΆ‰£zŽωΩ&Ξ››ψω&Ά7qΣrΫ7ρsεςQήΏˆσ/†Μω&^?:©ω΅x¬Π’lΚ–p™υ%!…D!,Χ+Εό-ΗϊΨWΑ›aΟ₯#V‚²rnόΧ.}S;Kί*$Κyʎ¨#;Εηjο%[²H~ξ‹k1Ω’ώδ½ml%qώΊcžΠVτΕI§=η,δw*&"Ψν9―±p=βabaŠσ ν;W¨―4ϊ9Ϊ·/ς; Ι”‹‡‰…˜e&n%q~χϋ2 ‰ί½ε q>pΓΆμ™X1qΘ53ΈΕ NΡ^‹σδ}ŠsΡΗΔ9pz1gλF¨‘E/cΠΈ}%VΧ“^Bƒ8Kένqo:ύπΊΆ[ΉP’k7h.Σ‹IάVηŸϊΪ£9oOo<αΩϟUœ“ Ώ~Ή|}χs»&>Χ%zŽ{@Η β| €Λ^–oΉ €'©$t‘ώΓ΅\”mφRφ9jcφ8:R £’•<α±½˜Hρ9BJ1L’¨κMFˆ¬‘F{|ΆVζΧ^ύӏŒ~tΝI£_σ‘6Ά’8ΝΡOμ>§:ήΉ β|ΐΓ‚…dvΕΓzρ;ξΓΒυˆ‡5RΆ.&*Κ-y?X˜'€ >_p/ρP,΄’φjz[FίΌ>z޳3ψ{9·΄}+‰σύέΡ§ΟΉΈ7χ¬VœΏ‘Ǐxμ²κϊ₯=Η<Έ‰7ΗυG4ρڞγ>ΨΔΓCœ_ΡΔYMόswΔω¦Cΰμ'₯Δ^(J9!€d0ΘnP†8Mαj s’Ξ9§ΧHΚνΞǝΨYΕ+Ή ψˆr² u²FωΠ‡Ι1Ž&β³ζŽΗ₯ψšk?Ά‘Ώͺ³ˆσW½σ‰]¦­Ž>7ŒRπpu±P< Χ&κ«!&Š…ˆsρ°ΖΒ•ΖΓΔB„9­β‘XHU‘xXcαVΔCΔω]q> ‰›ήbη7\yqΎ ΐtήΉβ<ΔγzYm©}:Κ3φ aΞ5·a׊ς2‹άλŠyξ»’' Κ {Σ[qϋΪe<]g΄ΧΌ—ΦΙ=ͺΆ 7<υδ/Ÿ?:ε›ίοΗ>σπE3ηΝϊYμžΈί”βό!=βό5Υ1‡•žσSBΧ-—·5q~qηΓZΦβ¬<™sΙύԘψ@JΧ΅Œ½ηn!©TJ9ιg€t’2OΜ‘Ψ·dt‚l”h^ώγcGW^ύΎΆ’¬™!3E[UœΏςOκͺ9κxΟgq>ΰαΪΰ‘XΘV,\/x(}Xh$"βΕCΆ`!’ύ@ˆσ…πP,€d=ρp³`α,βόq> ‰ίΔω€…«-|™qŽyYτœ―'qNΖΏ+·/ύπ­H/sٝs^gΟΫχ0skKίΙ+,Ξϊ\» :ΰ—ΧΟ{iΗͺ5±±Pqώί}δk»{γΡΟ8|ΝΛΪ›υηMόkοΟσι&n΅‘Ε9% –'tΠA*β"kAί Ω ·ŽΘΙe†Ψ²EέxΙΈb™ρQ˜K4 χYyΧ·œΤ‘Tξγ(!2HS03‹Ι!Ϊι}$†΅6βό5β<{3ŽύΜΦη-Š…d~ΕΒΔC±P39g|HΏ¦(W˜'r]<¬±P<°pmΕωήgΫD,$Άͺ8°pΕΉ³ΉeΔ‘«ηy1]#Ϊlυ»m†Ή”γ[nφΉΛžaž.μΐuΩμζ²―΅ Ϋ‹ΈΦڈσχoφθΓ_½¨7υτηΝ*Ξ_QΒ½Όη˜ŸkbW7 CΈ›ζ\άΏΔ/Uχω%ŒδΔτ&.ΐ~4dΞ‡΅œ…‚Ϋ9ΏRb)βœήΑ•ž,!•„Z™βΙL_Θ¨%X”RΚΙΌ] ˆjqΞ~‚ρgτB²…΄2h½»Φodqώς·ύMχ}ͺγ˜OΏh&q^Κ’ΞnβΪϊμe9 zN9ƒz·Ρ-V…‡5&.&ΞΕv °Pl kaNfΌΖB…zbaβab‘³Οي…dΨΧ»kύFηpŸ[MΔBβ&3ŠσΝ€‡²8Gˆ+Κθ%³KL+ΞΝ χέoVqŽ»y•@·Δ½ν9 z'ΞKζΌuq/ύζ]i{%Ξων~ϋΥΓ\nXFœΦ·Fο?ϋΒήψσ§Ν,Ξ―ΫΔ)e”Ϊ) θfύjŽγξΩΔ7Škϋa±œR²>62­Y*‹?³‰ϋŒ†žσMΆ8a.π²?`9rΜQ.ϊ•γ±]x”8Ηi8Mΰ₯=έΪζ™)Ja‘%K %ς1ُkπbβΌΟ Š~TNN/OœΏ¬η|Χϊ❟zρ¬βόwšΈq]ZΔ8ŒžΏPΊžλΩ΁nm,dž΄xXcaβ‘Xˆ ΞqžΈUO¬Θ­xX—Ή‹‡‰…`Ÿ›XΈ˜8―ρ,πp6q~—{ίj"7Ήω gηnΈz=ητ;w3Ο‹¨žΤsξψ0Ζ…u3½-?@βœΧFι7†v݌φςϊΘφ·ύηι܎π.cΥηνI2λΕ8πe.z»oqΞΎ\–Ο+φ,\ž8?φŒσF'μό^oόμ Ό$eyυ!š¨μ‚2μ‹€Θ’ e_".¨1βhάΗρsIΤDLŒ~Έ+ΖΈ|ξQ‰‚$ΖD£ Έ$n DAΩκ«SχύלzξΫw³tΟSΏί™ξκξ{ζ9užεqΟωXβ|ZΌˆsA¦;C‚ΟπV3Ÿ_ή0Σ…—Ό8KJωTΓ$uα³Ž‘Qh 6εGa^KλΤz‰rΥU*θΤ|_ }iά"ά(έΑXΫΰ¬kܐwkV°-ΑsΜ`t|qώΪχ>§>}αk%­½ŒΖΪ‘σT_”β<Ήq.D.ΤΊΘ…βGΔ9iπβBšΘ­).τzsoYKs§{;|ΉkΞ…ͺM―q‘DΊs‘Ξ .L>Oœ? η£ΈPΨkLq> |˜±α:ηZHiοΔ§₯Œ«#y72Lβόόχvχ»†gκž^βη‹Ωδ¬sΝΛ(8Φu.©•οEyΉί‹λ ?ΉόάΗE/©πΊ₯‰\—! 
:υωΤ`vz/θιĞβ|lqώΑ»΄ωθΕ?―β1ΛRœ§8_WΛςO¬΅€T&σΊYXqN„Ί8ΧόΩ«nό@—Ξ‰›$q”N₯ Ξ΄­‰.ΐΣ>.Hˆ©ξwI&Α'Pp«ηε)ΰTπΙΈ5Ζ )νSλu«χeτYη€zTAŸCπΒΕωŸΆβ\ΏŸήϋεNœmuΗeΜŒΎ…Ρερ9›‘β<ΉPΕαCηB»s‘FI€‹Pp‘ΊψpMp‘x*vjχ4χκ6ς‘ }ρ‘s‘žƒ ε¬Γ‡.ΞΕ‰Ξ‡†8?ιή#ΉPΨ³η^½!ςaΖ†kOœχcΣLœ#bg‰sΝυΖ9—8oo%`Imοηy—δΥ‹"Ξε–·ΗI3΅ώΆˆsκΘIow1έ£€°3{Όk€ΧΎ§]θζ—+;ΰŸ>ڏGλ:‚άΔ9βΏ|6ΰΊυqΔωϋΏqIσα‹~VΕ=οΜη)Ξ§' ₯{ψ¨€Tπ€Ti›8Dz¬N»‚‚T9ηρΚ>Ž‘ τqœm…σΚ½[± ,4‚ΓYrOνΉP‚O₯Ίe= “pάυ/WFsˆύη«'ΞΟωϊ›χλ²*υάη)Ξ7q`“™άΎ­O‰sΉDΪF#pων‡{g‰fHͺΛ€ζNz»‚59H ]^ώ H,»7JΒ)"]·š¦€lβ’“Κ)(Uγ$aυΈξrŠTλQ“€η/Ξ_يsoΈεxΗ<Δy“iν)ΞΧϊvrΙΕ‡‘ Im\(*>\ .%ΞαCΗ(.dz δp‘RΪ=υQ ‘ ŏκΥ¦8_Έ8Ώί‰χΙ…Β|Δy“iν„8_lχ|”8G¨ έv­νΔiq‘‰SΔωηž6.ώσοΟΤ‡—TτΥYnΈώΊ8ǍοΞAβ\λJwyŽέ›Γ‘zξΞ'Δε²Λmη5θΕ=χ œ3]ίΉHαΞ:iσzjϋ8βόέ_ϋQσΎoώO|ΞΛRœ§8_‡Λ΅› LΧΕ[ίτ±.ψ$}Sχ—ίτ‰n>­ τv9λš ¬€T©ί)΅θJyχTχΥ H ώ8*UπH]$)F8ε¬χmΩ†€”Zt—z?]€Hήͺ―Τ­‚N9_4‡Σω¨ρ“Ώ‘KŽ$ΰω‹σ3ίυάα-η½nM‰σ}C€K³!ά„pα:ΰCq‘ΈOΉπϊ›?ΥέF.Τ}Κ"NΪwδΒΘ‡sq!b>t.€†φΉPCξ\θ|Ή°Ζ‡Ξ_œъσQ\(μΎζΔωΔπarαšησ]$8{q^ψΎNuθηΏ·[GWtάιΎ–}5Δ]{ύŠζκεΧ7Χ\7σάxέ΅ύ\π.͝ϊσ]Š;³ΟiGv‰uPsŸγ>θώ^œxRεqΪ©[twχ.οeΡΕO.\΅8ΫWΠΌϋ_RΕ©Ο:#ΕyŠσυ$ ]ΓAιŠ›?έάxΛgPΠ)‡ˆΰ”[m«υrŒ$Ζ©;§&“ΐ”±k F΄Νw‘8–;£`‘ΐQ;F™ΤQΚ"υΪKΆΥ>pΫIα€ΞW¨&Ξε‚j-½[±‚Q yPͺsNžŸ8Ω;ŸΧϐŽψΏ_|έΈέΪΪβg-~Ϋβ 9Bφά™₯+±F°>pςa… Χ0F.”wδVλΕ‡‘ Υ$N|XγΒΥαΓ:Β…πaδBG.t>t.”:::κœΰΓ ™ Ηη‡ŸpŸ‘\(μvΟ]ΖηΣΐ‡Ι…CqΎ6DzWG­Tν­wΚAιƒνΪǞζΞsr«]όΞwY^„ωUΧΞ ϋ,ΏΓ@œ—L€½Œ€σ:tζΊwέKγ»ή‰G Kδ··½ ΗmΗqη13Σy\„rαόΕωΩψƒζς“*–β<Εω:_ΦB0ͺΰ³&ΞoΈεάώ98 Jš!)₯“q/ F”z“9―››o0ͺΖC€r’nN}€ΧJΈK$Ί‰η>AR8υ^r‹β85R‰r5?Bœλ(]U·€•™΄Iΐση/~Η²nτT oόόŸ-Šs>ιH>\{|(Ξƒχj|(.ŽβB2‡$Lu Š»Ά.„#"Μ#::βͺ{ύΈgΡ‘]\θβ\p.t>t.„u»!sα8βόΎ'2’ …]χOœ'NΧ²¦Ε9ιά5qήΧa›πF|S‹έΥkSM|Hyοf¨—9κσηrΜέ9Χ:₯Ήγ’«ž]½κ-˜Σ…s/ΜMD#΄κŒ‚Cάw·μCΟ1Ύ¬GΈχsΣυš"φ“ η/Ξίψ՜ύ΅K«8ε™/Mqžβ|=F©΅\Δ€”@QώΫ[>7€ΦέtλΊν<§H·z¬ T’δ˜“Φ¨€Lξ‘f λ±€σ³#§FΑ!΅Τ„{J;"[©žΜCW}ι†Ψnqώ‚·/λΣ}#Ξϊ\ŠσδΓ9ψp p!βtD:©ξE RάMœχΞΈ;δ‚»νΈλ8σϊΛ~{'ΎRן\Xη―Ώΰ{Ν_ώ㏫8ω_’β<Εω:^”Ξ‰0_¨8ΏνΒ¬" UΠ Œ"Ύ΅RΫqŠ΄ž€Uχ©ΓTΧb¬rŠB˜Κ=Q¦ΰT΅˜€€+Eo>n‘R,ΉΪrtHντΤvš»Q©…ϋΤa Έδ‚P-ΊƒδβάS9ά!άs₯rό1n‘ΞUΑ¨œ2! 
xnqΎμmΛΊΟ†?ϋlŠσδΓ9ψp-r‘‹pηBΆ‹\(·\|θ\(.„ ΅D.D »{‡΅·5.ΤΉ0¦΅ Zbζ:|Ή0ωp~βόVœβB!Εyrα€·J:7XΘβγΡF‰sf{Η†g=ά)—θ.]Κ{q^Ί‘Σn€2;\ΫvJχ|f Kœ#Ζg σεΧΜ θ}zi§₯―AG qΞ­—[^Dχ¬ΊσέveL\Ÿ:―}Ω6€ΛwηWRνύx’ G‹σΧ}ι»ΝYπ£*N|ϊ‹Sœ§8_Ο\£ωŠτ›Ώ8g@ŠΘVpyΛm_κ[N£#\$w“΄ή›ΓιΎΔΡ‰ŽΕ‚Χ!JΈS?§7ͺU|fjΰ)ΠΓ•Q ˆPθގΫCΐ‰XχǞΊτ:‚QA‚]φ₯}Λ-RZ{\B=u“c&pR0ͺσT@*w,ƒΡω‰σgΏuY?C>βOΟMqž|Έ\Έ qξΞΈs‘‹sψΠΛ}ΰExΠΉΠΔ‰%ΤΕ…β=ρDδBρΏϋΘ…Z"RφSγBψΠΉΠ…zδBψ0rαͺψ0r!|θ\¨σO>œŸ8Ώχq‡ŒδBaη}Sœ'Φ—N˜Ά˜ΟβιήUqNwυVXGΧάΗ¨α‚χ©ξqŽƒŽ‹ά;ΤE γ¬k’Uβš4u]tW »€8η|;!ήήv5η&ΞϋΗrΡ―Ύb:οβΞӍ½sμU›Ž{―sοΦΉ]Β»v!£ςευ”™λ}s9‰s₯Ϋ§8Ÿ—8ΥηΏΣΌζ‚TqάS_”β<Εω‹saAa~σ­_ln½νόˆsU‚U=ηižͺ?§ΦRu—r‹4RH©:IνT0vqιX¬`TPΐFχΩΉΔΉξלtυ›"ym9隞Ά© “ TΑ(nΌ^«ν¨9—SƒQŽ/ΊEώXη’`[NM jοWwαsŸFqώΜ·,λϋˆW&Εyςα"‰σyr!|WγB!r‘ž‡ 5v >›ƒώ‘ ηη\t.”ƒ.>Œ\Hͺϋ(.€ρ›s!|θ\¨Η5>€1fδΒΘ‡p‘ΞΥωp\.δϋšFq~p+ΞGq‘°SŠσδΒEηΒ¨₯s|%œΛŒο>½ˆn0˜n©λΨ.ΒΌΪΝ—8WZ»ΠŸwηρΑcsΠ{qN£ΈβrwsΧ%–‹pοΕyiόFyŸΊn½ζ1΅έκΟύ5ƒ.πελγ,ρσ˜&qώ>ϋνζΥ_ϊ~Η<%ΕyŠσυmQZ'X„EΑζm·}Ή L›ζΒξ>bέE»ž'@% ”‘j γ=Φ,`₯ ΔLi’$'Iχ©»Tͺ§ή( #0Ε‘΄¨)³Ζ=­“ΰΣω:\"£€ΊJAΟΘΚ‘b\PMœΣ™XΗ)‡H·‚ŽŸnΕ^sR}ͺΡWπΈ:!.” 3ίΡt‰σ§Ÿ½l&λxω§Sœ'].„Ε…‘k\θξΉs!£(#^\8!r‘„:sѝ =kΗΉP\΅.τ •‘ αCΈP―“π―ρ‘‹sηBηΓΒ‡p‘ώ?€ηΛ…Ξ‡λ+Ž#Ξ:φ‘\(,Mqž\Έ*ή EαΒ2* χ| ΄ s\πήu/―eŽxχ˜ηυWšTρβNwiίΧόj&-]βΊMΰ£}*{%} Τιν&Πιί―/φ Mίζη½(w°ΞEΉΥΤ»8ο›ΧΩΊQOΘ ŸΟC§MœΏδ35g~ώ?«xΠ“^β<Εωz.BPŠο>υs&0Uδιο’Jθ BέIWŠƒ€ ”νκZ¬G’Τ!*8Uπζ)ž4ΏΑQ'”ƒ€`1 tœt8χq”΄N&Ϋλ±w$Φ}$œ"j+Yp―€<λXIίΤ9Ζοχ΅x'ϋQβάΗΥ1;ΩΕ9Ξ οh%cΆiV|zͺΗεώˆσΣο²AΓ=ΗΛ>•β<ωp\8&ŠαΓΘ…ˆς…p‘»ιpαΕE F.ΤcuηBqŽn#J0Gξ|" αFρ‘s‘O―p>t7.:: πœ/ŒηΊ F.„#ψ.„Ήq~`+ΞGq‘°γ>)Ξ“ ηη.T[Œ%ΞεfkŽ7©κ8εEdDΊ o\q\r:—»SN£4DΉ5h#ΝΌkβ֊τNT·B]NΊΊwfολΜ|q=)ξΪ―2J“8―ο›ΘI(KΈkNΊu“ΗQχ ƒξΉ"ΐcΫ*σέ1ο:.ϊqή'iωϊ<‚8§ξή³ tƒεΚ2rŽξφΛKC½Iη/όΤ4/ύμχͺxΰΗην²U‹σ[ό¨άn9b»γΛxΙ·x©­U‹Ÿ·Έ¨ΰD{½^w\Šσι«νšSGN`D@δ‘»κ~+θy§r‰θb,(ΐRΐEͺ»Ί3\δ‹Λθ!‚Sf+ΠSPŠc­ϋ LεΪ¨[°œ#4-rP+©ΰRπ‘@ž JJσ#7½ρ‹Ž‹ FYLιR[G)½_η¨ΐ[뙁¬ΐΤΕ95«zL +Ÿ%€gR:/œŸ8_K"}qώ€7/λ»@GΌψ“)Ξ“׎‹ξΌ¦Ώ1ΈΎ”ΤΈΠ‘ντ·*AΏνΘ…€ΊG.ΔU\HΣΈΘ…z,>\:Β…~qΣΉPϋ­ρ!Η²θαCηBηΞ…t°:Ί8\:’ε°JqΎ/X8ΏΧ1‡ŒδBαξ)Ξ“ η#‹8—¨Vgιfy—Ίσ>-w<6wsΨμpjΛσtAοΐΊR³Mσ4Dq'Pι΄n’΄wΑΛs½cRΪϋΩηά/)ς€Έχ―)·ψE°γfλΎD7.zqχϋΟΘ.&ΰΔkιΕΉΥ™RΩ­c{tΟΉ@0xμ<«ΐκλΉx!>qΎΆϊ8βόΩV³μΣί©βΘΗ?\q~b[·-^_Ωfγ—ˆ›[άΎΕΕ-φ1qώΒΚkφ)Ϋέ‘Ε.ευ§8OηhΑ© €GΑ€Δ΅">r(δ)%m“ S”ΆΓΙ@ +Έϊυ4G’]Α—‚0gˆu€4 ςΑͺΑTΰG]#ιδ€Ό3Ά‡Ρ=ͺΑt(°τ UM”tŸ@ΧqξΑ(Πc½ ½·‚bζΝjQZη :£8WπM0ŽxW .'MŽš>37ݐκ;ρΖ{ϊόβ\¨£Τΰr-₯γˆσ'ΌiY]FΌΰ)Ξ“ΧΞKΈPβZg‘ ;7½ύ›‹\¨ΏQρ!ΫΑ…ˆρΘ…ϊ›F.D G.€.½Ζ…BδBρV έάΖ $ρ‘s‘^λ|θ#Ԝαš8Χ±“ΊΒ…d‰ ω?‘ ί³‹f‰sηBηΓ΅Θ…γˆσύ[q>Š …ηΙ…σθΕqξΠ Ί‹s‰ε΄mΰ|[c·AΊzα±3ϋ@ #μqε/:of{=.ξyίΡά…)"΄‚΄;/5saz₯£»ξ#lMδψΌ\œΣ8OέΔΉwcg†9ξψΐ9χ&pμk„8Ÿ%Βη‚ΔΉ₯νλ<|ώ»πΛί H§nm τqΔω3>ϊΝζ9ϋν*Žxά²qΕΉ\ννΚύντΈ²Νa-Ξ Žψ«ηύ6ερyΪOŠσ J›ζ†sτRœsάrO£FΈΣ܈ RA—HD·œ € >‚(έjφ―‚.m§ΰSA˜ΦωL`Uœ#FQwI-clG-žGbH™Yx* %…S·zΜΈ :! 
"Ξ½–’‘HΊε8pΤ Liό€sΉιiΗsϊό΄ŽΟPA)Ύ{ά7D;’©}νRn]œ»s&@œŸφΖeύχρœyŠσδΓρΈp|θΞy …Θ…8άβ2ηBύ͊χ"j;­\Θs‘ ιςΉχΘ…β+ηCώ–Δ·΅π‘s!Ϋ‹υϊ(Ξυήβ<ΈP)ξ‹s‘D9ΩQΞ…œgδB}ξ’Γ…\A¬Γ…”τ|ίι{w>œqΎίΡ‡ŒδBa‡{€8O.\Ψ‚8GΨΝ[œ—tσθ‚»;ξ©μέ¨1Υf—Ϋ^άΧ„<χØ΅nδXΖ½ υud\δ˜ήήήΚQv·˜pΰŽΊ7ŽsΜjΕy{Ξ€°Σό- τΪ>«]άM€s~ύw‚8ΧyΊ8—’|ΕωSί7š§ό’*ξ{Ϊσ΄Ρ»΄Γι Ψ5αρΥ•mNmρ{όΨo1qώ“ίnρ^βυ|‹Σμ5ηh?)Ξ7δ€ΗTA ΙƒR5©―T€*I’R-˜*H­IWΤRk[κ*\1οWβ\%β΅ΞEΊwwW@JπΗ|`=Φ­ΰ΅ιή٘ΐ Έψφ1AZ§η dζŒκ=pˆxoKέE’Γ₯TMΊ#Ξυ™˜¨κάυ™θσΐ1Β±σV¨±i_sKœήτω•"ΰ΄ζ­§βό1­8™ΰΩKqž|8&.”o»°wc μ“βBq—ώ>ΕqβΓΘ…ϊ{…χœ ΉXΉm#"Н #Ζ>πα|ΈTwΑΉΠω0r!|θ\θ=Bœ uρBη@)OδBψ.t>t.τš~ηBΎ―ξ;‹\ωp=ηϋΆβ| Ϋ§8O.Sœ/D€wbT Ρlfχ …ΗQMv©½Φλ{a/A»n©ξέcž«‰s„«RΡΝ94+’}0λάάσε₯iš S‰vθύΎθžζξξ:'κΟC7v-.ΘΑ¬šrwΠ£8χZsάq.<„,ˆYβΌ…Ξ‹τu:‚άΣΪ9΅Υε}qώ€ώkσԏώG‡<ζy«tΞΫε‚ί­ΰ”yŠσGTΔωΩεώ]KΪϋνZΌV½¬kEœ?<Εω†”zΐ8΅UŒUkn½`Fθ Ί_άG  i?ΊΥzQί)bš€ŠΰSBS·8!ΊO *Α―η° lGPJΝ₯&)ΐ£ξ²&ΞyŒ‹€’Ϊτ˜ˆ›N£#ζγ’SΫ§ϋ>捀sΤϋŒΚ)"­ύbλΤ`TA§ίβ°λsπnπºkξͺ$Ί€Tί—Ύ[¦ΈEόx‘f]ˆσGύΕ²ώϋˆxϊGRœ'ŽΑ…1ΝyΎ\κοO|ΉP‹ΊΉPBΡν\JΠ;κΆΖ…\\HYMδ"w±Β…~‘’ŽμπaδB-5.δœ uάπaδBξ;κ±σ‘ΧœΓ‡‘ αΓ껏ξωZΰΒqΔω=Ž:d$ Ϋ₯8O.\^η΅ϊηΈτ.8(}Φψ0 QKξ€Ιm–0χtx9ξZg]άϋΤφ2N­og‚vV½5ˆ]Ωƒ€§ybaŽ8οΛY·χζ8¨=G€‘ή}Nˆufš{#ΈZ&β£sn5ηƒnυε˜όw²Gœ ˆp\τxŸσŸ„šσΣήχυζρϊV?ϊΉλ4­=l·³D“iνΉ¬r!0βάxxr‰τK+ŸΧϊ₯­[Ήέsmΐ£ΰIb›ξΓΤ­S7¨ΧβŽΞ‰ ΏΖΈ!g4Šc ™ΟF γ"q_+ιζ8ι4L"5ΪLR<έIzΖi[έχA8γzŸKL­₯ήίƒQΊηάG˜¨#o'η H‹pθέ"„„Ύ?„ωo>2ƒ΅΄Œ#Ξρ†eΥfVΒιNqž|ΈH|ΉΠΈs!|ΈPβ΄Θ…ϊ›δ‚€s!)ν‘ iΉ”νΘ…>~ΝΉP™Eπ‘s!|Ή1ε=r‘σaδBΔΉs‘wmw.t>δά£8w.Œ|ΖZσž \(ξ|Έ—Υη:d$ Ϋνβ<ΉpL‘ŽΨ4q>k˜ τβ’w"ZB΄ˆΟΎN›škΝ·4ο>œ”p5›£‹»ΑE'R%D˘3o7θžκ;Ά›@—wQ>Higτš‹swΟΉ˜a‚šΟ«η₯ΦΌζ†Η±i η΅1s:GΤ7·όϊα\xΏ81)έΪύήnNϋΐΏUqΰ>g\qώ†Πξ¬Κ6›΄Έ΄4v£!άΎzΫnY‹”ϋϋ††p—fCΈ\f/r€”s–8'ψ4aN*§ΆWΪ5 CψΡ―4DŒ *4)€’„Ψ& Υc­σΎ©1Τklι5ΈξΈFσwŠ{DχbBF)d]Lο΅!ηc]»@Κ¦ΰΒ\±ŽƒFp –ult'¦Ξ’TNΞ…”MθΊ%εBˆΧ™ |}*;ιμ||—Jݜ0qώΠ³– ΊI;žψ‘ηΙ‡kˆ £8·Ρi8βΞ…L€ˆ\(Wγ‘ υwΏγΘ…4|‹\¨Ώ^ο.;ΞrδBoη\Θ…ΒΘ…ΤƒβBŸ_ξ\Θλ ΅ΐ‡p‘σ‘s‘σaδBηCΦyVs‘σα,.Τ=v>œqΎW+ΞGq‘pΧηΙ…‹%‹@οΣ±ƒ8ο„rΛ}5βY5Φή}ΌΞ~_^w-χΌΌΎϊευƒ}’:OMw™+>θd9ΪΕ7b»οκξ.z+ΐ]χ’άjΨ«ξ9βaΝX4›[ξŸί@˜³m£χ1K ηžΦ>θνωqαA‹‹rΦΗΪϋIηΧW›GώυΏV±#Ÿ58ίΊΕ—Λ(5έnUΦoίβσΆέ‰-~XΊŸiλ?Πβ;₯ζόά ΦΟ,Ϋ˝?aMςαΪ¦ΎdΙιφ/]Ί4Ωs±ƒR1%ψμ;ܚΈΣ:5?’XVΤ₯€JΛ…Ώm&½Sېj©ΐ“ΖeΤ£+˜’všΗ Κ|TΑš‚OΆΓ1’φ’ϊKoΜJΧ{κ΅€x* pΠIΉ¬ nΊ=GS#ά'\qΏ―₯„"ζω ήwΘ]#Φ)υ.ΕήψΘG9uί›§ε’@§Cρ"Μ|^[βόΑΎ¬kFUΓc?°αŠσδΓ5Μ……ϋz.tqή>7‹ =]ΪΈP«β5Ζ¦Α…πaδBzoD.”θ:²ŸRϋΉnŠ\H]xδB‰ρF>Œ\Š 9?kδΒΘ‡4Η„ ]œWΉ°‹’ ι5N€8ίぇŒδBaΫ Tœ'™ΕΕf/ͺqΚ]4Mω@„zϊvϋνύNl{|q—» Μ9/‚½oΊVΠ]ΐ'•[π†pΕ)GpχnxLuέΩ#‚88ΥΦ$. 
μžzΌΞά…·Ο9w·\Ÿy|7†3qή_(πqΑ=wqεZk€7©βόδ·}₯9εέ_―bŸSŸ9–8ŸnΜ«£Sœ zš;A'^·z¬ΐFΫ·Αc†ΊF<νs D©%W°(Χ†ZKQVtΩ₯žšvm ΨΈiφ§LΑ,5‡u$\'¦ΆŒεQ Κ(wuδςθ=%ΐG|Ύ‹ήCπχ8πβœ°£8Χω* εsη ΟNΒΌ―§Τχ$°Πc€Ξ΅4Ηw1ΕωI­8ΧΌεσtΞ“Χ0Ζ’ψ0r!B/raαΞΘ…β άsηBξG.„#Š|βEl ΉΉβp!<':ψp.dlZδBηDηΒκΎg9ςω‰«\¨e=ΰΒqΔωξ­8Ε…ΒΆ{₯sž\Έ†„Ί‰Gκ}ΚΆΧ—GΗW5β%ύάζή5/žtχΎ&½4ŽλλΈύβ€Aϋ"ΝέgχB½VΣά­v»&ng5\3Ύ₯?―OχǞ‘ΰŸ―ww/#FΦΨΫ1σH„ϋBm9“Δ…ˆσήςΝΙοόZχxXŠσηΣ΄ŒjŠƒΘσ€Tkth΄€ΫΫfjICTp₯`—CΑc„p@h)πR`IΪ·‚KD½\1mΛΨ"œτ8 χ‰ ΠΙq‹›CI <*sΚ…Υη~‘Ϋ8JΝS:=ŝΐ›`”ΊVAŸ]?ל *7Ο\€ήτhΒΔωρ―[ΦόρW?TΕ#&Εyςα\F‰8ύΥΈΠω.,ΞlδBρƒώώ#Β‡‘ αΓΘ…pgδBjk\E.„ λ\΄\].tq^ζβCζԟG.ΤyΒ‡Ξ…Τžw|Έsα8β|Χί?d$ Ϋ€8O.\Sβ|DsΈ^\©άšžω2ζζ.wb»Œd#΅½wΔi ΗΎζV›N“8κ±ϋη&LcΧφΎS»§―ϋ,sθ±K»Υ΅ƒ‹σBΩΐ σ»uyοέσΚqxƒ8Ώ8A=}μΎξυυ“(Ώ~Σ…Ν±oύ§*φxΘ3Rœ§8Ÿς…±C+>έ»B]PZηA¨3χ—y½» i §[Ou§#±1“€<κ1Α³Ύηήι—Ε<9D@ˆ厺ί' ήkΖ…Υη.ΖΑΕ‘ΖήQ‚ΡΨ΅9ΊζϊT2Π}ήȊAλΡ2Ž8?φ΅ΛšΣρCUœϊΎηΙ‡λΰβ₯PγΒ9ψ0r‘xE|ΉP’›ΖoΞ…Έίβ†Θ…BδBΔzδBηCΈ0fψ8JTΓ‡γp!©μ‘/.œ½§Ÿ\Hc=>3Έj>\ΉpqΎK+ΞGq‘p—ηΙ…kiρ‘`€›wbwΧRΊg }kzΖ85—†ΰΜg±½w½¬EΌΞJO7QξuθΈηUwά]υŠc½ ΟΝxD%+ θˆsk²78_θtoo›s][£ΡΦ¦8ΰΏάuφW«Ψύ”?Nqžβ|η.Π?3Ÿ΄ΐφuͺ»T©ΰKσs ’‚©υˆs€ηœ κΎΦ3.GΑŽIqdž0έη…G7έƒSTwsτΎΈH8J«³ΔζoΗά…9¨»ζξœ+ οΩI«Ε-ς€!A]ε5o$Ώϊ“4Oϊς‡«xθ{ΞJqž|ΈφΕ9.μ\¨oρaδBρ›_¬„ ΓΉ4ψΘ…τΘ…πaδBjΤAοπΰκr‘σ‘sαw*|§W8Φ²ˆΔ…Jυοω°Ζ…|ήn‚ψPβ|ηϋ:’ …­χLqž\ΈvΕω,‘^Φ­‰'΅½Ÿ8ΧΈ4f£ΫΆΎ) αŠ`Τ½·χϋ.ζ8Κ.Ξƒ χiƒŽμΕu―₯ˆΗfpΎ~μ‹sšΰ‘`$iν*(5ξύyΫƒ^¨·ηƒ3kΙ™w~₯Ν9&Iœί η7ΏW_©bΧ?xzŠσηΉΜΉ΄Αšσ(Υ₯Gβ‚toΧ}\zŒc€€Q‹E­S¦šB±8FžΪ¨ΰTϋ‘ξk·Iyχ.Θ¨ξfσ‘Vg‰―υ@ԝ!Djͺ€ξιœλ3θ\;ζυˆΖ`tΒΕωƒ^ύΒζ η€ŠSήυ†ηΙ‡Ι…κg!>Œ\¨Ώkώޝ Εԏ;J Σ·ΓΉPϋ`ΒEδBšΖE.όNΝpaδΓΥ]όυp‘ή?ς‘M£Ξ.tלtώ]:;|XγΒ)η;yθH.ΆήcΧηΙ…³H wξxIkοœσ"Ά{Ξτlw•©U/ιπ½@ #ΞF9ε£Δy΅>=ΜFWKœ‡‘tύ9qNgϊ(ΞgWs‘‹rΝνBDL]Ÿq~δY_jπΖ¨bΧ“Sœ§8Οe•ΛmW«ƒο…s’Tq‹8ί ²θ6¬ΰ‹tNf »£ΗΜ°eΆ7ΞA―υζHΤ_"ЁΧz{ va1‚Q–Ϊθ4δΡ%bdP¬­μΗ6y ]VΕΙo‹±Δy™eωύ2ξβS-Ά°ηΞhργ2ξβΈ HsYL.Τί΅x-r!΅ι\”ƒ ΕβΓωr‘7•s.$»¨Ζ…‘IƒΞ"paύ}9Θ‡>΅"raŸΞ>Š %Πu‘d‚ψPβ|ιύξ;’ …­ΖηΣΐ‡Ι…&Π­1\'ΐcϊΊw‚χξπ&θυγΕ]Λ%RKΝuΥ1ŸkΌZM ΣHΞδWw©Ξ‹’|ΪŽ8χξξa>:]Ησ‰Ψkβό—ΏY)Π…Iη‡Ώξ‹ΝύΞΊ°ŠO<=ΕyŠσ\ζ”^υ7] EΪΆ§j{ΰHJ·ϋ!½N―Wj¨‚1Ζαᦔέ\`.°ΟχYβ΅΄ONΗ Fiώb“#Ξ™”T‡Ž§œΞΞ} §oͺ€Ε9οΑo>2‘βόώ/qσGŸύx'Ύε/ΗηΗΆΨ€ά½PξοΣββwh±K™IΉq€Ή,κοTάΉξλjΈP|$.‰\(χ\|(.„#Έυ –p!/#ϊΨ²Xrγ|ΈβΆ8Ÿx>L.œ0qξγΥJΝ:uΩ7ΩΣΫΝMοΊΔ9γΕ¬><6D€ys?֝nkβάϊj‹sfΌ3*Ξλλ£sΓ Ζ°UΖ¬qa>βά]σIsΞ{Ν›#ώόΒ*v:!ΕyŠσ\•^Ψ‘ ²=(UΠH‘+xc­nιΖ{γ-Ÿν»+H‹BέΗ‘ξη㨔zZ₯»JΒ8‹wf'³Φ}WΘΑ:fψr«€Ό›γK­ySΊF*ψτ ”@΄]Χ­Ÿ q~Δ/iωιOVqά›jΡΪΫε‘->d.Ρφάy-KqžΛ’Q‘DaΛ‡‘ έρv.dΪ©κp!|XγBάtηBΊΐG.t>Œ%7‹Ε…πaδBΞΏΖ…ˆrΦΡ՞™ζ.„#ΒΊN JœίύπΓFr‘°εn»5‹Ε)“Κ‡Ι…(Πm¦·‹σAƒ4­₯ΦΌwΣ[tޱ j sζ›[·ςk›ΟωΞ>χ‘kQ¬/‚8οΊ_€pA>ξ#Ψ|–|'ΞΛκ͝―„Έ\rΉεW\sέΐ5ŸDq~Θ«?ίάχ΅T±τψ§¦8OqžΛόΩθ‹]°§ L#Ž.ξ1"›ΤEš)Uϊ’#ζλΎ ηθΨ‹³ξΝ•<Ψσ€ΟΣ>§‚q–‹Cgv6}vΉΟ€m²žρs:/g7?Y‚Όs†.œ©΅ΤύRC9Bωξ°nΒΔωα/}Isκί~²Šcώͺη_Σώ §―ζ{ύ]‹ΣΚύ·pΏ<>§Ε©)ΞsYl>Œ\θέם α…‡΅­πaL{‡ Ω/YFΞ…‘Η]\œ³_ο­Αh98Нη|Β‡Ζ…q^αΒIη;vΨH.$ΞΕ2&N(Zσ8Fmΰž{]ΆurοRΨK*Ί§tΣ₯\χ₯£θηƒšσ²^uΞƒ8_θ(΅Yηνοδ^w^Is―vs/"T~Ώθ°Όsη—·ŸΓεE _5‘iνχyεηšCτό*v<.ΕyŠσ\ζΏάpnTΡΉXAA.ΉJΏy₯ŸΊU:£‚3έ²θυ₯΅@·ž€ΔΪoθγ:Gz=΅υΜι%užοΒ\.‘>9eS€N5‚£;±τ­ DuΞJΰ­ƒΞqΕω‘/|iσ~ͺнαM«tΞΫε‚ί­ΰΫζΜRcΉQyόΦJ0ϊπηΉ,6ΦΈΊsηBΦ#κ9ν#r!ŽzδB\τΘ…5>„χj.ϋΈN:\θ*Gρ‘s!νυy ΈqŽk"Μ+\8I|(qΎύ‘‡δBa‹]WνœO;&N¨8o*ΝΟ$ΞΥθŒNζή. 
S^Σwj'΅½€³K„“Ύ8ΏrUβάf‘χˆiξ6O&lcΏΧœG½2Ϋ}ΠΝ½ŒZλ>·φxt>žΎΟg@8}?ϋυ \‡ωΈβό —]sοWWΕݏyJŠσηΉ,h)n†ζγqK@J³#¦ Dˆ)ΰ”S€”€TΫΰœ΄Έj_ξ9Hq<0Υ}κ TŒzm%)c“Θ pP;οuτ.Μ{§Θ»΄#IλlƒRw…ι/ί=iόŠϋΌΰŒζΑώtΏΦ›ΗNko—Η·ψz‹;6ΓζG™ΦžΛš_"@αBD(½6¨5‡ ί‘ #F^γBηΓxΡ²Ζ…γπ!\θΒάΉt|x°Ζ‡.T ψΠΉΠ]ςΐ…“Δ‡ηΫzψH.6ίeό΄φIηΓδΒΙθ€jWSάK·v#L;QΕyθ©ˆS\cάb­οfΒΥDyηA€―8σΰkΒΫ3f9θͺSΉΪ}|š§ςGq.Χό―žqΞ'Uœψ²s›ƒ_ρΕ*v8ϊΙ)ΞSœη²εΆ+ήΩΧA+ςQ@ˆdj ΐLφcrš ;—Δ]rœ¦ VSί.Φ’γLΗ Τ…z­‰’ά#άw’jͺ)y0ͺΗZ΄½;δ„RGΙΉs_¨_tθά"Ή\sέ'0UWv’₯ˆκ³Ÿ4q~Πs_֜πώs«ΈίλΞ·!άρ-ώ³Ε6aύΎ‘₯Ω.—5Β‡ Ε β‚Θ…β1.Z:"ΐ#κyηCΈΠΊs‘σa =ύέϋwΐ‡^·ωPά§νβΕJmO9ΞψͺΈΠ3ˆ\(§άω.tqΈp’ψPβόnχ9|$ ›νΌϋXβ|ψ0Ήpr—©ιμtKwˆR ‘K§rΩζ‘)"UβτςkV:Ζ}έΉS•ΦGͺ•ΪφΑ·r]u πN”[γ»ΎλΊFΚ…ϊσAΗzOawP tvTkί?viηΌcZ» qΎ.;΅+Ξχρgš^φω*ΆΠ“Ζην²U‹σ[ό¨άn9gώ L²x©­h‹‹ ~’Ϋ²~η7ΨsοHqžΛz#Ξ»:@GWώu_‰ΓkC§aR8;‡δΆ ϋ ΣΣΨ|²Žϋ€wΡΨ&E7= uj Y·*qΞ:ݐ*X₯λ1‹§hβ ŒEςΊy„9N™Ξ§sŒθHŒk€ρ@z¬ŽΔ€oκVŸ³‚ΡΛίήaΔω½žύςζ˜χ~ΆŠΓώδ-γŠsκe5’,©—β=a}Ύ:š|8ΩΞΉs‘„/"ΥΉPό Š\ίE.D΄ΧΈPˆ\HΚ|δBηΓθ―JœG.t>dΡσ1eέΉδ–2Χ€ •:"ΚΕ‡λŽ#Ξ·½χ#ΉPΨt§±ΕωΔσarα‹s­::Bξ8Θ₯½sηE zgu£&‘*QͺtnwΟ«αFΝ6#ΥΚ\τήA!Ξ{8GX—1h}ζ€ ο(Ζ½)ΫΫχθΞ»ΌΏΞaωυ+αY1NΝΉ0I\ˆ8ίοŸnφΙηͺΈΫο-ΞΟBlλ–Ιa› 'ξΪβφεζ>•νώR±¬‰σο¦sžΛϊ)Ξ•nX\ Z ΜpEhzΔH!AA[琨žπΦ z—‡HN’φ£›w[œ₯θ.ΉΫ@ηΘΣήc=f V ˜iŸ^—ιΒ=Φjβ‘ž9ΗtΜέ9* oΟ‹ΰ»Ok·ϊΚΎ!ξΠˆσύŸωςζ¨wŠC_υΦEλΦ>ΙH>œΰΕΈPb“™‘ uΏΖ…\¬Œ\·D.„#Ζ9ιp‘s_MΈ;ΒƒάΦψ/>¦\νβdδCΚzΰΓŠαCηBoΈp’ψ°η1’ …qΕyra.λr‘Θμ›Α!@£ƒŽ£l5θΤZχsΎKS8ŸmŽ8Ώ’€s_qΝl.§ΉwΠ]Œ>˜}nβΌ―=WZ=Β<6΅«5s+΅βˆl„Όg Μιζ–wΒ\έΩKgzΑ›ΰy½=ŽΉΧ˜SƒΟg1iβ|ŸeŸjφ{Ρg«Έλž88ΧEΘνΚύντΈ²Νa*ρiF””u• ž{€8Οerά’6(UpΕΈ ώ¨mΤ\_έφ £rΪΐΤ‘€­w•X?ΒiW¬`Οk»=0«)Q­Vέλ4©Ν$Xu!οA)”cπIΠμ5υέΉ0ΏχζRgιγ‚ΌΆ’`ΤΠPqΎοΣ_Ρ<ΰν_¨βή/{ŠσδΓ©βBq‹ψ0r‘σΗ,. |(Ξp‡q!|Ή0Ž’€μfziP­ΥΛ#^°Τ{Φ.Lϊq’Υσa [ ΈTφ Nš8ΏΛAχɅ—ξ‘β<Ήp²έsDm+DϋkQ¬Ίΰ-ξω`Άy‘†ΰW•zλΛZή tΔy?σܝsεπ^  šΒ™0χΩνƒnσΦuΎΫξ€γΊϋhΉΪΉ+[ΐ„9©μd,―sγδ²χIη{?ϋ“Ν>ΛΞ­bΫ#Ÿ08Ώ&<ΎΊ²Ν©-ήc«Ιa›ϋϋ9q~}‹hρ•G¦8ΟeύqΟ –Ԑη†sϋ Ž1>syΜάU€ΦcQœkƒ`΄Α[Lι$Ν§ΖS―Mwg΄KŸ<b:¨―ΧΉjzο€oξԍ²ZΚ~½7Iiχ.ν @χ­+o “&Ξχ9ύ•Ν‘oύbωŽηΙ‡ΣΑ…νί­ώΎΕK€Œ;ϊΌσ:F.Œ|X:Ξy ©Iχ&l£ΈηΒ…ρ",ΩβΓωp!™Qˆσž %ΜαCηBσΐƒ“Ζ‡8?π~#ΉPHqž\8ρβΌ8Θξ*χ"΅―AW·φRoή θ Nr­Ϊ(q>˜kξβΫ0νԚΛ5—σοiω£Δx™ΗήΑFΑ ·jΗKz~ο€k_φ0ΣΌo~§ –ΚΞLσθ˜ϋ\s„;5ψ`ΔωžΟϊD³χσ>SΕ6χ{Ό6zΧ\c%ηš\1OqώˆŠ8?;lσφ/°ΗκΫ±uΉpqΥ7KqžΛϊώμΝ+€«?Π7BrG…ZD§ H΅§¬wχεšxw^ά€f&Ό{ Zγ˜5Oιήt(vKΡ]ς ΅†Ψψ-vΦϋz£7ŸίN *)«ύΘ΄[fΦw©JεT@*ȁsaΟXAΏξΟ_Ν<ώωΩ'Ξχzς«šΓήτ₯*ξυ’w¦8O>œ.μDeϋw-~ˆ\¨Ηβ’Θ…ξ*  Ι0Š\θY<±œη\θ˜Ζρ5>Œ\»ΟS?ίσ‘s‘D9|θ\θ|Έp’ψPβ|«{9’ …;ν˜β<Ήp²—Ξ9n…)ΒΆ£TRά©»ΆtρΎξ\’΅Τ»Pυ”n―·ΦύkΝaοSΪƒcŽπγΣϊFtԞΞΞ±Ήk>J ϋ8΄’ήΧ¬tυεJέ·TvΞ“σπσugNZϋ€‹σέψc͞ΟϊTw9όqλ<­½]6iqE‹»Ορ>Πβή)ΞsY‚R§ qN¨§œEξwιœm IΓ£.H# 5˜}κγm:ΉSŽ#g“ζYƒΧBF·ΙλΗ}뽆’FFξQ3Jέθ€nRSY:²χcιpΞε)θTπΩb}YΖη{<ιΥΝ‘o<ΏŠ{Ύθ])Ξ“§Š υ·Lγ3ηB]€τ&™p!|XγBϊs8zwηBηCηΒΪ…ΜΦΈ―Ζ…t[‡ yŸQ\8'v3ΛαΓ N:"ΞGq‘p§»§8O.œχ\έΛαt5/ntοJ[ύ΅ξχΒάΖ©ΙQφΪkOη¦Φ±Š°Μ=―Ԝ„:βaŽλ_fΆ{wωώ1’<ŒL#Ε½;WDΈ€ψ§žž΄ύΓφ«Bf|α]^OAlψ΅έžφΡfχgόm[6Ά8ChwVe›MΚδŠ]¬!άΎΝ°“ϋWΒkΆaEi$χsu†OqžΛϊ–΄φΞ½hƒ-Ÿ)P#u\ŽŠΦΣ­Έo„T>½‘Zl―CΧv€u–fj΅ΐ‘μnMσξχcS#OΥτqo€}z[ηΒyRSY:<Δ9ΞPΑ4ˆσέχ'ΝΑ―rϋ,{OŠσδΓ©βΒξ’[ΡΞ…4M›Ε……gq‘σaδŠHw>Œ\8 £Έpr?¦­Gw| Θ#:::VΈpωPάχΌH.ξΈΓž)Ξ“ §BœΗtnšΓUΕyϋx0Ξ¬8η°ΡˆV:: δcΥF5‡“X/MΰjΞ9’|0-Ξ1βœFo.Ξ-…τŒKC˜_iυτΒεv~ζ¬η’Δ4ˆσ]žςαfΧ§}’Š­}μΈβ|λ_.£ΤΎŒ€n—ν[|ήΆ;±ΕKΧφ3Γ>ήΧβιaέΓ[|―ωoρM֜η²ή£?ύΛ•AS{_A– fζ›»λ’  Φ<ψτ4NΕ:K—¦H5‘ξ)“¬ΖΗ1Xuw)œξFΉSήw6Η«F=ψδ˜uλiͺŒJSπ©€^3’]œΫg;βό±―nώ³ ͺΨηΉοNqž|8U\ΨΉη-F.τΩί.tήs.d}δΒ  =m­Φa~ϊΪsšΓ•Ωη€°χ΅ενqH//)μ±όrηΈηœηO\ήό΄¬¦Aœοτ€ΧμόԏW±ε!§%Ξ§IΐΉ,< UΠ$όψΟ»tmΓrξ:λ~Χψ‡FH>JL·7†&I8G±Aœu/ž•.ic‡Έ ±.2¦…ΌFwΌO?m.œύώ€jΖsΠ­κ):³K”+˜§‘”‚Q9H HωLΧ£e¬΄φGΏͺ9δOΎTΕ~Ο̚σδΓιβΒ.“¨ε3ŸMzΊϋ€ iι\θ|8.Ό­Ξ…1Ε”8ίrί#Gr‘pΗν3­=Ήp Δ9BΧF‘ΕyηQΜφΝΰΚ¨3„,Ξ9βη؝εΨ,-¦Έχ·.‹PοΣΫKCΈYzpώγœςY#γ’8'  tdΏφϊa§υ+Β9ύ΄ˆs₯°“Κ.Hœƒ)‰ ΏΆγγήΧ,}β‡ͺΨβޏNqΎΆΔΉ:νΡuoι₯Ιb“‡ΐIΦ-_κS9=πΓaΑι‚R„Ή‹V@Υ»{'γθ‡qlƒ@•±β.՜€θΆχ]Φ£o*iš~|œ›_|πΉ½xκ3,ιœ]έ₯‚S­ώk¦Fœοω‡―lξϋͺσͺ؏7\qž|8\Ψ5†“#Έ‹}^ޏ+.ς€ ΕπImδZ-Σh.. 
N{δΓUqa5]}TΪΊ_tΰBƒυΫ¨r‘σaΰΒiΰΓζ|Ÿ#Gr‘p§ν6Lqž\8EβœTξ2σ| ΞKΓ΄‹Ž8Ηa6ηάζ+­Ξά…ΉΧeΣ½ϊt\τήIχzτ8ηάκΟ{ηί:{c;\Eiη3Μcύ|μ<—8G _fBύ_];uβόΣμψψχW±ωΑJqžΞy. HεtX·ά.ρ†s{Χ₯od’SίΨ»Ιt*wLjϋξ¦{:€§{FwfTj|˜<ΚAƒFFQ„Η:Po›ΥλΡ`f―‚N}~€k’ήNz§ΦηŸ6·}χΥS!Ξχ~Δ+š#^ώ…*8=ηœ'Nκ~Λ‡Ξ…½°5‚ »LqG ΅‘ )•q.tw=raπ#Dϊ*Ή°Ζ‡5—άϊkD.μάrψΠΉπΧ―δΓΘ…Sΐ‡η[ίγ~#ΉPΨPΕyrατ9ηύΌoκΛ‹@'u<ΊΤ½ΛlsΞ½ašD8©ήάΊP§YšwuΏφϊ•#Κzq]tΫ%Π©?χtKu§Ω[«“y+Κ{·Ωκ³ηΆ8η4Gϋ©₯~³N½]·}š{Α¬τPξ"έ…Ίg ΦΫ:―5'=ίkηζQ”Kxsώœ?IΑϊR>Ž8ίξoiΆΓwV±Ω½–β<Εy. H ¨H;T`ڐ D%ΎευA€ ΰ¨ΉΜφ%`T κΑn (A'*ξ .<·ξΨΈσδ’ώΆzϊ 5ž ŠrP: ΨIΡ$ΰΔ%bn/©@@ϊΝ3›ΫΎύΚ©ηϋ=δεΝ_τΩ*ξσ„·€8O>œ*.ΔMŸΕ…βAšΐͺ{{ΑΉaΉN«q!|θ\θ|ΉχšΦjήo©ΤΝƒβ’ΟβB.R’Ξj½σ‘_¨^τЉηΫμyΔH.6½kŠσδΒ ηV»­…Ίσ>%Ό\―Ι–ˆνη€Kδ"ΠW¬L§ ΒDgΩtiξΚsutΤ ;ίeϋ`½£ΫΌ2Ϊύγ˜Η±hξ’λ6^ppW½&β§Aœίυ‘olξvκΩUlzΟSRœ§8ΟeμU”zΖFj} ζ-3Ξ΄‚UΝϊύε ο휣.Δ]χ`T]t^4cΰΗs―zŽΧϋmtγGΥjΖϊθΔ»(χ”MPœ!έ’Ύ©€S§ΞΎn&ψΤ6Z§ “ _;ˆώϋΛW τuœ9Ž8ίΑ/kŽ^vnχ}μΩ)Ξ“§% ϊm.Τ:q ψp"Κ…‚s‘ξÇ΅ΎΝ|Έ0ςŸg%Ή[2A"Ξ©Σw.„ ΕΞ‡-&Uœo»Ηα#ΉPΨtΫηΙ…S&Φ%Δ©3/β|ΠΙΌέAϊΈDz»^³w5Gΰzƒ΄KΜMv±‹˜υ™αŒ\£i\λ³f€#Ϊ}όšw{έί­ΛΌφ«χ ‘Ο*χ‹ ?5§aξ—<Φ {­ϊ€Šσ»όΑλ›mς—Uάiί“Sœ§8ΟeQR5†kƒ19FJΥT`ΪΥSβέ0“Ύ)—Hs~5ϋW3€»΄N%Fƒ=βρy6ΙJŸT`(”ρ<ƒνjRmΔ›»σξZΉ{U‚Ρ.ψTC#ŸΊU@κuζ :Kƒ£Ύ’Φαγ)Ε=ŸΠ`q~ΐI/kŽ}Ξgͺ8μ17Εyςαt.-Χ8κ~Η‡ 5bM|8‹ Έσδžγ\œ»H†};ψ0raδCorΙ±EtqΞ{‡΅^ήΉ>t.D˜>\–Υηwέύπ‘\(lΆνn)Ξ“ §n‘s{‡˜N}z¨α¦™š‹σ+‚λμ5Ψ—”ϋ<‡@g,™wug|uι@‚Ίλ₯.½λe>yο²ΣPΞΦ1»ΌΦΔΞ…yμΔΞγ(ΈqΞ9—Qβ|‚cΓ―m}뚻<ψ¬*ξΈΟ‰)ΞSœη²(βΌ °ηJΥTΐ9H},U‘šχ{ε Σ\~ύ»Ί΅ΫΞR&£Θž%Ί HΝUκ°αΨ΄Ϋχ)i’1ΥΣΤθδ(Gη‰mt\€kR7)h6lβ)ψ”c€u LuŸυδς’υF˜+Ξ:α₯Ν ΟψTχ{Τ›Rœ'NηΈ°K―p!uηU.€Zΰ@ξΈΠω0¦Γ;2Βω0raλž.Ο>αBωΞ…\”„αΒΨ[ΓΉ>t.”(Ÿ>”8ΏΫn‡δBa³mRœ'NΉ8Wσ7OχšsηΕY§^ϋΚ Ξ]˜θ—3@Θς\œ ξδΌ.±ξŽϊ¬.ﱑœΉμJ…χΪxOaχŽμΐΎ<4Ά»,€³GΞ…†  ΏΆε±―jΆ:α5Uόή^Η₯8OqžΛΨœΰͺ όδόΘ’Τ§iβͺ”FHΜBΥ οk~xΝ»š_¬xw—β9KμzPκ Θ.‹£Τ§SzJeιξ0ΥΊ$—ΰΧSBϋNΓ:6@Ί¦D8£•J“¨>MΣS4›£ΈHΕ!ΊυŸ_8”Ά˜tq~πq/mNzϊίVqGώUŠσδΓιδΓΐ…Ώ5ΞβBF–ΝβB&ZΜ— α!½otםΩή/ZΖ‹π‘s‘•ν Έχu.”S.Βƒ>t.Δ-'.όϊ‹§‚;qΎλa#ΉPΨ<Εyrα”-}Κ:5ηο26­«ρ˜ΞŒρdΤvœseOgGœγž;θ—±ŒHGψS›NΪϋrsΤ—WάυkKxRέφ^ΣΡθρBˆσzzίηAΣ»iη[σš-{uΏ·η1)ΞSœη²(Α¨Πb]ΰΦs 8εu ‘h wσΚΞΕz^A+Ž‘‚ΣΎ^œ`”€Τ…8nΈ©lί€zL₯F‚Σθ’{‡xμύϋΉgŸ7ΘƒPέ§I”  S¨§hκ>Α(©œί8c&ύκσ§Fœίηθ7~ς'ͺψύ‡Ύ1Εyςαtς‘F«ͺŒG|ΉΖ˜‘ ».οβΓωr‘=p‘ τΘ……{.τΪvηDq!Β;ς!©κ‘   =“ΘΉπ+Λ¦‚%Ξ·ΫωΎ#ΉPΨό.»¦8O.œ:qή oVσΊσΡ<Ž1λλΟ΅ΝΥWτξΉΔ+ΉΔψ―ΈΆωώΏιn‹φKL “Ϊg‹»0ζρ•樻»Nc9Φwΰ‹{ϋzξr«‰χ΄zζžbοbZ»;ν8η——γ&]œoφΐ—4›ufΏ»ϋƒRœ§8ΟeQ‚Q\‘2§V₯šΎ)θμ:΅η₯†Qυ—JχTz»R9―XqΌ[δiœƒ.†=°€žΡR5ϋ€”Αt¨}dϋθΈ»XW0Kύ8n^ΛΊš(§ιA(έΩ D»EΤ]–tv£]g‹υeGœϊ 5yβΗͺxΠCώ"Εyςατς‘q‘ΈN|8‹ KΪxδBέΧs³ΈΠωΠΉΠω0Έλ=βh“b§Ε²!xΡΉ°ΦOΓΉςΏ@ι|’^γBψΠΈπΦΏξTπ‘Δωφ­8Ε…Β[§8O.œ2qξuεζŠζ›#ΜKͺϋ@œ·ΟKόΊπ•ΰF˜Χ/†pΑŽ›.aλuݞ:ξbΨέm΄ΏΞSβρt”ΧΊθr4Ԉ» 'MŸϋμ“±p—h·> σqΕω¦xQ³ΩΟ¨β»=0ΕyŠσ\ΖFL)Γ=QΦήW€© ³KΣΌΝ)π»υ‚.ε“šΛ_ίψΑΦrŒpz(ΈSΓcά‡Ιχα=¦Ζ{§;BΌ‡οƒΊςθ œΚ '@Υ-Ž9γ€€ϊόή’ξή9Em0zλ—ŸέaΔωαxasκi©β˜“ߐβ<ωp:ω0p‘ψO|8‹ Kσ΄Θ…]Yψ0raδC欋\XγΣΪΛΐ‡.ΤΆξΌ³ο³/LF±[oς6Š Ϋη"N:Jœο°τΠ‘\(lΉUŠσδΒιtΞί½@§ΎΞψ1₯Ί!ίΟ /5έ―—η<Šσο]>ιQœΗΤςXίν5ή±ιœΰσΤ%Θ]¨_ΊΘ#κYΕΉ7₯»Κϊ(qξ΅ιΪnΔωŽxnsη#_PΕνwΉŠσηΉ,J@ͺ JA›‚Ή’ͺ­ TΑ¨uιœΜΒ•#S:Œβu©œtωuΗ›NΎLΊpvξι—>L·ΪێHοZβ·aΟH©›zž&p€»γMI0Ϊ‹σϋΏ yΔ}ΈŠcOt.τ‹”Ξ…p € ™FAY\ˆ@wΧάΈpΔωέwL.œrέf› ύμp›%ή=_άvŸ)NέωeW ;²»˜F »s~‰mηξx­>[ΉπίύίΩBŸΉγ?5‘_qFΚ:NΉŽzušΜωΨ΅«Lœ{νΉΟiGΨ_5%iνwΈχӚί=δUl²γaγŠσ³ZΌ΄άi‹ΧΨξώ-ͺˆσκλΧ6—&η²xAι7Θ Ξώη―Ί`« ΒΤ©4FκDΉ‚ӝ†HJσμžW%]ˆ =νk.ΧγpνSlVδέƒ_ž §‰ΐ·ΗΊω˜4 o‡sξiμόΒ¦KέT *|αιΝ­χΤζΦO=iŸyςΔ‹σϋΆ¬yΜΓ?XΕIGΩ’₯΅—«™o·ϋgΨsη΅8,Εy.λŠ ΕΦΈ°Μw.„gqaμΈξ‚;ςaΌΰθ\θ|ϊϊΘ…\ΐ1‡ #ϊH4ΈzsηCέW8ηCΈ°εΐiΰC‰σ₯;2’ …­ΆOœO&N±@G”+…½ξpκ½xηωφvyU&±ziΦ΅qQx»˜η5£Ί„Ή@ͺ<ϋυ&sž έν+―6”“0£ΪθˆvΔΉ τΛCγ8jΞ―š‚†p·?θ)z ίύΠqΕΉ\ννΚύντxŽmwˆσκλΧ6—&η²ψ‘‚5§ ΌTsyλ3Ξx{Ϋ»>ͺk”HgŒYqΣguφL £ΣνΑ'βœυpRΙφlG$υ’ρuξΉ@Όn’@΄tξ>Rk 7p‰δ•@τ–=~ζώδ‹σgέ{Ώ?jwΚϋ«xΰ‘Λ΄ΡΪΏατΎΗk[\&rm±MYχ–§Ω6η΄85Εy.λŒ Ε βΘ…ήœΉPυθ·UΈΠSά£˜v·Ϋ›ΎEŽt8ΧyύxδBΊs!Ξ7|θ\(tρ κσ€Je‡ oύ䦂ΕMwΩr·ζ±ώ›*>ζδχ4[lΊƒ6<}CζΓδΒ)θe„ššΌu£ΘΪ[wΝ{.Qη 
Τ]Ρη.°]”{z»»ηΊET#ΐݏπZv―cΏτW³]tΨ£»-Qξœm:Δ9λιζΒ<6sa>βόs›μύΠζwxb·Ϋzomτ¦ΥεΒvΉ&<ΎzβΌϊϊ΅Ν₯IΐΉ,~@ͺ` ¬ ˆvn‘jΚ 2½ω‚QΥaJœ+έsω'†σΓcpHЈئ‘m=…=6q£>œηkc|ΌΖRg₯n††FέΉuφϋεq—Ξ@΄ΈD·|δqΝ-zμL@Ϊή Nΐ›mΉΩŽmΰyΞ¬`TAκΆ[ν©v]Ε>.(fΔ)§θΥεώ[+ϊπηΉ¬3._θοίΉP“)JGχY\(.>Œ\E.Œ|ΉΠ3‰ΌGμΞ:8Ο{iψzΈtuηC.PΒ‡‘ αCΏο|ΈpZψp·lŽ>μEUq~Θ=ΫxG4σΰΤ©ζΓδΒ @œ·β[b[΅EVRΩ{a^Δx„Ά—σ,Ακ"9֞γtrΡ]˜σ<χ£8w±οpΠqΞ//υδrtœ“ΎΌΜGGœ³^ξWXγ7ϊŒR;z£-w« σMφ}T³ΡοέEέnuΉp Šσ΅Κ₯IΐΉ,n@ͺ`KΑž‚6_—Ώ}&ΘT=9#Η΅9dΗΝ F:μ…ΝξKοί4‹—ΖΉ›iνΉ¬w\(ξoD.Ώΐ‡Ξ…rΠΕ‡‘ α«ΒWσεBηCί–γr.Δg}¬%w>$έ›].π‘qa·>4.Όω§u’|ψ°]vίvλ½fΉηrΝu³}~Σ “ 7 ηΌηrΟ%ΞqΤ‹cŽ€Υm'ΞWΜ8Ξ΄½rΐ£ΫνέΫ]€γšΗ4xDzΝAχυ^·ξuιήξJζΈΰ:NwΛ»Ήθv>.Π=΅‘Ž?j.ϊ„sαFέιΝ&{?lΆk~—}šwώύfLώΛ΄φ$ΰ\β`4Bpͺ–\¦7M£a’f«c±F¬)0U@zΫ…3ιœξ’G­ΧGaxwΧάE8ξικ:6OΧtΈ3δσΙc0*α'E˜«¦R³Μ΅ΩKΉR7ˆ 7½ϋ››ήρ¨“.Ξkξω|]σyμ{»μŸ(χχ M;.Ν†pΉ¬S.ΤΈFq‘ ˈ΅Y\ίΥΈ>œzFs‘ σωp‘σaδΒΒ‡‘ {>„ Ι$*|8‹ §ˆkξω|]σ “ §{ΑGlK φ©νԞγœΫ6sέJΘώτͺα˜3wt»έεΎ$Ένρ~νy―Wη=½ Ή sOmwΗΌηΚ(η§σςτφ«ΜχNοŒ}‹α& 7ήνΈ&ΊησuΝηΑ…o έΞZ 8―Ύ~msip.‹Κ Q *Ή ςΊ ³ 8;wH›ΦλVAžusοκ.q‹₯}Κ!RM¦ΧNΊΐF\Οε„γ&±ΞkΗ=n @lψΖz r€kΰ³€”²)0»Wλtk)νΈCΏ=ϋ+ρζ‡O47χ|±\σvωdIcψ Ώk±ƒ=wf逩+ '¬―Β<ωpαΒΒ‡‘ ;Α*Ξ\ΨσaδBjΤ#:ΧΝε„Γ….ΜG]˜¬Ν!\ωΠΉ°πα¬ •π‘qa|θ\ωp .VάσΕtΝ§“ §{qΧ'yPwΎόš•vE=%\BΦnjyχu―5wΧ{”8Ώ44Œσm\œϋμs__η^kή‹ςβ˜wηΚˆς9ΰœλ5Έβ±c»ΧΆ―OMαΖδΒYξωbΈζeί[·ψr…¦Ϋ­Κϊν[|ήΆϋp‹Λ[άάβg-ž<ΧλΧ6—&η²ψ©3F1ρaz^n‰‚Ή’’©ΰ³ƒ9©΅”PW0«Tw§r΄ŸθαόΔΊΘZ:&Ν‹¨›$ψ5k(q…h|δήHΡ΄4ΞΑΜ^ΥT Z''·ΘR7ozΫ#;Qnό‹‡v˜TqξξωbΉζΣ„δΓ € ΞβBρ€Ά‰\(ξΣmδBρ£ά#:ΧΝ%ΞαB 9Š £ε<΅ι…\θ|h\Ψ7„kωpΐ…r͍Χ5.Ί{ΎXyra.“°ΰ{Κz'X­+{ίnΕΚ‘c Yo˜¦ϋŒWσΞκΡΧγθ¬G~iσΎqΜŸgΗ£Ρθ ηpžε½cϋU‘ήά»΅3ύ§•Ζsz―IζBwΟΛ5Ÿ&$η²fά"}r$¬Ϋ`° ΜθLήbƒ”Ξ}댣$WHM‘δ)8% U#9£ 6©o$θυύDW<:ήqήxLnjސ oޏΗlE9ΉΟ0·ΖoΪu}έωgžάάόΎΗτ©›]0Š[τΖ‡MΌ8wχ|±kΝ3 Νe"ΈP\Ρςα,.'΄ά0‹ qΨ#*εο\ˆ0ζ’¨ uψΠΉttŸ7ξcΟ"F>Œ\(Δf˜±ΧΩCπaΰBψpΐ…Ξ‡λ.ΕΚΞ=_΅ζΙ…Ή¬ΟKίΰ Η±Ί³#Ξ©ΥΛ­yšΟG€#b4m»4ԜGgέ_[›]~yθžNΈk‹£Θ π4φHm'UηάλΚ―°΄φ8σœΡlΞ…½{ΎXyŠσ$ΰ\jΑθί?wε5xrh¨;TPφΉ§Ν€qRI½’%)UπYζόφu–4?²:φ~†Ί twv’›;`FΧΗaι¨ή]Ψ κɁ§l*oΟy0&MˆυœΆ-9QοˆΎξ”ζ†WŸάa’Ε9ξω6[ν‘yςαΗ…_x vαΒN˜Γ‡Ξ…ԝG.τζrΞ…pn #ϊ˜ΗZŒz§υΨψ2r!}6|ddαCηΒή9oωpΐ…₯΄§γΓυ€ ‹εž/έξΰtΝ“ 7˜…tnοΖξBά]uDλΐe_1μtξN5MΨΌΓyMΰF‘Ε9σΛ] _aΣ]ϋX΄ΎΡ[ΉπΠΓΊΝGaξMξΌ)uλ»;ι>f “Ξ…{Ύι隯+qξσ;—.]šL5Νi :η\΅•₯co?6GλΔ!‘Ϊ~ΓΉ3A¨\€²%€ G(€£AˆάlοΜρΈwŒΪGβˆq‡»εζ€ρ@-Ίω½ T΅]{ΏOenωλ€+^yR‡I&ΰ.πΪχΡΝή»“ΑhςαΗ…xοE.,Žϊ,.η—άΉηfq‘ΔΈ_… GρXαΐA™‘ΆayδCγΤ:Ξ‡ ΉPYψ°ηB^Χ>paΈPΉΉp/Vξ^ζšoš\˜\ΈΑˆsΊ±WΖ€Ή(wξΒAg:ΝΥ\ΰFq^›oNWv„9ΈΒλ‘β<\@ˆλβσ΅qjσηξ On΄Ρf;6οόΐŒ Χ΅sή.Ώjρ“0`~}ΖOς8σX7ΰcύUεεΓεω·ΗšΗ:1Η™|˜\˜³y¬y¬Ι…Σ%Ξ s‚ώΓψfgkk"_y¬y¬ωa"Ώ³<Φ<Φ<ΦDŠσόQηgšΗšœikkkrarakkkςaŠσόQηqζ±ζ±&ςχ•ΗšΗšΗ™ΘΏƒ<Φ<Φ<ΦΔ΄‰σΣ'©aIgkk"_y¬y¬ωa"Ώ³<Φ<Φ<ΦΔΤ‰σD"‘H$‰D"‘H$)Ξ‰D"‘H$‰D"‘HqžH$‰D"‘H$‰D"Εy"‘H$‰D"‘H$)Ξ‰D"‘H$‰D"‘H€8O$‰D"‘H$‰D"Εy"‘H$‰D"‘H$)ΞσCH$‰D"‘H$‰D"Εy"‘H$‰D"‘H$)Ξ‰D"‘H$‰D"‘H€8O$‰D"‘H$‰D"Εy"±ΎώH—,yU‹Ϊ㟴8Ί²ξ’ŸY"‘H>L>L$Ι…Ι…‰η‰ΔZ&ΰvΩ¨Ε%-jρΦόΜ‰Dςaςa"‘H.L.L€8O¬ BzI‹Ÿ·Xήβ-Ž*λoΧβ₯FFk±•½ξγ-~Ρβ7-ώ±ΕΎφά‰-ώ³μSϋ~‘=χΤ?nρληΆΨޞΣ?Ooρ£W‹Eˆk™€οί↧•σΎ}ώN‰δΓδΓδΓD"Ή0Ή0Ή0‘β<±&Ιw―—A‚ν²s‹έΚύη΅ψ—woq‡οlρa{ν“ZlZž{S‹‹μΉΛ[YξoΩβ rA-ΤγςΊ³Eށ€?Ϋb‹K[όͺΕρ#ŽύZ\3–&ŸSώ³ωBΐΛίJ"‘|˜|˜|˜H$&&&Rœ'Φ$οήβ—" NxξΏΈRZoΧβζ›Tφ³E!ΟΝΛγiρ΄›…νDngΩγ;—}ξl|?{^Dψ΅uu΄]ξΨβΪ)υŸΞgς·’H$&&&Ι…Ι…Ι…‰η‰5MΒΊΚψO%Uθ#v₯tE!#Ώβxc‹ZlάβΟKZΫ4veυ>"²Ο―΄8¬¬B‹g†χWϊΣFΐ»Ϋsοkρš΅Hΐ))U··4¦›Zl“Ώ•D"ω0ω0ω0‘H.L.L.L€8O¬ "ήL©I->Pb¬lϋΨrυt—$c‹Hže;₯,SzԈ«£wͺ\²Όn,] ώR!ά_\QŽι9ωI$’““‰δΒδΒδΒDŠσ̬+zP©ρΉ}‹χŠτΚs"Ξh±SyΌM‹SΚύg¨Ž¨ΆHτmgΩΟc,ιΙ"Ήr¨R+t@yΟ7λΚl¨+Z'WGΛUί[[Ϋβn]ώVώ^‰δΓδΓδΓD"Ή0Ή0Ή0‘β<±¦xί(3]nΊ€ŽœΟ/WI——4₯ΧY=ΠgΚϊŸΆx\ ΰ/–΄%₯5ύ[¨zzΩοwχu@ΐ|Tι>ϊ­ΚφΫ—+Έϋεo&‘H>L>L>L$’ “ “ )Ξ‰Ε!ΰ7ͺƒ¨=Φδg“H$’“‰Drara"Εy"±vΘWυOίӜΚςψ˜2‹σNωω$‰δΓδΓD"‘\˜\˜HqžH¬yς=ΉΜ¦|oI―RχΡnρΘό|‰Dςaςa"‘H.L.L€8O$‰D"‘H$‰D"‘β<‘HL\ ΪΆωY$‰ œ 
5jό,‰Dςα’[ό^~λXœo½υΦΝΑΌF±ϋοlΦμqϋ͚{άqσfŸ;Ν`―ίέΌ[§ηφlowΫ€ΕΖεv“™uΪF·lΗ~xΝ^wΨ¬ΫΧ½Άή¬Γ=7ίΌΩwΣ‚;oήάs‹™ηάvΣέ6νΊ·Ω΄_Χ£]·–3ϋ9p»Ν›ƒvΪͺ9xΧφ³ΩsΫζΰ=Άiή­½ΏΛVΝAΫo>ƒ»oΡ΄γ–́wέ΄9ΰ.3ϋήoσ™χέϋχ6oφΫlσώxtŒ:w­ηόv½έ¦ΝNKf ΗzύA;Μμ›sΉη›υη’Χλσθ>“;̜‡Žηΰ½οΦΤήκ5ΪVΫρ^€Ο]ϋΡ6:6‡fl£Ϋ}ξΌςϋβjϋφο•Ηl§cτ]qόBš;ΫϋVή“γΡgqΠ-›ƒχΩ=η»vŸ½Ύ/‘φ›ΣφχΪj³ξ»αϋΡγύ·šy>?m#θ{γ»Ψ―ΆΥϋοQ~ƒ{”ί$οΑ±νsη•珣;‡ςωξY~»»nΌi³Λνf ίΏmΐίΒ.mΪAΫσ»Ρ¨”Εψ»?|Ιݚ=–lύm”Δ»φψPί!<Ήn‹\F.d?Ξ…p|Π]o:δCΈ°{ΌΝl.δο γ%ηΒ½Vςα€ #Β‡‘ Ε 5.Τk»χ \ŸF.ά >\ςwΉ°ηCηBψπN£yΖ…‘39ή=::ΖχΥ}Ž―γÝΆœΕ…έη° >„ »οΈηΓ•\¨[Ύ{ηBέ::φ|h˜‹ uΎϊέF.δο#ra·]ΰΒΕβΓ,9 ΩΈ₯Α8G:ΉpΝrαAϋοΧtΟ}šƒ[΄ΎένJά£9x?°wsπΎ{U°ηΚϋΪFh_wP·ΏύVβ^χœΑϋ·ΈWsЁ oϋϋϋ―|Μk|?ν1vΰ˜νx*η0ƒύκοw°cύ1μ?sLΎύ¬ΧρU}ΌΧ½φ>ΧΏv_;G>ξ±ςσ‘|'ν³G―νYΎ§=‡ίӜίιž3―³^™γ‰ίC°ίQυw·oψ^†ί›Ÿg·ο9Ο۞σχ‹ΏιΪί€Άρί߁v8π ƒfξΫsZw`»ξΓ½ΐ3X .ΤΎ;/ΩΈΉΟ’-šδΐu,Ξυ#Yέεγ›Σ|v»γšΟοp|w3w=C\΄ξ«ϋœΤ|λ?hώνΰθρ…{œΨ|qιρ΄ŸOnulσΡMi>u—c»η|½‡φ£νxΟ/ν|Bσ―žάότ!'5—ž|Rσ_|pσνΓgπŸ8₯ωΩ#Nh~υ„c›+N;ωρρ'7ί=ς”ζϋG=ΈωεγŽkύγ£›λžwt³ό™G5Χ<νθζͺ'ΫόΟCO잻ώωΗ4ΧΏδψn=·β•'57ΌϊδξΎΆΉδΔ“›ϋέ{σ~'υψϊώ'wοϋί>©Ήμα'vΠϋjέ»žΠΫ{7zΠzΝΥ§Σά|Ξ£›[>ςΈζΖΏxh󃣠\τYθ³Υku«χΉόΡΗwηtγλNi~ϋζ‡7?Τ έgͺ}ιVΗ%θσ.Ίοƒ»sΧqhί:~£ σΠ:}vz>KφγϋZΗ{ΎcΟWφ:±‡ΎO­βλΎ;³NΗ’ΟQί‹>£[ΏόμζΦΟ=­Yρς»ο^¨-Z‹Ηί}Ηϊ.υ[ΠoBΠg¨ηt_Ÿƒ>Oύ~ύ”cΊϋ|Οϊ½θxυύρ=ϋΒη§ΟV¨-Z―ύθ|υθ{ΥoYΏwύ–ωλ=τ›ζ·ώώΫΥ|πwξΎ{ώ6Zςόζ"π{Άδ{X+Π_Π5RMβ] >t.ΤχUγCύ=Ç΅ϊξα:ηBηCΑπ‘s‘ώNΰCηB=† ΅>t.ΤίFΟ‡Ζ…βLψΠΉPοα|θ\¨γ©qαϋ6>ͺΚ…Ώ=ϋF.Թ곈\¨cΥί¬ΈPα)η,ηB}&:Θ…ϊ,t.ϊ\j|Ή0ςαͺΈPίσ\\θΗΙwηο :ΦΈp.>t.„ηBΎwύœ k\¨eΎ\Θο%r!οG.Τχ->t.\ >ΤΕIqαiKφlŽhω09pqΈπΖλmn\~ΝJ\{u_nΊόΗΝ-σνζ–Ÿ\4sΛύώζ–KΏΥαΦKώ­ΉυΗΪάϊƒκpΛύγ Ύχχ3hοwΟύθλέΆzνΝ—}·ΫχMWόwsΣ/ΪόφW—Νΰκ+šsΥΚΫk~5s\ν±ή°bEw«uϊyχšnΏΈ΄ΫίΝϋΓζζŸΊΟγŸύgΈΫV―νϋƒώ8μxΊch₯;ϋœΊuεX;όϊς™γ©νGΠρm[ŽΏNλ8_½Oω~Ίs-Ÿ‡žλsޜ³Ξ―ύLkίOρ=ι{τ]ωχΓw€mΒvƒο•οΦ^τ]—ίMw|νχΠŸŽMλΑ₯ίͺώ>ωθsιΎ3AίS{Ϋν³}ΎΫο¨sΧ­£ό~Ώος½λu#NΪΟό†λ―[‰φwΈβ†ΊοƒγΡχ§οkωυ+šk[Ρ\Ϋή^½όϊζΚkgpΕ5Χ5ΏόΝυ‹ήoΙVΝAK6oΆZς;ΪίfΙƒS.Ξ . `t«Η RψΟ™  F `Hυ<-ο)θy‚C bΌ$ZXθ=υŸ½ž'XTπ‘@P Σc½N·Ϊ‡"νCAŠΦι΅ ΰ΄Α¦ϋΪ·ŽAŒQφ©ν΅ΞMΑ…R}V:'§‚Ιλ_t\χžz^―νυΉΔσΦη¦ν+˜BTz η‚Qγ&ψ ς\œ+p΅€΄ˆμς9ψΆϊž€ώ;Πr>μ_Η―cη³ΊρυinώΐiΝ-zμœΑ¨}gϊŽ―{φQτ½Z§οTŸΉ^―`•ητ½q±Gη₯cΡyLΗ`Tΰ3”ϊΎθ@‹φλ]Ώq σίιθ>ΥύΕ ΰΓ‹(³%χm˜¦{ΎφΔΉΦÇ΅‘αB}ηπaδBxΑΉPϋδoήΉPΏgψΠΉPλυϋΒΝΉPϋ… αXqWδBν[ϋ­q‘ΉPt¬‘ ύ ηB]€ΤρF.„‹"j;OδB=Φ{F.δο{”8\ Ξ…:ΎΗΘ…Ύ/8}qˆs |θ\(±Ύ*>t.Τφpžs!q" ϊόΈΨΉ>œκ9}>5.ΤΉPS\¨δοb1ψP<(Q~Ξ’Ά\ΈyΊηkQœw‚ …Θ)b/ sDΪ@˜·pΡ‡hκ„W[±* ˆ‹@•ΰ‘ΨιŽΧ« α&δB.ρoθΧΉ8ηύAε}βθXΚŽΉ»΅sθ…;ˆοeηά½GΩ/B°{.”ο§ηE.JD‘κπο+ˆσΑΔyE WΑ6ώΊDρ@H·ˆF-:'^Ίs-^ϊﲬλny~·~A©"Ξ΅ŸnΏν~τšΪ‰οφ3Χχ!Q.tίMϋ φΣn£η$Ξ‰σ«Š8—0_ q.Χ\’ό‰Kvl$Σ=Ÿ`q Ky*xPΰ‘@Aηϊ– DΫ걕ˆκ?`φ«mΆ!°ΥΦόgηΨ—ώCΗ±‘θTΐ₯@Ϋθy4 4L r˜œΉD€λ" ‡ˆΐOA‡^‡ Δ±WP₯υΊ―[•z^ Η:>³@ΰ¦uˆk?:V³φƒ£γπΰΠ8 sΖΡΧϋi?8"ˆsΞOΠ±z@:ί`ΤƒIΔω\)ϋd_Ρq"°Φqκxuμϊ\WΌμ„Ξ]ΣcaΤ’Χθ\ε)— (θυZ§ ^Ολ{ΰb )η―ΟWΗ.Œ 6 FG€Z8?νOŸ‡»OžM’ί3YϊΝλο‰ΏE ΰΞ5W0*·ςπtύ ωή"ς· Β…ˆsq•s‘D¬σ!\Θν αŠƒ:JtΑ‡Ξ…ϊ»‚ ]¬:κo>Œ\莹s![‘ ΉθΉPŸ!|θ\ˆ8\¨Ώaν'r!œΉsv.tώ―q!Η^sΝγEJ‡s‘g-„ αC.zθΨΰCηBe¬Š oxΝƒ{>t.Τ-ί™nαΒΘ‡γp!ηXγB-‘ ωޝ ΗεC\s]€>Ι½=_,q^Dζ@Εy/ξwcM¬ VD_ΩΧt Ξ%Μw©M€βFφΗΙE„1B΅τEΈΝrΚ[Tlάo„΄‹ςβ’Δ9ΰ9Ž‡ΧΉ ηyO{ίώ|W¬˜‹sΏx‚SΞy!0—@"έ.¦ΜrΎάρq ώ[°uώ}#šu¬ƒίS‹‘β\ΏύFΚ…ώœέI'ϋΟ‚Μ‰ςyτŸΕwά·yν~τ}φB{t.$£Bϋv.tαξ\H– ιτdΑp–s‘§ Γ…Ξ‡‘ αΓΘ…ˆΰΘ…rΌ;> \Θ繐4ηΘ…QΜzφφΉPk\θYDΞ…\Δ¨q‘‹sΈ‹΅ξœ{j»KzFq‘σ!οΣραB‰tψPŸεͺΈP©π·΅π‘s!π[]S\ˆ8\(8ŽΛ‡ΈζpaΊη‹Φάs LΪg9²Oow!D›žοšnεtJ$γ2 €IΎ~Ε ½sή‰UŽ‘c-ιδ½`+޳xH)η=ϋmύ½MLχ"š ζ~ϋŏΑEKeΗ•Υω έ>Λώϊ,’)0+_―§ΆWΚϊΗρ»ϋΞΝ͝Χάόο_θnυθρ¬οοΒΏ½?K˜γœK˜γ˜w»w»ΫcνEύ]ηΩ_”ΰBNΜr(Οi[JψLτ»]εE€rŒ«»θ\pΞqΚ•Κ~ε"ŠswΝ%Ξ…tΟ'Ψ9gΡΥm8ϊ\yH²( "(Œ©;MΊλΔ•rέκ1ξ9uh υ€p+H`½ΑAš€σ”nSη^O*)N‚§”r z­^§})ˆRP€ΟŠΰJλ΅oάqx»j> (@Xκ½p΄(³ŽΊS­GœsΌΌg‹ΐ@”tF‚E―k­₯΅`ΉSSΆ― vW‚eŽ“UηD}\ A,5΅ :©±Υ}}>€»σΉR'ξ˜kY•[4ί€”ί™‹ŒFΧΜΧ=o—ίmρ·ψ^‹W—υ[΅8ΏΕΚν–’[δ\¨ΏSηCηBψΠΉΏύ֝ υύ8Ίk:’…€[ηBηCηBάbάuΈΏΗΘ…ξ{ <Η@†Œs!iδ‘ %ΘΕ‡‘ α°Θ…8Ο‘ uΜπ›s‘ΧΥ;’MΉχ‹\¨Χκ~δBψ°Ζ…π‘s‘םΗZsηΓΘ…΅2ŸZ6€\tΚ―F‰σΘ…π‘s‘φ:Ζ kƒ ϊܜ 
ΗαΓθšƒωΊηΙ…ση昻ŒβŒ¬AΗ•Ε5₯ξΧλŽ=ݜ”ςθR·B–τa„Ή ΡJ*y‡"bα:Wέ7΅έ–*Ίb. Δ·‹nžχzδ"Δ}ύzOǏΘbX1Ό1k{;ΧώΒί.±}'Qm5θύχ"ׁ^Δω@€+σrΔ=οaοΥ‹sΫέϋκ{χ2o³ŽΧκαηZf• Ψgβn6)κ})C©s_Υ{πϋgΑ9Ώͺˆσ½z¦Ξ|±ΔΉ»ζ`‘ξy»ίβ-~άβ₯•ηχnρυΏmρΒωΌv}βΣ‰$`‚ DtE^Dη8έ€€“ΞF§tP’Ζ‰[„3„kN=ΊΧkΏZ―uΪΏz i2Cz#‚œ@ΰ”΄M―‹Γ AœSηΗ@½%–^OS&oξΕE ‚s=›‘γβxH!T0¦ΐPA’ά ΉM]ΓΊβ6\Q―ΞΉα QC―m¨%ΕΥΗς ―σQlΧΪ ΐ½Qηή,Κ…·;Gξ Ή#Εη€οοp«ͺ΅Τ‚‹‡γ„{θYΤεκ9ιράηrΎ…qOŸ'­ΣΕωΈ ’kΎPχΌŒΊsΉ;-ώ΅Ε}[œ‘κΆΕλ7Τ€.€^>d‘±uΦp‘ώFΰCηBwϝ γ…JœkΔΉ~7Ξ…:ψΠΉΏΘ…όύρ· Ί3Ή>Œ\F.Τφόζ )-‰\ˆh­q‘ΉΠΎ iώ¨ΫΘ…Ξ‡p!΅υ‘ ½Ωœ s.(Έλν✠pΞ…ΡEΩEq½žεΰ|Έ*.ΤΉQgξŸ•7ρs.δHΞgms‘ΰ\8FΧ|‘ξyrα<ΕΉ₯e{ v/Ξq»©.’¬λˆσZ3.RšKΪpŸbŽx5[+βΧ\ιΑJkοεP“έ;Κ–ϊ<§0oQsΙkœcΰb―cύ@¨‡ΧFq>Έ‡‚Yί‹§Έ{J?.1QB*w}TΔΉ²ΊΊs ρr;η€»Ημ‡~vqΖΡχQœ—cξŽΣώ‘ξΉκΎ­δ`‘`4hHVA_–± ρ=WCΈy‹σφ»ΠqDqΗ`Œ •³\σ…Ίην²q‹KZμΪβφε’ε>a›m[ά§Εk]œΟυΪυ‰O'’€©₯ΤΥwV4CΒi&Ν’`xϊ'‚gHϋˆŠi˜Δ}š&!ώI'ΔΡΡzάVš#ΔΰγŒ“ώθΑΦ)Ψ’Ά’γA˜+xπtJΔ5sl§ΟAΫxj"ΝζpoΌή»kόσm~ϋΖ‡u‚œZvA‚[uƒ8Itέ•XΊ8§IβΌΦΙƒQ―ǯ՜{VΧ₯ςΌ»NžYα΅—±†ΤE΅³σ Hk 5χœΏ;…όNΉ( hα·έ³U­ J©ΉυnύγΤXŽrΝέ=o·9zϋ»c‹oqhΉβΉ]YΏoΘ©Ύ3ύν8zc8ψΠΉZiΑλΥυsλ\HŠ{δBέ‡ ©gΧλœ ‘ΐλΘαCηB\mΟ©ƒw>Œ\F.Τ­G.€!gδB„pδBR#κΦλ¨αBίΝ%Ξ9η5.τtΎO:ΎΧjΞωϋv.t>t.δ|#ϊηκ\ωpuΉΠωΠλοωαCΈ0fV­ .δBsακςα(Χ|‘ξyrαόΔ9"Ωλ₯%6hvε©Ι½XNz„D—7+Ν·fu&·τp ZD.ΞΉΔΉnG:ΥVλMƒ8εύcη³Ί’G\@€γΪσΨAΊo{mx]/+οαuε³jκCσ9RίγE‰Ac8κ§Ήh‚ƒ^kς&7YβάψYZ»7‰λΕΆw½/υϋρύς΅λŸN¬8Χ’€JΑώŠιΈKͺ%©“^K(ΰŒs…\Α)€4Cς΄6R=Iυΰύκ΅ήUΫλ钍[Δ6ˆFhqΌήŽ Tš7ϋΡcνWžφMZ%..iη =ΥHg^mλ5˜ϊœ΅΅“ξ ΡμŽ`Tχ΅Ž.Μ€a1ΣΕΉΧqΊ‹ν)«oρ5€ήα·6ςΝέr>»Ψ±ΨΗ€Ψ/0Pc;_ΑN Ώ;;b,5q>ŸΊτω,ήΥYΏ3„š~ίrΗ ΰ3O_²O5Ξ\r°6ϊgκ Nq…τ’Χq³] Ϋ\½!€€·Γ‡΅ν^‚ΓE<~ϋ‘ αCηBρ›σ!5α(ΔΑ‡qΒχΞ…ό=zŠ;λπaδBψ0r‘yt.ΔՏ\ŸΥΈPˆ\Θώ#zIηBέHΞΉΠkΣci\ΉΠ;Α;Β‡£Ίs‘g8:ΦΈp>|HΚϋ|ω ">.€4Αω°&ΞΧΉέΉpuω°]vΊη’­Gr‘.`ξ°δNΧ_Χψ0ΉpžŸΧ5—τ`ΔylšΕωΐ=χ1Ygˆ’26­w­-…Ρymiό†G _k"!Φί§6nυη#Ε9΅νήΰΝ:^ξ‚ΌΆŽσ\ppD§}–ϋ.@πό@μ[½½;Ε}Š{­&<€Ο•†@γΡ―ώ]ΞjΒ]τ"Š ϊηžήξMI·vs-ΪYϊο΅”:tβά~Ά―ŠσUt_ˆ{NC so7¦8κγ—ά½*Ξ…ƒg2‰ήΉ .<΅Ε{μρc[Όežβ|δkΧ'>hq…Ρ>1΅€UAžώc§!΅„4€Q`ŠD-%WΛ€~坔OκϊtΛΥ‚ά†Ψi<¦-’ώIJ(ι¬L}₯ΆS§Χsa€΄QŸ«-'M]rεϊάτŽGΝΜμώδΊ9΅έΌΪΏnw_σzU‹)wΗλ"ι¨,Η—›Σα|œβ€γHQ£νυχ”²oεξ±Χ¨s‹λβ%ξyJΌ’±Žq!Βάg ϋϋGqΟiUKLγη"D­v’ΰ™ζW\δΰ8Η]HΥo-ŽΥƒ€_ρœΫνߏ―ŠxΝνΥF³€ύmΡβο[μ—ιά|XγBκjαBώΞΰ@nqιςzνyδBψΠΉ1­ϋΞ…ξΎΖnγ.*αBR›ι€ :F.€v=r‘šΑ‰ozχΉπ«Οοηw;Βa‘ i ΉΠo#"ΰ kέΉ±Ζ…~ΑΒλΏ} †_τ‹εA\°p>¬ ]ΟJςΫQ|/TœG>¬]u>τsovMq!Ώ7Δω85ηJŸΌχ’mGr‘°σ’M›δΒΕεΒXΟ\k𻂻ΠιΣΨm¦8’ΈΓEL!6qΗέaD{½ω΅ζL3Kzωυ•m―?χθ±#ΌwWg–΅ eŽ/ΒttΆ£λ^sΩkBΎ–2οιτ³ήŸ¬:Έγ’ǚn―AώUή£ΣpΨqΓ‹˜νD7¬¦Ό»ΟEu0―•p1!Št₯ΉΗfsεΟ|.. š– L›‹η˜]ΡΝMŸ£›ϋκd’p)~ŸcΔ†_{Ϊ’₯Ν36Ϊ©ŠΓfJpϋxDE`Ÿ=Oq>ς΅)Ξy5οœ`ΞΣψhzEΣ3‚< ΐ•ςzΣ$Φι΅Ϊn83wuKϊ¨§gγzH8‘ΖIJ(ށc|pœΈο:ƒ—IΥE*¨Τγ[>φψnŽζΤήϊΟ/ln»θ3ψ——Μ§ŸzR7ΏV³l%θ{φ#:Q―Η€pRcιN˜w&=“@”:Co„›ΣEWΘΗzC%― χζn‚§ΪΦRDcͺ{ζqT‘7?’6Rοοnά|Δ9ΗN μΗά,έβτ‘z+Œ³xσ#jΞέ=Gœ?w“ύϋΏ£ˆΧm²0q^φωJ‘l¦rškKδBψΠΉPΟλ±g ωχΉ0ς!\ˆ°Φ{8ςώ>)Β…'s­αBm:ςz&C8"Θ#Jœ‹gqαωΟZΙ‡Ζ…πaδBνCƒ5.Ξ…ΤŸG.τ¦q£Έ°&Θ#ϊcηB.xD.t>Œ\θΩW5Ηάχ‘χ€ u<Ξ‡σY8ίwG.t>\“\Θ߁χ^X]q~ŸΆΙ…ΒΞΝ_œ'.@ A;«.—ZσJZ»ίjŒε–Χš―΅ΐ‘–8₯NΧΕΉ rOw7ςΪλWΜξlξ‚άΗ€ωˆ4kΆζ <΅ΌV{~m¬5©θήuύڊ8GŒ»›Šΰ•:ΟkgΉξ΅Ρr><Φ’ϋwΔƒožΩέvNyΟhnύΒΣ»ύ(¨₯©韜§ƒξrΧŸΉxA©Χ_Ζnξ^Κ6>™γπ΄Nΰ’=6<ͺΝO― 8W\ωμ½ ΄mgU­ΫDAΚ)(‘&` U$$@ DJE―xEA€’+±ΕΛΕ‹’/ŠΡ`( <ZD±Έ*—‡/¨z1 ΥΒvzΡQhΏ½qqΎ˜ΐν$β.‹Ÿ{ΚγƒT-uΙςrώSίzάΘν˜xω΅ο³%9N7U•(._[r¨!ΞβάdΪρςuBΊυΙ±ΠρΠ±Pšq‘s°ΠρΠ±RΕoΣ±ί―c!Š…n΅/T9ΚU]X—±PΈ'ΚXvްώ’swaΟυ Ϊ£^^[žGϋ©ξύ"Ξ!?Wl΅˜TͺΥ¦t(δ<p£O>]γ}οΔ©Ό< AWΜ!ηΟΈΖ­?uΝcΊqΚ7/EΞ―9Δ? 
NŽΓNDEbKBœΰρ<\œϋρ<–&y‰gΕ¦ΧήTβιύτ|Mσε\ i̜ϋœ_•pАͺΧRΔ΄kΞ/ί-e›š7.g~ͺ’KeΔ]x{φ£μxtkWΏ₯H)$ΤKγc¦¨:&•σœΌpρš ŐΦΣwόΉκίy*]Ξ%™Dtιžlυ#Ώέn«Ίt:Ζ…oyV+ΞŒ‡ 'ΔΓ ΔyΔΒKXHςχεx(,δoVxθXOΪΉ0w\q,όζU£˜±P= •)ΨΖc”°0ŠtΟueΠ{ΑB υ~±0βa4Κτωη.Ξ…‡Ž…ΒΓ.,δϋk"rέρPXθʞƒ…^-δψρPX¨žsα‘°0;Έ'<4,¬Kν+<,aaΒΓ>°pΊxˆ8ίώoώ€ ‰­ώδ·β|Pβ\%ΔQMWœ3ϋ9gΞk!žϋΟΗ9s›Hχ1lΪΗkςŒo=ΛΫz[η„2ηš}ωη₯Yεэ]¦rάGΗ"Κυ8šξΩseΞίΗ^~Kθž)χμΉφs?Ύ2ηΚξ»)άΚΠg^˜rkgPFυDΉ Ϋ.ORφ<όωχXpr―[ό»Λǎ+W—ωŸ•Σ»8οΚ–λχ7Α8Έ.οk‹π,Ό»·Ηp7χ¦“@³…ηWΌc»Ξ5;οXŒ>σo[qΎΠΕy26ͺώA‹‡&ΧΥ&R:ay;€”³υκ£”qΨƜiπ²ΝLB! d"D| E"I'Θ”Ζ…-/dOΌ§ΗϊȈHD"JŸ₯ΜΫD U齓ʴ‹xr?νΑWŽΖΧ~=ρ2Αq~¬JF£aR$©qQΙΩ˜Ϋ £n”Τδ\³θ>JHYsςΙχ²AΩ-_FHφ σͺχ|δΈ1"jŽμ‰tΖLΜ,Λ£θꭌfHΚ‰|V·)C”J6™3 …ΌŠΠ2κ Γsή7:“ϊΤw€l‘²’ifyυ[Gdρ·ΐε&BΚί‹J7ΏEτ-ΞwΨjœ1“βΒ}v+ΞŒ‡Ž…ilZΐΓia‘άΧ ΗΒΝ†‡ …‡Ž…ˆGα‘c‘—};‚އޅ\W_Έc!ΈΓ6–­Kd—°PθX¨λ u‚Τ½;&ΒΒ¦²w•κG,”'Gϊ8:ο;/a‘πΠ±ΠϋΠ‹c&ΑCΗΒ€‡5qξ"½ˆ…Ρ(SXhxXΒΒ„‡Ž…ηύϋF,ά}›τ/aa4Eδo…>πˆ…}‰σΗύI#[ύi+Ξ.Ξ³xηάfDχτ˜ΚΌJ€Kœ›x‹u‰>Ÿƒ^›χOgε‚ΛϋΟ±>JΝgˆ{ΙΊ‹t‰o«ϋ³οzq¦Λυ*‹GdkΞ9’[fpκI§Œ »D<[φkŒ› η$Ξ£@WAξζxζnJΦ•MφLtν]™isΗΧχά5ZΝΛΠ]¬ϋ‰Ν—Έ·ŠŒt‚»iήz>ΡSοΛν žεΧχ^ο‹#α”ωχIΚΖ[φ]Χ}@ݏo-n3δ.βέυ½/qΎσkίσκb|tλ'·β|!‹sVϊg|ΐvιŸ[2GΚ&NGœ{Ώe:[!ΝYς”=Rιfηάq%μrΙ…„*³ q‚Di†yœ_."ͺλ9'y>H¨œΪ!†žYυΎΙHD!œ_Ύb4Z>vYΧ\χcUή υrw7Lr‚η»aΧy―ž1‹£…”Aς’NΟρω* ώ>*H'MΈξ„΄Ρ Nγ„”M*KMΏ£~‘“‰σ8ODTεοY˜§ϋpŒζLCN! δ4 ςz `>6‘Σο½eτδc†8>;Ίσw ·nˆ©LΒJβœ9Ώ}‰σW/%Ε…ψΥ;ŸΫŠσγ‘c!β\xΨςχ <4,Lo ωΫ::F,ώ9‚.Θu‚R' UΦξX(όŒX¨Κ£ˆ…'ΓBπ”Η+a‘γ‘°P"½”I—H=θ%,”@XΘηYΒBpPx±PxθX˜²θΒȅ†‡].λΒCοw\s,tή·ξB]εςΩ¦,ΊJέ%Ι‚{hΊΚί%ΠΥs~w7­‹βάgKŒo°>σ$VcΉwΜ&ΗpΡ±bΩ(.Ž-λςˆέΔ{έ’€@.+—c Yπ˜χοZ!‘ά5»\₯ωώ…μΉί¦χμΏ!U°Υ‰w{χηnηW‘}€Θeυ‘«τ¦ΡυHFeŠδ=φ\ΦcσD1R_€z$!”ά– «Κ/‰TŸ€—hΊS±z2KNΖ*}WvΘG\AJ+ZχkV#²šά‹+"š²ZΥγ`TŸ  βœΛDώ^ §Ρ~Εωq―Z™qΡ»ŸΧŠσγ‘c‘J~ϋΕΓτΫ:šΑ’c!oΰ!8Θί­›–  K-0Β αaΔBαaΔB‰υˆ…:Q±Π1ΠqQx±<εqzΕB4ˆXθ'g }Ěc‘‹sΗBΆΒCΗΒΪ#`ΣY]Xθc'ǝ¬τja‘F¦ ……”€  ½§,”!\4ŽS‰δ˜yU]¦c»eΗΛ NJŽΫšyžbeΟέ±έ³έ1[Ύ" ` q•K€ΗρeΪ/α/ΑοζqήΓ.‘ξ³ΟεβΞ>οWxΧ΄!‹Eή—ζv»hδ;*‰οFqξΖ{>cή*&<{ήUζξ†qσθ°c%‚σγΕVΏ\Ÿpqž‡qόΩq–;οcœ8w£9Ϋ―Χδ&zϊmΙ@N}όώό΅χΆϋ[ιGœ_ύ‘7tnψθΏγγ°€'q^­—Vρ›*«βγ…Ϋ§Š/ζΫAΉΔn»±Šεy$εEΆaUό(¦dϋΠVœΟ₯|3υtU—Σ?βIϊΚ{&€γ@PTž3¬κΣƒΜxODΙΙ(Ω!ŸM+qNvΔηφz¦HdT„T½•Βύ.=ͺ6„ƒ* €ΉηΚ )SξΒ<Šrˆ'ΑcP]ίη’Ρΰς^•φ«œΣ{0cΔ^xΝXχ²N•π»A„΄$Μ!£Q>G>[•§+λ£~VυRς½θ»aΏ“QbφάΛ9EHUΚY ΝωE¬ˆΈͺ_ά3θ^šQC>λW₯νΚ>ωύ)λΜΞΔu9;Χ•βX‘Ρ―Ύ&•.λ13” ’*AΖίd•Ώ ΒdžQMΩ€JΔυλΦ~άž^;‚ΗΈθ}؊σγ‘c!β|&π0‰α‘άΫέ(ϊr,Τ‰Jπμc‘π0b‘gΝ πB£ ½Ο\XŽι„‘c‘DqΔBeΜK'( #6*„…άŸΗκ Uv_ΒBΗΓΙ°Πρ0b‘π0b‘7ΒBwsW΅‘°°ΖCΗBoυρ8 ω­<μΒΒ(Π}E …‡ΒB„zΖȅiz…‡γ°Px°cΉΝ±°/qώ€-±hΕω`ΕΉJ]<υ-Ξ­<½/mΟ}œ#xοgNσΠ«Χ'‘„ —λφΊ,Κ5χ<φœΗήr `•«Kœ#Όε†.!ξιˆtb•Ή©ΛxN"έg¦“9ΧΈ4ΏΖ–ŒztWψ‰UHœwe–sŸyμχQd]B՝سΐ.φwKθFaξΟαΖkΡ¬M₯δξ―,ΈU?x―|Wf;={ν½β]εBΩΗΉ»»Ÿ€Π$€œ=―/χ¦σσYknΌŸρL~ϊ ύρύˆσkvySgεΗίRŒ]Ÿ΄IΕy΅~·Š`kΏ_ΕεU<1³M§g‘ώΜ*.βό ϋ }ΆU° Εy΅ήΙIlΉε–³ΐ·ΏeΤ©=e‡(_γŸmυΟwΊε›γ)sV5"MFGΉTP₯›/Χ%  >Λ’₯RΞε‘ΟR%ŽκW$DO™"΄J*!ΚAUβ)A­g†”ιt1Ξv‹ŽκμzΑQέ/%€μσL%Md D¦ˆΧ/ΧdΉ+3δΖOdΝΥS.³(ˆ(—U)*r©ŒΉzX9!Βm"£~¬“Ρ8R¨.η”Ψ ”Sd”Q?NΖq£$.γ`Μ‘Ξ±Κ‰,АjŸz-/‚ͺrωκz%{Λ”qV€T%ς*ΫLNΕY„§η«ˆ1Ω%eK)υTof2£ΜΉϊαΆdVύ͐ΡιGœ/Ο¨`Œ‹?ψO‹VœΟ:zI{_XˆΨq<΄κ!9€;F<δo9:;zίy ‰ˆ…ΰ—#ͺύ&ba)[.,:κ:X& ΉΟDXίy=ΰaΔBή3x±Π«… ½'b‘°,b‘πΠ±°$ΞΗ•·G,:‚}ΒCΗB»eΠ»Δy Ή.<4,Lsα‘c‘ΚΪΑΓ€…ΒΆˆ…κAX˜2λvΓΒιβ!r‡Ώ{d#KώμtZ,œ},Χ‡μ=Θ3%ΞΓ8΅$Π-ƒή%Ξ%ΠσŒμΎηκ΅JD)[>sΠψnυ˜ΛtMŽθΚNKœάΟη¦k|™ŸX“Ί‚cTF―θκ%χlΊΚή%ή9F' bΖ]β|M.eηύΛ£Ώ’5aœΡΝI Ÿύ3ΨrJ―Ϋ |Ύ<-nΈ§RsΟBGαο₯ξ!CήΑΐ³Π*ΛW‰x-¬}ό[~_Χ%”mv{±„= φά―Μyύr¦~Cφ/Pl#θ랽οSœ_χoν¬ΪγνΕΨν…ί‹8ίΊŠ3νϊD8ζΰ*^oΧΙ²o1‰8χcΆΰz›9ŸαE–ˆ΄)CψΥΧ€΄ri±ΜΉ ΉΡŽJΩ!C'Ο ) Yd@4Η—Λr)Žfp6υNjΆ8)2(3"B‘‘Qφ(’PΟ”‹„B4!œp‚ΛΊύθ/»Ί,λDTY)•˜*»μ•Μ™Τw©,‘‘c«Ο‹₯>JΒηυςyΛπˆ€LςΩλd—ε-2ͺ>LwsχφΜ9£„݈ΎJ5Ή.Χb"ί^_&c!U_9‘Ie-ΞcxF‰γε~ Ρ„€*SξqΚΫF3F"«Τ“ώ΅>1uA;&"JŠϋ'’Zνη˜TςΙ}( Νχ©Ι);ΥίM_βό-Ο¨]Ώc\όα΄™σγ‘c!Bc&π0‰&―$κŒeΜ…‡Ž…%<τ‘’ΰ‘c‘+a!±Pnν%,$zΕBαaΔBad―XθYzΗBπ<ŒX(±P†o„caϊ~MT;²ΐCΗB t0Ξ±Πέ »π0`a‡%,T sψ8,,€TΩΊπΠ°°6†‹xŽ ¦žsp/ba…i  §‹‡Iœ?ω‘XH,ωσΕ)ΞηΜ.ŽRΛ©/qςτœ1οšu^*m·ct\W™²ΉoKœK˜k–5bY}Ϊl%~£ 
œΊ—ΎsΌ‹οθ2 uλκEWφܟK—c†\FpQ˜#ή%Ξy\ήΒPNν΅\šΟ֍τRΟώŒύ²F›ΙY=υg‘Ϊ5ZΝ ζβˆ5+‹οrψ³Τέ@ΝΕ­›ΩEΗτψ8^Ξξލ%ζ1Kn-ΡΎ6˜3qΟ>™ κdˆϊΠυΠΆhJΨο(΅»Ώ­sσžο,Ζn/z}C'νrΌ3<ΖNU|ΣΏ©Š/‡cN«β9vύœ*ž–/ίPΕ%U\μ]­»ΒcάيσYX”‹ρΟWXλ1*yΙτΙ(gσ8™4σžK8κ­T₯¨JζdAΨΚόL]%ν”;ΚΤMλ;‘NŸι«~JΟ€K,Cc©fΜ’‹ˆB<‰]~1F@ΩrYUι»ϊ-}λŽΖ"¦Κ‰ŒͺοRŽνq–Θ:™!…/H¦Δuδœ₯Σ ηY‰v/«ΥΈ΅’8―…9‚cδΈΡΠo’I-ί?εΜόΥά_'€”rzŸ€ JdTβ\.Δn|„!’2ζ*ίTˆŒζ^K•p&BZύζ!~\—γ±2δIœσx_»ΊŸR‰ψΎΔωΫ·%Ώ…Έδc/lΕω€ρΠ±ΠΕωtρ°vκJ€όνρ7ON:r]xθXθ', #ͺ}&b‘ŽXΘΆ„…ζ%,$ΈΞ~a!ΗsΗ@‰σΚΔ3b‘ζ±Ž…lυ$.αaΔΒτ=U߁c‘Δ;xθXθxΨx’2`‘πp::  'ηŽ‡n*'9Λ‚\™ττ½›ξθʞ{οΊχ–›{i™‹s7μΣη§²qΩ^³ζϊ|{½Ž#α’9œχ²{v>”ςs]Ώ#‰sž‡η”ŸΑϊ‘±ί›Ήά8ΏαSΦΉuΏwcχ—nέKζόΥqώ₯pΜ β|iΎόΘΌ}D.‰^+Ξΐ»@<%)_Σ\Qzˈιz|ˆ γΆ(χΛ=Ν>»W%Ωχ₯rNΆ"€η0²%ΚΙ8ΝΧe2$G`•JΚύWϋE\ΉM™’ΨG)Zʎ+>xώhθvιϋ~Tη=?½¬ŒdW}›zΎθ!R^+!c$ή§ζϋ ί8ΗW‚š¨Ώ›Ί\³“Θ₯·ˆΖώ‘Qw(ΛΩζΥw[“L'BdυΧΊGHSŽαφ˜)*”kͺwΌΛ ΙΘh}{ι]β!%3EhSΞ 9ύή[κ2ΞΤ7™ϋ)Σ±ΥύιΔΑγy<—Η"[D†(Qώf(ημKœΏγYc}!.ΩυΕ­80:ς;ˆx8-qΞI*ΗCaa6…“Ηƒ°Πgr;ς7ν]X¨Μ1x±Px±Pβ―ά]ΡεΘϋΖ,©—7K¬IΐyYϋͺμ οβ[\Οο3ΦΥ‹^η^vο½υl%Μύ;©?λκ3N‘…z̜§ο:gΏλ€e‘ΗΉτ›Pοκ)7g/Wί`ΒU"Ϊ3ζ.fΥονΖoѝέOŠψχ2.σξπM.π.ΞύX«ΰρΌ¬]ο…Ο~u€ΠI‚x²‘qΎrίwun;ΰ½ΕψΔ6Ϛυ²φpܞUμ–΅’Œfσ#L±τ9™^ea>-BZ ζDVnτYBF‰κ2dFβ\Ωsȏζo{ΏΉˆ¨“PΘDTY"qσŒ΄‹]•Š»C°ˆͺH’»­+CξΔΣ‰¦‚λQnW6‰ϋθ6Ηp›JKy]"Ώξf¬qCθr’'0B"J‹μŸ Ÿ•Jb'’Fϋ)”Fσ)'©‰„ς},&DjΩ zv3βCξΤΚ‹c©B"₯2yΛδΣW΅ιt!Nh|šο)…Œ?ž‰h]†™Ε9ύŒdI•5"Ί―"±ό}Έ0'Ϋ!Εέ»q~ΒΞΟ%Ε…Έδ/iΕω€ρ° ω½<œfσ7ΘΚ₯ˆX(N ³xwΎˆ‡>c»°0Ο7O½ζŽ…œ”zπ:j†y―X¨^rΎE,Œ'+%ΞKX˜Εω8,δψ,TΟΉ°pΊx˜Δω’?oΔBbΙ–­8xζΌΠ7μF[Sη•H|pΕ―ΖβΪ_Œ HeΡζτšcώ¦ψ` WgΩμ±Χ: .Ο€³U&{εϊξΉε>χ\γΙΌtέ³Συ˜2ο‡Ξ.ŽtSy½œάeBηΩs uί―¬Ή‹xTσ>xΝ8WωΎΔa±• Ÿψΰsγsμeωˆ0Ο’{O΅*bΊ›¦Ή˜uλnϊ^ΰ•#ΦK+–˜wXˆΡ0[½dBη$Κu²Η«)’!ΏΉ~ΔωͺΟΌ·³φs(Ζ/N/βό!U\_ΕcΜξIα˜mƒ!ά…yTρ‡vω|œίσυƒ!άgZq>Γ gUF‘πO:VΙetΙ‰Ω’Σ%€ ’٘gωf‚’>>―’P¦HYΟ–#Μe„ζ}ζ”ozΉ#€MΩυ0ž9ŠσΛ½·3ζ2v…tŠˆΎσ§Guήυ_έd“γT²)«ϋ©ο’­ΚΫ υ_*3%R*e^«H6‘nZšyΜηS*qoό‡Y ΉBGGhRe‹R (ίeΚuΉΆ{lΑožΫА&€φώΔω{žS“ά—μωVœ»°$<μ ω{I#yb…f#Ξ…‡Ž…όέ # έ nΉα‘c!8'<ŒX(<ŒX(Έˆ…ή²γX‚‡ Υc±ϋHΰk’…N”°Πγ"N„‡ύbaμϋχΩθΒCΗBM"©GL:v X¨qkΰaΔBαa―XθxθXθx¨γ-s^ΒBαaςψ:`X¨ŒΊcαtρ0‰σ₯ш…Δ’G΅β|`=ηž™vcs€§%Ξsφ< σλ.Hβ< t2»ydš|—˜ηΈ,θe ηεξteΧyήχ‹Θ“‹Ί»Ά+³-7Ο’—ϊ‰“0·ξ¬«eξyΞυ&Π’‹»ζͺΗήwΊ vΟ΄{6_[Πλm<αlΖp|Ύ='ξΙ»JΞCŸΆϊc_Έ συ6β.žρΟΔ²ξώ₯΅.ϋΨ»ΪβΘ5Ο¬{ΟΌWHœ―Ι'pτ»ρ‘}~ϋϋη7ηϋ;·τ‘b|ς•Ονu”nμΧdΧφέςΎ‰ΞΨ(΅―δΫ—[Ώωc³˜'~­ϋζΫžΛί―ΝΫ‡΅βΌΟ΅r»mS°εŸ)l Ομη»$‚J$^Ε” ιMŸο-#ΗvΛIœ«lQΡ7d<Τ?3ζζdL0"TͺξζBΡΨG₯ωh 7=Š™"'•Mβ\·©·R]ΔB«Η•S1ΟΟσΉ³±„:€”cΤw!Ո!ND4->Ν;Φθ ήΎ¬QΗhΘftΛW›—Άs¬aΣψ‘X!υb¦ί€”~[Θ('o4ογl7έ•§ϊ̝”Z ¦‹sομ:[\ύΦSζT™sοS―.«Δ39οχͺ$Φϊηί{󺍕,.έgΫVœΟ:::¦ΦŠŒ‡ύ`!YRុ΅KΜιoIXˆ:z‘»΄σw/Ώ\YsJsC7 Θ›w-!η†ϋόmο…ŽξrŒχ1k^²uνζYu/Ήw#8Νί&ΕΉ* pi―„z―ΛgyΗΩήq€™²μΡ-–Ÿ{fYΕ­g€'佬β‰ ΨTΆ_ϊ,|„έj3ύ“χΧ/߁~Δωκƒ>άΉγ«+ƞΫcOβ|‘Η‚ηwΎσE©„­ΞfwWώiά¦>²)λHδS†;u6aσΉυ(5υœcΠ!υžreΙ ©|S&@τj8„T}γ*ίτ2Nδž!ŠD4φ˜GaξK5Ή.bκ&p2?U9»H/Ο₯1Cš?,2ͺςMRήηDD”{“!D€ΤϋΟ=s³ηήΗ.χγhηm ]ζqd‹Όχ‘>Mqž²DΚ©—œŒ—={Δe sΚ4!€ΉL²˜1¨„wʞΖώ̜™’Yd4 ΆxYβό0Ϊ»YˆKχ{y+Ξ(ΞΑΓ.,€lΨπpΊX˜Κ›3:κVΔBώή„‡Ž…sα‘c!(<ŒX(T«Œ°0fΛ……%Ώ 7w+a‘„yΔFωp8*±P˜±PβΌ„…αaΏXGΩ %ΠεθξXGN ϋ„‡]X¨μ9eπŽ…Σηuζά±PxθXθeνSΑBχξΘΧSQ…‡ΒΒΎΕωΣ·lΔBbΙcZq>(q^₯ζ]εΏSηκ,kqŽ―Δyθ\W–Όνuφ\’^ΒάD~]ώΎtΟhηk/E–h’s»άΨ%½ΩgnΧeΟGζξί•°1Σ°u‘ΔYΫ5w™ΔIl{Θ¨Ξ{Π½^ύζnœ&“² βΌ.ϋη΅ζρjSYξ†ξ™π !bΆάΛΧέΝ> s}ξq$έΊ‘ι‹sw'J™ώ.γ» ¦θ;–@W«„B™t΅F°―q~Ϋ—?Ήσΰ]‹±ΧΟoΕωB.k‡Œ¦ρA_}Νθx¨κ ½lTώαNWœΦĜ˜ˆˆfl+‹.ΒΦ³E±„]™!‘SΟ)ce+S7HdNYp•D*CηυjD²;qDš“Q2Dκ­Ty&£·ψθ΄ΥeŽγvξ―μζ₯σš<εΖΚΈs,δS½•…1mΚ‰ŒRΦΚηGLΉΧΆϊNΌοά ζΌΧG ιΊf©;)•hΧχŸD‰—υ’5RY'„΄Ši‹s9CDΥ_.Rš‰λK~ηίz}3I©~λ5!΅ϋ©Ό³&£•(ο‡ŒJœŸΈΛ?Φ•*1.;π­80va!ΏŒ‡ύ`arκΞxθX¨‘’ηΒBώΦ\œ ɘ  ω»ΊΑ₯γ\Ž…Mγ"U^ΒB"b‘—°P'D υξθqkΒȅαaΏX¦Iˆ;zοyΔBα‘c‘NΎhl[…*oXˆyάtρ0b‘πΠ±ΠpMι ±αlΐB„zΒCΓΒΎΔω3Ո…ΔΗ>ΌηΒBοΕ­ΛΩ6σzͺ«K`ηŒxηΛΟδΉΜ=eΤ­=υœs=‹ρ.sΉμόžΖˆ™U‰5βΞE ζ•K KP)kg]Χο—ΗΥθ°όtq$XΜ*kVΆgπ½ο]ύζκ1Wf}EιΖcxΉΉ‹ΟFqžΏ+ξMeEr7oSl(˜Ό­ ‚\Β; s^VΫ‰ t=ίtΕωHα$‚Jή»F―Y?zΣϋ‰½'΅I¬Μ"]γξΨφ#ΞΧV"όξC?QŒO½ϊ­8_Θβό{aΔYοδPόΉFηEη !„”ύΔ” )FHž]gδτνeΠ"€J%ΜUΖN6؝ˆ5ΓWB”Œ ς¦,€Oβr 
i,aD4Ί°‹„:ι|ˏΎ›β_Ο9&—εΜcε:Lj”ͺ„Seςn¨€±C>λnσ„Ο+.-υ™ςωΘ8jΚίΧζsκžJ•rŠ”zΏ₯ˆ©²G"₯> οX’έχ©ŒW€U%žΜΤ%¦DF½\S‘³EιvexdgeœDc)W%Ό!…Θr²ν™&2Š9XEF!ΕΕ»ϊϋιKœδω΅ρRŒΛ>ϋΚVœ Ι4 ϋΑΒτχΕΜλŒ‡ΒB΅φ8ΚNxθX(r]XθxθX(œ“Ήšc‘ZzJXǣŞqΗBα_ΔB.K¬;J˜ΗΗΦεˆ…-9,$ϊΕBUiΌ]I ;::κ$tΔBΎα‘ca2ל <ΤIIΟv»!\΅―g,Μ=λ Ή.<N“8ζ£±hΕωΰΕωΈήsŸ= qή%Πs=‰σ_Ÿ7–AG€ά¦>τœ-ηr27C°›Ή—S_΅ΚΫ™nFb^^Žh’€R¬ ΩΪZ˜Ϋ{νšλνŽρ>Ο;τeΛ9^%οΚ¨z:b\μʜ―³Φ93Φ±7ΊK˜[ι}κ7Ογ馔c cΛ\˜{•F[ϊ―½Z! σXο¦_q>R˜Ÿκ‚‘{Ίχk‡0}άΙϋ c'vόυ«καϊ,ΠύwՏ8Ώύ›»uFϋd1φ~ν [qΎΠ3ηu†ό€ν’L*ε¬ώσ‚šΖL5”½Mš-ΊχδD<$ΐ•QIΡΨ.H\ˆΥ[ξΩre‰Θ AD•-B˜Cά\x‹ΚIp%2κε”ΡΈH"B’©@γιΗuώω΄γΣeŽs.χ(τK½κr7ζ΅iΤ‘ιD "Κ±|š{<-ρηόΖ9Λ„Ε‰˜z₯Θ©ˆ©©NΜ(‹™λ~\2 œb¦¨.ΣΜ£…jΗbˆ'‘¬Q.eΧ>•GNΆΨ ‰}ΉΌgbͺL‘β·θΟ­ύďΏ`¬'>Δε_ΨΎηΖΓ.,δ»ΟxΨ: Kν!ΒBηŽ…ΰŸπ0b‘πΠ±P%κW±PΥ<Ž…šG±Px±PbM9Ώ§2Ξ)φX¦ί§z‚)€€Da^—`VΏλ^Ε9„3=Άυ΄σ82„γοΕ³η}‰σέ^4φšC\ώε[q>`©4 Ψ†‡=a!Θγ,vUM“8φc±XϊWԊσΟ9ηLηF#Z•Q’ηαΩά­§fΩσTΦNΦΌΊ y2|Λ£ΐTβž.ηyθI[/8’U&d ΉΚD«T<Ξ2O#Σ¬ΉξΧ&+/s5}&‘/ί3±ͺ6πμsθ*k—!œF₯ω‰^ŸΚδ'+½vqΧ&§φι,‰ζο!fΜ%|Υ_χη‚Ϋ³δ~έ ωΦ…‘j“9ΡGρoο‰Xύ6t’<,a‘Zz …{ uέ±ΠΙˈ…Βȅ“‰σˆ…?x(Ώ”  9™"<μΒBυ”cόfXΘq5,œ’0/α!X(žρ°ΖB²Π†‡=a!‰H9Ρ7‰σΊΜήN€€ο§ŠΎxα}έfo₯lΊgΜ}ΜΨj+kχΠώυaΦy4’λU˜» Χ}Kbݟdzθ:Ρ‹8Κ¦Λύί«ϊηΗ|ΊsΟ cŸ7mۊσΕ ΞΗύCώμφ‰€ΦΖXόSΦ™ψŠ€Φgΰ'η5)Ε­8g#”]€œ(kDΖςCY;₯ˆ2PΉτBPeŒΔe q7<ω”£―“Qe”dBδ3Μ•Νq#£˜1GŒ+ žl|\W¦ΘΛ?9^χQ?¦(AZΥ{Ξ‚`––„;„TDbIŸ‘‘„PVίDR՝ˆ!—‰TŠxV<υIRŽΉnŒ'ηaΔ“ί$tύa©ΔwZ¦G™p&{Ε^£Α<κLJλΞL"‰Dθ“ΜŽΫ=—ήe"[Φ³R=γ|¦Δω'_6vB!ΔεΏΆqώΌ*–2ϊ™*>ž/ΌŠZqήva‘ͺ6hq,œDœG,τκΗBΔ›π0b‘πΠ±Π+ˆ %ΤΉ½„…ςΒp,Τ ΕzιzΔȅΒ͈…ŽŸŽ…n ±°„‡ͺ(aαLγ‘c‘ϊΙu‚ΉΖBœΧ3F,μΒCΓΒϋΕBα‘aaθΒCΓΒιΰ!Ό+σž±PcΨfLœ?χ/±XϊΧά‹8_Πx8ΧXX‹qυ]λ²"‹ςΙΔyW‰{vbχΉη)s.£·κ1|Žxš_­±fΦ­¬tca™―Ή››Α­Ή§ΛΜ­.k—K{.mW_»χžkœšΟ‚=θ.neΕ’€•θΣeU”Ύ“zό·τΡzΧH0Usηrw£w—ϊ8Ηά…tt΅/eΊ§²όω’[ώΊΒσιυx&½Χη‘K{<‘Βώυα=υ#Ξο<α?;χžrP1φ}σ+[qΎΕΉ)|Σ™φκŸ<3kΔ>ώαsΆžύΜΞύnEBJΏ%γ³r_3Δ“LƒΚ :Ϊο£Ό §dD £„ςμ 4vG3|½œ]}•žr“#/=WFGe›"”―9ω{ux¦Hε™ΊΏˆ,!aPfI"^ξξΚ± Mβœχ¨ώΡ3n:¬‘4γ ΡMl:+}o)ΫM–§sξ©\Xo弌“" §Σ0;J‘³A‰ˆ’ab5$BΚΎκzWί₯šVζμˆ7Φ6eάτΉΖζωζΞΎΛΪχΪfTΤβςoΌΎ§²φj=:ΡίT±EΎΌΧ[qή§Ns,δw'<,`‘πp2,ϋχ„…κw–ίƒ°μ:"Ψ…‡Ž…އ UΞ±0N’Ɗ!ΗB"b‘π°W,–°°„‡ηs…5,Œ‚|B,\ωŸΣΖΓ.,Όrο1<4,LΏAαaŸX¨jΉˆ…˜#&<4,쫬qή€…D/β|‘γα|ΐΒq‚Ό Π=R–»ŠΖ :½γdΠ5FMβ<›Αq_D§«EMxYuf«L”«7Xζ]•1“ΪεζνsΞν„χŸ»AœŸLπΧ­π2zwχqcνΚκ6eΑ%Ξ»œΗ'0Š› ‘^—&·{‰μ^VΙhnͺ+V?Δ‘mλ~ΣY^ρ 9ηκ₯w§ŒιKœŸψωΞ½§~ΉϋΎu»Vœ/Vqξ"E#…TΪY—zrζž3φ“ˆσzέύέ:c€R@H¨FΡ¨μ] “NΟ%"2"τaBH!šΚœ@@E>5Ϋά{+ΥgήdόVηʚ;έιΔ±ΨαψλΛά¦¬QΜ2IΘGA―2wž[e₯ZΌŸR/½NV dε c~κˈ‹ΙθL-iϊ­‘eͺΆ*;ObHd§ωδf 7-BΚH-²CΥ6M.€ˆ2ΧΌŠΈϊη{ΏΌΞΆΖΈόΠ7pΠω<ΎΕ;{ £w…ΫοlΕωΜaaWf°0 &ΓΓΙ°‡nαaΔB•>;rœπΠ±P™s½ ϋdŠ F,T‘OŽπσͺ (b‘‡c‘‹sa‘p0b‘Dz Kx‚‡s‚…‰‘7 …‡]Xˆ0ΟxθX˜~Ÿškή'¦υφ‚…ΣΕΓ$Ξα―±Xϊ7ΰΐw.f<œOά0Šp/k— ŸLœ'n³ΜεΎ^—΅gwφ$@••ΎwΌ™ϋ}Ξxt _e}Α2][Ζ¨yΖΣΗ„u‰τ’AœΜαrΦΊk†vΞτ+Σ,aλξπ.Ξ½D2KM¦6›Ξͺvp‡hΊP—α™uΆ"¬ά.J9§Œ‚ηδnŠδ₯μr"φM1SΟ₯άΪ•IB¬‹T*Σ3‘8WΦΘΛ6εφΛαEJy^ΑR9§Κυ‡`V1‘8'#”KBks"BD”Θ½‰3΅\kζ[W1kβ|νΖNl…Έόˆ7·eνC‚‡κA―ρ0`a S¦5γ‘D·άΏ……އޅ2BS;°PtgFWvΗCa‘·ύ8ͺΝ§W,Œx(,τŽ…ͺ"ŠX¨Š‘Ε‚‡ ³ŸF£87,LYsα‘cα,ΰ!%μΔ¬‰σηM#Kχ'ΣηmYϋW—8Ο₯ξ.ΠK"]"ύ›>η°bμχΞΧΆβΌη d”³ςO½WvS†4ΉχmωΘ†HŒ•‘„SY#z*λP!ADρ”xΚ0‰-圞5‡ˆͺΌ]½—*ίtβ˜!R %ΧΉrJh?eΚφ8©Œ„T€4f… ·dJ½κ„°Κ8£8W¦1€SGčŽ(a ε·vΝ~]'‚Ί„ϊ “Ρ^W_βό€ν»G Y\~δ[§+Ξ HŸiΕω,c!->ό†…‡ 'ΑC°ό  νΒCΗBΎiŽΉΟ:χ‘iŽ…^ΖξX¨qg ε΄ξX¨μωT°0Šsa!Β\xΈXΕy…2»t,δχ$<4,τŸη³ ΞgGΕω㱐Xϊψi‹σƒ‡C#ΞCOΆ›ΕιΆΈ’0gDšBb½Ίμ½έκWiΉN ‚Χ7ˆsω΅•pΖ™]½ζξηޝζŽ/»υϊ²:S)s=r-gΜ½ΗάKΪέ N‘™ΩΡψ qŽ(­~Ξ†/6qη©ΗŠNKB=Žr›iq>nxώέg~»σίηYŒύώνu­8oΕy3!MbjΝΑ£βΙέΉePCΨmcyG'w[fΐΚ RͺΡB>;Š#.‘ωθVυ"Κ0HΒάG¦EgvυSjζ.Η°…RJ8!†;5ٜΊ8ηLΏΥΫΏ5ζl‹Σ7³°7ž‘²θ*Σ$ƒ€~K‚ΛŒ±!8NA†B©ΡA2;ςήJ/ΣTy¦²δΚ$±OΔ’θ¦F*…Χ˜4‘m<₯σΚcNC€T·k^/ΟΝe9Έ{'ϋ<3Υ΄ ՞E’+31Kγζœ ŠŒς‚„ς›β7Δο ΒκŽΔΚ(i«i*οPΦ¨/qώٝFίS!–}χ=eΞzΜ{q.,D„ƒsΰaΔBα‘cαΪCF±μΉa‘Μ1½χ<β‘c!#Υ„‡Ž…ͺΒa³ζΰ±P'#"žΑ«ˆ…[ΒBέ7b‘Œζ":ΆXXΐB~;ΒCΓΒΪ΅]FpΒΒarqήbα<ηyVzA—£»—»ηϋΘ<. 
tζ^:^³™ΰW¦9ŠΊ8VkeUm”­#Κ•)Ώθ¦;Σ–`Ÿ;[‘.‘NιϋκœEWΌ;™«oάMTŠΟe4πώe½ξ(ΞK‹¬|ΙxM₯χ3±T0–ζŒGξΩςUVαγσ’@_= ,z_βόΗΗtξ?„bμχž7M*Ξ«υ»U¬[«ψύ*.―β‰α˜mͺ8=‹τgVqAg¬ύGBύ«ΈFχΝβ|—6s>ίΟφs␍iˆ΄_†5«Ώ6κh‹›-&HΜ‚­H(fHi<p7•H§ΓqΓ€z|-ΚίΙ©ŸR™9CUšιύ•q…”B !‘rNΧΘ ˆ’̏Θ©aM‘φΚΙΰ`ρ<ΚΙ\Ieœ\ζu‰0Η,ϊT)„œXPY$ΝξUΦ1;d§+±ΊΟχ…’Q`Ig_βόσ―νΐY,;aηVœ¦ί¦πΠ°Πρ° qό:²/γ‘c!eμ“α‘c‘pOύδ₯^σˆ…šW±œγ"ͺ=b‘^:ͺχυ4{= t?9 Μ=Ο+s8–FniδΪ†<^M‘ όD£Ι&ηξΏPVœ£ξζoη+ β|₯΅3HΘΟwqΎαg'tΈπ”b|ϊ}oξEœo]Ε™v}W"sp―οό9Βq§Tρ’E)Ξ}DΘ–[nΩΧβšΏ"Ε@ˆƒϊ,#eŸf[{ηέίMΔ“1BOJ4)ιd—! dΟΙ\PΊIh ΐΧ•1)u3#M)eK&‰ΛnN€Lν‰δv'ϋ ²<6„SY)Δρnξζ„RY*Θ©zίEŽKβ\„Ά){ıʌ©r@ζO“­³oώNK7€T}—ΦΏ›2•RTΝ–Ξ’Ό.wq~ΠkΗN4„Xφ½w-Zq>Sx8H,LΏKΓΓq'13v­Œ‡]X˜Ε9Χ#N†‡Ž…^ΖξX¨Š α’c‘²β uR2b‘O³p,δq#rY­E Uj_ΒΒ‰π0bαTΔΉ°pπ°>YΙΦ°° =c–πpXΔω‹žΨˆ…Δbη3Ι “iΩ†»Ÿ=ΟΒΌΎΝ’KΠ+Ӟ³β΅8'‹ ΕΉΞκq>α"c΄ωάDDΥ;Iy¦LŽ £ӚŒ’Y*,z1ΙAΔd§NeŽDό”ua?„Peζ*SG„«,“}"₯šΡKI#†|nΉ.»™Q\"ΑK%¦b^‹ΖΉ)ΪHpc†‰χνe|κΕΗ$κΔλHα„”YΙΧά5κώLΜ[B*ΒΩπΫJ€”ΫΥc)3€*ζ3­Εω—ώyΜ/Δ²SήΫfΞϋΔÁŠsĐαaΟ«ΒCa‘zΚ…‡E,œ#zI»c!8ΘmμγΤ.,a!±0ϊi8Ζ•,o7r,”#| …‡₯>τˆ…zο%,d £8η3F<œ »Fͺe<䚢8ρ“±Xϊ€GΆ™σ>ΉαΐΔΉ›Βεθeu•Β«l½”Y―’”5ΧB|­Έ},ΛMΈWω:·“%gί…+οHϋΘ {―Ήξ£2wξ³"‹r‰Δm°²v±Dyιu*»νs²Χg3»ΥVξ.ΡΕy™:Ί2χΔ~?e“YΌN..ΞUe μύ|\qŽ}i­*ςA σΎΕω…§v6^vf1φΐΫ{ɜΏΊ ΞΏŽωAAœ/΅λ«Š‹«ΨΑφύI.™Uμ‹@oΕω<\:;Οζ7 Ζ!Ξ<;‘MH)DΤMίΚ7q1žˆŒβZ|ε‡tΞZuX"]κ·$”E— 0O‚}d€TŠω²ΓΏ_—dBJ%Φε"μ3Π%ΦEJ•1‚°–Θhi‰ςXzlˆ©HοDβάΙ¨ŸtΠϋžLœ³…ˆΞqžzΙ³'A?Ώ?w)ž©•Ζ¨U1λβό«o-y.Δ²Σήߊσa-iΩʞ°žσŒ‡Ž…އˆsN^&,άtΦ„xHφά±PnνDΔB¨ŒXφ‡ eμ±ΠKΫ#φ‚‡.ΞyΌφ"Ξy<ŸgΡ#: O½ρπη Ng ™q>Ϋx˜ΔωKΆ ‰₯ϋg­8–²vΔΉgΟ'ηuΟyΘΆ§vnΛb_·7•΄{&Ρ‰ΈFT#Έδ"όη7¬K‘λΏΈq} DΈχ›ΛΑ]’έMβ”}WΆ[™j7/c;Ρλœ(+ΌήJρuέηsG‘)c4½&υΤ» Χe¬8WyΎNΜ΅8_?2ώύNuy?ϊLžθeΎωLˆσ{.=³³ιΧηcΫ¬—΅WλχΈκτ8£ησ‰f§Ψ^3η©ίBͺώΚqœ;JDΩV>R’aΡ“‰Θό―ΥίND‹1BRHš²Eκ“T‰§LΪD*%ΜάΣ—|ηΤΞ ωA"§κI'kD–›ΗR‰§;C*yΞ©,ΝWΧλι…ŒNDlEXEVED΅TΪ e‹{3€τ”Žθœ°βΘτYwΛw:Ώ\shŠ9ΛΝ€8Ÿ­50qώ΅7–<bΩ?Њσ!ΖΓ^Εy‡ΕUΐΒ πΠ±1“«±P₯ξ %ΑB„9Xψ’CO«M0#ς˜χ §‹‡ 'ΒΓ::  9™αx8°pΎααΰΔωί5b!ъσαΒΒΙf›7‰σXξ³ρ§KΕK™SΔ©2γQœψΊΫ;?]1Ά%θά.a.qǐYœ²λ*sW »»ΒΛΡ]fuSY#–… ύθΪMο™Ϋυš _zΝ^I  d˜ͺ―β|Ά2φη—ŸέΩtΥO‹±.οκEœ?€Šλ«xŒΒ=)³m0„»°3ζβ~x_(<ξvωƒU|·ησ™ŒΚ€ζ’έ‡όίG€ :‹™ΏXΞ)G`9§C( ΙINΓl!¦RΝκεq”•—Q€VΖJS%£qρ8M½λ₯ŒΣDΞΖ,Hyη|>δύΒ΅‡¦˜Λ•ˆ«˜o BJΜΊ8Ζ›G Δ ±μΜ·β|˜ρpްΩη`!γΒCa!‚ZXΒBαa a!γUJ³……ζαa―X(“‹l0"T‚Z"@”»X—Hηx Y‰s.λqΆ2‰“ ;' TVNL%k>ΡRΏΌgΑOΠζΧά΄T%ΰβ\]cβζ‹8Ÿokβόή+~yπΪ_』Ύ§ΧQjΫd§u\ΫwΛϋv&L„%ίΎ\ύζ”ΊWΡΙ}ε]#ΣͺuD>–ΫΎ_2kΕω¬x8°Œ°<9" Ο""Κ5‘Β±Qξ}θŽ…Δ0`‘πΠ±Pb]xΨΉΑΓZœ7`!ъσαΒBο7ŸJίωL/Δ,βZΖn.ηˆςs]›DωW―I‘λ2‡C„«ίœΛʜK “qV?]rΟo§œ1ŸLΞδςΟAςvN.ΜWqΌˆΈαωχώϊΏ:^wA1ψΨ{{η =Zξ…”ΚM6ǠםχJ:ΙQΞ Σ\^Ή»0W_£Fωpœ—kΚ­ϋΚόcΙΘΛ\h–χαsB“SΘ;Ρ9ηίz{1‹±μ쏡β|Xρp`!}Ԙ!>#*c±PYσˆ…ΡψΝ±pρPX(‘ήβα<ηΫ>΅ ‰₯Oώ‹Vœ!–ΫΉΘ`{™·D5βώ£kΦ&1Ž(?εΧ«;§^y[WO:‘Qkš‹Ωθό”žΟ§™ΰ½,½7½―λ³@Wω|»ζVœίsΥ/:›ΏΈϋόί[qފσ)’μ¬:Θ…Γ;NΕ­α’ΜQξΒlE>!™Pm!©š‹N&H€T%Ÿ2*R/₯ιQγ0ΝΤ…¨Οu―y ΐ&Ξ}ΗXΏiˆeημڊσ!ΖΓΉΖΒ3n:¬Ζȅ*CXΖ©/]X¨‘iˆφ…†…”΄·x8Δy­8nnˆσϊ Y`υ`KPjJ»β'.Ώ5‰rΆu2ιVυ—{ΉΔ8‡•ȏζiˆτ™*iΔάv!kΧ<ηW_ΠΩtΓ₯ΕΨγhΕy+Ξ§HH•1ΊfΏ›ϋυ”)‚lQ^ Ή”ΐφ̐JΫ5―—γD@5sW‚œϋ°O&Hk!e„1ί½–”pΞΔ’|–hΈqών;›W­ΛΞΫ½ηΓ,Ξη ηdΝ…‡ήgΦ #RFΞεˆ…ή£ή+Ξw<Ξς™·xΨ§8ω’F,$–>yΛVœΉ8΄@/‰sΔ΅F§‘1G#Ξ {Ζα-7wsχ׌s.#Π'ηr\ŸΟλϊΰκήΧχL₯Δ|Χ Nœ_{QgΣMˊ±|°η­8Ÿ"!%[”GŸ ’/ΈΌ"€ŒΗ‘dѝˆXjK¨Ο\=ηκ?GΜϋΨ ‰zŽιτrNzΊ‡EœΟδš2}ΰ‡£Ρπ˜8Ξ»:›Χ\Œe?ή£ηÞ9Χ(¨λφ8s.!­šσ “DHEV9ΙBͺŒϋνu=ΈωG‰Œή³ρ€Ξ¦Νg΅βΌKœΏ―žucΩν݊σaη -ΞΑBΚ΅…‡%,T8r;Έ&ά eŽ),ΔTMNνŽ…χ›±pΆβό±;½ςiXH,}Κ£[qފσ)-D1=Τηˆi•§«„0Χ¨5Ή˜#ΐu›οWžΰ±Ι€#.qh'[ΞH5Δ9Β½5Yk^^Ypη†{Zqξβό†+:o½¦ϋοώΡVœ·β|š€Bzγy{7žœΚ qΫ…,"΄! 
κΤuˆ£φΛ‘]BžΫ œαξμξ„”ϋ@r1œƒψBJ5'x&ζϋ.dBΚ fœ€EN{Σi­8?κΝwQŒe?ί―η ¨’hxθXHΏ9βΨρΠ±PxθX~±X(<ŒX(ˆ…ήŠσΙΔ9{‚ψΔ’ηUΜζΪψΰ‰Ύη©Œ2ͺ,!Ήωd”λꏔ!œ2εšλΑ~n‡ΈςšοΛɏΰyΫUώŽΘAFWn88d4»Ώ»¨ΕωΙGο2ϊbω/œsq^­ί­βƒ-!7Αœ εΒȅ-6cα]χSγαBΒΒώΔω3±XϊΤΗΞΉ8Ÿk<\XX τYΞ “‰Eθ­„™mυ+#.Σ7•±K€#Θqlš\Ν ¨<ζoMΑ!Πu[›5//}N|nϊΦδXτβ|Υ5Φ,Ζ§χΨuθΕωL`ιdOπαB|’Š•Uόv±p!]ωŸΝ7}~VΕ9Ω"fœCE0UΞΙa₯·\2Κu7K’{±F Ή£±2EΚALy ΘmKF'ώŽ Ÿwόχ‘©δφΚ;ιlxΰ„štm^ΨβηΗ|€Σ9ΛωΩy‘9―֏[q>ƒx8KX(q.,Δ9]x(_ ]¦ςGύαŽ…κ=Xθ£ΧZ,μ Α?ααBΒΒΎΔωvΟlΔBbιVs/Ξη Φ%ξ•H—yΪl‰σUλ7€μ6‚‘­ΜΉgΚuь9b\β\}ζdΘ5n !IVžmtkη½α؎θ$ΪΥ,ΞuRƒοΚ?Ϋ…0­q>rσu£ζz…Ψο“»-ˆΜyΏX:•'ϊΓ*v―β†*¨β‹€!ΝΒ<ΕΝ₯˜ΙEφ‚ΓœZζΦ:… r]dBͺΰ:Ω#ˆ¨ ’ԏΞuί§1kсXύœn~4ί{,y?z½½¬#―92}’_R ½ύΎο€οŒμ%ž»ŽΕ*ΏύψX~ˆε~~Ύˆσ}«ψrΟ­b‰’%€ΣΐΓYΔB–c‘NF βε&,ΤΔ ΗBαa―XΘIΕ|ΖBήW――,<υΖΓϋΖC°²vααBΒΒΎΔωφ[7b!±t«Ώœ/β|Ξπp!a‘„Ή»›Οδ’S;°φ9εʌ«\έ3ι\ζxΝ2ΏήΑΘγ*NPŽ\<ω@Y{zoŒU»η·)ζλβ†Ώ―Ι«§p|ΣR>fzœLY“E9―‡XΤβ|υcBμ·η'Š8ο K{y‚‡U±Oε{VρΠ€ Δ4Ν·~eΖ“ςΐ«ο<$υX2·Φ g ²η υ^ͺ€]ξΕ"€"ͺl!›*cχ!…δJ”›8'λ₯χE–M%ͺlq_†ΰ»8?Άow.\{hŠ©.Θ'„tέ}‡%qώΐƒ§w:χ}4/Wβ|ΧNηή“‹±όWΝq~^!ΞmΕωΜ`αLβ‘°ΙίwΔC τ&,t'wΆΒ@αa―X8Œβ\ο μΓ…^xθXθβΌ,${.]§uUŒ˜8§a!σΎΕωm7u™(zμ·Χ Eœχ…₯“=ψU¬¨βcUό―€'!€™Œn^ύ΅ύ.ά‰o»χTΚIŸ₯gǝ”ͺ΄]D4Ž)ΥeΘ(·AЈ¦₯Ϋ‡ΕόBΚ{γ3ιEœ3'ωŒ›KŸ­‹sf(³"Vά‘ΙΪMΩ")Y’Qσ£sΗϊ ³8?~χŠœVŒεΉ5„[Θβό–/uααL,ΗBώKXθx±ΠΊc‘γαBΒBΝoη3™Lœƒ…8ΰ  ©Vφ‚…އ ϋη;<» ‰₯KώͺΣbαη•xM‘ΕyΚ8WΡοB(ͺ<š’vΔ6α‚άKΦ½ŸANΦŠPD0*KŽEPNζ<―,ηLžp˜­%6Ά2f›Hœs›\Φ7άΣ=MYpΕD3Μ•5GœϋχΕη½ΨΕωέkoνͺ,ρΨοS{Ά†p=ˆσΝUάWΕ†*F,υ€)…fažbν!γa)γSθΏ£Μ/šΓΨbΝ}›ΎŸϊφVάύD¨Θξ@F=Kδύη.Τeη%2Oβ6D+1ٚο„*C(Fβ½ς9A@™‡¬ΎQ.c&E@RΩ²ο„G& Hœcr4„7žœ2D›7Ÿ3:Ϋ—lH+Ξχ8ω„OŒΝ;±ό’―φ$Ξ«υ*~SΕuU||ΟjΎ1o?TŠ–Ξ:φ‚…Γr,δοRx±PΧ#‚%,t<\HXΘVxθXH–<β‘fΊ  ›ΔyrRΉΖΓ„…ύ‰ση4b!Ρ‹8Ÿ-,œ/xΈP±0 s’”i–³wSψB@«Ÿ‘½"›Β)ƒξύδr]ηΈ•YŒFQŽΠDHͺϋ57€θε}Νη₯2r‰s‰lU¨=@%ύΪοŸ;Ÿ ί——¨GqΞεq―ςχ£ΫΧεηk α*q~ϋmc'Bμ·χ^=‰σΙπ°ZΏSΕσνΛΌ€ΌιΎΉRόGU\›·+,mώΞ4)…ˆ98‘ѐOHθGt:N˜\œϋ\ΨΝη¦2ARϊƒ•‡ΧΞΑήc©žsφ*ιΤά_ tw5ζΆR η0-Ν'!%‡΄Γρ'&ηeeΞψ| žR nˆΈH»²μdιά~ΦͺΓκŒ䔐ϋΈ³Θ›~0:ΣχΑ³T gίβόΔ=;MgcωeO*Ξ³γ%•;­βχ«ΈΌŠ'Ξύ·Όύd)Z<œA<ŒX˜ρpRqfd;Rβςw\ΒBα‘c‘*‹"Κέ}˜±PxθXH…€πΠ±PΥC*erΒBxθXHF]x±υˆ…υŒsααZΣη;>· ‰₯KΊ3WX8_πp‘c!BV%ΰšΜ*βm*βœϋ)Ω+s‰»ΖͺI”k9Ωr‰r2Έ«οκΞ–da>q>Ÿ—ή«χΡ{Φ|]>!αB]Χυ]ŒΨgμYpUl0α-‘ήu’2;΅AίrΓJœ―Ώ½λd•Η~ϋμ=©8ο«΅M§g‘ώΜ*.˜μΎΥϊŒΔ:[όΥζ K{}²ηWρή*ήSΕ?Άά#!…xqžΚΨ6žQώ'-ƒ˜†^εFN$‰μ†χ\ʝΈ”1ςžK5δ₯ŽΓ.Ξ =1 Iβ\#δ\¨λsœ³ŸΗP―)³“!£|ΎSϊ[Ι!θ!²\Žd΄^||·ϊ[Δω^£½Λ/?€qΎugΪυ]‰N[Κ9\x±PxθXΘο’ xxχύΗ&&a~ο˜vq.‘θbQΒΩE`\rnW?3B]ύδvυ•s;!±ͺΗ—ψ$4ώK3§‡]œσ~υyκ3ZΛΧυ™Ζ“#VAΰ’\Ÿ?χMNμΥw©ύMενρ»kΕω¨8ΏλŽυ]'ͺ<φέgŸ^Δω€xX­ƒ«x½]'SΎΕDχΥ1ω2Ηώ¦3OΛڌ³ Uό€ŠΟUρω|ωBnkxBΊζΰ11Ή¬H(Y•ύ‘ύaK©fr³­M*δŸ5Η7d˜σ«Œt‘QJ½άέKά½Rd”c{-kŸ― B ρ$DB_ςS;/;όϋ)c„ρ“H'ϋžπ;[α¬ΞΣ<»σά―œΡyΡ‘§uΆ;φ€Df5vIζO|.P²GT>k+Ω"„ΑbY}‰σ“χ%κ…XΎό[t>oρΞπ;UρM»ώ&ά/;3[Ξωη“Ž_­βPEKHg 3:b &<‡…“ΰ‘°ΠέΤμ½b‘²ν̅އΒB°MxθXθxθXψ‚C~Pγ‘c‘>π0ba‹‡=ŠσžΧˆ…βόkΒΓA`α\γαbα†/‡–(Wo΄ΒΣ'ΚΠjqβ“l±LήαK·]ˆςΊRφrΓ]·^“bX—ŸπΎo‰t}ώž)Χg h~"eƒU5θ;‹ίISy{‹…γΕωwέUŸόˆ±οΎϋrΠ7ϊε†Υ:­ŠηΨυsͺxΪDχ­Φ]α1ξœ+,μΑOͺβ-…ύRΕ)-OBJΧ};‘JΔ7&9”όi,±ζήoun½ηλΙ•Ϋ8&•ͺ4Ί1Ίσώ£S %†'θΚΕ2Οθ`ΜνWΘ1¬ "I@6!€d} £N²DdΈΉτ€s:ΟήύτΞ?}ψΤΞ ?ψύΞ?|μ‰˜ώΓΧNOD•9Η2Š"³FΟ“Ο˜L>c>;ˆhKF{η'τ©$²J±lΩ7{ɜΏΊ’_ša2z|{η2§7WqV΅β|v°Α-ϋ‹gΦxͺτŸΟVxθXˆ‰f‹‡½‹σ&,$zȜΟ:Ξ5.&n¨Œ«2εδ«-\¨XI΅Du\μΧ}%Υs-qΎ>s•n#B%Μ1zΫtγe)†uQ1 σ5eΔυ9«,έ+κήr}Ω@‚Ρ«όδ—'«lh±pΌ8ΏγΞ»κο%Ζ>£βόuύβa΅~PηK'Ίο ‹σΎ°t²Νtn+[ŸήrΛ-;‹i₯¬‘ΚΧ*’Iv2CֈlΔRFp±/RJ•1Β0fσΉ)ρ%…ΈκͺδΠ3F"›ήwΙVΩ#s-1ΜkΫ#O©³BdΑxoRΘ)b2JV(Oζ/~)iϋ¬=Ξθόύ§Ξκ<}ί%«rxH)d]†Id «<.‚X,„΄q~βI{¦ίs).[vπœ–΅Wλ!y{iή.ΛΫί›νΡA‹λ zΖCΗBί SU‘γaΖB Η„‡%,T›OΔB"b!γ  Ω  Α;α‘c!ϋ…‡ …‡Ž…|n-φ&Ξwά鹍XHτΠs>«eνs…‡‹ %=kή$ΜU‚ώg‰j4#&•W»JΩc_΅2ΖʜγΞN)ϋΖ›―L1ΜK%ώ.Ζ½d]—•=χς~έyϋJώ½ο\'Wτ½θΔGιΔI‹…έβόφ;1φήgί‘.kŸ),μIkΨ?šnkώNtΊτΈQ‚Y‘MΘ'™#B$“,ˆϋ1ΦIsb3!eŒ}d,0ε‘ί’ςB Σ¨/νԜ[ΝθeŸάΚ‡}A Ιτ@Ι ‘5β:B[™‘ηδ΄D@·έωΔΞ+ήρ½΄έξ­Ηu^ϋΊ£SΌτ½''rͺ-$υŸϋQ J=w:ρ{)t#ͺΕ”-κGœοΔOΦ£•b\zωΧ{η©βϊ*cΖOš!RxIή^˜·?­βo«ψ#ž³ΣfΞg’9w<ή~ίwjΒΓ ΗπΠ±:‚uΒCΗΒWΎν„ ir<Τ‡χ{η;>§ ‰%“‹σYΓΒω‚‡‹ 
%%ͺ%׏t›”ΉPtqrνu‘—ZRίG‚)#,Qtϊ[R,„ε.ν^)ΰ&nξάKΣc₯‚άρ]¬λσSVέοΧbαΔβ|Νϊ;?Υ›8Ÿ«΅m0„»p²ϋζραnχ™ΉΒɞδ URΕΨΎ?Θύ_lxꙣ”x>πΓD>Χήwh"¦š‘-rΚ~²HlΙ(AL!@!ΔΉƒ2%§2Eκt"ͺ-ΗΓΎΘ ABΙ‘ ƒ˜zΙ¦Θ($t§7~·³Γ›Md”-dτΥ|Lηeο>)Ηά]˜²JlŸΉη™₯ŸνΛδω!Τ’ΡήΔωρ'ξžJ”KqΙe_ιu”Ž›Χδ έfŒΎqUμy΅‚>ά\¬{O9Ÿ΅‹uο“ΦmΚ’·X8±8Ώυφ;κο%ƞ{οΣλ(΅qxX­‰ΞΨ(΅―δΫ—Σo>–VλαΉόύΪΌ}Ψ\aιdOς{ωLΒΊ*.ΞεG·WρYΞ8΄ά1½s΄tS%M̐ΨRΚΙ>H)ž2PJ>!€—ϋVΚQI―$dΤ{.Ή,q’DυV.„N%—τVBHιδzκ/‡@f Ι„xΎaΗ# !eB Y%Έ,‘}\†œBr1’Bp„@(-—Εωq'ώGϊM—β’ΛΎΤ“8οΜ^9εΝ…”ΞΡΞ90RΪ+vH>Ÿ9>ηdϊZ/Ώύή΅ΰŠρ«KškqΎšΧΨ0‹rVœ Ή%<4,Lν εΒȅt—9ͺ΄½„…dέ:3.<ŒX(ηxΨba§θG{y9{21ΓeύξυϋοΊ="]YxeŠKβΣ(σΐκλR {ΏΉΔΉZΦ™χ“«ΜΙ^ λGΖ;ζΗΟΡΕ»W.Έ«{+Ξ'η7­Yίυ9{,q>#X:iz^"<§ζo­bΗμ@wB ΐ3΄ €Μϋ•+1bΐ ŽύU@D„Μφ½|ύ7U―%3hΥo)S87@’!"]#q†™Œj ₯›d‡θ±δ:[‡ΙΡ/ !…tŠ`’b„TDΤ‰gS@RιΑΔL Β α§€σΚ;Id΄ηeqώέ>Vg8c\pΙηηZœ_2€ΌΕΓΒr,D΄  e€ F,D˜ƒ‡ͺ$rwαaΔΒaΕCpO£αΤo.,€¬]x(,tq¬Έ°Πoλ 1γ.,μGœo·Γ֍XHΜq~I‹…σoyσ†­ ‘7:φ@+‹.7ψ(Μ5FμώΫW ΅\2rΛζp‰n9Ψ+£yπγλži_gŸχΡόt•Μ―Λ]βάϋψν-–Εω ·­ο2,τΨ}―‘η3‚₯“=Ιεv™Ϊύ=νϊe-Οδ)Σ£GΓ—άΪ«€τΠίηœ‘jεœ”fRΞ©ςMυU:1e?Η0'ύΤODv˜–Ο'&(ί$seΩΘ(Δ‘^qz&)Εω„ κΉμUœ+ΛDi¨Œ‘x>>G «ϊc[ξηǜπΡΪδ+Ζ—|nΕω₯­8",dRή.<ŒXˆ0Χx΅ˆ…;±pΨπΠ±P—ΑCΗBΔ³πΠ±ΞΆ_,ΔΥ]xΈ°°?qώΜF,$ΆZςΨΉη—ΆX8?WΙdl}(»v‘­ήtΟΐ―·’v)¦¬9Brγ-WΧ#Τ6έ΄lΈ„ωέλ‹βάί§ΔzœΟ~υ‹Η™σήο―ϋ1¦Νη¨λ•»ϋΨ5ͺZ,/ΞW¬^_ϊ‹±ΫžC/ΞgK'{’+Μώj²η~[ ΐƒ[”xά0š5'.\{hκχƒώδΦ±Œ„“¬\‹!₯”’EβvŽƒΐ²Vq%KΔIˆ¨ˆ)%δΦώι1BJ†ˆrL%ΖHrg'k€lPSֈ¬;!7cΚειΉ€·“ϏοD«ΰnq~τρ»ΤfQ1~qɁs-ΞΦiΕωΠb!}ηΒȅg­Ε·ˆ…ι σΓ†‡Ž…T  Ÿ&”βažΛϋΧη²uΌΠ¨:υ˜―΄ΩοrΈw·v•Ώ«₯€­¨Σ uΠm>C=™φ΅βΌ(Ξ―Ήu]Χ ]χά{ΨΕωŒ`ιdO²[?―βΞΰ~—χϋ[ξom^{HgσνίJ1ΩΒ‰ήJe‹4BR !%sqŠ#묑\‹Ιω<_φAZ‡Ν‰Iˆ'’ψ%ί95‰rŸαN6ŒΫ)³dή/Ω"ˆ§ˆεΌΞ_‚²wn—’²GTΘ©ŒέΥr‹θ‡τς܈€…n ׏8?κψΧccœΙgζTœΟ—hρpκxs.<ŒXHy»gΠ…{Βȅ\&<ŒXθxθXˆp:r‚ρPX¨χ  Ήπp1`aΏβΌ ‰§.yL§ΕΒ ΅”υ&&[ήCξ"έ{Τ½ ήEΊΔ$Ωfζœ#n“H―b¨„9’όŽΥι= Ξ7δ2τu#χŒsg_“Ε9ΒΪΗ¬ωηγ•ʊ―ΆΩρQœΗώτΕΰάޏ8Ν-λRB)vύδp‹σω=|Μ‡Ϋ>ŒSϋ›*–΄<8qΞ’œsυ=ίHnνd‹Ξ»e4KD¨4‘ΞuΆ>χW#Υ4Vˆ^K; λ£Ώ59‚RͺΙL_ˆ§Lž0w⽑5’€“RK:Αθ,ϋ0H‚¬r™ΗœΚΩ²ΙGb¬2νάN€ρl°σΊοŸH0Ÿ-‚@«ΰnq~δρLΞRόμβO·βΌΕΓΑȅτ—°!ΞίlΔB°Cx8ŒXž  Α(α‘c!(Ϋ•ΑLΏΥΙwto±°[œ_yσ흷γc­8οMœ·<‹d4Ο=οu1rEβ\s}Ι–³υ>r#ΊΩB8)ν„°‘5’{;™#Ž!Θ4ρ8σ5{€Ρ@*Σ„PNF&Ό„ΊL‘ ͺ֝Nό^ΊLo:$’ΰΊfτM?I@8!©\†¨Φϋς~Žηqy=<Ÿ1<ΘθΖΟθt<{4Zήγˆγ?2œ₯ψΩΕϋ΅βΌΕΓιγαζsk<ŒX(a±l#r]x±p>βaΔB.; έ$Ξ±γ„‡Ž…mΗCa[α‘c!Η #vαa+Ξ+qώτF,$žŠσ βΌ—ε¦oη.ΞΣΆΰ.Dέh™D<ΩσJ˜§ΈυšΪΑ}>Ε%‡ϊœ-—0ηύ€ 9$–Gό$DΡΔMŸ‰DΆ"–ρΩΚ™χΥ&Μ]œ|ͺ^Π‰’ε†ΧωW¬Ί½sνΪ‘b|tVœ·β|ΘΔ9 JWάύšDͺob€ΎIΘ)ϋΨj΄™ ˆ$вG;AΖiΔ9e›J)dBH_%$›!ρT_:Uυd’΄•a£ΎLe’²K„+™₯­ΏpVΊ,"LK#ϊ,[qή-Ξ?ξύ΅ITŒŸ^΄O+Ξ[<μKœ #"Μ…‡Ž…>ΥΒ±­πpΔyΔBpΠρPX(œγXΗB™„r»c!"]xθXH–]xθXΘVxθXΘ¨Ο. 
βՊσVœΧ’ΧσΖitz,EH•ύQ9'DqNI'"RH'œ`A\Υ‹9Ÿ€O™s‰t„7Y φρžΘ7„Κ~"3Ζϋ‡”σYpΧ!₯τlj60UΧΥ―Ι>Γ%•BF) …ΐr?^ΟΛ •si^ύ"η‡χΎδͺ]ŠŸ\τ©Vœ·x8#x±P=b!D,TΟy η#F,T•πPXΘuα‘c!'„‡ …‡Ž…rX¨—ΰ‘c!Ÿ΅γαBΑΒ~ΕyOYς¨Vœ·X8νε¦e瞹uqc5nM‚ΣΕ«‹ρ4bmφ‘?pΫυΦ¬{εUP—ρηˆεϋuφ\ξξlσu•ύ―?ή[ΌςΐKδ%ξ}¦ΊN hΜέB)sοGœ_ΎrmηͺΫξ.Ζ.ŸhΕy+Ξ‡tAH½χ\Ε"’Κ†»Qœ (ε„”Bΰ4~ˆΕύTφ9ίΘ(²(ΑΝ>.γX,£'ήο r AT–’Νη„“0„Qsβ9^ξφκ=•»3ŸΧ5Y½›Pe“”½‡σΪxN>oJ!££„τά‹SœηΨχ€Μf)Ξϋ՞­8oρpFV Ž…:‰±Ώ{π€c"ΞG<ŒX¨uα‘°ά:ςχ'< λ|&Η ²eŸͺ„‡Ž…œ :J€σy.$,μGœΏ|ϋ₯XHOwΠWzθΓ,ϋηέ°¦³μΦ»ŠρΑVœ·β|!,Θ&<Κ!2 ‘QeΟ)cΤh!B·ΝχΑ##σ"•Y2»}Cυ‹²4Ýlδ’(-z"κdΉ s<δ”RC΅x>Ω#e«x~ϊ4Υ―Io'Y'ž›ϋρ5RBڏ8ΦwwNΏΝRœ}Α'Zqήβα¬`‘NVF,ΩF,/†Α>π0b!sα‘c!X&<μ ωό……ΰ πΠ±μΈπΠ±ΠρΠ±0υ /BK²l˜.ι€' ¦dŒθΗδ5ρϊΘ,A`οέxςέxΖ’ησDθKqΖ/χhΕy‹‡³‚…ό ƒ‡ ηό G,DΘΟg<"ΖΑȅΰπΠ±‘Νg±Όz½`‘γ‘c!ΒZxθXˆP:Rβ.b ‹Mœδ7·v~qγϊbΌ§qN6|‹|y ŽΩΊŠ3νϊDαΈν«8ͺη-!φb‹f¦ϊb¬Νςœ9"ƒχ¦ΗΉζηδN‚—λRD:ιSֈΛ2†›I’ύΐƒ§§ςN7“γΔ† ζ•~γ.s)2W|ƈ„Ξ¦³Fγή“Gc‰σ―υžτY•βϋ?o αZ<œ,DP γKX8ρΠΛΦε³Α6b!—…‡³……ΒCΗBeΠ…‡ΒB0PxθXΈyσ9έxΈˆΔωK^΅΄ ‰'΅βΌε†³%Ξήρ*ΊΔfηIΤRκ½ζ†ž2Κs±6^vζ˜8G”K ³­‚LʊγήN=—΅s™Œ9ΗΜδ’χΟΟ³θή—ξ½+[ž…όbηg_yKη§+Φγ]ϋdΏβόpύΞΒ1;UρM»ώ¦*Ύ\8ξΤ*ήhβόž*.­β'U<·η ήτωQRZΕl/²'•½SΊ‰X‡\Ν§©Τ˜ ΟΚ€+3δκ\Η€ˆ,ΨldΐΧD‰rOˆ½Κ<ιΟ”€¬ͺ矲O>_2t”‚vξϋώPVϋη_:ς=΅π‰qΚΟZqήβα`±PxXΒΒωˆ‡%,$"β€.<œM,Ldί°¬:*“ŽΓΒEˆ‡ˆσΏri#­8o±p6Δω„Q©Θ"vΣυ§˜WΒό’Σ“8Χ6 τk1*ΜΩdΠs9~mξΖ{ZuE-ΰgεσ£μέ+Μ0΍χ<ά\n‘rΓσΟϊυ͝_w{1vηίΰρ-ήγl²Ψ…xUβόΥqώ₯pΜnΉηόwςυYΕΓσε₯U¬ͺβ·β|‘*EFεb¬ώKΘ‚}>ŠsΉΚ +£žK²ERΘ)™Νx'kCΜΦ"sDV’ A…Œ’Y§?“ΟYnπάVΟχέpΒ’η_8β=ιχVŠ~ΆW+Ξ[<( KX8ŸπlηJX(ŸΚΪάIœ_tZζ\xJΪ&Až£Ξͺηcξ?„Z¨#ΰy/Έ²ς}Υ%ξ.Κs Ό;Ώ/F,”8ώ²›:g\½¦oϋHίβόΐ`χ™Β1©βϊ*c†pOꌹΈ_YΕ‡ϋό1FrΒτ*nΑ~¨Ε9% *OΨrΛ-[Tΰ’9’ |Έ,³$_2?Q…ΈRφ9›fIIˆ%DTY!PδS}—ڐOz™SLζΒΚe^³\˜JNθκGœxΨϋjμΗόδS}‰σ\–τλ*6Η³—ω,θuω κKζ mρpnρ°„…އΒB•» …s …‡  …gΦbαά‰σzΕΣ±x|Ÿβ|Xρ°ΕΒΉ[I0R–8―lη§φz|˜ϊΈΉOυ³΅RVqŽ(εI)”=―Εy%ΐ%ΪSpœ ψλ/G­a§P&hΧ܈σ/[Ω9υΚۊρΦ’_qώπ*ΞΙ£ΤΞ‘€Φ#«ψ‘·MΧdΧφέluΉd½kdZ΅vΜ‹Ώ€ŠWtڞσvMwQf ρTOΰκ{Ύ‘‚ TΔΉάΙ‰™&€"šn'2Κm2Bβ6•oЍRΪ‰ρY#Ν1Žβ\c•ΘPPΖͺrVˆy/γ…Zžž8ΜwήΧυxυγΎΕωͺx\,-bFΟ™ΟŠΠΩΞN›-jρ°ΒΓ:N&Ξ…3³……ςΩψΫϋΈKΗzνΪڝγ”Σ„q(γ|(ΔH„ˆ/‡Ά$)μAlΙPIH’NŠ‘s»£v΅Ϋ&vŸΔl³ΥžoτΞ¦™0Οw_χΊ―{ηΏξη=ΜZοa­ωίΏί·žυ¬g=ο³Φ;ο5Χυ\ƒΕBŠς υoŸ‡xh±ΠρpτΔy"Ί Ξ{ ΗvΕ&pΝi" Ά)­}0qE=Ζ”uΉΛ;Δ9ρ,ΜαŒ'w<:γία‘―ιqΡ5G :> n6€ΟΔΞξ*Ξρ<Χ«γΤ―#e>…―ΡηΧύΗ#Υχώsn1νPœχK8ΓZτΘωΥ’9ŸŽ1Ϊ ΅¨ϋC`q;6θ‘rŠf>Ό“R‡9Ά¨aδΘ‘n‹sŒέα Ol³Ζ’ΫμΪΞ”NŠxΞ;‡cD2ІH ¨3οmu Ζυ%ΞK)­hxdI»πΘΔω9_9:6ή*ΕΧ~xVWΪ dΤΦέ†ϊ"ηŽ…hξH<΄X¨xˆΏyε GCœ+ͺ(W,dΓLb‘β  qƒ’x¨X8”8·xˆ›އ‰σχΨ¬ Όq­ͺκŽέ³xθά°Κ½G]˜³Φ<ˆkg‡[G™sΠσ¬τΤ-vG-qŽZq8εIkϊΊΊζHcΟ9Δ9kΡΑb©ν8>‰ςψySWϊx3bqnΙεΧG©Ιά"Ξ―ώυμκΊίώ©O?ΝΕΉ‹σ₯Cœ#ž}ώΦΕwBŸϋ~ Μ›}κΩbpdH' ΘΒuƒ 9Εώn5^C:&#8δtπ"ΚζHZc‰Χ‘Ύ · δstε¨ΏΕ#ŽωΚ-7 ―γ‘Ξ:> 3@JAPg₯ξΝJFγH4ΰ‹σO|ω˜ψο©_ΉϋμΡηsτEz~ΖfΈ8w,€8GX,Dc3‹…ΐŠp‹…ΔΓΡΐBΦ”[,2ν]±ξ9ρP±η#Z,$*βσ‘ϊO,tqήΉ8ί!ˆσ&,DŒ’8οαiνŽ‡c.ΞKX7xh±™4ΐC‹…ΔΓn`aIœγa±B»„…μW,Δ9xΌΕBΕCb!oBΰσ)bΏβ‘‹σ‘‹σνή±y#"Φ{ƒ§΅;7¬²03qžζœcA|ΧRΨ!ΠEœΗ}©λ8ήΕU¦Š#:ζ‘–‚‘ιξΉ&.{rΚσΜsŠhˆκt3!>‡@O)ξQ|§ ‚ό™ “x―^ηyΊœΚΏ4‰σ+ξy¨ϊϊΏ?ZŒχΈ8wq>„4‘-Œ«A ¬Ÿ»1’Q4Šƒ@α‚[„ηpGP²ΙΪE3’RR ΌŽa[qΞHJ:;ΊX;N‚IŠηHύTˆΝ‘πœξI*&‚”ινxΤτωΟBŽΟrŽο αβά…I„ηv8θIœ/ s;Ο:Δm­YΈ¦”ρ%ηlψ&΅ε.m—»ΉC°qSΩ!ΞιδΓύNŽy­F>έDˆuθβ’Χn8PΠΣ-aοί;η_ψΩƒΥΏšSŒύŽ™αβάΕωψ“ρXHε„KΔB=ύεHLΡΉΟYo‰GQ’R½ΣΧ8£u€ υ• ’QΊFJ,IZΩ0‰ξœ:MγxρWR֍’ ‘„²fξ›%1£Ž›F±Υxψβό”Λώ9g\Ψψά­ηtΪ­}ο ρLˆyp„δ΅SSWbŒš6‘ΨρPϊ8ΰ!°Νΰˆ‡  ±­x¨XˆZν‘.‹…‰…ΜR,δρΔCΕBμ'*βs( -*.­XΨ‰8ίzΪζXˆ˜ς†΅;ηύ€‡Ž…£#Ξ‡» ΎρsιήC #_ψdΝΡηk±9\tΦ―³Ιœˆέ‹s8ίiNy¬+/9ζΙ%Ο‘Ί΅ΗZttj—zρX."»–ή·?έ ˆuδίFaŸζ’ηZϊ”²ŸΕΉ8η8―cαΘΔωE?ώ}υ…_ββάΕy?:F₯υό±ΞDτωEwΔ C'„$„dŒ„$ ΔŽ GΎ€ˆYΎ/2 GeΈ "˜ΝŠH,Aι‚«8ηk<iœl„€βi™¬§€»D‰ι›κq,>o<°3]"Έi¬ΛΗψ%₯”v"ΞOβ\hi\πƒξ8η½އuq>xh±ΫΐC‹…ΔC‹…―%,\<΄X¨½1 !ΘρΊΕB πͺ,DΊ:έ~\»b!Ά‰…ψˆ‡K3v"Ξ· βΌ S6ξLœ;φŸ8 ‘ŽTu\qΈζγˆω žŒ1ωŽΓρQœ§ξνL‘υΩLŸχ_#ζΪ.wkη¨4Mg§0OΞ9šΒ‘= m4m{θίZ'ιφΥΙяB==‡8Ο‚_ΞE>Λ₯σιyjŽ:D>§sΓόόΒ=P}ώž*Ζή>ΩΕΉ‹σq&₯}Α¨“Q4@*‰st,F‡bˆO/ŽΡA­%)H&"Ί+$£ΕB "Š(#k"£H«dσ6HFΈBt†Ψe˜Ž]6cγ#vkgΝ$Θ)'œϋqέpƒ8"ΧΜΡjΈnO\1%ά5„s 
μ.ΞG&Ξ?zιτZχhOέςIηŽ‡c‹‡ΟޚΒ)²‹»ΕBβ‘ΕBΦe[,NG"Ξ-b[±™BηΔBϋΣ‡‹±ηQ'Ή8wq>ΞdD„4Eχχ† ž ’„xζΉ›#…[!ŽΪJQ¦qbH)'8FΈ.Z+GaΞjμαˆsόNJqήΝ.Ξ›ρp4°βάb!ΕΉΕBfY,čΌRδ ‰‡ ycb!Δ8ρP±7%!φp. uŠub!pxH¬‰ΔBβ#Σχ ρύ9-Ξ·βΌ k»8w,4β\kΐ»΅’œβœ]ΨcΈTKŽ}βύ₯%Κ¦TwsŠsνܞΣvNuOΞ)d‡η&]ΪkγΤLχšHOιοtΞσX4Dz_žyŽm“RΪ‹ΒάΜW?KF°ιηb°λ½cαΠβ|ζY}ϊΗcΉ8wq>Θθ/¬ ταΣE]^- δ 1!…0βLsΤ "5ΑΖG ŸHηD°Qœ:H m ’:Fˆ€DΥ t`:1 ΅xχ‰ο‘ۍσBHSˆΣAgηavF€`2•]›(a?o0pnv%Ζ6ά \‹¦β#@ΖATIF™ζIςJG ίΎ³­w|v.‹σc/™ž3lœs“‹sΗΓ‘ρpXο&"ƒxh±"%,DX,„[ά²XQ lS,$ζ ‰?ΔB,ΕBfϋ ι”[,T<Δ~Ί±ΐAœ ‰‡Š…XЇЅΔC‹…އΓη›ο:΅ kmδβά±°έ9ΧmΓί)η8ŸΦ#(Τ$ηΒ’œ]άššσ(ζ₯«{ lsF:υ4’Œ³6VΛ#Ψ’`Ξξ9έlŠb€˜#΅<ΝW·;;Ϋ8.§ '—<7™g>¦«γ8vƒ7ηΓ’+ŸΟ Aρl’"ΟΤ}f ΔŽ…ƒ‹σ³n½ΏϊδέcΪ'Ί8wq>ˆιHΕωcƒ4Zx]kŒΠs·ΧΔ9I(H)G„iν9ηϋ²ήΫ k p³’sDχŽ έ#lƒΈ±Γρ`βάΊJ ΄p|؝݊t:ηt:Ηοg*;GAτc?ήΗnΛΈ]l€wŸ|>lΟ2BΔί`IF9?^η&ψχ>k1ϊQœΔ9;εΫ8λFηŽ‡c!ρp8XΘ,"‹…x,a‘β‘b!πl–8ιΔBβ‘bαPβάΊμΌqΙ1h y³R±Ξ8ρP±ϋ‡Š…Ό‰`ρ™ΈΕBΕCΕBόΏRΒÎ~χ ;η›qή„…ηŽ…ΆβΌρυδŒkΪόB•†ΰ(5yNŽύ*ΤkΞ{zOζ¬EGŠ;»€"ΞYγΝ•›΄%·œ―ηTwοΪH.Η†p%GηΔΟF8qΰΉ(βy#€7ςμtŽZƒ0Χzϋ$Ξyƒ’“Εο΅ΕωΗnΉ―:ϋΞίcΧ|ΤΕΉ‹σ JJ»Us™œ"¦p‚”’31ˆ)Hι_žΉ6RtαE³mΗO’1Φ_’ 2ε‘$1+OV:Ρ¨ΣδsZu•HJAϊ8kœ.Ίvfc#O:E8  ιμnŒ}8ˆ)H*φቦηψŒΌ€mkkπρ=αϋC΄~YuqŽ΄XμCκlυχ[ͺκoίo…]Ή¦ͺ|«Zτψ•UυΔ7³8Ÿ¨€΄qώ‘‹¦ηΉς6Nž‹sΗΓ±ΕB-ρ±XΘ–Š…:bM±˜€Η%ΕB;rt₯ΕBΕCΕB–ρΠA'²{;EΑij†Τ5B„"Θ(ˆBI)ά"Qš!QœS ƒ„‚4βQ$S6B" 'ΆAJωœ€•3sqor4ŠsΈvH©ΕΜe<qŽηY #=wξ₯}#Ώ βœι΄6N»ΑΕΉγαΨb!ΑY,€{n±PoV*-²©d Š…ΐ%,Δ{‡%,$*ͺπΆXHΊΗ:R-ΥgGלŽ9kΒ9ͺ,‰υ(–Ν(΄μΌβ[RΦ³0OιοΉΖά6‡γ9ΜΟͺύwaέωplLg§0³ a—ήΠ(Ώ[ΝV°β|‘”tσχ?ΔωIί»·:υ–ίγm‡ί‘8kRˆ;Bό!=.ίpάniΌδC!N–ύ ρ§χ¦Ψ]^›‘ŽΗϋvuqΎ4’n4E „ˆ£‚@FΩΔ‡$ŠbŠGuHΚHNIxN"ΙNΕx9ιΔΆ’RMŽ*aε>Φ/‚ΰBL£’άXgΒ©8k)IjτΉΎG%œ—Χb;)³^žŸWΧβšΚΨΟߚ£΅ξŽί'‰,ΆAψρž˜ήωάννβœβΫ|-‰uŠσEΊhρΏ‡‡Ξ­=xN΅θwgU‹ξ?3G/ˆσΓ/œž'6NϊŽ‹sΗΓαcaΗxqώ‡ΓΕB₯Š…ΐ+β‘b!Ε8±΅ΪtΟ-β9ήΓ4xb!Ž»ˆkЁ ΅Ώ†b!Ž#*βά%<€kN<΄KρP±°…‡‹±η!ְЊsΕBβ!±PΔ9ρ0cαgΧπ°Δω¦»LmΔBĚΊ8w,ά9W±ΦΙRaΘΪsŸFΧ–Β<oeMΈ‡ΰλκœη:읻x.ΈΈηy4YJuηόπ,ΦeΞxvΣ5•]Bλ³0—ϊς|>ˆrξγ{B`$[ΎΩ(.<Λιb}ΔE¦΄γs‡Jœ—ΎK›…Π$Ξ΅9`ιυ±πˆσιΧ¦ϊθχc‡χΧ©8?b!fŽY&ΔΓΐζ/1+Δ†"ΞO(ΌgΓtάKB¬ήΏŒ‹σ₯‰”2₯Ν‘B,š_ )ΘH]!’P8ΒLλΔ6ΣΥMRG Δ€‹ιžxΔ>­Id“‘&qΞ€c‚ΘτOAΎŸMŒX‹Ηtw69Βϋ<Ηƒx²{1η›S γ=¨Ευ”Ζι|^.~_ψόό>ΪΕy•»ήγ» σŽχΰ{Δq‘˜ΒA9]”'=ηb²JΒΔ{$₯ˆGœ/ΊχτYœΔΉŽs8ρzηŽ‡#ΗΓN°‹ΐC‹…Δΐ",R[,„^ΒBβ!;ž+",²ιœb!o6-****βg•πΠޘεβΨWΒBβ‘N!*βΖHΖCΕBΕCΕΒy—Υπ°Iœv"Ξ7 βΌ kΈ8w,ƒή‰c·vΌŸ‚άvbΧPGWƒ"> tm Ην¬ΓNβ\ηŒΗšot­αF|J‡Ο‚œ‚[ΆsJ»8ηYlσu:δtΤQ{NΗN8έρ4+]Εy­ΎγΣ¨΄άψAanΔyθLmΧ – ±ίsI—:χ7‰σ±ͺSοDœύν―¦ίp_1Ά;΄cqW{Υ΄½*žŽΩ*ΔmΖŸ1„8ΟΗ€η·α<.Ξ—FBšΔω‰tgod„‰’D K›Δαu:Fά&I…Λ„m.R’4 v6£cŽηl”ΔŽΏxYEJ'…9ˆ"D7Θ"$Ό—―£FΗ³ϋ:Σ0ρš r­§΄ β@Hαα<³d&;~o4Π-#W2jΕ9ι›qvﲑ½9R#±d%&Ι(] Ž1dγΌ†cρ>Šs8¦Ώ³V’N›ͺΡ="1UB ’IR‰ΐsR;‘ρΊu‰π>M¦rrN0Ε9»3“ιTœΣ=C0“5¦όnπωρk.9+ί/©Φ©β8zRή!‰$45§ͺΉκOίX-ψJ«ήnΕ99Ώ$€tΘβό ΟLo«—eσνs]œ;v„…#ΒC€M?w{[ƒLβώF-ή—Τρ.0₯;l±έΧ-9’LΕ9πΝb!^Γq q“ψ©XΘ^ ιšc›β8HŽλP,΄xH,dΧz|6ΕBΰ›β!±ο'*κaΕBΰ`ΖCΕBόށ‡ψ}+’ԁx8†XΨ‰8ί8ˆσ&,D¬ώzηŽ…#€wΖΙ‰sNלβœ"›"p~šsŽ€0§(Χ}*ιΐsτšΦ©C€Ζnν)e΅η¬αΞϋ€ΊŽ]ƒHΞuηZƒΟS½zθπ"ΚΩy½–βžfœS˜Γ9JœΫΩζ9u_…:»·''=;θI€Ϋ€ό\Dz©i_“8WQž›ωMpqώoώ²:ςΫχcΛƒώyHη<¬;Cά_ˆ½†)Ξχ+ˆσ‹φ*)νύB|=νΏ€ ΞχuqΎ4Rˆ΅Ή€EVπˆyAςPcΥ’3Ab‚ ’B ‚"² Rς…ΪGμ)Γ~v,ζ#ά I~6~^vlgš&on€”2V;ί#°MWηVΧ„4»EUj"‡TNΤWςwŠίΉŠRT8Dt‹~}jŒ‰*ΞόττZ§i£uqξxΨ!* Ρχ!e°(²”Ηb!ώv ρχNŒT,$VX,€ΐΆXH7άb!{w”ΖX2­]±Β›xΘ1jZsŽP,Δ5- qΓ‚x¨XΘ β!±P3  kx¨Xˆl ώ^ Sˆ‡cˆ…ˆσ wšΪˆ…ˆΥ\œ;.‘ƒ^J9§ΨLΨSœkZ;ΕΊΊεsΣΆŠτ*ΚΩLΦι=u.ΟΞ9έsŠσ$Κ)Δkβ<ν·γς4¦¨CDk£9­OOΞzvuž9yšcΎομN—žΒ›sΪ9ΧΌQ §q57έ tm’gΕΉτ°βά~ΧϊZΝ5Oη™ΘβόΠ―’:μκίcσsμΈ¦΅›γΦ‚θ―<­έאDdňσΈ]"¨θˆ›ζό²Ξ€ˆι‰ J‘$α˜…ΧE2wΔ‹΅ΤΛ ‘±ΖG:η ₯t’΄Λ1ΞΓnΖJJιڐ Β Ε#ˆ*^ΧΊs’RC£’ΗΰXΌ$˜ΔΈV~>¦­’€γ9S2ωYœ} ΒjΕ·6•"!΅υ¬ŒZν%k.Ρ¬*|χQxΐ‚θΐο ©ψΓ5#‡¨β|Ώσ§W§ύςͺbq΅‹sΗΓ.α‘ΕB6W,‰uΰ‘ΑBu¦·ΗΏΙ„…H―f:·ΕBŠrΕBv^·XΘ‘lΐBŽW‡‹…tΫ‰…μΤβάb‘f -FβžnV**κxβ!±AόS,d)ΕΒ***B  ŽεZRqώϊ·MmΔBΔͺ―sqξXΨ='bOTλ¨ͺ#KQnΕΈnS¬σΨ,Ζ₯Α™ΦNΧf₯§zτX{nάr:ζyάZνΩ©–:tv€ΟMδδv~zvΠ™ΟξξμΙY‡cNΡnΐeΑΝkΡPQΒ<―½6^ΝΤηΧfΛ›qZkn;κΫxΆ&}"‹σΏό³κ +UŒ7pL§βό|ΣξΌΒ1/ 
1;5vcCΈ(θεΈι!IΫ™†p³½!œ―vR ’βΙq\FœΗΡ4 ’Τπ₯τAΊ΅‘ aΌGx…@§\/Έ#t8@ZΩ±X:ž³Ξ„“7ρ~ V}Lo')e³8%₯ ’LΧ$)₯8g 'I)gόͺHgΣ$Q¦ΒƒΤβηrlš†NqNΞΞΜ–`3Xgͺ]žUœ«cΔΤvώ R¦pfηΏt)¦ β™~Ώ™”"}Β|Œ’nˆσ}Ο›^Νψ·«Šqψ7]œ;Žqž±‚ s?ƒ…Ψ―MΝπ7ŒΏήΌ΄XH‡άb!Sΐ--2»¨„…b!pNgœ+rloR*Z·)›šΦΞΧAF΅•΅™™Œrά©v&†c€₯ LέόεŒ½ Ξί5szuΒ=WγΠo,½βάρpt±ΨρΠb!Δyΐ:‹…QΠ§~Š…ψ;fX,„@₯«¬X!ΞΤxΕBμγqŠ…lg±Πή°d@\۞θρœ7*YΟN,΄xHΜBp)κh9;χ\±N³‰υf&ρP±°&Ξ XXϋύςζ₯ΰa/ˆσυί:΅ «,₯βά±ptVΙ…-5³ΞΉ¦·€τφΉ’Ξΐ±La·cΓ΄~Ί-uϋ©ΕcΪb-:kΛ™ΎσΔQηM/ιδ*μ³pgM»:θil»Άη”φTgcԒÞjΦ™rΟeΎ΅…m §ΟΥ9WQN±ΞύόήLc8uΛΉ΄ζ|‘ζ½"Ξχϊ«}ψE16ήο#‰σΎΑFΏ;Ϊ'δΔ€u•SΝΏί’ηϋΖΞΊpΜA~ΠPιιVZ;#Ž©)9©Α©Β6› 1@π@JY{ΞIΦhβ} rLo!Υ†q$§ZNͺβœ’›€”Δ”‘œd– ™μbƒ"„]ψŽ@0Ul+ΙTqnΊ%«:ž‰ ₯Ή;1œ"€qΒ΅“yΞΡ’@G}εΟσν†8ίσάιΥτŸ_UŒƒ―tηάρpt`›ΕBό­<Δί›’Ε焇Š…LoΗ± α’,2Υέb!pxh±―γ5‹…,)a‘ή¬$²άΗb!η γΈN°o(Zό³X¨iνVΈkZ;±ίmΖΓΦnV&,μ%<„8_/ˆσ&,D¬μΞΉcα(ΉθMi*ΞΩ™]z“0WqΞ΄φΨψ Πf{Ϋ1lQ #]<ΉΰLGgZ{ηΌp‘)|“+]ηβ’Χtτ4nM›ΏΕζpA—Οg—v[W‘ΝΟCE»μ+‰σ쨧&qv|MeηRη|¬y·ΔωŸQ΅ΧεχcΓwΨΕΉ‹σ>sŽθ.h:!q’bΚ'φ±γ1ά"ΈJ‰ΔF’ϊάν΅ξε Ž ž q$Y$]šςb W†$δ“οΓ#Ηq„Ο}¦aœ1Δ( t%¦ ζLeϋ°ΏΗδΰ°&_SΥ­€ϋ¬SΔ:N_%€ω& \;όN$3^κ)1ΟdB=D―‰σiŸœ^}ψ§Wγ€―»8w<'}8X1<,`!Βb!›ΖY,ΤτpΕBސ΄XΘΧ-R +²ξœ)ο - ‰‡`!kς‰Š{Z“―ϋˆ‡Š…tΞυ†%3Šˆ‡%,ŒΧ2°°q>eΗ©XˆXiηŽ…£'Π‡rΜUtS λ~M{ηλκΖΧfwλ|ο$FΩ!ήΊχ8>6‹Γx5ŽV“™ηY”SψZqžΖ―ε4wΦ’³v=G«±c{LiGέyΨ?’E΄¦ž[‘.7(j©μM)νκšn^¨8ΧeΣΪ{ )ΞίώΉVΣ.ύY16Ψϋ(η.ΞϋˆŒš4ΐIE=¦¦Όƒ¨šccͺ GΤ‚DLjskQgIiώ°F©›LΫ ιI₯λ‚s°‰]%ΊFZΞNΖLuW‘nέ#tΜρ>œ‡iœ#%€μΚΜ™Ύ–lͺ8·N’ΖiS8œ;w&†S‘œ"|m}Œ›ΐuKœοvΞτκC?ΉͺϋΝΕΉγαψΰa k)Ԋ…!,r6ΈΕBβ‘ΕBβ‘ΕBvA·X¨#Χ,ͺXW,,ᑦΓw‚…Ї₯”uηšϊn±PhZχœx8‘±°qΎNηMXˆpqξX8Z‹BΨ.M_·Q:NSίu&ΊvΧjΦ9/ΥΊγB6מ'Η\ΑE1βΑκLoO©οYΨ3Υ’_šΓεξμά;bq^jΊ?£άˆhŒ¦cLΝ9Ε9…9Ώw+Π{YœοόΩ»«·_ςΣb¬χ.η.Ξϋ Β Jλ-£LNQγϋ@Ёε8!)F:$gμΜΛDRv4¦#BJJBΚFDχ₯τχϋd0κ"G"­«Δ9Αx?ΞΙ”ϋα―VgF¦’ΗE/‰vuŠp6†‹]‘‘Β~tσT L€Υ‰8ίεμγͺΓυ›ΕΨηΛ3]œ;Ž-&ωHœ@6αΔ°‹;»σ2έ“δŒ γΰ‘d‚|5A:'λ1ΩHŽ©–³ ݊t†λ$ tΛΩ ~€iœυ/ΰξpuθ•YI!iWqQ‘ώΝώn΄ζ<9DμHόόOŽ‹ΡKβ|§_vΧΥΕΨϋKηΉ8w<s,ŒΫb!0 xh±ίl&©XΘτu‹…ηl"G¬’P–°₯? ‰‡Š…x$v„…‚‡Š…Šq₯ώόn4»@ρX]sβa kx,ΌηĞΒCˆσ΅Άί’ +Έ8w,cqψH ]­{ΦtuΊΙϊa—ρ:’ΰ樲$Θ΅3»Šs¦ˆgn»¨›ιtδsχv8θ!˜ϊήI&£ζžΣWΑΞΧJ!Ή¦ϋΫ²ύέθœσγXwή‰8ίώό;ͺ/ψQ1Φyη‘.Ξ]œϋ”Νž]#)€G‚4²ΖAD”’€“Žκ'ρ²FΟ†I ­œ ΜΗ³Lέ%Ι)I©ΊG:+˜c‰pό Δ-tQG9‘dš;ƒŸ[έ!%£*Μ΅Ζ2»vμ β‘_ΔωŽgœPrΫ΅Εxηeηw$ΞΣ,ΛΈ‹ο†XN^›β‘4ξbW'€Ύ:ΖΒ‡ΞΝXŒjYS΅‰Š…ΐ;φΩP,„»ΞιŠ…Ιf±xH$*&*κΘʎ°ΠβaΒBΊθ ›„y Ρ{CρΠ`a?ˆσΧn·E#"VXoŽΔy?ΰ‘caο¬’Θ¦X§ΜΧθ0ΫgŒθœCHf™«sΞζiΉ λΟm7uνšήΛ&qΉ{{xljϊ6,=₯κsΥζΉ«8—”ώ~­5·βœίΣόB™A?ˆσmΟ½­ΪώS?,ΖΪοψ ‹sηΎ†EL8;(’0ŠM6Ib$Žαœ•pΒ!β|[M}9ΥΪKK7΅š%‚J’ͺΆ9’οi}eΫJ]œ££#ΞZt%¦ό>΄N ‘±4ηΛ£Qs€r†Θίyšg"ŠzΛηvBŒ^η;œώΡκ |«{\ϊ©NΕωΫCΌ(mΟD€ν CΜ ρ’k§™”Λ8!υΥ-,Œυι  ™n±Ψ<΄X¨³b!ΕΊΕΒϋ€?Η`xH,΄xΨΡ*ΰ!ΗΛ)ζχψ(βy# XH<$φBœOήvΛF,DLκ\œχ<:φΦzh[‹ŽΗΣ\Ž)Ϊ¦Ρ\Η–Dw―Aˆ³žά:ηlWšvςΙA)ξIπ#–tΥFΖI:z[£8¦°Ϋ9ζšξ/ο/5‚³Ϊ±4­ΎWΕωΦηάZm{ήέΕXkχ#\œ»8χ5lRŠNƁHΑι 3’υ‡ ₯ — €Ψζ,]<'!₯cΔοΪευ%G]G―iΓ$ΨΟΞΘ;EXF˜Η»€©ξ’BœΑΪt>'!ε<ίΪΘ Ώ}?w’΍©RSͺEž³Xœβ€!ΕcŠσmO9±:ΰΖ늱ΫEŸιZZ{X{‡ΈJ\’ςΪm!Άrqξ««=ΰ!ώž ‰‡Š…ήܜun±Ίb!έv‹…¬_W,lΒCμ³xΨρ2XΘw‹‡lφ¦7-μ|σ6,$°0~η‚…˜qήkβ|Ν­·lΔBΔςλv&Ξϋ {o©Γ;ΏΠνέv|Žοžμφ‘šΗ³…νάπMα4΅έΦ΄?ešΣ©P† O.{ξ’“UϋMέ€­ΫNοC‰s[ŸΟ₯)υ₯Qk½ Ξ·<λΥ֟Ό«―ζβάΕΉ―αR§EwE‚Ε™ηxdz#› ±c/H¦:H¬1΄υ‰?„γl”Drͺ΅ιV¨“€ΒR"Κλ#k~4ˆcDBˆ$>/‰§vζ>KHu–/Ζα;Œ„΅œ½ό—kZ5–ŒΖ€;qHh$£Ώ9­'Εω63NͺφΏαϊbμzα8θη8ΏΔKψ³n qPΪΎ˜Ϋιω!ήνβάW·ρΠb!…g y£R±²ΕB:ξ mΪ;±PρP±pViI<μΚ,„Έ&**ZqN<¬aarΜ#Z,ΔLsΰ‘`!φυš8_cλ­±±ό”)8πˆ₯ {siϊ5„χ€ΜJ·ϋζ›ιΨGAΝ΄n>ךς,Π13ά4Q³#ΪTΤFៜψ\§žΰ;]EΡmέsη…γU˜k—v:γκŽ—Ίέχͺs>υΜ[ͺ-?qg1&οφη.Ξ}„Œ’`‘{/HλρΘtuMοdΊ&‚Η*aΣϊDŠyΊκt¬sd›&©ƒ4+QΗ‘mΈ†ŽΙh  v-Ά‘‚’v$F gtŠΠX)œ3ΊDOίΨκPŒ”Nt')%Ε#:Θθgχ€8ίςΔ“«}Ύύbμτι †tΞΓΊ3Δύ…ΨKŽ95ΥXΎ0=Ώ€@Fχuqξk4ΔΉb!°Ϋ%,€`·Xˆ°X¨77 ›ši–²‹T˜;ΖB,ΑBμ‘πΠbam”$±Σ*Φ°PΕΉ`!ζœχš8_}Λ­±±ά:Sͺaœ§―ρΠ±°7ΣΥ!Βη=ρךΗΆξc,Η]E΅Šσ˜’AΝFotΞ“kN³ VMΧκY€sy8OΗβ\…·ίZg^zέ sϋ=θgZXηژ―ΧΔωfςύjκ™·cΝ]wqξβάΧ°Ιhͺ³Œ‚2u.Vטξ‰ͺv3Η#%jš ͺΗ“ΘΒAR׈aέ#>ή—κάρ:Ι(;¨·ˆt‡€4₯υ3π™ψyΥ5²ΩψΎ’S„¦Jαϋ‹eα16BzόΚ–{„fH Ÿ € qΣΗcu"Ξ7?~F΅ηΥ7cΗσ.μ8­=¬CCάβεU½ω‘§΅ϋ}<4XH‘j±S,2½έb!ΕΊΕBΊθ 
K΅ιΔEβ‘ΕŽπ0a!n8–°PρЎδq5,φ%<¬a!Ίβ‘fυBœ―ΊΕ֍XˆxυΪC‹σ~ΗCΗΒήη¬1§SNΧ|nηθ*š΅9„&λΧuD[ΈlώFAΝ¦p©ΌM·³ΑYOηَ=cΉNEzMˆRΧΫΆΔΉŠpΝ ΰsργιšw*ΞίtʍΥ[NΏ΅«οό~η.Ξ}ˆΞžΩ"L_‰H)›±S/ˆφ€q-»ς‚œ•ζάΪωΉt8nH›$1HJt‘° Šγα8-±8‡«ƒ›°Ÿ Ÿ‘€TC€Ά”v€Ι£Ξό/Χ΄Δ9ΆYwŽt*ζœ_|ߝΫ"¨Ώ>΅ηΔω[¦ŸR½γί+ΖφŸό\§ αv ρ»+™ύ™H³½!œ―QΑCƒ…ψ>X,$Z,dΓ4‹…λ )Π-ͺHW,T<΄XΨ&,„ϋM<ς³η•ΘJ Ψά«α!±uηΔCƒ…½„‡η―™Ίu#":ηύ€‡Ž…½-Π5½Β˜β|ΈηΆF}‘jλz³Ž;Ίή¦ω›¦«ƒlg·«@Οηa>RqnίΧζŽ7Ԙ—|“knχ[‘ήλβ|Σ“ΏW½ω΄cυ\œ»8χ52~φ»³ ΗDJAΞ,Ρ$ Γ6AβHήΤM!)₯ϋ’ιiύ₯6J²ξ‘+ŽΑρlΜ„σ©CΕm m]9ε’dT:H%HΈθΆρχ“΄γœρΌilPŒδœGB Χ靜MοdNΪο9±ηΔω›Ž9΅Ϊυ«7cλ³/ξTœc4Π£!ξMρ…ͺžΪωp4m"°γaγa Š…Š - ‰ U +*Z,$Z,T<ΤG‹‡ΈήψΌt³2l+……σ5€΄ e¬šΕΒ^ΒCˆσU6Ϋ¦ ―ZkέNΕyΟγ‘caο.M³VQlέslkP4—uuΡsͺ{rΌ±­UΕΉΊχ₯NςΩA—nο»η­£ΎsΣCdQΞτzηpσ‹NΉήm]Ϊi—Sϋπ ιζή βό ½‘ΪdΖΝΕXυ­‡u$ΞÚβŽHΛrCσχ ;O–ύΧ Ž>‚Η΄­O—0ΦΕΉ―ρη Jp/0=Σ!αd%Ε:Θ^$aUέeQBJΒΦ$ΠΥ5R‘ΞϊKuτ9^'!Ε{X/ί$Ξ5Ε”Ž")%!E &\οηnΟ„T]#žGE;‰(?vΟΡψ$”O|³Υ )pγX{ΞTΞ΄έkβ|“£O«vωςχ‹±ΥΗ/ξZ·φ^ΗΓΖCƒ…ΐ¦z+πάb!1bΈXΘ¦q ΅½„…*Ξρ€Ι+–ΔΉb!žΧπXˆ΄ώ„‡ΓΑBΕCΕΒEΎb1*b¬šβ‘`αxΤ›w"ΞWβΌ ΛΎΆ3qξXθk<—¦Z«kM—œυθκσxk[tn«ƒ5κΩAO‘buΑ“υn κβœηΧfkYμ!Ξsc:έNΒ<¦Ϊs¬Zƒΰuq·΅θΤNέqό%aβŸ«ΧΔωΖΗίP½ρ€›‹ρš;ηηQlγ‘c%Ν1Λ€–λ„xqΚ.Ϊ°pά§ΑeEœίοΞΉ―‰MH:7ΊZKHΡMQ Bˆ4J1:-$§«ΉxΏΌLρdΝ¦Φ€kww’Sμv "ΛfsLσd*ητκ¬^\C$!RΚkf0՟Ÿ-~f6•Γ#ΞƒGQ8E ’h‚jNkΧNΕΈ!’ζϊφš8ίψCR½υ²[бωιŸwqξxΨWXˆΏwφ™P,$F,„ όHX¨7ξˆ%ΔS‹…ΔC‹…κΪ8Ž7'KXΘ±m YP,T<,bαΣ7±ΑΟ–έrv|W,LxXΓBˆsβ‘ΑΒ^η+½yΫF,D,;ΩΕΉcaο»η Ÿ¬wnWq¬uθηόίVΊ»Ίζ6=Ο)Ξ΅œγ6`κένH7ŠsΐQœ£Ž'Mγ ΐS:>―')φΫΦεΦzρβ(5y3TΠ7Ήζό~½&Ξ_μwͺŽΏ©«lOŠsΈα«¦νUρΌpΜVθΏQ5τζHϋ^˜²‘ΦsqξkβR%$€>vy­~’5ζ wx$ Œ„$Nz$©‹Zδ―S°khΚ<›$±“Ω͘δΣG±c²R<'mθ$ΫtΨaΧn‰uM‰)Ξ³cΔ”v6„C')ReΩ©γƒ°ΉΎ?9ηΔωFGž^νpιбΩi—Ί8w<μ+,Δί7]sΕBβaBΰ –πN;πΠb!»Ί[,΄ΈH,$φ)b[ΕΉb!·΅f>b!Δ5ρΠ`!ρP±0~vβ‘b!oTZ,DΝ9ρΠ`a/α!ΔωŠAœ7a!β•“ΧsqξXΨσβœΝέ(”5¬8gXMΗ½4RL]cM(t‹gΨFt l-{rΟs$žqέoDΉ—f]πΆ4u9NΕz©K»ήD°YΪτΡkβόuG__m8ύΖb¬ΌέϋpΠ—t¬dXO˜ηŽywˆ/Ισƒ1v³½~Ζ$Ο ρ!~b;ηΎ&!%IBόχΉΦ’3ΜIζ@PAς@όπϊβzō)’ΟݞI*‰)Ρ9ηΉι”ΪEšξI"Κz<ƒnQ)eγ::Y™‚\Vυ JJ•TΗΟŠΟ–ˆxIœΗHiΦo&£τΛ­ΩΎ!žΡτžη―;όŒj›ΟέVŒMOΎΜΕΉγaa!ώ–nY,$Άaaδ1ˆƒΔΒTΟ­XΘπznb!ρM± ‰}MXH<΄7+ρ cfΰαPXXΓCΕBη5,Tqn°°—πβ|…M·kΔBΔ+ΦtqξXΨΫ‹"’ΒQΊΦ›sΌΕΉŠk=FsαΆ‘ά|ιϟcΑiθx5M5Ο"]ΊΉΧDΈl7ueWΗάΞ[oJe/9νΦuoζόNzMœ―wΤ·« ŽΉ‘+msθΞω`c%‡)Ξχ+ˆσ‹Μ1—†8^ž£©ζ iϋ-ΙU•‹s_‡”‚,!σ‘σ#9cκ₯v%ζ|[=\›ΒY­ΙI'm5$j‘=MmΧ1CΣ–dκ¬tuˆH`K$”DT"ν*―„4ίXHΧ¬.]/‘„+ωN]Šs38RQ4˜ΗQAέηΌcΥVŸ½½›œδβάρ°Ώ°0Ž{ξφ6,€8·XHμhΓΒ„- Ω―£ φ€Νͺ0'­8ΟxΈ¨=Κβ`Ν=·XH{kΤD2§B<“7)Δ8šα{S·cƒ€K{?ˆσ)‡|ΌzΛΜ»Š±ατ/Ή8w<μ/,„Λ‹Ώkƒ…©Ζ–:ω!§Άσ杀΄—2txΞκΛΑ°Πβ`“(gφ“ŽΎT,䀎a!ρP±PρP±ίgΰ!ˆάroΨΎ /_έΕΉcaο/ΊΗΆY›ηLk§8η>Ύ>ΠΰšDyIœqnΗ²ρm긊qΫ NEΎ:ρ*Ψ‡sάΒ‚°WW]ΕΏ­―οqΎφαWWλ|πΊbLΪ’cqΎBˆ»(΅»( ΓZ-Δ-rάξ!L]ΫO5ηψjˆ#ΝΎ}CόgςΏ ρΞΚkΞ}M”›ράsb«1ο²MŽQSΧήH>Q[Hg‰d”$Uj-s=¦ΜΡeC$uΡIKb½κ’–ΖNΗ\gσ’Œ’|Φj(…D·Υ’j]=>/Θ'Ίc62Ζ©8Ÿ`)œ;ηο=³Ϊόwc£cΎθβάρ°Ώ°"xh°ΨΡ†…Ї ‰+Υβ– ‡ƒ…%<,e ‰‡Š…šΦή†…¦ιg -**ΞΩxσ·kΔBΔΛWσ΄vΗΒήζL/uSךrˆp©Faβ|~Α5Χzr†νoΣΪU`[^rΞmm·ύ9σ ξ|Ι₯η{μ u­X_ψdΉkύ@Ÿ‰σΙο»²zνϋ―)Ζr›Ώ·#qή/αμkδ„T"v)ΙDŠ"Ηγ¦β5#!Υ€λΞΞΕ*ά% žΞLi̚†μΦU²)ρ%aΞsΡύgύ(ƒiœ΅4MK<΅³~.|G ŸθD BŠU€s*!ύέYκχέ‰8_οΐUS?~{16ώ°Χœ;φ’AπΠ`‘:Λ5,ΔMΝ jΊb!³ŠFŠ…¬‡·XΘ±i<³Ÿ,jZ{)σ©vsuζΔΓ4ΣW„9kY—^ŠRj»MΧ†rMΞyΙ½f·y+Ζη/(wžŸ?HšύόAό‚'Λ‘7xάx7~ qΎζ!_­&ΣUΕXn³]œ•8G|ΆΔŸ΄S{ϊn{]œΏξ=gT[Ÿ~k169ς K­8w<μS,Δ„4Ι΄XΘϊj‹…xMk‰…ΌΩ©X(sΡ-ZKΒΓb–9ρP±0Ήζ½Ž‡±[ϋλ·mΔBΔ+V]:ΕΉca¬Œnι…λσ`W¬βέ¦½kPΘς=sϋ>TJΉ­νž_hP7WΊΛ—n(”KEόΐ‚vηί¦ΐ—ά{S`ώ‚ώpΞW?π‹Υ}₯―~Σώ.Ξέ9χ5bBϊΓcc*'"R'ˆsI%`:ˆ)GˆeqŽ4FΤ²f]Ι)‰,έ’ΌEw·Υ8j=¦λκ%—]C]"νFΜΤΝbM₯’Q:B >ΗgJ<’O sΘ)φΑ9’[”Θ~tγπ]‡θUqΎαΎRm7γ–bΌωπΟ»sξxΨWXέs€`—°Pne,v-‡‰…Ÿ,j|“Λ^ΒC+Ξ-ΦΔΉζŠ‡Ϊ “x¨XHό*Β9'N,μDœ―ψΊm±ρΚΧ¬λΞΉcaί‰sM/Υ’ΫFpσŒ/‰sΫ$Žb·$θ >N­ΤpΝ^Χ\ι,―aEΊσsD»¦Δ«³n…|ιsZ7ΎWΕωͺϋ]\­vΐeΕxΥ&ϋΈ8wqξkΔ„τŽ,NεΌν¨yϊΣE-bI2F'\ )φΡ1Χx,ή/Μ³“υιl˜€’ΜOη86u“X#IbZ"§V˜«sξymt­1Η΅iZ*‰94)Μα©(ΗsRŽQΓߟΠβ|£}N«v8ιζblvΨ%.Ξϋ γΝJΈΌ ‰g !zσœoΕBŽX΄X¨BW±p‘ΑCΩWΒΒΑ„z Υ9W,lηΔBΦ”[,d ΰ‘b‘ήΈδ|sŠsβa―‹σ ΆiΔB„‹sΗΒ~ηϊœ؊Λω 
ʍίJNtIπΪu{œ«f»§k#7{=L«§ΨtώΒjNŠGS”Ίώ|+άν¬χ&>PAΗχχ‹8ΝΎV«ξωb,ϋΖ½]œ»8χ΅D€τ£#†Hh„ΘU&‘ ™$dš¦ωlk΄PlŠGwHζ}Χ:Ι*Ιh©“Ž’€{ΪϊL6e²5š6”ˆZqήFF›œsήxHŸ=Od 0…ΔΫΨ§.:R;AH‘ B 2Šο9D―Šσ7μujυΆγo*ΖΤC/vqξxΨWXŸKXH7\°£ΦŠXhΖ-Φήk±N5·«φzoλ¨λMΛα`‘Šsba£sx¨YTΜ "φ%,Œ‚]ρXqN<œXΨ‰8_iύm±±μ*.Ξ {±vۊυRΧq­ΝΆ΅γΆk»Š^Šb+Œη!kSΙKυάΊΟΊψ*ΜhΕ#q-κΦ7ΥΫ¦wφΔΐ‚'ΣβKϋ&ΒΌσNΔωΚ{}ͺZeŸΟγ•οιβάΕΉ―nΈGΩ=·‚šn π@β8Ÿ76’cž•ZΠ9")eν¦Φ³«›DN‚ͺδ49HΆ&SέtuΜυ8uŠθœΧζυ.’Ρ@$㬱ΧϊJ8εΨrŠŽΔΨ‡ΡAΙhtŠ@ψoώ`Œ^ηoάσ”jηι7cΛƒ/rqξx؟xh±PΖ(*O€‡mX˜šDΆa‘ΕWb β‘½Y©xhάt^ΒC-νΡ†˜Φ9oΓΒ&]ŒWl΄‡‹sηΎΊBH4½Υ €’€Œd‘nPxΗNλqφyͺΉΜ#tR-zΑ,ΉθZΓɟ£T]v#ά΅™œΊG6”ΪΊφΖρiϊYy½ηhϊJβ‰ΤM> Eh'eΏχώ½HF)Ξί΄ϋŒj·άPŒmΌΠΕΉγa_"j_„…*Œ ‰mXH<΄XhωŒ…Ό) ΅ί"9–m8Xhρ°±ξœ75+€x¨XΘβ!±έο'ΒZRqΎΚ”­±ρͺ•¦Έ8w,μ»₯΅η $΅]σ…¦‹z©9šŠσG%΅\κω…Tρω †ξ>ί€Σ—Δ9„ωk tΔμ$Τ5΅]Yq>XX>˜xŸ?A\σNΕω€igU+μρΙbΌόυΣ\œ»8χΥqŽΕ$€tPH"“@‹ξΞn #Œο‰Η‰%2EΊ6#1ΥZL›ζ©s‚IXUΌRͺnΈ:εv~o­ž³ͺw«5@ς™―1Dξ> β‰ΔS¦o>xN‹”ήf‹ˆ~χ°½JF)Ξί<νδjΪQί-ΖΆοω¬‹sΗΓώηΔBΰ1Λ`!έsΕΒΗά (Υ›Ο+μ+=/oλΝ'Š0οTœ/φU“¦]Œ—m°«‹sηΎ:樄sR5π•€8η˜ JHΖ@ζ0²gΰι―Εχ8 Xy|δˆ1#ΠσΨ!u‘ΤIΧρe*Πœ£9$™¦Η³ΆS»&3}_ˆhT³Ωˆ(RΨ$£xd)œ„>wΝ!1z]œoΎΛIΥ;?p}1vΨχηŽ‡}‰‡mXˆΗ„ 5,L’Φba-‹H±PGPΪlΔC‹…ΔC‹…*ΰ -faxH,Τ}Š…šΒXΘ "–σ0pƒ’x¨XˆΎ}€‡η«½U#"^½’‹sΗΒώZ:?\Γ©kΑl›³-Ρk%Ρ…ό`Β›ΗPΘkΝόpί'R:{7Δω«w:₯Zn—Σ‹ρυvvqξβάWWΔy Q±q(Ζͺˆ‚ЁΈQ΄²1Rr·‘Κ Rϊ—gέs]ΆΞώ6€T#»Hš6‰ !ΥNώluΧ₯qœ%§ρΪ™o›ΟΩΔκ1=׍`Gv|/L_§3D2ŠΧ°/5>zξ[‡VΟ^yPŒžη;ŸXνωώ늱γޟqqξx؟βά`ažRa±0έμ³XΣށy ε†e ΅F]SΙΏlyρPλΡπ°Φ}}8X¨Β\±ψ<„W,Δw₯Ξ9›ΐέόΑΎΐΓ(ΞΧΪ² ―^qηŽ…})ΞK³ΔuΎ·¦½/wλhΟ5β\έk„Ž\³‚ά¦Η—Κ)Κ™:oΕ9\s<ςgΣIη5¨ΘΦΤvžWSρUœΫζvφ†]σ~η―zλIΥ«w:΅/]χm.Ξ]œϋꘌ’Q:£Fsi8{ρ¨8’Ά$\AYοw&R98*§ΓΊl† w’½DVk.Ί­qΤΤO6–Sχˆ‚]»λθ=/Ο-M›” η‘@Ό6\3I(‚.Ϋ|ν·g΄ζ%ίρ‘θύύς&Μο·qΎε[O¨φ9τΪbμ΄ηω.Ξϋ-FΜ#Ξζy ρρ„έΛKXˆ Ρ%ήb!1KDsvΫ΅Ρf ‰…y"`νX³9CŒ8Sα=Χu6qΓ1V@λyι’³ {ŠvŠs­…ηϋJ―5ΝLoj‡Πζy}ΐ ώΚ펫–έαΔbΌd\œ»8χΥ5BŠζ= XF±ΡL‡FΕ,ˆ*]κξάx€0wλ壎γL\λΔ€9Ή΅št­s$A₯c₯ š4””ΪσΠbC#γ\Υjε•‘\σNΕω+Ά9ΆzεvΗγΕkoοβάΕΉ― tԝƒtbS;AΊ΄nRI# ήσwΆκ»ΣlήLΰ@άΨ,Hέ#RΊΣ’ή™·K„”.νψNrͺσΦ•Œκœ^M1ΥZJ^#gψβϊρ€tj9>ΫoΟh₯oώϊΤΦ¨ ”άόΑΎηΫn{\υžΎYŒ]w›Ω‘8λ¬Ώ qoˆΫC¬&―ΝρPˆί‡ΨΥ ©―qΕΒ_œΤϊ›·X¨΄`a|Z,ΤΜ"‹…Μ,2X˜ρP±P1ΞφμΠIβπ·α‘fY,€Ηu*βσνΚ„‡Š…ΟίτΎηk¬9΅ Λ/ΏvGβΌπΠ±°?—Φ›S «‹Ξ4w­;ΗγΣOώ57‘£ϋ=O΄Š`λP[GZ¬i½Ή­QgΪΌ½ 1[ΔΉύωVkΜ1Χ₯ΒΌ42mžΉy@χΌΔωK§U½lΛ£‹ρ’ΙΫt$ΞÚβŽHΛ7χ叅ΈΈοK,uφΥBŠΡ_ ₯AlΖzAlC|B”‚ Α9²]ΦΩαΟΡ9€Ηk'_M·Β\ ©“œνl\KΉΤNς*Τm‡a;DΤ¦•bBJBΟ‚ζF €ό\¬·d]%R8Ρψ¨œ"ŠσνΆ™^ΈUŘΆλΉŠσWΙφ1!ΎΆ7 1+ΔKB¬βαΛ8!υ5nXˆŽγxn±5β SOŒ6,Ά”°8¨xhjΥm—χŒ‡ υQ›z*j£7ώ\^E9ρP±8H9ΔΜ†γΆρζ‚8/Ύ¬±ΤΨWχH)FήόδΈVj"Ά)ŽH lLƒ¬Ν2§SΓFnpbθ²Π)R²iE8E²Φ£+I•nοΪ58 t¦fjϊgiΎ:G’)Υτun65"eϊ&Θ(Θ'›q?sQχY=ύϋͺg.Ϊ/ΖίΞΩ«ηΕω[[΄χ•ΕxΗΫΞιZZ{Ί›y©lϐΧn ±•‹s_γ†…iX ‰QŠ…©›{RœΫ°xh<φιδ ΑΓ"jκ½NΖH8™±?Ηb!qŠ…x$*β{! >ϋΥχφBœO^mσF,DLZn­ͺ[˜«xθXΨί]EΈ¦΅k³8ŒsŸoΊ7Bq^|Xc©°―Ρ!€h’–ΘhN[Dύ%αš€ΐΤ‘t5B:'έ€Yεaƒ! 
:6tpT˜+Y₯λdŸΫσ•B›±nΟ5…]…9ˆ'‚ΔŸ—Ÿ‚―©StΝ!±ωH)j+)̏±KŒ^ΰ’{ή-Χ<¬λSΖέbuyνΤΤIw@§MTaξxΈ”`!ρ0V¬•΄,¦ Ÿ6,δόπ&,$ ΅λ»βŸn   qS’x¨Xˆγ*ώύσϋΧπpΌ±° „΄ζžwΣ5οjfS\ΫΊn›r>Οό\uΙ­‹AΑήδδΟkHcWqn·)ψ!½½Snψ+Ύ>»ηέrΝή­β4 “ώΥBά"Η]bnˆgCό1Δϋ{Xc©°―ξR8EθT ς2‡k±ο‘ψ:F ΑAGͺ#ŽΣTqˆq vŽ)γX2IΟ|Sγ5ξG8κ1$” =χ±6’ι— ’ ™ή$ |f:g¨ΑG6Ί8c?Ξ‘š‘ˆΖΔ3ίU=}φžΥS§L«~x§žηκžwΛ5ο§p<\ °ηΐΓF<΄X¨©βŠ…tΥKXH< γˆmŠ…άo±Pχ f'œ’[ƒΞΈb‘β!±Ο)άΓλŠ…Ο\ΈoΖΓ‰€…έΐCuΟ»εš;ϊκ%ηβ»ζ˜#°ΝχTΞ4sΫMέΦƒ7Mc¨3] ΗMΝδ¬Wqώ¨ιδΩKsΧ­K>’ŸAW}"Έη]ΰ†«Ρ=ο–kήOαμ«»„d„ ΪAΰθ „}pD"a%KŒ#I€ΛΓΊpu}©Νξ Ι2\'[%©)Xۘ]mΟΪG oΎ‡ο/‘O›šN²IχKΙ'CHh$βΧ#Ύ†s!₯=±0ΰ`μώ©½3N,μ!ξω;žΥυZsΗB_y±3{vΜ)ΞE˜λh5ŠsΦYΣMW‘bέ¦¨σ8mΞZo¦Μ—œμGšΛ•:―«ΈnjFgλίK½Ι…/₯γγx¦Ήχ2VtΟ_»#]συ]œϋ…Β '8’0DDIΖPG (E;H\#€I²ι›ˆΒ©I³q# ΔΉRGίLHS]w­ž±D™JžR골/‘Vσ<‡:Aκ!¬ Ηυ‘ˆb,„9F]yP ήΌ kž…9Θθ9{΅Δ9j-Ϋ₯zβƒ;Ηθe]izΥrΛαΉγαR‡…Y€,ŒΗ ᜳ±›ΕB8ΨΫ°PέgΕB`—β‘b!ρΠ`αx¨X¨xXΒB„b!œς„‡5,d‰Ox­†…A˜'v ឿδΕΛΊkξXΈτsveOΊ¦³KCΈ)ύβ}*ΘΩv~ΉΦf—ή7`Rζu€YIhΫu›>?χ‰²#_ηfCΕ“8Wξy―caL5Η—»k>^βάΜο\β3Γn"Η#~~­Kρ΅ώΉKπ–;;θΆααίόoΑ―Υ―΅gσΟ]ψϋiˆcά5w,τkυkunψ‚ύΡ`ΝΉα8;ηι—ρλϊΟγΧ~~­~­ώοΛ―Υ―Υ?τπί™_«_«_«‡‹sGνί©_«°RΏVΏVΏVΗBΗBΏVΏVΏVΗCηώΪ―Σ―Υ―ΥΓ}ω΅ϊ΅ϊuzψ߁_«_«_«GΏ‰σ#z©&Κ―Σ―Υ―ΥΓ}ω΅ϊ΅ϊ‡ώ;σkυkυkυθ;qξαααααααααααααααβάΓΓΓΓΓΓΓΓΓΓΓΓΓΓΕΉ‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‹sη.Ξ=<<<<<<<<<<<<<Ζ=ώ?«§BύGIENDB`‚xarray-2025.09.0/doc/_static/thumbnails/multidimensional-coords.png000066400000000000000000001204321505620616400252610ustar00rootroot00000000000000‰PNG  IHDRˆ|/9tEXtSoftwareMatplotlib version3.3.3, https://matplotlib.org/Θ—·œ pHYs  šœ ‡IDATxΪν½ Έ-WYζo!!σHΘΝ<έ„$$@ƒ Ž€‘΅il΅QT4ςG™DQ†₯ Bƒ ΒΏdTBˆ†Θ(d #™nfΒ…Δ$χ’Z½ήuΏίΊο^§φpΞΩηž}Ξ]λyή§jWΥ]{οͺο]ίό]ΧύDCCCCCCφ#444444‚hhhhhhΡΠΠΠΠΠ’‘‘‘‘‘DCCCCC#ˆ†††††F ώ‡όΔO¬Iψ~ΒΆνχhhhh±uΒu ?ΏΒω9 ίHψaΒ_χμέ„«ƒθ>•p€νΫ>αm λώ-αγ ΄ύ'|.αξ„oϋmFŸΖ6 §'ܐ°!αC »Ž8Χώ Kψv‚6άsΜΟ'œŸpW Ώ2δ\χKψpόΏΪπΣΥώ?JΈ$acΒ΅z½Ποϋ=αϊΈLΨsΔΉτΌ+~“ο$Ό ΪbΒyρYZžΨžΥF  &½ζ''όΧ„Ώͺ "ŸJψnΒ±!$uΜl‹Ύ™°oΒ οKψˆνJΒλξŸπ”„;φq-COγ!LLΨ9αΜ„χŒ8—ιΩ μ#ˆ4ΦΖw;%αΎ {%6‚ ž—πθ„[zBΏΓIqž£BΈκΏη±A4οω7"ΓηzuΒΏ$μ‘pLΔ/ΨuλZžDςάx}ΏφΌ6‚hΨ²‚VΒρ?ξ‰Ωφ‹c¦¨χc>Ÿπͺ„/Η1Ατ˜~έYG'|:fηW ›αNιϊ_ΥC‘π{}@|ŸΓβ΅γ ۟u±~dh%»Ψ~ ²g ωό‘ΗΗ ώlίO&ό aΗ1ίλΎCB‚χ• ψnͺ ’瘿LxΣΏηŸλΪlίa ?ςγ«σݜπx{ύJEΫc6Ά€‘DΓ2jCβκxθwKΈ,αΚ0uH½7αέqμNaφψνΨ§ꭚaωμ·ΖL΄- ^§σΪλΖχyRΌ~hΒ—‚8v ‘ϋ†ΨχΛ —Wη{σΑ9ςψ4ώA€kϋΧrΒ b]Σ‹C+x(SΞ€¦° œΣψ§„—Lψ=₯ύq΅_“‰“cύ%:_¬οίm_;φ©ϊN±.Να¬κ\Ί–Άη΅DΓlΔι•>Λ^RΒ…±ώί4³¬Ξφ„—mA βη‚”ŽsΘΫCKϊ΅ΨΏkΒγ;ώ8㞱ο7ΎZοΟϊό“Ύ+γ7έ-ό ΪρΘďβ:2L9" L ^f·νψ=Ο©΅¬Π~Ίη\ΖwΫΑΆ=Nί+Φ΄6O…ΆϊςφΌ6‚h˜M‚ψέaB94‰«Νύ£JΠLς―ΆAΔφ?HΈ*μυ§%ά™π6)„[ιk6SΎ¬:Χ›l¦|i|α1ŸΎΧ…~~όΆΖϋ9Χ₯ĝNΆš‘'άΎ‚‡ΏœΤρώqίσLΧ”bΫF4ˆj;ΔlΫS* β“Υ{>ή4ˆF ΛCΧN‘ ~Mώ‡y|φΫLHΦΈt‘Ρc?WdΝρϊΜMρzχψΎ{Η±?¨lνηŽρAΜηψΗ‡°ΎΟ5Ωύ_:-‚Hγwbί‘ό†CΏgψ >`ϋγƒP”Φγμυͺ|7U>ˆλ›’DΓςΔWNAμ³LΫ¦H•)_σ}#ιΥαhίΑWλΗ…]}M\ŸΫ{ί¦™έβϊώ§Μ!Υορqž_ž Šiθρ‘₯Χ²6ΘιΤ1ίm‡πεt]΄C%Π― ,Ιίιϋ 'έ!ξγc}›Ψχί#zθ˜yά'ΓΎη±°π˜Έφχ‰bϊߊ, mβθπ§ΤQLלΕΤ’aωβI%’ώE‹!ˆx-‘φ‰„ο%ά–πΩiΗ±ΛΧθxΉi…Φ𝠑mν½D`}7Ύσ^δη#²λŠ σ z™χΟ}ο?δ|υχκzόί ΌΝh„ω°>ίΑ¦9ή[imo³χž%ςœτw‰<ˆβw?ӝηAΒg ΙƒXί“ρΘΈ'r>žΥF  A444444‚hhhhhhΡΠΠΠΠΠbφΪk―ξδ“OήbΨ›ν»ο³}·fΫν»Γξ»CwψύξŸqhZ?xΫΝΠΎC‡ΔRێΨώώέ;μΈiοΥ2o ΉγNέ1»ξ·Οέρμӝπ tΗ=`ξΨ=wλΦξΉk·v]σϊqΨ³;ώϋt'Ω―;ρ ύσ±yΫ›Ά=δΰύσv-Y?αΐ}Λώ“_ӝtΜέΙΗ՝όΰ΅έI 'wτ&¬=rΣώ#Ž>,γ!‡Ο£Ού ­_ϋβίΜηΡςςg>)ί žόψ|žoόβΟw_ύωŸξΙouχ^xvχ£[ξξύΖ?uί{γ ςgκX½GΏ›^ ZχΧ‚QŸ-άτςSσRŸ§ίŽcu}?-/ϋ­_Κπߟο­kη?ιqυ6]·_ΧούΕG=ΊΓφιχ_xψ# xΆν ?ΣέρŽΣσωΓ/8&ϊ<3lθšτύτ½ύϋ ϊΎϊώ=ωτšίEΫ΅MίSϋΟyπΓΊwμqT~Fέϋ>ώιA@ίψΘΎkΛ3ρρύλή½ηΡέ›v=²;c§#ΊWοxxχŠτ ―Ίαω΅CΫ΄OΟιiχ;4CΫx―JΤ/VζHπώO4¦ρy«– ’œκΧ_%ώΠ’—>₯>νqδQ–ακˆσ~Β– έDά`Z“νΝ7˜ΠG―Ϋωˆ,ό%μt“k©›Z$ΰK‚ΦEΊΙ΅M7ύgO|x~΄Qh]Δ!θαΧC§UΒP aιδ ΑφέΧ?/ l 5νΣq"m“ΐP—ΰΥΊHAΗK(Š(ξώθλ»»?ώζξϋσΚ|ά]~mΦw}θΟΛgλ:Τ@―υ9"(Π5ιΪ$°τ=υέψMτ{θ{J°θ3E:h~Ε»žϋΑς>€ϊ-D,‚ϊL >}]γΏψ·ω{"Έ%Нt>AΧ $υύuέz― Χ|WγdI|Ž Κ(΄A€B"JAί°νώ>@‡ή£₯Ξ-bΝα9οιξ9ϋ3lθχΦwΣ{!xύv|WΘQίGίCί‡ίYK£ν\»ώ+„ΊΘ‘& έγ£BCΟ”ŽΣύ’ϋFηΡ€LΟΟͺΦEBZAΰYa@ΣΨϋ$‚xφ6M„F£ BΝWN² ί+#›υ 
«R©*“―‰υ΅,K8$αšq]ΥΆ4AΌ"4έ°Μώu£‹0$υph]@‹€ τZBS‘–z\ƒihŸ ž™š '=¬‡„œ„›ŽΣͺ₯  1@φΦ?ΞaΞ:³~ΎŽH` 0|ϊl΄ϊ iψzŸΞ§γ$”Π”œτ$°³φ’4“»ώξ5ωΪt}Ύ³T .}'ύ1]“ˆAΧ­kΦΉψ} Oϋυ‘h]δ%νIοΧΉ>-ΒΙΔΟ Q8IΈ&Α6‘&4‰>’Πwƒϋψώυ>Χ8Ψsθsυ=τ½τΫκ;£yŒΊF&ϊOυιχϊή|}gXΏ‰>SŸΟ½«%5¨‡ξ}έ‚ŽE@ώž#iCΫτL²Iθ9ε9†0΄mϋ‰ ž·νΑ‘ΔόγΜ¨)ν`#‘+L{8͎?{\ΥΝi„n$έ\ξΊ ΉΉ€‘ˆŽΥŒαΗ Œ6‘‡‚Ω“Ξ'•[Πωj‚ΠƒΓμPK|Μzυ`"œυ λ!• πΡCZϋ΅DS1 iz@€"€h.΅Dhθ}zΏΞ-rΠΜUšŠ„­Ξ…°ρλΠ’¦"θ}:Ξ5LDZ"PΞ£χθ<ΊF„³δΖχΧ΅AΊ΄H²δwρί­ƒοΙo©σC5I@ 3n7I՚€ @ˆμγ5Ηω>ήγZ†ξξ CΧ©ο«ο‡ΩkΤΠ9τ^ύϊ}ύfά#ϊ ό{ρ{£iθύhz˜…†‘ƒχ;ΒΤ@ΧΉ4ΙzλnGεηO`°MK­³’›ž¦!°χMρΒϋ2ALNG•zΎ£Ϊw{,UίώιΆύͺSίsS£έε7Φ¬Y35‚>¦έ`„n.†ΆKΨλΖ–°wSf"­£sΣκ=N*ΌΗ‰EŸλζ7#θΊ$Œτ`J˜ι֍φΐμA'θα– PΥCΟ,YΗ 0žΌ‘ŠPDΠ!ˆ97 Wš€>Gd σΙΔ!ίƏώυΜlΗtuχ™oΜΗλ³0γ0γηΪ<€–Ί>}†›Ž°—Cz―Žγ;ΉΓ&B@ƒΠRΗρΉh%ˆ–Ψεω-ϊΰϋYwS“ϋ#Xj»›€χš`Π"Έ/ 3—HB–ή7jθ<š`~Σo¦uύfϊ˜DθνJίSοAθYΰžEhh‹†LP‚ˆDΟ›–" bZ&¦}·Ή_χβ홍 &#‡£Λ“γυ0‚xKA{³„€FΌ„–PΜzh™5#μ–ŠzΠυΐ#HυΠk»€°D@>ξ eζˆMB“pτ™°z-ΝBŽξ|ξύΩ ₯ΟΠ¬“ŽΏΟu‡1Ξb-υ>KΧ+‚‘‚O…χΤα$‘Ο@kprΠυ;©:1 iΧβΏGMΐΝ‚E τΡ ϊȁΧN,ξγΠϋέL₯uΗΟ-pΝh&γ†ξ%]?Ϊ 9p>& #θΊtΟγW§A0tί Σ"=gzΖπθωΣvwfOC`+2 ψ84‚OΫ…©θΆm¦LL έHšΡθ!ΠC)ΥVjͺP„΄fύμψ"΄.ασ0“af£Φ}8²ΡFˆLΑŒ €>Dz0υPkιΡ9nZaζ,a(ΑΗ YΗi&'fŒξ ΕΔ$Hh`γG˜κs$|%Pu­˜O΄Ξgbηφ(*9ΓΪξΰΌΊ&™¦€…Θ©­χΘ±.“–ή+βΡ15Ω8BL2 |I G­›¨0“Υk‡“ΏŽο‹~ςh'''†uT¦Η>Έ) Ν‚Ο›„$tοB^N6Dg±δšρi"Γs {{œπŸ&9π\ϊD  Azާ!°χOα&θQh1šΆ‰v˜o¨ΆΏΆrRŸΡm.YμNκu[ΒI]ϋ4ϋΧC^D h»«΄Ίωτ 0γǟ€ZϋπC:ž°WΒψΠ>œτ yd vf Τz !BH΅OH΅vΖΦΪ‘sΈ ΕO '“!„ΥηaΎ‘pŏΰΡEψ%Τ2IΐKΐQΝυ8AθX…²ΚT%R@KΑA.’άτ=π/ΰ„ζσ<Τ—χΰΡΉψ}Ω ’¨αQO˜»˜qΧΞmΧ τŸΊ–ΰf"'Ÿε?G+EZrΞI΅ˆaƒϋœΟφH34^έχhŸ(=ψμ0EMk@3nš–‘ ‚XΖ‘Δh‚xt”7Vιη OŒrΟηD˜λ9UYβΣ#zIZΖ)Λ‘‘M€lέΨΜ@| EHΘ£ςγ“π0>=h ¨ήΪ¦‡ ρνΪζρ퀇›΄ΣM$nj0I±$S 3l+N[Θ‘ΕμΰPεsuNˆ€H'œΤi’Ÿ΄ΤηrMe]«-³¦.Β|Ρ>ά ]‡“ ڊˆΙML΅ίI} ξ$žW‘ί²Φ&τΏυωΠp€;A8 °½vr;ܜ%θ^ZθΠύΙ$†(<+π‹x>;₯k‚ ςoš™ θ%C$1-„ο0=C#ˆUš(‡CYΒ½«ƒD€θΖΧβ‘™$„‘δyο!‘ŒΘ&OŠr‚Y!λΨ~1/I1›Υ:³bHίφz„37»xΘ§“ΒΠcζ΅ΔAψ,Kς=(Ώ!'Ϋ% 'q₯ΧŠΣ—ΙH―œΪΜμ1£­©!Ό1oΉ ƒ;£Ρ(j­‚\ χQΤ¦&' >ΣI£ ³5*TΦCdέ€) ΰE}7;Ή9ΘΰnΒ4|Β!ΜG“ L’πI ώ7'=G>q"ΉgašƒpW‚ΠW΄ˆiledΧ zΓΠbgRcΧμ# ωDz@Δ†CΨhy°όaΣλ:k’@₯ΧkΒ]%hΘb&^œΆΉΐςΜαځ™G6η@#a< Η1˜©4[ΟΎDJΒA(γ™dΈ―;―»χζouχ~ϋΚξGλ―ν~xΗχΊήvs†²’•½-ςP€&!LP„Σj …pYw–CŒ|775A \j­Έΐ―ύ/υοκ!ΔN.ž₯νΪΐδT;₯k-³Qίq5κπZ7Q-„ ξOΏwύ΅ͺ@Ί&='“„Ή.$’Ι}D1-‚ΐ79 A4‚h±β b>ώ‰­ ”Β³·G‘Δ*―ΕΤηπ°XΚGxLIN”ΐ‹“šΌœΫuΝ·ϋbn’`ρ’˜”΄$Q ¦%œuψ'‘>ž€ŸχB8z‰’ΰ•³Y„ σ‘αΗ—~n.?·˜Dr6ηzOΧ|½ϋρuv?ϊξυέξΌ­»ηοwχά}wχƒ ·ηm:FΘ„"ηLηωθ3siτΉψLάΡμΑξgκ\Hπϋp>Ύ“ŸηaΎ\Ÿι xn’‚$€ϋ)jΑίWη©.ηQοΫ  7=iΉσu`Sϋ‰ 9”ŠιΛ/β^tέz=νH&ΜKƒLΐzF! -§!°U8ίΖ84‚ΨʊυΥ~i„·zω έψΪ†P—' Πύψ& Υξ—:FB’žΕβS@ΒΦν₯&\ˆy~Β ΫΏ;Ή ©…D4s§œ3s…ΒNUI³\πο¦Λ DȞV’\Nœ“Bλ_ύh. (­BΪ„ˆβίߐ‰βοΘλh:FΗω$ΒΡ5‘μΧGN-Υ~—>­Β£Όκί¬εΔω8Ζ“k?Pν“ Β ’ΐΡηΌφ¬mΟήξ υγλ< ΄ΆΟ‡ Έ7ρΉω䂨‚Z’ώϊΘd±A892!ͺLC`«’³;ΓG‘ΔVLD3AnBςΔ7JP˜} Ή8A8)k‘Χz¨‰Ά!ፙ?ΛKNψLαθΉڏ©b Ӂ)R`Ax^LA9CZ¦€€d3’ˆ"Νφ΅Κ°Ήκj"‚\@N9 ι=ΉJl*€uHΠΛ‰-BΔ ’ρΓοέΨέuχ=έΖ»ξΞZ†^‹(τy:^ηΣ{‰`ͺMEE=Λw‚€<Μ“”£6GΥ9ΌŸγΨF~Χ‚Ή mΒ“©+ε™Ω΅#»NΔσj³ž½=Šd 7CMβ‡ ’ŽpW‚lνΉΟ5΅τzΪΪ6*А‚Θ‹˜AN; Άr‚ΠΝ§H'΄ΚPJ3Ϊƒƒφ #ˆΑ‘νdΘΊ†ΰΉ ζF€σ’Ϊ/‚ ΖBUηΚ3υπ'H°‹ t\ΞaHΫ%¨I\ΛλIθK›YΘ€?€ΦΙ!ϊM‚ΞŸ‰dύ΅έΎ³.k Ωό”΄™ž²f!ςHΪD.GžΞ©γ©:λΉ΅™ΘαΪ~4 }'Bb)kN˜‡ΗΦ„B™Bi=΄–œ’š$ά―κΌ‰Zπ“Qξ‘΅vΫ—ΌΗϋ(έ]'κΥΩΧΓ†WŸ­λC‘΄‡I ν€ kΘΪA#±½ˆ0sΘ‚ͺΚΣΨκwρŒC#ˆ¦A”mΐΝLκŠοΑώξ€v‡ωh$ζf¦d3 #?υ8W!j1ΫΞΎH |…\>C3xΥU"£YB>΄‰¬=$!.@&§μG0WΘk"\—IŽhiα»ΠΎόžtlφ[άxΙ&φνλ7ω(IHƒψρ΅d³UF:žδ9ͺΟΊ_Α³ΓkΤ$A2~ }o2θ^° 2@Γβ; D](±₯uΣ“›› ΄Ίty‚ 9xβžη`Τύj-’.»D„Φz.yΤΣwηθ5χ²ξYόoΪλ›V ¦Ύ!Σ₯k(α}Y¦!°ΥΐΘ³΅G‘ΔVNEx3 ’ⴝb|}~‡: €Nšs‚pS“T ―F*‚Θ‘DIHQ. 
’6Ήΰx¦τ„›btNmGΰaλ'r(›…’™ζ’μ V΄Rή$Πψo·d³PvJ'βΘη€]ˆ$²ζ‰)½_Η(Ί)7+JDBηΠΉπiδζFI#q ΣΒΫMC}p_Δ‚rp’@›DX‘mθHA€‰Lο')ΠMP΅?ΓkAΥZ¦Β:ŠΚύE˜¬jb¨ Β‹bnς ٘κΜl'ϊ’DσΤgαsΣ=μNu}§₯ΘπgQD€_Πϋ‘LKƒAxs°QhΡ’„Όφe‰RoIϋj ΈyΙ£—δAθ=4Π!qY)ζ2“λ‚z˜T˜k[9ffΟ6ΣY &A‚>k !Τ5Γ—Φ ‘./Ӑ΄œΜΪ/’O’˜œ‚dτZϋ³™)!;«΄ž5i$zψ4²ŽZLΜ懙…†ωœXx??kαg[Π8²™- JS"\ΝBηφ„=HΚΙΫ5;–ͺ΅ŽΊlΘηYސDmjͺ{UΤώ7AyYΦُΖΒ½¨ο₯λΣ1$uzy‘ΥRKzyφ¦Mj!μetF‘D#ˆ™ Δ@Ν₯>‚pDAΈ™Ι5‰Ϊά€Y8Ɋƌ„ $δ1!Τ$L½φΏΧ7ͺΝ+Πό~eI'(bπΟ„ Gr"Φ³ο@¦‘ΫnΚA†B\ρKD¬ͺfήϊ, ΩBB€=Θi­‰D@zΞnš ωuωlίΝA}πο©@Š}„aψοtύ˜έr{ֈΤr\]fΣSν/©}'Γ2Ύ‡‡;Α1EΉ61ͺuͺ·KυB}^σ Ÿ΅Β0wzz}>]θγE›Υλq Œ¦A8¬©Fγilυ—χ†H£Π’DήfΡێBΤΐwr@ wrπaŽkΧ&€ŽΧΓ‰ΝU')υŒςŒ6Μ&h.€<όA;0£–@ σ’fΙΩ ‚9I o" B˜˜²f‘4€μ;rΘf&E2₯kΒΎ/›΅‘θ#!³R>O:Ώ>KBp!^-8ΗΈi¨&79jα_Bžuχ2+%β’…ͺ=_Ϊ^LO2§™_Γ«ΚΦΡOέΤ₯Ψk©ϋά―α}3ϊΜMu‰Ί”kE΄Iš5Ύτ٘° IŸ£{yKψi$δεΏ§!°KΑδoA4‚ZΛ‰›ΣΪƒ“>…:kš‘„ExΫRB"³cΟo@("ΨNž ηaœ» ‡pBg3ŠG*]ό™"πI~#K:›•”μ†yHARz―(3;ͺεΰF«PΏi™―”W‘Ξ——ι=h5„°ΝΨΖ φτi~ΎB―‡I©ψ`t½β›M`A~Oˆ/eΚ½WΈGD9y±oXB_­}τ‘…gqΧ¦'vκ# ΜPh:ΞKŽ{5WLL|6ώB©!έΗtγωΠ κˆ‹υLιu”–BŽi{ξͺ³$„ ”F³©%δ•Χ Lh%Α…Ω αŸΟ3π\sI€"sTzoΞ™ˆ,νbہ\ς*’|xΪ&ˆ$LO΅ΐ4„b2ͺ…z­11fDιr]_ιΑ―\ӈk+DaΧηf(wΊΧ-SϋΒwλ0ZoΗκω}¨}z-‘_nτ! εΚΔ„‚© §»ΞΗ€s’HΐKίλ}Ί΅OZ…‚‰’ž'—I σχ‡Ÿ†ΐ>β~χŸΣiA4‚˜Ϊ ΫΫ.Nκ:G-ƒc€ξ“IMI Ϋ”°0!AΜP!”⨍ƒ γ"$C£pΣP! DhD:QˆΥμVΎπeΰΗк΋σj°$άΉ6“gμκ9B8οΣμ=^5ΥΫ{LG…Xο\~“Jσ!Οc@ΓβZ€k:΅_£G³€$κž}ε@X‡$\‹¨ bX.…gtkbŒ:?™φdΣ•D¨«ήC@έ‡άcΪΞ„ šΚZβΧ¨θhΖ% Yδ2ΚΜDή jδ&rmrXO… ΆΏΉΦqhΡbjƒό?9΅i ?…wœΣΓμW]ΠΊΟ@½ΫšGό ²£VΪCΔlβ‘π3ΪDΡ¬‚+λ9œUZΒM—mͺ­de½sd“9―s”‘NΚZΜ:I¨κ3ςω"’©\Wιν&8³όZ(;ζψ* aψΜ ΜLŽA†ΩΑ~”bjŠcύ5Q h.CΜO^ΖΓ‰ΒΟΪ TηXx(opδ‘OξΤ–ΐΧk‚t.ΜRAθ3i1Kιy’αDLRtL]ΞCΔΐδ†Δ:‘²8Δ‡εO‘δA^’HBλ"‡i•ί>b‡:ηB#ˆΡ-Gί•πέ„KlΫίZϋΡ봌ν'άcϋήΆ΅`=ƒΪ«_:IP(M³4=΄9Ÿ rΠbΓφΟ‰`–κ€BςZ& B ?ΑfΘGΡ€4ε-AdbP^$‘ΦKTˆXo@¨G„PΦHTωU> ˆ!‘Ϊe{˜’Π \P›YΙ5£89pn΄΄₯ΠpΠ"ΚοdDQkaYπΪCcϋ"žœ(ϊJ€@΄Tug΅'Ωy/ OΜΓ©-hyRΗͺsB$ΊοΘ¦φ2βˆξ+­>[ ~αΓΣλ( Βσθs-Πžž.Σκπ&‚VΉΆF#ˆΡρΨ„“œ ͺύ―Kx©Δ%Λ©AΠjΉ ­ ―ω 3/ι‘Σ¦V³Ff—zφ‰`ΑŒδΩΒ%Μ3|Y† €PT„]ΩΥ[1‘δ°Wi‘τFeΦrLψH„+ζΠYȚ9ΗMY…μ˜μHΗόT“H₯m 8œkbps’FœίD13Ε6HbΞυΤΧα„αšΝ’p‘“K΄œΥN}₯Π½Ω’›£2AΔηΣoΑ/² Χ‘Ξΐ–φ‘s2­χrœΊ «EKξδyXlR9 eΠ^ΪνF§Υ#ϊΘϋο8°:  Ζ“D―ΰOc›„ŽhΡ’ΔΦA”Δ_,Aΰ€–ƒ ϊͺ-χ‘ΔΒ β±ώγΕqw%\π…„Ηli‚ Mΰr‘MUώωΑ ΣZϋυΐΙώKΩn*°ΦY»^―Η4UG₯ i›σθœ,όΒٜΊΚw[$Qi)€@ώCφ7¨΅θm7gbπ&@ΉτΉ‘7QœΦαΛ(‡ θ5$QŽsG9ϋj²¨ΜR½λζg)&₯κs0/ab*~Y˜σΪCaݟ3@ζŸp²χΠΧ΅™ΙKχΥvrb¨.yu`L”:yτπςᐃ ―sPΆEοΥqάϐƒξc²ΆηΫ@Θ Βs(<Uf,ω ‡—νpΨTφQ;ξ4'²pA,œ ώ*α…φzϋ„½bύδΠ.vrΞSυΓ kΦ¬™ͺ^.-‚Rα„ž>‚ΠƒEΩ²WυpkΆ–5ϋ?„'cQšΊDΝΔ1sΘ!ό δ3 μs’[”ΚΰΈ‚"Ι™Τ‰€-”:M*ί­ ­Φ(kτ€Mm’D-δMX»½Ώ·-ΪΖ0Β Γα3ύ!ŸηŸAΔVώ "* ’πυB}„Uϋ',gΒΓa=¨ΐΛ‰x)œΨuΈkMt ¬Χ Υk"˜tŸyΦυ°v§΄9•oG΅E„βZ‚ΉξqGχ2Nέ…$‘RόΪdδTh›4 =Ο―Hδ05‚Ψi§PσQχyi˜πΉ„Λ.MψΓΨΎgΒ§Šε[ A€qί„υ ρΎΟ'ς !dƒYΙ›Υu›ΌO6U\©,¬χ–Νύ.mCD€}hΛσ! 
iτ»ΦσΩ•Ι“Kϋ”4'‚xρv‡L‡ vήΉ|—q˜οη₯qfΒγy‰\±£˜>˜pKΒ½ 7%<3ΆuΒ³ͺcŸ*Φ7ΞOψ₯ε sΕ±‚ /€ΰυ˜F9©±ŸΊI‰M΄|ΤL*™z5Γσ¬ιb’ sΧ΄ˆ\OYΐΡΩm .?€β|MΌ˜˜δ/ˆNq™$BxBeF/‘ΚΉΘ•Ph¬Δ’ε\ˆΧYΩ½Ιx>‹ο{]Α…πzΆh+vά€c - ŒψŽ$αf('³Z³0ΔQ M Iͺσ>΅‘&‹>ψ1„G{k[/τη=΄=i޲puz/Ύ5šeιύΊ6§{{!Z©( mDΔ@βžHG!να…χA½ΛΞ%Δ|"”†SΗL¦oi=αŽjίν-QnFεD hΣ ˆ:ύ~>RΰαΐΖ+ ‚€%ο‡9‰μ_LIψˆΨ)y$‰QRƒϊBuωdbR&4…ψ4σ.q₯~’δjgν(³ Ν†Μ1] Žˆͺ‰ Οά£=,5y9’eηŽw‘A… ,G€˜œ€iNuΜ‰=ΗτdžTW·:υB~}QL}f&/O^…ΆKθ;Q YP¦Γk=]§σβ CZ~JvŒΚƒp-»ΟO‘gLš„΄| •ΜNΚ…Aˆcvέ₯ŠŒΓ$Ÿ—Ζv g'ΌΐΆ­~ΣJ&ˆiψˆί¦Ρ7x_8Ÿ—πv‚Π O•L:~‘Ξ£Βλ‘§œ―3A˜3Ί·§Υ ͺ³Kφ²„Ÿ|r*$’3Ρ;Eˆ κ’!$2G`"«¨ŸΉ#΄‘“O5―£™&Υ,ϊ>―ƒA)0ΩΰsΜKž1ξEζ€πOτΔ@΄“E]ΰ―όκ₯Β½ ¬“…χ)§Υ¬HΗz)qiYΦvTˆ₯b+³|Mz€UτεAθ9a}²υ| #m£6ητΙΜ4­LκcvΫe γή(Lΰ€V>Ψ{ήPmmε€>£Δ ΕD ˜š €5x}₯ΊY ƒϊB6Lr3Δθ­)Ÿ ‚Πl³‘;£™u–Κ¦U‘:’ΖπAdη³ς”π‘«Ε!-‚{ZA”~ΊΚρE8V‘‘u7ίxDΏΦNκγz §ΚE˜cNκs€γ©5ŸΚΗP4*όΓHa*ΒπΌ^³5ž*Υ{=‘Ξk5Qά―ξ+ΡΧ­SS £Ž²ρš”@u@ͺΉςYu•˜ε“lΧgb‚ «Υσ fͺΥσ¦η‡(?r5t^ φM‹ Όχ(L@NΠΚEVfθ‰ ωO8'Β\Ο™$`§Δ2„f ‚G:)|Np‚PbŽΟjd₯©7xAΈcΛλ,‘MΧmΎzν5vtŒR=ΨD4A ^vzNw΄ͺX’˜Oζ*rˆ†?ΕΜ‚‘Θ&J{³Οw²(QNΐ49Ϊƈ( :ί 5A u\χψjΣUο΅ιΊ!ΛZΨϋwt­ͺ}Ηyξ*3{NR_8²=ΙklΥωžPWχΗeΰ‹ Bλ˜5uοi‰†B&u‘τ~'έΧψDθAPΕXΟ…„¬„<Ύ‹q~<=o>SηΧ3†yw*±ϋsr?†‘%Κm!2€ „ΆΡβРgΜ(άαΜ¬‰ŒRέΤήλWΗ{σ7uA΅ΧΓ‰έΗ΅ΜΡT9£JMΧf&ΛΖαJɌ,π]ƒˆ– 2?!Μς~GdJ³ZDhZE5ϋ.dbλ΅S·O{˜£AΤξΞμ‘δΪΓSYψόZ‡’Γ0Βΰ· I°"ΞβΣ©|*žΣQ4E’Φ¬ρP_λΥ:o’ξX‡ίΑ{eΣqHπ“€'²ΘΞ‚IθΎεώ'T›€; Q'΄ Β½΅Ν WNZ‘€–„Ίκ9Œ¨’E μ΅{μ:'cAl%αd ‚Π’Χ‚TX‚Ί0„np=ψΌω;Ω€ž€€}Zι‹hΪ1θ-]Ν§f‰XςT ­{-KΫ́(¦sZD€&‚ŸmΉΟƒͺ³J°ΙE‰ :ΘYVυ€&A‰οa3oœsΘmcˆ12ΒiLΤΣ(bθ%iMϊ=\°›ΐ―‰ †ƒ&ουολ€f$QΔ@Ωpο¦gωuC’ΪδδνMέ4εΞjΜQlΟU€!­t=ΊݏdL39b‚£η‚Iƒφi‰9Άξ1ΙΑ΄§uΜUS!ˆ=wΘ…ε$ˆΘΖ‡έAL‰$T6X °9υη!m§τ0!΅–ž½h ¨Υ8²P‡)L¦#BX)³μ^hφβ Pe»ψ₯―3ZYΧ‘EΤΝq(WΜIU E6εΜθD˜ΨAΠInΐD₯Υ" ’gF]„'>Œψό’X቉‘O+¨MPN6ψAœjΗ³ώτiΜ!Ž­Σ DŸγݚyψλ@„SEή£».ˆ3Ϋ Β{`s_Υοe=ίsι³IvΤ„CΗκ}šμθ~Χ€HχΈ&<pša1‘ΒΔΊΠ!νCΟ šϊ4φ±{ξ61 ΛL?HX—pνάΠbŠƒF$R[5+QΡ0AU%! ‰Q_Ι šif xW/ΜJd»Šhƒ&A…N' ½ΗνΑD9MgOΌu₯B˜št|‰ αεMΉtΖ†Ϋ7•PlώB !²˜œ’ν¨ ύ9&+όηKΜMΕύz’–ϊ’‘z5ŽΪdeŽθH%ˆ’rtϊnΑοeΡKώΪ³ΛϋΒΦή›#"›z;ΦU%:\°Σϋ£N°Ip―ΪμT“ŒχΣΦϋ΅-“}2‘Χ:Ώξq"σ4‘AzHλ°ͺσ˜vEzΞτμ-$³Ή— φΪ­˜ΘΖa™ β‚©Σbς!³’ˆΑOλD9Αh’^ί¨#‰ΗΞdC“π °ή7ƒγόΪ6a2«Qϋ,%K“Dt5+š5–¨Rj‚,ηDΘtu”TU$ΑΆΌφ’αŸ`ί€™)|>Ϋ€i-˜₯JωΠ=Ή“D―s;’ύϊ2‘KΙsόβ(SΡΐ6ΎW­5τΓ3•ϋ#"©°ό—u"|N‘)φlM’]Ih) ƒ„:ΜI#(YΏΡΟ‘nƒι=¦˜Μš³φ`ΕψθwΟέwo*αU[! 
[binary PNG image data omitted]
xarray-2025.09.0/doc/_static/thumbnails/visualization_gallery.png (binary PNG thumbnail, Matplotlib version 3.3.3, https://matplotlib.org/; image data omitted)
xarray-2025.09.0/doc/_static/view-docs.png (binary PNG screenshot, gnome-screenshot, creation time Thu 09 Mar 2023 11:58:50 AM EST; image data omitted)
tjΆ”Α}Ϝn!Aξυ6©Žέjz/¬ιξ$&RSδ8­gooΥwΫχ°φ{7k+ΎύΡbιWIζnΨκσξκο-ή\ωΟ+ΛΏ‰ΜΜe_ϋ<±p“Οέ―³ύΚη>Ί±—2!%CKΣγίςΡ$N-5™ͺ·!_K¦fdz·άχk_Xjqλ[_ψLyw•Ε˜Χ–ϋθtlωήt1P‰qo¬°˜΅κ;ŸΕ_m³ŸΩˆgϋθγ#SΊ525[~Nϊσ”Ηΰ‘Ή,RύξgW«ι½ΪΎW ¨ύ-ΥβŸϊbYΚƒŠ 5š8KM’š­ξε€’KMqΕH €&φRS£–χFη>ΔJjθvqqf‚tξ(€Rg©iί­–χδͺΎN0ω^ΔrHMΜ₯¦SΝZή¬ξϋ©‘ΫγΔGf&Aβ毐H €&=R“ER3cΥ~N 5H €&ͺΤΌέkΏ@β$5Λ‰J₯Ή£HMœ₯¦]·Ϊή#+t©Τ@j 5‘ζΌͺUΫ{oΏ‰“Τό3‘e©Τ@j/5m»ΦρH €R©‰$5΅λxά/8d?½HΌ@Όcζ€ ^ηΏχΞ­Ρ!ΓλόμΨ|δόkœ“N‚ŽOίb‘υρ>m›hσθ€Τyh²Ν}S|Ϊύuš…¨.£’τΈΜF6Έϋ ³‘E%΅ ΙB•RNtJ±3υ›iμZjφ?cš•}^:έ[[Άο²ψξΗ$ίnΫiφΨΧ‚υ[vψ,ϋϊG‹ΩkΆψ̘ΏΡB6ό²ΑΥ ΏL]—"d…*΅ Iα Kύ–b‘₯ζτG?υ‘)έW>ΏΔ‰|/ω<Ή=ƒ|-ΉΊp§<R" 6lϊχN‹Χΰ#χQ/•β’§r"_kΫΞέ;v%ΩHYjκΤρ><τ@β 5‡…©Τ@j#5­»ΦυξX~ΈH €R©‰TGnο“£ϊ§ΛO“£,ƒΤ@j 5%#5™$5yŸp©Τ@j 5QθR―χِ#‰“ΤΜXΆR©Τ€KjκyS?θR©Τ@j"IMύΊήάcŽ $—ŸώD,!v‰¬‹2“0€R©)©iER3aΩ'H €RIjΤσœ408HM}’ρΡVφ²βHMœ₯¦en}oά’c@j 5HMrΦσ~H ±ΈόD·½‰₯₯½£Υ²ZϊA»Η'ψδ“‹‡R2ώ6ΕF4DZ:²nO’“Οk{o’6LΆhϋΘ$ΉΏ{φY”lpυkeί’η#‹2dJ΄.ζ(‹zφ;aͺ.ΨθLWΕ.:ΝBͺ”)Λ=/΅ι}aωƒά_+ \mC¦Βk™Θ›D§μΛsBŠD›ϋ§XΘc¬Sο»]•€γΔ$mhΡγΕ}ϊΌrƒE·™c|΄t§@2HjnZrœHM4zχξν7?x$’abρΥV›/Ψα#"-’είΨΘηIq1lό!‰”ΉΏ½ς΅djΉ.Κ(S’u1G)2]ί Ο -ςs’©ίς΅ χ~²ήηΓu[,€gλ·IΎίh³ν{-έ)KM£zή’3Ž$Ncjfm 5HMιHM‹άή ‹Nt©ΤT©‘B©)~©ιΪΈΎχωΩΗ'©y›ΨΑEγJežH €&ΞRΣ<·‘wνΒ‘N 5H €&’Τ4iΰ­ΈΰΔ@β$5₯>O €Rw©Ήzα©N 5H €&¬ΪMx+/:9ΈUιnNΓ4Γε'H €&}RΣ¬KCoΔόί;Τ@j 5šHRΣ¬‘·zΔiΔ©§ζTβKβQβ1Nι ©Τ@j#5M»4ςώ4οN 5H €&’Τ4oδ­½zX q’šE²w†nMΝ²tξhυ-ύτμμgnφι€R·»·ΟωελΥωa³n e#(Xv.Ÿ£Aω^΄°IΙ‘E1οώx…L₯ΦΒ#ω2½ž”2.%IΪt₯fλto)8Ί~”L₯~eω7>‹6o³R£Sγ₯TJ5μΪύ“O˜t„Ι§<δ9χΛw_ZδKρ€,5­›{[ξΈ&T·E·™Δ€Β|K[j>,B@zŸΘαϋcyΠρTeuSJZj\BS–€FΏHMΩ”y€Kjκ“Τχώ%NŠΠml.ν|B,δK4ϋ‰ηŒβ4&` *e±)–8w©ΡvY”š°ωf 5E—š^mZxΎϋϊ@̏‡Δ₯ZfxΘw²±ΗΌ₯άkZΪRcΊ{Ÿ"NηKQ{HαzΈ98‹‰ηΝ΅6’1Χ‘ZΕ7‚Τ@j 5nκε4σŽ~ο2'Eθ6~ƒΒˏ&ήεϋΉZN{λj-5RxΏ7ΒB>Άο«£|τ3Y3ΖϋΘFΥ ηF‘‚£η8ιω>ωζ°ΉgͺOΘ<‹#’„UΡΆζ‘ω“”-+ϋž)²£₯IΞ?£_OΞ«#ηΊΡλu•${ΌU•ϋΎ)Zr|ΩQσ ΙΟIΟ?#η’2’Ο%yNθΌςά9βν+-R ΅³›η{]IΊM6TΎ€ιΕαϋ]Υ@α΅₯9PΈΨ€¦WOw%dΏ|»ΞΒœΝ+’h©‘ς³~‘…|L7`²”σŸθFU>¦ΕΘœν£+<―ύ>Ι³KΎ²˜ςξ*- R,δr=Œ”-+η>1ίGʎ|ŽAΞ=£₯FnOΕ”ϋ5cώFŸwWo!«rλŠιzή‰œOH~ϊ‡Uί–’+ω·u lηŽ!e©ιιν~./HM{j 5šxKM ο€Χ―wR„nγ~|9ΚΜbρœΡœυτE"C R©Τ”s©ιΨΪΫύ❁@j’ΑοH €RSrRS‹€FžošŠ^ΠR©ΤΤτΙnγύτκ}Δ©JχΞ”¨Κƒ€ΆΘA„ιR©‰΅ΤtlQlγs 5HMœ₯¦­χΣλ'©YȟH<ΚΕ«Aj 5šτHMM’=@Y©Τ@j 5‘ΎΪy?Οz48IΝ2ώ~b0ί‡Τ@j 5i“š k4H €Rι;“εύgφΔIj&+Μ€`ΎΥΤ *L玚ξχ„¬H™βb8θλ|dΚk˜ΈhδL­Ί‘’υvδrέΠΙΤ`Ωp¬Fφή©>Ί΄|Žτ-f]Α;χϊ$ab!€ο9Σ-¬4ξ3mφϋc’Ύg'ΡΫΟ½.IΧkm€lIΑΡr%ŸΣιf›φΣ¦ϋ΄½sš+½ήΎ-Σ»e ·AŠŒ<―΄œΘΒ’g|rΎΕٟžνsζœs-R fJ½HMΔ€ή³‡ŸBkΙ„—_ΎYλ“/₯Ϋ%.²QθFjνΌ$κ1ΩΠΙΤ`Ωpdևɏl˜uοGζnπΡ°₯0HΉδ™E:["₯ζ’.τΡβ"χγΑΟΎ΄ϋ‘₯IΚ•|Ξk+Ύ΅X°i«ΟΚo·[„ ŽL―·»uJ·Cb ςόωοšΟ|~[φŽΕ― _χωmΙ[)KMηφή>z&X ζZ+•ω~mS9R©Τ€Ojτ<:H €R©‰&5Ό_>y.8υΤΤ"n$ώΖgΗ@j 5šτIMηgΗ:Τ@j 5šH߁.½_ηΎHœ€ΖT辎XΚΧL †Τ@j 5%/5Υ;΄Μ7J©Τ@j 5‘ΎΉΩΦφ$q’šΉz’½t†Τ@jb-5ν[zΩΟάμR©Τ@j’°On§|Ϋ(μΆΚ³Τ|Δ½3σωοΔ§H €&}R£Ο €R©ΤD’š9ήoΛg'©@ΌG|OΜ Φ'κΕDxYw O˞θρ1σάΌΙ=Ν ;£ͺ–)a#Sm΅ΈΈRrΓΦ•"dϋ€UΛΒ”νώ:Ν'+oΊ…|¬ΝύS,¬š›bΡaΚtŸ.£σ|Ί_a# Zκ’•RVz_”g!SΖ₯4…£”ϋd©ΩΉ#“t»ΚFξΏ>>–>6Ρ‰L§Χ2 Sθυά/Rdδ9πϋ/°Έΰ³3}XpšΕ΅ ‡ϊ\½πT‹TH5’šφOήκ€’KMqΕ‘>=»ηK‘’§Δ–¬ΘTm-.Ž”άΧΫ—ϋτΣξέ²πβςo~΄—}ύ£Ο—?찐Ε3υcRpžZ΄ΩβΆΦψΘ‚–Z\€¬ŒymΉ…L—Βtœυ²₯/™š-ΣΆ ² ₯άw]ΠRŠŠ,`©Ρ)υΞzY€Θ³LΫΦη 9ΆEg%ύόΞί}Šš†½O·ΞήW}Hά²ŸLiρί™ΒD“ƒQ“€ŠGςύ‘ΔdH €R"5Y$5OŒw©)rΤ@j 5$5«η§žSαw1†nCμW„`d δeπύ σ7€R© —}SS©I9Ž@j 5š.φεOAœ€ζβ.bΉ˜³ζ³ˆΟ]gΖβp%ΰαΌl›Zg«γΉΓΝA6TkVR©‰±Τ΄ςΪ=>ΑI €¦XβH›ΜVHMΌ₯¦{nώύ`β$5σ ›ύD·–ό3σβΠ¨Α=5H KM»V‘ο1RS,q=5HMs@{œ€fŽ™MXΘMS)8)¦±Δ5Έό©ΤBjTζ–$NΩOE‰#HMά₯¦Oωλ”1q’š?/›ˆρLN‰πΝΒJΥVRΠξŽi>ZjdjsΗ y>o΄ιzMKT —%‘E% r›:\bI™.,)%L.ν…Τ@jβ.5zrF &ίK= Cj 5±”s VO6ΙΔIjΖ'›ΤnH €RSJ=5ϊs@j 5HMT©‘³Kβ$5;ˆ#~!ΆσίΫ!5HMš€¦M+«τƒR©Τ@jR-’‰ΥŒΒΈό©Τ”¦Τdzmο™κR©Τ@j"}zυΜ· βΤSΣ'SΤ²JΊv΄n§fιΨZVd‘AΉό€d!‹ϋδΌΘHΑ‘β’·/ΧΣΕ4eγ+mK 0t˜”η“s“,옯±”Y}kžO—ld!ɜ±6RΌ¬4s•‘#S©³ώ1ήΒJIϋ«ΕNΎ/ω^φȜx-] ©σ³c}€¨τ|ΙF¦pK‰1ΘΟσΒΉΓ|_t²ΕΈ%ΗϊάΊτwς±Ÿ`‘²Τά5Υ €&b@οž›,@)SkΥ|N‰!δ΄ςΊπ …£²Α%8{ 
ŸXOΛ•l|uAK)Ϋβ3sΩΧ2]zρWΫ,Φ|ΏέGʏaξ†­>³V}ησΔΒM²δ‹Ÿc!Εk#IIYdΣ Σ¨·οΪm!SεώδφεϋοEΎΦΪ¦δηΫ|ςIΐΏΏN"z;΄|ΚΟS¦mdͺΆ%/―ήg#₯ζΝ‡, #5Ίθf‚(ΫβΜC“½:‘}XŒIϋC–GμSœRσ _zšΗ˜ϋŸk‰H €¦„₯¦ufΎχ/Τ@j 5šH߁޽¬χ%)h[<_έΞH¬Ζ‰ΉΕ(5ον–w$ή.N©y’θ*ώΞ%ζ7ΆR©Τ€AjΤω!)-©‘[fΘcΗBj 5š²&5½νKg‚Rs ρΊψ{”‘γΙ’Η§Τ,t-ƒΤ@j 5ι‘š|ΗFPŠRσ…γ—ΥΉζ€R©){R£Η%ΰΒ±sΓΥχz(ρ€ψϋΔΕOVζ±ΒHΝS\Τς0ζnβi’zΤΒ–H €¦R“™™οs–”’ΤΝ“ίe«_oKΒzq 5HMιHMοή}ς ˆN‘§ζ”©Ή£γΙ½\΅ Rΐ΄2+N©©I\M)εdς²Α·-?'οσr½ Λ†X€*5ZJ%₯=P˜nύˆ-\#FY(ΌOΧ_BS…ΈδKΥr"SruBωΨ/ŸΞ΄ [˜όόΊπυ$,c Άνάν#Σ― ²A—’₯F¦=Kω1¬ϊn»Nχ–Ιτh2.S¬ε>djΆ|/2zO*υ>ϊ±°ηΙ4kWΆΑ’5³Θ©AL`‰―8― ²ψ€NΟήύβIžΛK2σv±ήO/ίm‘κwΏI2%Z§ιJ±»τ εgΣ“X)ΡJN¬’ˆMΆΕ"u:Ά,)ΕE# Gj!‘iΦR:dΊΎ!¬π¨”H}Œ]ιυ:EΌΟΞςΉvαP ) R\ξ]q¨Εƒ_죓ϛϊω@‹T₯F¦ρkR Ft›I ΰω¦Ž xάJΥ4iœ&³”Ε9Žτι’νύ:Υ!Σk₯<N± ’!΄RoUB™ΚϋŸžŠDΎΖrΫχ>Z:mήζ#ΕB¦”#εΗ ·‘Σ½₯œΘ4jύΛ_ŽΧΠιΨβRb Β‘Ίxd’Pi+e?¬π¨IuŒ-ΉUβ+ΣχύsА³Gd„€XβBμzz²ΟΞάβ³λΙ 6ΟLυΡΒ“κwΏ{ΟήωΞ—^jθφ"§h’B0ΊŠώ$‚Ρ”D!,σ?1R©Τ 5γςœ4¨ϊ>Ά#6υ̌ΰ<±Υβ=SPŽΧΉ“¦.A-e©)rΤ@jβ.5έHjΦgD€&1ƒπν<«p"oά– QkΓ³ˆ#D02Χθ3D·ψH €&Dj22σM‚(‰Ί-3Ζ;‰^Jό•ηνg¦mΰϋwHΝΙ₯(4ΕG 5HMο|&ˆΝε'ΊΝ޲ΜράgLΙp’ΏFΫΤ:[ΟžψεY§EmH €&ήR3&ΟIΔω%ͺςe€«Δ²ΧΜwSόmΖΞ4-k—ŸŠ+Ž΄iΡ R©‰½Τθ1X β$5Λε¬~—$‚Τ˜2OΛ°9š§:;_†šo.οˆηŒζžσ}Rн4ΕG 5šΈKM.IΜX“Δ*ϋ‰±υdͺ"0Ι_XSΥΏ)=ΏynCK"hY‘2!εD§ΣΚ΄Ϋ»Vτ·kˆ$r{ς΅΄ ι}– ³%8"UΪ`oΌS!…Dsw0R  νŸΰ#‹l€Έδ(A‘ϋ,ε€ΫΜ1r=™bm²"ΕB ‘” -˜–/ΣπΓχ₯€κTm)'χΡΟη‘•FF >ΟR–šQyNβ2ω^QγHŸœ¬|ŽΑJΏ6ιΩB&tAW#%(ChŠ@§θZb$DHο³l˜₯ΰ€XΘβϊrƒkl…A¦mλΗdJ9ΝΎϊ샡ϲP€z/–Έˆkƒ”){δB~†B"σ ¦<Ζ:-_¦βλsD¬'·o₯i.q1μψϋ8Ÿνήδ#—,αQηYa€FKj‚8 ξ!Θτ)D0jΜƒώVρ 5HMΈΤδ^Ÿη$¦R“rΤ@jβ.5]HjlΪH€Ζ\SoΘσAΈXŽ…Τ@jb-5Ν3½Χζ9A™„h@j 5q—šΞέ{ε›$1A€f=\Β§H €& RsMžH €R©‰&5=­qVΜ(œF 5šΈKM·«ςœ@j 5HMrΊυΜ7`<€R©Τ€Ejj6ΛτΊ_™ηR©Τ@j’Π‰€fΦͺοΤ€‘ŒάϊωΔΑ Σ±₯tΘFI763VνησδͺΎ­άίG6JΩΈέ±όp-MaR#m™–,‹+dF-2εΊν#“,dHYτQ¦Nd‘ΝCήΊΖΒU ΧΣ"%ί½άG•!U˜|jΒ€ςΖΕ'ψΈRς ςάΡα:_ž^έΗBž;r=>S–š+ςœ@j"JMvΫ€œ¨l«Π ݈ΘΖζΗ‡ώμD6RΊ1s₯λ€ΰ„I•ξ­ aŠΒ—²@£ ™rύνΆ²8€Uτ‘©ΣV‘MUτΡU2_HQR#‹Eκ΄j)ZPΒR©%.‘L%-§WοσIε|Ωzί(ŸmŒφ ;―΄π€ϊέΟ&©ysεw@j 5HMΪ€¦Ηˆ<'H €R…Ž]{x―,&8MΎgfFŒαΏΫ˜:1H €&MRΣ4ΣλyižH €R©‰B’š™ΛΎ$NRsΈ[Ξ›4οΟ 5HM₯ζ’<'H €RIjr{xΟ.ω*8IΝ|ώœΓR©Τ€Ojzύ)Ο €R©ΤD‘=IΝS‹6'©™cŠΩ ΉišIχ 5H IM“L―χ…yN 5H €& Y]Ί{3ζo $NRσβ.*7ž ɝ’Ξmέ΅/b‚)$²αyaMw‹ηΧττ “έI©‘©ίaR£Σ”ƒRΣ ²‘7Θ‚œGΏw™…L–ιΧ†^ΏήGvLM Σ₯uͺ³.)‘!εr)hωή΄|κγεBJͺ<ή)•)'aλΙsGŸςό‘ηKΨΉ€…'Œ”₯fxžHM4z·ΟL DHͺ­ΩπΆάqΟΏοΎήG6JΉ |­°FΦ’š°‚—:E]4φV‘Ξu ,~ώρ]Hς—oΦϊX… gG•­ DZΈ I*A³ŠC yΨƒN‡w`₯Ρλ’’J*-€ „¬'%CŸςά‘η‹Αu.ιsΞ΅=Cͺίύv$5ΝΫHά Zv&.!.%Ί€{G!5š8KM-’š>ηη9Τ@j 5š(΄%©yπ³/‰C™„°šO 5HMš€¦q¦·ΟΉΣ@j 5HMΪtξξέϋΙϊ@β 5λDν§[ˆψώ:H €R“>©ι{Ξt'H €RUjξώx] E‰#t›J¬ Ο Δc£ˆΥo¬νl!Χ +B¨‘BεJ/6Θ<¬£L=ΦιΖ-FΗ½‰NΥ–3₯Έθ΄g—<n]ϊ»HΘύΧβ"ΕBΚ N‡—Η1LNδ±ΧE&υη$?O—θjΩ•η‹>gf­λδ£Χs NA€@j“Τμ‡ιN 5ΡθΥ¦EΎF!Ωΐ|—w…Ε·SGψΘυΒΔ%,₯;΄ΐ₯hΐΓ 1κ’›VG–‘ωΜG§c[3•4Y©Ο.yy/ZHς Ϋ·ΔB¦Β«tψPY ΩFΨηδ*8©eWΚIΨω’‘λΉ§ Rύξ·μΤΥ»υ­/‘m­7Ϋ /€ΤΌhͺπύ;χω₯-5f`πνfnζφ(…ιVƒψ”»ž–γΔφή$Vρ !5HMRsΖ4'YjŠ3Ž@jΚΎΤθΉb 5Ε,5Ω]½qo¬€ mΡν-biΗ‹uFσ˜šJόχ]RsrΉ,hΙ5£κπύͺ<‰ίΔb$/™Έφ©Τ@jRΣ0Σ;ΰχӜTp©)Ά8©ΤΔ]jZtμκ~εσ@ŠGθvρ1QK .s—Ÿή!ήΦ€ψfk™‰‰ύyt/Ο0Cj 5šp©9π”iNβrω©¨qR©‰»Τ4ο˜λ]ϋΒ@Š8Px0ρΉ©8 –wU…Χ–…Βϋ&ςΜ―€ˆΟ5ε;Εhθmj­ŽηO\Ϋk²€Rk©9hθ4']jŠ+Žd6ͺ©ΤΔZjšuΘυ|~I E”“²½‘Ώ§†{Υ%©5όCdH™˜Q8ΰ Ό—βϊ ΈΗ§[Τ`„žH €†₯¦A¦wπISΔ¨§¦Hq=5šΈKMΣφΉή%Ο, $N΅ŸδLΒMΜδ9QΊzΆsqMaΊ;v«ι7/―νκ£ΩψΌ».ΫGK!]xP6‚Ί1–ͺ«a6HAžΙΛ; kμ₯¬œπΑΕ2εZ>G7ͺaιΖa ΅\Ο•ξn+VθΡUHR~.Žν’ΫΩλ:X|ϊe[ŸOΦ·³ψp}V ςΌη–<7 aΗ1•R‡€¦ί SΔ)ϋ©(q€gλζ~ΓρύνWω„5DίLΎΤB'Ά” Z†€ [³K”$μ~ρN7!ιΛRT€ΰ~[φŽ~ž+•=jϊ»N[ΦEε±’ΗT§K‡z”ΗίυΉhτφΓδφ« ϋl;άΙζ[.r"Ο+yΞΙsΣvSύξ7iίŻੁΔIjδΜΒ&Σΰ ’_„η5MΜ*H·šΔϋΔ1<σ ΰ7R©Τ„HMύV^Ώγ§8©ΰ…‹-Ž@jΚΎΤθc©)^©iœΥΕ;χ‰ωΔIjj,«αy=8|1§}α卉Y,H³’€‡Cj 5q—šCŽβ€‚KM±ΕH €&ξRӈ€fΨγŸ'©™eYI©ΤΔZjκ΅ς=z²LΎ H €&ξRΣ°]gοԇ燂–-8γi9Ρ›θΓτ7…« 5HM₯fΘd'H €R…m;{'=ψI qš³8Σ`Ÿΰβ$H €R“>©9lΠ$'H €R…ϊm:{ΗάχQ qΊόtriοh§ξΥύFD7LW㣋ΊXέx†₯t»ΗVxQ>¦eΘ%Fzς΅eZ΅αΌΟΞς‘ιΧRψ ²AΧ uaΠβ¨…Σ•.ν’MΤ”kƒ<'€¨,ό2ΣbΩ†–>ϊ1y|΄ΨΘ퇃0αIIjκΆςϊ˜δR­šϊˆl”4†Η W[ 
ΤθΤdGšr:…Ω%Ck;βuuZΈ,Z©Σ±e,p}¬ ‹+ν9Œ0θυΒ―ΓΔeΓθs}Φ]w¦\nΉ}—8τ>SύξΧk“γ Όλƒ@βΠS“(Hu5q•R©‰£Τθs.RS—€ζπ£&:Τ@j’HMΎν”A© ›ΛRSt©©Ϋ:Η;ςŽχ‰ƒΤ\(ζ…ΠŒΤ@j 5ι“š#Ÿΰ$B!ΊΦ|ιx9…Ό\=nζ}1wš¨š-«y>˜AH €¦όKMΜ―_ή $N—Ÿ޲ R©Τ”ΤΤiιqΨx'€ΖLNΧ‡οΧ%VΉBxL‘Ή/RcSυZΦ”d½H €R“.©ιδ:ν@ ©Τ@j&5Gr«“Tƒέfψώ3DOb½š΄VΦ…Τ@j 5ι‘šΪ­:yOz;8\~:ΗΣlTγiƚ_qH €&MRS»₯wΤΑ·:a!™+ς½nGl κΗ·σr)5w&ΖΤρίC!5HMω—šƒ&Ξ $RsŸωZ§1b“ΞΝν^Υolζ}ΩΪΒ•zkp₯qλ਩ΓRpΒ½e!Ζ bŒu]uΡFέΰΛ†UJ“. )ί‹nœυq•H1½^Ο%ZœΒdT’·φΪύXΌ‘•…™2,δzZr\ΗCΏ-Xy>¦,5ήβ$κΆθV‡˜g¦d jsˆϊRsW€Τœ\ή^χύΖfγ˜σ-ΒRo]E,uJsΤΤαΘ’#SΏ5aλF,Ϊ¨|Ω°jQ’4ε{ΡB¨«λ‡­η’-Na2*ΡΫ{mΧ~ΎyΆ\OoCn_ο—|/a…0%)KMΛlo[ή $N—ŸΪ–φŽΊ€¦0B£₯&¬§&ͺΤD•TΦ «Bνš0©Ρο₯0B&5a=z£MIH-5Z~’–š¨BS©°8'QΆE·ͺ|ι*ώ»;ρˌα7ξΑiQQ/?Ή€&ͺΠh©‰ΪS&5‘½6Q₯&δ±°*Τ.‘ΡR£«‚»€&ͺΠ„IMXO‡ήΗ(B£₯&ͺΠθ}‘’&5z=—Πh©)ŒΠFjjed{}ΗΎHœ€¦){…x;€R©I“ΤΤ"©Ωwœ“…+·…¬#{jΊͺΒk+Β@aH €R“νν3ζ΅@β$5¦*χyœj.I=DL†Τ@j 5ι‘šz΅2Όϋάδ$‚Ττγ”mSr!s΄KjψοќυdRΊ‡T„”nH €&ξRS³EG―Χ ―'©™Η/ΛήƒΤ@j 5i’šš$5½Ζ8Αδ{H €&ͺΤτΈώε@β$5ŸˆλκΏγβ–k 5HMϊ€fPΟ?;Τ@j 5š(ΤhήΡλvνKΔIjŽ1D7ž•ΤdO ©Τ@j%5-ΌAέG;Τ@j 5šHRΣ¬£—{ε ΔFj’sE„u§f§[#βMbί° muξ^-°šrX£6O¬–&<ΊΊ΄|,L~’"+RλΗδ>†₯p―ΫΨΒBΉ=-MRδtcΏaS Ÿ¨)Ρ+7ΪΘΗΒdBːD§\½ςΈEMοΦ嚳ΖPlRSƒ€&χ'YjŠ3ŽtΟhΨΘ†UY›§F§w»„'¬Ίt˜όD%¬B΅ά§°ξΥ#N³Η@Ώž(ω^tƒΎκ’S|τc.AX{υ0'Z δcZ†\σΘ„φښ5Wœξ#›ήfΨ~IΑqΝYSPΟTΏϋΥ›uπrF<Hά₯fC„u§f'¦#yωΘ(ƒŽ!5šΈKΝΰ.£œTp©)Ά8©ΤΔ^jšvπ:]ϊ\ q—š…xΞ©Ω9›"C¬/ 5HMˆΤT'©ιt½“8]~*JΤ@j 5ΌϊW θ©Im}95ϋ6υΨVΗs†'¦}oΡͺ2€Rc©iξ ξx­“ΈHMQγH«ϊu 5šXKM΅&ν½vό38”IΨAlΐ,-…@δOΝΞG F詁Τ@j„ΤtΈΖI€¦8βzj 5šφ^Ϋσž $Φ=5)"kjv^†ΛOHM*RS­™7Έέ•N*ΊΤWΤ@jβ.5U·σZŸυX ΕGθv Oτ)'ς4eWVσwvPΉ•ΧΤμ\rAπ›RΠΆrHjiΎRjtγͺ¬¬ΣkΓ*H‡UŠ–„Θ”ικΜQ_W>¦‹&ΚFφλM[6·ςΩL'ΠιrŸΒ„D7Ζς―!H χC²Y!“ΫΠb!₯cb]r=ύή\R£·οœ°”χ°τt}¦$5UIjZ_.Ά8£eΣΐ”ί°TΫ°οΒT‰Φl½o”…”)$zϋJΣΉ}Ή\€” μΚ‹NΆX~ήρ>RN ςxΘύ“Š,±„œθDο‡|LnΓ _[J‡^OΛ\ΨΊ.ž–ξυœ «Ÿ²Τ4jλ΅:γ@ŠG8SΡόψψR”\ΙU%WΦ”dΙ•’FS³‰YœŠiώo©Τ@j šΜN*ΈΤ[Τ@jb/5 Ϋx-_ Ε 5Ο=UΉ΄Η-7 R©‰·Τ4υ·ΌΤ &ί‹€Rw©©°΅Χb聰ŒΜ OAhŽ#n(Ž{'1L¬χ 1R©ΤΔYjͺΤ4“H €R©‰$5 2½¦'L$Baά·ˆ₯OΜ1•€ζ©9R©ΤΔZjšxƒ›\θR©Τ@j’PΉ~+―ρ1)l‘[wβ;–Γo<υB \~‚Τ@j 5ω₯¦roP㠜@j 5HM$©©Χk4δΦ@Š+ލžšj πΪr;PΈ81)έA"–μ’-S–šη9ΤD”šŒ&… £¦έ†5>ZtγΩπ»Š7κνλ4τ¨Ε"₯€hXφΗc|œ4Πbα)ƒ}€ό,9γh ΩΨ‡5Τ-Wr?΄Τθu%υ΄ΈΘυΒ {­¨"–N‚HYjκfx Œ €$€†ΝYO&₯{HΉž§R©ΤƒΤμέΨTχl'H €R…½λΆπκ>*LΎ©Τ@j&5kŸιR©Τ@j"IMζ^έΓ R©Τ@j#5•yk s©Τ@j 5‘€¦v3―ΦΑW©Τ@j 5i‘šΊ$5ͺžξR©Τ@j’P©VS―Ζ~©Τ@j 5ι‘š½zͺœζR©Τ@j’IM―zί Τ€wͺτοy†C=γaiύΐ~u_ΎOαό­€m½iAΑ~ Ž1–Ό©IPš‹ύΐ~”υύ(kϋpΎb?GbρέF0Β~`?Œ 5Ψμβ€'=φϋ`©Α~`?G 5…ώ€‡c?°e}?ΚΪΎœ―ΨΔH €RP~₯†n­‰wˆεΔ2βr^ވx“XΕ7ΟE¬ζκŸƒJx?¦+ˆΕΔsD^ގψ‰XΘά[Βϋ1–Ψ,^οθR:O‰}0s),,αγQƒψ”ΛΨ›ύWηGϋ’Φs Ž Ž”ίX‚8Rρ₯&ƒθΓχλ+‰\b 1’—$&σύ\>ͺY\ΪΌr ξΗ@’ /Ÿ,φÜhKΣx/νΛ r_Jγˆ#ˆ##– ŽTΌΒζšκcΔmjωT5xk ίοͺo­-ΖΑ[Aϋ1˜ψœhͺ–7MΌ.έΪsFA£܏ qJβΙ8☼—¦γΡTdΤ$ή'ŽIχωQΐΎ€υˆ#ˆ#ε7– ŽT|©ιGx|ΧO34έmΔ,N³›%?Dξž[Γ†=€„χc5wχYιt¦’Σρρ@―cKx?'–πςTpJΫρΰΗ!.Rλ—ΤρθΑΧ’Ν~,Yi=? 
Ψ—΄ž#qq€όΖΔLΎ©€R©€ΤR©©f―½v–πφ_αi± βωύ‰—R\GσΊŽΗ)ξϊ.eψ³5Ηβ ρχ•\‡ζNœϋ©Τώu Uu΅RσRΘγΕ"5fΊσrπΩζ«NL·³!5©‰ΤΠ­ρ Οώψœ¨jkκoL&>%V&*§r)ω§yύ§ˆ9‰:\·‰™šœψ‰gœͺεƒ«ΓžΝχΝΤΩ+ˆˆΏ&Φ£[m²ϋΟNy|ARΓS–ίΙSqΏΜ=GCω±}Έμœn_™ΧJμ zjR{©‘[}3ξBόέΑτ©9˜ο77u;ψώσΔαβ9σ‹ 5¦—hΆX~œšΉ,‰:!f|H—€ζ6β\ρχ³,5FΎΆ‹m™Ϊ0o°œ|)Φο‘€ζaρXΰώΟ°ΰ$–―γž—CΉΦ‰‘^!Ÿ‰)τΆE<ίτΐ<ȏΜ=aKΈ&Q”ξ5~έa’WRHMˆΤτ½ λωώΜBHM?9˜—n©yΟ!5ζ2QN*cjXjΞ šξζςNΐσ 5wŠΗχ‡n"9φΟτ]ΐRr¦cc‰'–›^¬o‰ΦBZƊžŸΓ‰Ώp!Ί*€ΤΰςΣ*£"ΞΏ 5Χχπύ\βΧ©i¬d‘5?VEjΈόdz<:πzO¨ΛOwŠq$½#HΝI<^¦2iΩΚRS{MδυΜε¨βςΣβ5]RΈ?|ωΙτ^UεΏ;ρe©Ά‰Ζt»"qY/ΰ=4εcΠQŒYκΔ½HFjjšήήOσωμm€QΌoy]sΉk€ 5q‘šγq" R…ŸW…ƒ€¦6_ϊ0λ?Κ=ΩRjψώ?Έ!žΚOα^…—Έ%h π$!5¦1Ώ{9–e90Pψyf¨=›%Ξ”ΉΏ€—ο/ O$>tHMΰώ°dLΛίaq;‹6ƒŠί7γlB>—#Δ@cΓqΌόV–±·Μ₯0–šͺ|¬―7RΘΤb H ˆ.F¦€†Έ\eD¦Z)ΞΝςR·QGάIά^>+H @j@HCY—Ν.➁!₯Έ/±T½R„mœ&_NdHU€ΟιJσ 5·hΜYN ΊγΨ€ΤR©€Τ@j©€Τ@j 5€Τ@jβvΠώWόπƒˆλš*Χύqά)Ęv„ΉS₯€υώ@ΌcVΊq@jΚΚIk <%ώώ=±•8,ξ'{Τ ZL―Ռx‚ψŠψ‘ψΨ_<~±SπρD~| ±‘ΨN|IŒy­ β~-³ zόβυz•,@!Ον3Έ¨­9Ύ&^%ϊ•₯ο_)σ]»5M―5–ψU}――ƒΤ@j*¬ΤΠν,βSΝ_τ šΚσθ֞ΈŠ…£21œΨBΤ Po‹ΏsˆΪ|ΏχŽδxnsβbβΐ©ΉΑΓwȜΣί™sќŸDUβXb*€¦θί΅cŒ‰GO €&R#ΡΎβ±ϊΔƒόλj³ως%~±Λ“nχΣΤvgš€ Nζ‹υ4ρ±ƒ_ωšΧσk™ΗΎ ŽŒψ>jΣΉ—Βτt|`–ρcۈEςRέή%nαžσšoˆή TΏjδεηΛΉGλu’­Ψžωηb±ˆŸιuΩ'`y%b‘PΗσŒΤ,‰π+¬ €”`l©Οί›SBΦΩ›ΙησI ǜ΅ό=]g.;Δ’|"Δίρσω~Gβ=Ž&ή=•Βϋι'βˆι=›—W7ργΕ·±§?±‰ΈšεΞΔsψ±αάs’θ}‘—·$ώE|Οοs„“gŒœp|8Ώ¨R#ŽΏ9Ÿ'I ǝΏπϋ0Ηo1Ρ­ c 5₯!5β±§zμyβ>ώ…e.|J\p²Κ_ςJόwCΎNυ;”ƒ`€ΰ΄“χΑΜ"HΝΏ™yΔΙ ±e0ρ[Xoέ0ίsςχΥΔ›'τχciΔsΔ%Τ…s‰w4Η‚ς2έ^2ί!Η~ΆαFtξmjlΎ£όΨm|9Χ|ηλ91qMH97σσLΪmbdΠή/σCTγ\σ]€.!ΐλΦδΛ{ΫBhS€ΤœΒ"eΆw±Λί€c;ˆχ­ N±žσHMiHΝvξYΩ[]’ψ΄mώBΏγ0xcθ‡ςί¨K#Zjޏ忒Ύγž£ͺ)Ό‡½Y’zχ΄Τ˜ϊdݐ’ԘβΏ$όϋ9ŠxΞρ}Ϋ•ψρΕΛLτ²Τό€φΗΔΈR³Ώ‰₯―ύ°ˆŸ³‹0¦ζ%<-Φ[Hpl VrΟχήQ€Τ”†ΤœΑΑε!ΡΫ²D•_#?Λ‚΅ς@Υ{ψώ;ͺαΧRσwΧusή—ψΞ“A_:Η [/hό‰ι αω>v%~‘Ι€1@~Ξ½!r{?%Ζ!ρϊΩEψ†Ϋ9[μχ ˆckζςeίoψϋw8w―β›5š+šI:(Δcβ9rΫ{ή§υΎr,κ·ιμΣ°M£¬.>K€FΕ€ά^“φnδzϊ}Φmγc„<ήU΅σ©\―₯E₯ΪΝ’Τlμ¦V“$ς9DεΊ-|ͺ4hmQ­q–}Nt4iίG=3χͺα5Ω«š“ΔLΠ œͺUjyuλ΄ΪCΊ‚z%€Ψ~]MIK›Ϊ‚ZIκΥΚ°YΟ©moΟΪ§ϊ6΅f&idS+ΤŽHθv‡ςZu$џ“uΜ«7·©4Ιލ|κξΥ02ςyυ*7IR΅™M>ΦηN€G ϋΧςφιQ=ŠGΚ͎šF~Ÿ1―νa[ήτ9xΫ‡N{'~SήvrΠΔYrϋϋ{Γ"±†ž£^φιvνKΉWΎΰΣωŠ™ς1ωœ>7Ύj!χ©_ή³8澏|N}xŽΕ°Η?σ9χ‰ω…β¬σœΘνωZ<΅ΐ‰\OΏΟ#οxίG~fyΌ3‡=μΣpΠ8‹š\ζS΅Χ9Nͺυ9ίG>ΗPΘΡ>ΝOΊΝ’Ν93|δηn0©ΎQΟ¦$.Wjλ$•mΕ#GτŸ°‡ώ&ω6x²Ε‘GGdH’ΓM²Ϋ?όˆ‰‰}0yΘ­Gx‹Ο€}Ηω μ;ΦB>&Ÿc8β°ρ>ύNς9δΈ) ζsΐι6ϋ ›žδΕΐ07ϋ!ωž+ΆΏοYnδzϊ}φ;aͺΟαGM΄Η{pφu6Ν.ςXλ>Gν}ͺM₯‘IΤcςyƒ_ΰ38s„Mη‘>ςs7€ϊέοCςςλΧ©qR©Τ”©iFRsEεvN 5H €& ½{TσvΥ.H €R©I‹Τ4―TΝ»j–H €R©‰B―U½7·R©Τ@j"5-HjFUkοR©Τ@j’JΝΦΝ™@j 5HMZ€&£Ruο¦œ@j 5HMz’Τ|·Ήe š4b²”rXή»>Ί±—"#eG7z}ΗΎξ#E₯ €tτΊαŸΧΏl!―N—>gΡρβgΙρΌ…ωZ)ZZΚδϋ–ς¦EO·w}`!₯ιψϋ?Ά8ιΑO|δzzGά>Ϋ'L2εrωΩ€ΈΘΧ2§₯I>&χIK°<¦ϊsj}Φc>-†ήαΣψ˜‰NΒ€F‹o*€%IΝ­5;:ΤDŒ#υZωrΘ±SœzL-7ω 8ΰfŸ#o!EKʏ!TςΔρ‘’tπ‰S-€4xj§$9ψ€©ύŽŸβ#_w!Β)?O).ς΅ Rp΄Dπϋi>ϊ½Ιγ#?Ώ½ΖX ξt}’Φ—Ϋ΄Έ8˜0©Ÿ§!Υο~’šΝ›2Τ@j 5š΄HM«½«{kut©Τ@j 5Qθή£Š·aS‹@ 5H €&-R“IR3½NΆH €R©‰B·ξUΌ5[©Τ@j 5i‘šΦ$5wΤλδR©Τ@j’Π΅{UoΕ†Œ@ 5H €&-RΣ¦ruοξϊ9N 5H €&ͺΤ,ΫΠ2H €R©I‹Τ΄%©y aŽH €R©‰B.IΝΒ/3Τ΄ρ½φΚ! 
ΆWˆ7‰UόΓ§‰oŸλ]τΟ…ωΠιΖ²Α•™n,])άᒍ\O§…K1’iαZŒ$r=ƒ–ς΅€Θδ>κγ3δž}t*΅L³–ŸΕ%Ο,r’SΖ₯œHΑΡϋίυκ}τ12€SΑ§Ο^νsœυ>rΉA>G§ϊΛ΄πVg<ΰΣ„‚·DJ”Cϋ ŸρΡΗ?•r ο±ΖTd©)Ξ8bκυ={z>tϊ±Υ˜©FΦG χž4n‘¬³Ώ•4Yˆυς₯… 1:κΰ[m„Y¨υ,‘RΒ#Ι'cBζδ>j9<θδ©>ωRΖΕρ–ŸΕ>ηΪτ='‰ώœ€œΘτzƒάϋσΡΗ@АNο1"Ο§λ56=.O"Ÿ£χΓJ οx­M«Λ’H©Qς3¨Ϋ >–H©~χ;w―ζ}ϊeΫ@ 5©¦ΚΔ7D[b 1’—$&Cj 57YUjx3štq—žš’ΖH €RSΝϋd}»@ 5©£Δ‡| "ƒοg˜Ώ!5H›φ$5l–λ$FRS€8©ΤΔ]jrΊWχf―λ€&΅`τq)ίί¦ΫκxΞps ušd@j 5±•š$53[tu#©)R©V»!€Rk©ιDR3k]§@*ΌΤΠν€!U#ΆΝS F詁Τ@jώGΗͺ5Ό3Ί9‰ƒΤGAO €&ξR“έ­†χΖΪ΁΄-Ί΅&ή!–ˈΛyyΚγΫJKj~ΰ_F‡°&B0:žxCόΛOHM*RS­¦χJΫNb"5EŽ#HMά₯¦#IΝΛk»AjΜχ¬ί―K¬$r 3Ύ­΄€ζοM”už$ΞOU`J³ φμν­ϊnϋΦ~οf`Ε·?ϊ,ύΚfξ†­>οώήβΝ•ίωΌ²ό›HΜ\φ΅Ε 7ωάύρ:‹Ρ―|ξ#z))Zš––&Ωpκ"™™ͺ·!_K¦fdz·άk_Xjqλ[_ψLyw•Ε˜Χ–ϋΘTlωΎ Ί¨D ΘΈ7VXΜZυΟβ―ΆωθΟmΔ³‹}τρ‘)ݝš-?'-oς<2wƒE*Α(»zMο΅μήNb"5EŽ#ΥΪdzmοšϊ?ξ αξ$νξ˜f‘uΫtŸφS“tœgΡιζΖΉΙΉ)I—’t»¦χEItc/e"4=]€pλbŽVγ©ΣΠΒ£ΕKΎ–LΝ6H!‘ϋήηό<‹ž—$‘’aΗ@§cΛχ¦‹J€€τΊ8Ο"ϋΦ$YyΣ-δg&%L ‘•­©ΩRχ=sΊ…<ΉΧΫ€ϊέοΠ­¦χόšž€Ί-ΊΝ$ζFΉSC·ZάγS_,kLΜβ*σ#H €R"55jyoζφuRΡ₯¦Έβ€Rw©iί­–χτκ>ΠΆΦ'Ɵ1ΓCΎ“νˆ D½Β\ .u©‘ΫAΔΔ™ Ή£HMœ₯¦SΝZή¬ξϋ:Αδ{Ρ€Τ@j 5΅Ό'Wυ $κΆθV‡˜gΖΥv|[©J έ'>"ξ&ξ`ώ ©Τ@j$5΅j{ομs€H €R©‰4‘g·Ϊήc+χ$ΚΆθV•xΈͺ(γΫJ[jΜHηJ₯Œ 5š8KMNνΪήμr©Τ@j 5‘Jt­ν=ψΕΑD(\‰xŒΈM-Oy|[iKΝ?©Τ@jJCjκxάΟ €R©ΤD“š:ήύ_τ $‚Ττ#̝ŒlΙΡ…ίVZΩO//p^ϊVξrz!A:w΄Oο^ήΟ;Άεgηv'?νΪι³ϋ§Ÿ,ΆοΪν³eϋ.‹ο~LςνΆΗΎV¬ί²ΓgΩΧ?ZΜ^³ΕgΖό>Ία— .Ψ(SΧ΅ ΙB•RNtJ±+υΫ ΕBKΝι~κ#SΊ―|~‰ω^ ςyr{ϊ΅δ~θςxH‰4$Ψ°ιί;}>^ƒ…άG]ΌTŠ‹œ@Λ‰|­m;w[μΨ•d»"•`ΤΉnο£Γu©‰F^ηgΗζ#η_γœtRt|ϊŸ¬ŒχiϋΨD›G'އ&'ΉoŠO»ΏN³Υe”MΛ’ΘwΏa6²¨€!ƒ,T©eH OXκ·LcΧR³Σ|¬”ξσBΠιήβyr{ωZVJ½*ά)EHG_€‰6NΆθ09ΟGJ˜.l*ΕEΕΜ½.ΟGΎV֌ρ퟼5Ιγ-Rύξ·!©ΉkE@β0ωήaa@j 5š4IM½ΊήœA‡;Τ@j 5š(΄ξZΧ»mω‘Δ¦LBΠD:ιž\R©‰³Τt!©ωlΘN 5H €& ™$5yŸ$NR3?`ΩbH €R“&©©_Χ›ό'H €R…V]λy“— $—ŸώD,!vσΐ λ’Μ$ ©Τ@jŠGjrΦσž2Ψ €R©ΤD‘en}οΦ₯Ώ $RSŸg |‚h+HϋΘfH €&ξR³ψχCœDΘZ¨A|J,βBtγ *DG·QΔjžƒb€R©©R3nΙ±Δβςέφ&––φŽφξέΫo~ ρH %Γ Εβ«­IΎόa‡…lˆ΄tH–c#Ÿ'Εeγ6Rxδώjδ>κΧ’©ε²(£A¦DλbŽRxŽΌγ}]°Ρ•‹]Κβ“Y¨R¦,O|{₯…L]Χ.εώJ±Ϋ3άρΡZ-/~ώNΩ—η„ ύ9ΙΤoωZ†{?YοσαΊ->Z‚³υΫ$ίo΄Ωφ½–ξTHΧFυ½e<ΖIΔω%κˆΙ³ζΈ Ρq‘:#@Υ‰,SΈ–¨\ξSΊ³ZϊA»Η'ψ䋇R2 ›’D4DZ:²nO’“Οk{―M›&ϋ΄}d’άί=ϋ,J7Ές΅²oΙσΡEeJ΄.ζ(‹zφ;aͺ…Lƒv¦«b— f! UΚ”εž—ΪτΎ0‰|ŽAξoΎTp± ™ /E36‰LΧ7ΘsBΛD›ϋ§ψΘc¬Sο»]•€γD›ΆNτιρβ>}^ΉΑ’ΫΜ1>ZΊS‘Œάލ‹O$Ncjfm 5HM)IM“ϊήςσŽw’ΚΆΈŽ|bΧL άK3J<ΗLηp €R©)ίRΣ‚€ζϊE''©y›ΨΑ“κ”Κ<5HMΌ₯¦·β‚D)DgzZx²¬’G&°f έξ$†‰εC!5HMω–šζΉ ½k $NRSκσΤ@j 5q–šnMz«.9ΕIŠ=5 xBΝn!RsW€Τœ ©Τ@jΚ·Τ4λΠ»bΑiΔFj8¨5'Žašας€R“F©iΦΘ[sΕιNR Ft»‰Έ—Ÿ 5šxIMΣ.ΌKηHœzjN%Ύ$εbVλέ ©ΤΔZjš7ςΦ^=ΜI„ΒMM ί―IΌΟ?P ΡΡ­«(Ό…!5šŠ!5šχ‡@β$5‹dο ΘE)tu?C¬ΰjί†₯‘ΊSΊ{[)Ϊ.~ήΎΥG6(VΓCΘ΄π°toΝΦ»|ΒRΊe£§·οΪ/ύ^€π|ϊεΏ-€ΰθB’g͘η#Σ₯O}xŽ…,L©ΣΒ%Zjd*΅LΣΦE%₯ΰθΗ€”MŸ½Ϊη©E›-€Lhα”ΕKeΪΆAŠ£>ώ™~­Σχ%ςΘ7}ΐ?$ηί\멐ξ-{_Ž<ΫI©ιA,ΰy¦–cxΉ³έFsΦ“ιΝRͺͺ˜βHυ-ύΤμμgnφΡiΫ]ž»Ι§ϋ v"ΣΒeͺ·A6€2υΫΠξο|B…J4zzϋr?dƒhοE ™LC6HΑΡR SΏuΊτ§' K —Θ–™J-Σ΄uQI)'ϊ±^Jγr›.£“t˜”$Ÿ`Šβ₯R( R"υρΟώηΝ>VjHΊΎ#)'ΉΟίδ#Ο?ƒk=Cͺ"€Kcο‚ΟΞ $NR³$ Ν{IΔηšήσω~5Ni€H €Ζ!5MΌcΞwRΡ'ί+8©ΤΔ]jwnμύιفΔIj¦ς5υ³™W£ΊΥγKU•ΤςΐλψH €&˜-›z›oΉΘIE–šβŒ#HMά₯¦Qη&ή°OΞ $n…O&ςˆΏ'F|N/žΕτξϊ~€¨νΚΈxώπDŠjλΦ­!5šψJM«¦ήW.vRΑ₯¦ΨβH•&υ!5šΨKΝŸœH¬€¦Α¨/ρ›™δ‹ΎΈ%j0BO €Rσ?zf6σΎ:ΒI—šb‹#詁ΤΔ]j’ΤœϊΡ…ΔιςΣI<οGb;OΔ·=ΒσZ˜IΑΔί‡/γς€R“’Τ΄nξ}ϋUN*ΈΤ[Τ@jβ.5 ršz'}ψ§@β$5¦¨]—B$“:šΓχΗςψœΐ4 Z& Ρ0X‚`ι΄Ώ~΅2ΙΖ₯ΏmXμσλ¦ΟmΔσ~ωz΅Ε~Ψμ#GέΚV§Λη9%Œp ŽAΕΌϋγu2•Zʎ\nω2-FRVtΚΈ%)8ΊˆeXΊ·ΉžL£6Ό²όŸE›·YH©ΡΗGJ₯Ρ]»²“§‹s`βœϋε»/-ς₯x R•š-w\γ$…‹%Ž˜‚–‰†B¦ΙjY‘鴇ΌuΕQο\HYWYΘηόζ΅½_ν#GƒLγΆ»…ΰhΙΡ |/.Α1ΘτbŠ,S©ϋ\`#λ=<‰^O£Τ)γ–(IΑQE,eΊ·,Τi°δη›nW'ι4.IϋιΣ-,©Q©ρςxK5txκŸ0ι;ΟδyΠχ•Q…’0RsΒ'©ω°ˆΧΓηr*ισ&ν2,R©Τδ§W›ήΦϋF9‰ΤKΤ@jβ.5υIjŽ™}i q’s ϋ)βtΎ΅‡tξ(€Rk©i›αύψПTt©). 
5šΈKM½œfήΰχF'©y8€‡ 5HMz€¦w» oϋ£79Τ@j 5šHRΣ©™7πέΛAφSRzFAj 5š”š¬–ήΞάβR©Τ@j’P—€ζˆ·― R“”šωH €¦₯¦}+oΧΣ“@j 5HMκtjξφΦՁ@j’R³ Δ₯¦WOwΡ@Ώ|»ΞΗΓζI΄ΤHωYΏΠB>&/ζ+S…e£jΉgδόϋk½ύ΅ί'yvΙWSή]ε&ς1r-εGΛΚΉOΜχ‘²£₯I¦jλΧ–Ϋ“₯ ZΘΏΡηέΥί[Θ–²Έ¨A§x'Π©χς³ΠΗΨU¨R‹δίΦ-°qœ;†Τ€&ΣΫυΜT'šhΤμ˜α,θbΧFZH9‘ ‚–)ηΔ)₯{ WΚ­Κ“\m!†₯5₯R©‰΅Τ΄υ~~σ!'H €RIj:Άpžλq’š…ό‰Δ£fζNb€R©I“ΤtΚς~~ηοN 5H €&κw@ž’8IΝ2ώ~b0ίO―Ττμα§ΠZi³B\φπΝZKbΒΔE6JέH­—D,Χ L – §A6²a ±K~ ²aΦΕ.™»ΑGŠ…) —<³ΘB§qK€Τ\τΟ…>Z\δ~<ψΩ—r?€ΰhΉ’ΟymΕ· 6mυYωνv ΧqΥιυVz·NιvˆŒ–“ωΜη·eοXόΊπuŸί–Όe‘’Τδ΄χώσΑSN 5©J•"‘ΕEžΤ".qΡΘIΝt#uάϋ—ψθΗdC'SƒeΓi°Ψ{§ZΘTpω-?²aΦΕ.s―O’‹EJaχœιVχ™6ϋύ1Iί³“hqΙ½.IΧkm€hι‚œRδs:έlΣ~ΪtŸΆwN³‘‚£Š]ΚB•2½[¦p€Δ聹RNd ¦3>9ίβμOΟφ9sΞΉ…‘WΦ8IΝ$b…Μ— šs 5HMš€¦3IΝGΟ8Τ@j 5š¨s5ι}L«Β\@2ί―M΄€Τ@j 5ι’šή/ŸΞt©Τ@j 5©N@©‰²-s΅†ψ‚XMŒ,―=5΅ˆ‰ΏρίΩΔ1H €&MRΣ%Ϋϋuώ«N 5H €& Υ;΄Μ7ξ'AAΫ2Δ’=QΝ C!rˣԘ έΧKωοš‰ΑÐH €& R“›mmK©Τ@j 5Q₯FOAj$^—uΣQϋ±$€fžd…!5šτIΝ>];εΫΆR©Τ@j"IMϋ–ωκ% m­7Ϋ W.0”x@όύGβΞς(5qοΜ|ώ»ρiz₯¦{ΎbARγ™|²"S΅΅Έ8RrCΧUΫ—ϋτΣξέ²πβςo~τΡ—}ύ£Ο—?찐Ε3υcRpžZ΄ΩηΆΦXΘ‚–Z\€¬ŒymΉ…L—tœυ²₯/™š-ΣΆuAKΉϊψHY‘,52^Λ‘•B― UZiΫςX>ΫβΧΉ/ωθΌ2νϊ?³Ÿ°HMjrΌ~ρHMΡ& Kێšͺ­εΔ•’[ΠΊR†δ>e?s³…,LΩξ―Σ,²ς¦ϋΘεmξŸb!‹gΆύΫ‹S¦ϋtgΡύŠ$² ₯.Z)e₯χEy2e\K“«₯ά'ƒLΝΞiΣνͺ$rίε±1XrψΨD':₯ή•B/‹T€Δθσΰχ_ΰsΑggϊ\±ΰ4‹kυΉzα©…‘)a’=5§HΝεQjοί3ΨζϊG|Yw‰Ή\%z|Μ<7o«ψ†H €&DjΊuφώ»κc'‚QkβbΉ™’Έœ—χ">I|?‰ύTΧςj8¨TU1ΕHMΩ—)4šβ—šj$5퟼5Ψ\~βoLόΞ &š€ŒšΜP<’ο$&Cj 5š0©ιb]ζDFDΎ_—XiψoCxωΡΔ»|?—V'²xp`εR–š"ΗH €&φR“ΥšΈQ!ŽT!ΦrLH ξZ{j*™²Δώ»όEWˆ`d~ωeˆ`ϋ€R© ‘šξ]BΟΣTέfrμλΔiΌμtβAΏΐx½˘ԀG 5HM«|ο%AΔ”ξ£ωG‘ω‘3ΊΌ¦tίCάeΊΕœ5ŸE|ξ:3‡˜—tD·mj­ŽηO Xj“Ω R©‰±Τδζ?©l‹nνˆ \Ο­ ίίHl&Ϊς:wΚϊnt{Π ,E©)–8R­Y=H €Rσψ„@β4£πόΒf?Ρ­%ߌ»ͺŒΠS©ΤπωOR£³T©Π¬ρ]«ΓbpύWβdΎ*ρίΏ+@jN.E©)–8‚žHMμ₯¦]+―ν£‰“ΤΜαIwrΣT N i,qM‘.?υθ–L©•Ε(5Q₯F€fλA—²ΑΚ—i"Σwε6BΖ;θbšRpdΓ,%Ζπρϊ|΄¬ψφG-52΅yφš->Ο,ήl!DŠŠFJ‡AnSξ£F ›9ƒά_ω)1Y¨S°4ΈR³ R\δρΦE+-©‘, ω™Ισγ—Ož³ψyΦ£>?½ώ€Ήž!%©ιΡ5τΌΨm\•/#]%–ύh./‹ΛΜΫΛβε§βŠ#΅³›£ΤΈRΈ R:Ž™}©ΟΠ/² Φ©]h!Σwε6 ς1™ϊ-EΛ G[ΜΊ}š•ͺ­€ έΣ|΄ΤΘΤζŽς,:ߘ€λ5I,Q1\–D•4Θνιtr‰%eΊ¨€”0υ<)2²PgΦ_¦[ΘF]gIqΡR)'­“R# XδΉ€Ο‘KηαsΓ’}nZrœΉž‘PR#eV'©ωρ±‰ΟΑδ”Ο3εκŠϋρΛSΥΏ)H €&όόΟWi^a€Ÿ–ΗˆΫΤςε‰LFΊizqψ~W5Pxmi .Ξ8©Τ@jZYσIb!5tΫ›8ˆθL\B\jΓG Fν90.β4Ρ"“j§bšAj 5šΞ1±€&‚Ττ#̝Ŝ½όυγΛQ‹ΈGvρœΡ<π‹D†T)IM±ΕH €&φRΣΆUΎγ˜ N=5—φŽBj 5±–ט2“οER©Τ΄Κ7Ά*Aœ€fœ$˜Έφ©Τ@j/5²lƒR©Τ@j’JMΎΟš‰“Τμ ώψΕ $δΏ·Cj 5š4IM―ήώύ΅H €R©‰$5m2σΥ K©) Θ”V+½VɊKdώ»zŽEXq@]xPβœ|SΧ‹υt1MΩψΚΒ‹)Ϋβ3sΩΧ2]zρWΫ,Φ|ΏέGΚΟά [-f­ϊΞη‰…›,d!Ι?ΖBŠΧF’’²Θ¦A¦Roί΅ΫB¦€ΛύΥb'ί—|/ωZ;h›’Ÿwlσ±dE HαΦς)?O™Άm©Ϊ–ΌΌzŸ”š7²HMjzzΩφ½HM4κvj˜Ž-EΕ κΗNϊπO>²θΰ°OΞ‹Œ).Ή}Ήž–+ΩψΚΒ‹)&εYδά”DvΜΧΨί3ΥGʏAΜΎ5Ο§Λ 6²dΞX)^ωRΝΕΰU™F­gΐ•E%εώδφεϋοEΎ–.ΠωΩ±>ZVzΎ”D¦pλ¨ςσΌpξ0‹λμ3nΙ±>·.ύ…|μΖΕ'XJjξžHœzjϊ`ŠZVΤŸΤH‘Τ”]©‘B“6©ιέΛκU@j 5Q€F MY•‘©)f©i™Ώ9SΪq„nϋ%%Πν8™ΔPRσ _zšΗ˜ϋŸqšη@H €R“©οI©Τ@j 5Q₯Fƒe@jή53ž,οHΌ]œRσ€,ZΕΕξζTΛ…H €¦€₯¦·χӝNJ+Ρ-3δ±c!5HM”5u‚2 5KB[TœR³Π΅ R©Τ”ΌΤτξΣ'_‰I)JΝŽ_Vηš9n 5HMΩ“š¬Ϋ¦R€fua+ŒΤ<ΕE-cξ&žζΩF?ƒΤ@j 5%,5½ϋδΛπ’”’ΤΝ“ίe‹e¦ΔΒ’°^H €RSJR“™™ο³NP€ζ^ZP)`Z™Ώ§ΤΤ$&ž#žηΊ+΅xΆα:ιx³ϋtΝρ%!4εZΚ‹NΥr’ΣreBλ±OgZΘΖ-L~~]ψzU|pΫΞέ>2ύΪ t-!RjdΪ³”ΓͺοΆϋθtoω˜LΦ)γ2ΝZ‹†LΝ–οΕ`₯R‹Λ#ΊgΑυ]dςηpbΙ‰š·ΕYδΤ ¦°€Xͺ”Ε'uzφξοLς\žΝΜΫ“ˆυ~zωn‹Τ€¦w>1””f0βς «‰n¦ ρ!Ρ°,Ž©iΨΉ‰/ RH ”kΩ(ΜsLΙ5Œ˜{ύ؟ζύΑG/ΤΒ#·φ§g[HΩ²w5ΰS6θν§N·R£ Uvœ˜€ν]S“θtoρ˜ώ΅o₯;R³uzv֌ρ2•ΊΣΏΖYΘΗδ1Λ ΉΟί )'†Ύ―Œςq9ΥSΘ"§y^Ιβ“RN “— φΉmω‘yŸπ‘λ&,βS©i?}z e@jL”'x&σ1«yLb“!69₯υf‹*5Z:\BSšR£εΑ%4…•ΉΌ$€FΛIa€&_fOqKšΧ¨0Rc –)4Ε(5½HjdΟ—¦ #Sna ׈«QV »€&l™¨R#…FKšΒJšΒJξ©q Ma₯FΟƒS©Ρrβš’)4…•-Λ…‘)4Zj€ΠZj”θ&(+ <^χX¦}I€tΗΧΞΧρί½Lπ‚Τ@j 5ι‘šž$5?lίε€/?νrš¬Θ]₯5A'€R©)˜κ­2σNLYΎ T»€η©1iάυ‰bΩbH €R“&©ιΥۚAYƒ”nH €RYjΤ„Œ Κ@οΎότβŠ’”š9ό?€R©)©ιAR£bK 5H €&ͺΤθ² Κ€Τ˜”J<^w~IJΝƒΔFdL–q‡₯ ©Τ@j'5Ί~–R©Τ@j"IMΛL/{|^ e@jNγ„$Γ™%)5΅8ΝΚΜ"<—οΧ€Τ@j 5ι‘šξ={[ισH 
€R©‰,5·δ‚‚–Ρν«²ΉtEΌΔ7"ήδω-ތ’ώΩ§KΆχλόW‡,.(εp‰E*••­ωDTee9?Ι>x*ωKQˆPJΗ’ΝΫ,€XΘΚΨ)1:υ[nCΞσ’εDΞ£ωoΩΎΛG§»ͺ^λYn]Υ°uEμDυuƒ5‘!¬šΊIuŒ-Ή«η$ςΟ)BJΜ‘"ΕeΧΣ“-vώγŸ]ON°yfͺžT₯F?•ΔAjŠ#Ž4ιΨ— 9_ˆN–’‘sUVΦ VXee9?‰lτ W,8-YυΫΠύ…?ϋ肐YyΣ}€Xhq‘δΛ†©Ύz>)'Φό0MΆ°υ3²β΅NΫ–ΘjΨZHδΌ1Z:δ;ΛηΪ…C-€€θ΄ν{WκσΰϋΘεωœ©Ÿ΄(ŒΤΘω‰$^jθφ"§h’B0ΊŠώ$‚Ρb$ίIL†Τ@j 5nΊ‘Τ¬₯ΟΙEL€¦ΘqR©ΤΤŒΛ $R“˜AψvžU8‘7nΛ„¨΅aˆYΔ"™τ𠾟aώ†Τ@j 5αR£/J*ΊΤWΤ@jb/5™ω.C&ˆΝε'ΊΝ޲ΜράgLΙp’ΏFΫΤ:[ΟΞcxζΆiΡ R©‰­ΤtνΩ+_™I €¦XβH΅!5šXKM ’šΞΞ $NR³\ΞκG·,³,ΒσŽ1u’ψ~ΚΑ=5H KM^ω>?IE–šβŒ#詁ΤΔ^jZdζ+‘ NR3˜Ψ@ΌΛ¬'FxήDb― ±›ψ;.?Aj 5©IM.IΜVΣTp©)Ά8©Τ@j2σM«μ'Θέ“©^ˆΐ$aMUό¦(59YωCΎΒ”B&€œθtΪ°F*4EW`mS‹‘!½Ο²a–‚£ΕB^^Π…/₯hδX Ή\§ΛiφuΥg—Έδ>[°εrυ˜L±6HY±ΔB ‘U[ ¦<Ζ:-_¦β‡€οΛνλTm—Έμψϋ8‹νήδ£³„Gg©J–^I\RΊ‹Gšη6Μ']|RΚ„”ƒl€dΪν]+ϊ[„5D’λ’!½Ο²a–‚³GrDΊ΄U‘Z₯~λΚΦw»‘Υξρ >²rΈAŠKŽΉΏRNΊΝc!Χ“)Φ)+R, ς3”’‘S’Σςe~XρR)©Ίψ€”“ϋΏθgρΘΚ#!…GŸg…‘šά‘yΔa pŸA¦O!‚Qcτ·Šo©Τ@jάtιή+_œ$¦R“rΤ@jb/5ΝIjΛ $R³ΘΜύΐσAΈXŽ…Τ@jβ,5Ijτ‰LΎ H €R“ιu½6/8HΉ†½ΦTηαSH €RS²R“Σ½gΎ™£%m‹n­‰wxΠ2βrρΨe<>e™Ό„C·QΔj~l€R©©RΣνκΌ@0£pΤ@jβ,5Ίυτή]ύ½“R“‘ΈTL·ΊΔJ"—8œx+1NŽnΝψ\ξ©­ΞَǩΎH €¦œKM3’š+σΤ@j 5š΄IΝ¬Uί9I5Ρm&1€xš8*ΰqΣK3Jόύ:q €R©)ίRS“€¦; LE‘ΈΏ‚ _›‚” J£Χ·όHMvΫ€ (°Š ι ˆnl~|θΟNd#%₯|)»"]WKS˜ΤXιή2]Ύ”΅XΘ”λo·ν΄"eΡG™:m°^O}$\Ε! *ι*© Jι°λCδS*•΄|)IΔO―ήη£₯ΓuΎl½o”ΕΆFϋ„WϊL%€dwνι½Άβ['©l‹nνxІzΔBb1‡xΨ—ΧΉ“&žσ 1΄ΌKMFnύ|’bΠιΨR:tz­llf¬ΪΟηΙU}-[ΉΏl” ²a»cωαRpΒ€F6Ϊ2-Ω ,ʍZ,dΚuΫG&YΘ␲θ£A¦NΛ"›‡Όu…«8€A'‹CjΎ{ΉN«–‘Ε%Ÿ—Pn\|‚OXZΎ€ „¬'%CŸςό‘ηKΨΉ&<ϊ±TH;’šGζnp©‰Fλu}yp4H!‘ α…5έ}ž_ΣΣ'LjtC$₯F9tI.p”šž@6φ²ηΡο]f!S§eϊ΅α€Χ―χ‘… ‰’ ™.­S’uH‰,)—kA“οKΛ§N‡w!%Uo-•)'aλΙsGŸςά‘η‹Αu.ιs.ŒTΏϋ΅Hjϊ\H₯f5_ΎNτί[½Ύ©μpgββR’KΊƒ€Rg©iΫΉ»wœυN 5H €&ͺΤμsήτ@βP&!¬ζS#H €R“©iCRsο'λ@j 5HM$©iLRsξτ@β 5λDν§[ˆψώ:H €R“©iέΉ›wΗGk@j 5HMT©ι{φτ@bS&Α\3™βο!ΔtH €R“©ΙΜιζMŸ½Ϊ €R©ΤD•š}ϜHœ€f^ΐ²ΉH €&=RΣͺS7oβΫ+@j 5HM$©i”ιν7lz q’“W~#׌iΛ#™_πΌΔ§œΞ΅ΜԘcuή$Vρ ΪV―6-ς5 AΘζ»Ό+|Ύ:ΒB&.a)έΞβ–Ρ€‡b΄RUΊ±DΛΠΧ|ζ£Σ±­‚™R,t ΌKτϋ «h¨ΪΎ%2^₯Γ‡ŠJΘ6Β>'WΑI}ΎH9‘ηKΠ9“@―ηœ‚H%€΄μΤΥ»υ­/œTd©)Ξ8Ύ[-Ώ1ψΧκ^NdσΪΪ.o¬νμ#Χ +B¨‘B–b,π°BŒ2υΨ Σ%ZŒŽ{™¦m3₯Έ\>u±Ξ[—ώ.rί΅ΈH±Π!₯Xθγ蒝’/·§?'ωyJIΥ²+ε$μ|™΅“…\Ο%8‘κwΏ6IΝώ˜Hœ€ΖΫΝά4ΜνQ σΜ€uΔνfΎ›ˆ)ΔH^>2Qψ R©Τ8ζWΙξκymΉ“ .5ΕG 5HM¦wΐιΣ‰ΤS`ͺΕ3–ξΟΉκΌ<Γό ©Τ@jά΄θΨΥω2'qΉόTΤ8©ΤΔ^jfzž:-8υΤΌCΌ­‰ψάΚ<ΟNQŠ|›Zg«γΉΓ…΅2ΥƒΤ@jb+5Ν;ζzΧΎ°ΤIE—šβŠ#MZVƒΤ@j 5§L $NR³ΰ`"Οtύ¦”°u‹ŒΠS©Τόfr½Ο.v£žš"ΕτΤ@jb/5 2½ƒNžH¬/?Ρν½B<η&β\~‚Τ@jR“š¦νs½‹ώΉΠIœ²ŸŠG 5HM¦wπ‰S‰Ϋ@αMˆAQΎlΐχkοΗSΥΏ{}zΆnξ7ίί~•OXCτΝδK}τz²aK© ₯#U8_Αň³ϋΕ;έ„4φRV€ΰ~[φŽ|ŽnTÍΓjΉž”}¬δ1ΥιBZ0]Ÿ‹Foί%·_MΈΨbΣΨαN6ίrQ ςΌ2ΘsNž›†°γ˜JiΎ‹wΑS œTπΒΕG:v«ι7/―νκ£"ΩπΌ».ΫB6RR„tαAΩκΖX6ͺ:=X6ΜRtš²lψ'/μΔΥΠ€¨œπΑΕ2εZ?/jϊqXC-Χs₯»\ a…]…$΅œHt:v˜άΞ^ΧΑηΣ/Ϋϊ|²ΎΕ‡λ³œΈΞ+ynŽcͺίύ:$5ύN˜Hœ€FΞ,l'ί ϊEx^Ξ–ZL,%ΖπςΖΔ,ήΦ¬(™THMœ₯¦qVο¬σœTp©)Ά8©ΤΔ^jκ·ς9nJ q’šΛͺ§sG!5š8KM£v½ΣύΤ &ί‹€R©iεzΜ”@β$5σ£,ƒΤ@j 5%#5 IjN}xŽH €R©‰$5υHjŽžH ZΆΰŒ§εDo’ӟX©Τ@j#5υΫvφŽΏc'H €RUj<98HΝYœ>ΉƒOπq€R©IΤΤk“γ ΉηC'H €RUjϊœHœ.?\Ϊ;Ϊ£USΏΡ “ΔΥψθ"„Ά@©Ρ©Ι)Κ‘O€DH“o;β΅­΄pB­”₯S“eκΒPPκ³ —h΄0„₯_»ΔeΓθs-Φ]w¦~Μ%8ϊœ “gΧϋ4€*5οϊΐ €&ΊWχΩ(i\ A60V7ža)έ)a…εc:Ω%FzςuuZψyŸε£S°₯τΙ]«Β wƒ– WΊt˜6εZžZV~™ι³lCKΉά –Ήύ°cφ>S–šΊ$5&‡žšaόΥΔUH €R“©©Ϋ:Η;βφΩN 5H €&R,!©9όˆ‰ΔAj.“]iΖ@j 5šτHMΜNήayο:Τ@j 5šHRS§•wD ΔιςΣΑQ–Aj 5š’“šC§½γR©Τ@j’IMKοˆΓΖ‚”nH €R“©©έͺ“wπ€·@j 5HMT©9ς[‰Γε§y<ΝF5žf,±R©Τ€IjZvς–H €R©‰$5΅[zGtK qšΓxόΜΧj<›μtξhχύΖfγ˜σ}\i·WKΞ&5aΕ.‚c©ίΧz)l”½– )J²x¦~/²a–ΗT£«k½0)ΠϋθQάFΨ>†νΛ—#Οφ‘£‘λδ6δΆuκ·|/"˜A€@j΅Μφφχ†“‚ΆE·Φ<ƒ™sjqΉzάˆ4wšˆe£ˆΥ\8rPEšάξUύΖfή—­}Βnu£"ΣΈeϊo˜Τh€ΰ„₯{‡bt­§Χ +Ψ({-RštMω^dΓ,©FKlμεzaR χΡ%’Ή MΨ>ΛύXΌ‘•…™2|τzraΗDΎ-W}>Fjps qΊόΤΆ΄wR©‰΅Τdd{}ΗΎξ$‚Τ˜*Φ}ψ~]b%‘+„ηuβΛ„Τ˜ΗLo¬)‡BdkˆΚH €¦HΝώ㉓Τ4劸―o'€Τ@j 5ι“š>7Ύκ$Υ`D·™ΔΎ Ρ“X/€ΖτŒλι9R©Τ”o©©W+ΓΨwl q’S•ϋ<ξΊ6—€"&Cj 5šτHMΝ½ž£^v’ΚΆθ֎Ψ@Τ#Ž#nηεRjξLΜSΕ?H …Τ@j 5@jϊŒ $NR3_,–½©Τ@j'5=Ω Ι\ΑpΗwΉŽω>›2'D-bQ?@jξ š“!5HM9—šš$5½Ζ'©ωDtAŽ‹[Τ@j 
5ι‘šΝ;z]―~ΡI”mΡ­*‡―βΏ»ί±Μ~γœΈό©ΤT\©ΤγΖ@β$5ǘ_sD7Ξ 0ΏτŽM«Τd4 ldΓ †₯tkΙq OX!Ζ0ω‰JX1GΉOzε{[=β4 y δφ΄\Ιχ’τU—œβ£sIΒΪ«‡9Ρ!Σ2δJΉ#μ΅%k8έB7½MΧ>ir₯wλ4wβ’Τ4λΰuΎb¦“…+·…¬#{jΊͺΒk+Β@αΞέ« kτΒRΊeaA);Zxt!FωX˜όDEp”Ι}ΤιΝς}­ΫΨΒBύzRš€ΘΙ†ή°aS Ÿ¨R°r£|Lo_>¦eH"Χ+.δ>Κγ¦Χs Ž–Wz·‘X₯¦F oP·‰Τ8ΰΦ L#₯[#βMbίR©ΤΈ©NR“3βy'€¦§l/&2G»€†ΝYO&₯{H©©bŒ#H €†€&χ†@β.5"¬˜FJL!Fςς‘QCj 5±–š¦ΌŽ?λ€"OΎWœqR©Τ΄πwHά₯fc!ž³'”ωeˆ€υ€R©qS­I{―ύ…Ο8‰ΣŒΒE‰#HMμ₯¦zsopφuG)ν‰PΈΨβ€R{©©JRΣϊς@ šΔ“noK8Ύ¬LδYΑ(0”hLΜβTLσ#H €R"5 Ϋx§ή퀂KM±ΕH €RCRΣκ²@ GΚDžε&°Aj 5q–š* Z{ΝOΊΝIœ²ŸŠ€R{©©Τάββ@Š+Ž”ζDžH €¦ Nθ³π”ΑR~–œq΄…lμΓj‰–+ΉZjτΊ ΒΦΣβ"Χ {ΜυZz›aς΅f*E7S’šz-½†ƒΖ9ΤD―ύ$2aιΐQ…G ƒNΛu₯θ†α*ή¨·΅H£nTeΓόέζ–?nnνσί―³-~ύΊƒ”Ÿέ_΅³)έa ΅FŠΛΞ―Ϊψ„IΝζˆhq‘ϋφX˜lΙυΦlt£Σ½uŠz‚°"›ϊ³NYj*7ρ5GββHωŒ%ˆ#\jψΓΫ@Τ#Ά©ΗΆςwΓΔς‰‘%΅jω‹‰Χζuv ˆχˆCJψxŒεnZΣ5ωP’‹΄΄Žέ•s)”δρ0ΏŽΈΫu§ψυR*ηGΠΎ”ζ9GGΚ_,A‰Τ˜ξ8bqR'Ϊ]'ΪΙ%΅bωhΎΞY‰6]’ωώ>όk£^ ζόEΨ›oR){ˆ«Εί%zqLΊΟφ%­η@A)Ώ±q€βKM?ΒγkΌ~š‘ιn#fqšέ,ω!rχά6μ!%Ό«Ή»ΟJ§3ݐœŽ·ˆz[Βϋρ8±„—Ώ ‚Sڎ?φq‘ZΏ€ŽGΎ–lφc©Θ’HλωQΐΎ€υˆ#ˆ#ε7– Ž`ς=H 5H €HM80{ν΅³„· O‹mΈΈΟοOΌ”βϊ?šΧu<ώHqΧw)ß­9‰Ώ―δ:4wβάˆ#ˆ#ˆ#£ΒΏN‘ͺ2½ςx±#3έy9ψlσU'¦ΫΩFqqqR›`D·^Δ'<ϋγs’ͺ­©Ώ1™ψ”X™¨œΚ₯δŸζυŸ"ζ$κtpuά&fjrβ'ž5rͺ\φlΎo¦Ξ^A|@ό5±έjs•έΟxvΚγ FΎo*ΐώŒ¬_X`DΤΰi³³9<-‚ΡQšΎΔΪ£“ˆ79@΄4ΥiM0β‚s%jŽΠν4Q½wi’»•n“T0Ϊ”˜Rά΅?ΔpβF^nj¨Με:*W›)ΘEΐͺλψLšpΠͺΝ_/¦—Σ™?ž˜>œn_™ΧJμ ~aΔΔΔHMμƒέκ›λ₯βοΖςE0:˜ο77u;ψώσΔαβ9σ‹ŒΜ―»Ωbωq"Νε@‘¨bλv) έFœ+ώ~–ƒ‘ šΫΕΆLm˜78¨|)Φο‘‚ΡΓβ±ΐύ!žαΐ”XΎŽ1Κ΅NLθς™˜Bo[ΔσΝ/§ω±“ωμώυ”(JχΏξ0ρk Α Ž Ž Ž@jŒB‚Q_ρ+`=ߟYˆ`ΤOΒ£Ϋ"½ηF¦{7'•kαŒΞ FέM·lΐσŒξξέώE rμŸω•w“3λK<°άόϊό–h-‚ΝXρ‹νpβ/\ˆ ‚@AAΤ Ϋψ•Q'ό_ FΧχπύ\βΧ€`ΤX}Ι[σcΥ9έΖζ—J^ο Υm|§ΈώΫ;B0:‰―sWζkΡ[9Uγ_;ςz¦Ή«θ6>@Ό¦+ξw›_UωοNܝά610nW$ΊγήCS>ΕXƒNόλΟ£šζWο§ω|φ6Α^Όoy]ΣM=Α Ž Ž Ž@jβŒώ―ο&ΈJ π{^ π F΅ΉΛ¬(ςΘ–ΑˆοƒΏ@Sωο)όkΰ%ώε4ΐo’FζKx:Y”PΐΏη™‘b γlΎ¦ΜύΌ|1ΐo"ρ‘#ξ‡ bω;pΟβΏΝ`ΐχΝυρΟε1@Πp/Ώ•ƒθ[¦ ›ƒQU>V‰Χ)‚ΰb πˆ#ˆ#ˆ#= ™_/5D7³ @ΥJqN…—ŠΈ:βώHβφ τY!ΔΔΔH 9Αλς`·ElτCJq_β`ψJΆqšH—|9‘ΩP>§+ωνœ·qqqR@qˆ9";!Awβ€ΤR©€Τ@j#ώσdΚE]²"ΌIENDB`‚xarray-2025.09.0/doc/_static/view-docs.png000066400000000000000000020261231505620616400201530ustar00rootroot00000000000000‰PNG  IHDR–ߍšΕ'sBITΫαOΰtEXtSoftwaregnome-screenshotοΏ>-tEXtCreation TimeThu 09 Mar 2023 11:58:50 AM ESTΟηXz+­IDATΑAEY%Eχu^dT Œ˜ύΚψw;fώ?οχžvίž5cέA A€WQ#απa[FE $\ϋηϊΛo±Ώ<ϊύΞΏά—„ 6ν/ υ}χΑ©τς"Rs]šœ '_'ΐL!©‘zψG잨„qβ¦Rχ•„dΦλuž`σθΞ7|*γN?vυGΑ!ͺZyώΦη}/ΫϜw|ΤnΤ-DΖ 8FόϋΒοWοξ}o_?“Β4gσ/¬™΅6vIάγ’Β»Eώσηί;Έ){₯χύ3οΌ έ°ΎΟ ρ9χΕΗηάαΗ'~¨kcο_j5˝οόϋηοIg'₯έ‹‡giρ]“7ϋΎΏœ>AR²ογbΘEτHΐΉ”0RΟwZ:ίΏη‘ή#;~w-`F_Ό «ω»nJσ»œΉΒƒ‚HT'I$ςιαίξΩ$ρXœ‚:…»šœ/m7ζΙΕ]Β{:?吿\χJωN”a`μήΦύΧϋ:žώŽV¦q”L 7ψωΑ½“ΏρGχ>Ιχ,<ΙΩό š-ŒΗΛ"ϛɝςw άόνώΉγχOΘυHύ>b*£Ξο,žn࿝kΙGQx©Κ–£ΈΟxϊΉd±ο΅ n]R―YμϋΞ±Αuρ±#)»ΰψiΙΐŠ#αŽέ3²’ŽΪ;­ƒμ}νυΜN/ΉΦryp€ΜΥχݟ ƒεEΏί”&''~L κ‰@q Ρ/?cχ³œeαρ8pόΡ/w–'¬ρΑ}›ϋπaΘεώE―ΥΈΖƒ\ώsΨχί_Αxσw^όΛmQ΄@ρΑΦΎή―ϋύwŸοΟδžΑMš7οΠFΖKέy|"§~ίσ}ά΄χߍw^κ˜qά·£±§μ«sΌ?‹/ƒ9oΟ Οk€—ΏεΥ»Y·N οϋ1! 
t‹―(FΟMH#dE(²#HΖ L…b0tp "JB‚Έ΄E![VgjψvΔΩ‡T„Ιπ -`9GΨ€ΕM θ.wt₯FΡ‚ϋͺuŒ£ΛN 3,lJŒŽ‚P‘ˆƒA8φIΘPρ@Θ£81†ZDβΒŸD`±:'Θμ:#@&'œ©`¦ŸDΓe―`rBAwΣ;Π°η]Αfr`ΐΊA&™Œ±w”`|ΤΪμQA„˜Τ‚Α‘L² |#p³/Γ”$ ²… ^»:&6!\d,FΊ=), ’SΪ׎GQζη&λZ‚yœ_0β‚Qυ#’FPw*‚aYˆt Rΰ 8ε$J1₯PN;hΔ…ƒΒω΅τΧ£†βgQrΥ ͺ+%ΈcLLτ³Σΰα:Iθƒ=Ό #)f’¦wγŽ&)Η‘,ΙΕ rΰβΥ³8@†Δ‘Υ³] v!§€Τ‚Ρΰ 3ΌρD*-Lΐ4ϊΏΈ`ξ…ΛΗ'ϋΨ[bE"σu¬khΓπžP_³*ΰλnΡlKQ.»{Nr-ŽΉ¦‘TE₯~"W”]}Ά€3€ ¨ˆaQζ……νn{Cρ(Kškˆ#λˆ,0Ν΄3ξ:ΜSπL ~₯Aψp;Μ‰Lŏ~΅σD’7ΚαbΎˆ0ˆΌ·§;‰ƒ“ΊΑ‘εhΧ ΐA!V„&YΖ=’!Š)δ€ΛtH θ Vχ Πu«ƒOYQΓ‰ ‚θΖ.©,œϋsGΗG'ώΠΓDUΙ'HΈθ ².TΰiP£’;―6:9> c¦ 8"ΰ©©QGƒπ΄δ¦`ŽΓg£C3.ŒΟŒκ π‡b9N”tΖΐ’Πθ!ΕSPšhŠ€)λκƒ-πP&!)CΌλ|ΓΈXή)πϋτS«ΰθ“υ‹ š‚†T ύς†ζ%¨Θ½ZŒθ†=qάΈKoP&b &ΫPΣθ!zh2xΘ‰‚m5†Ο**‰Μ†PV™’έ\Ζπ$©YšŒYϊΓ‘ Œ` € %Λ†rΊH¦z`ΐ HH„g‚HvΦΑΥκEδꞜIΰ©:ϋMgΗ z@η£―,Ž‚.M½ΦAΔκΜB8I‘ͺœoEMUά}ο R$JB.†a…‘θ +AΕ~’4pήƟξR‘‹½ͺ&tν | έ‰.#΄lžL€* NΣd|‘*š=8α.<’΅²\$Φ ]*wc„ —<Χμ@^Αό¨Ίδ?}T†idK BII~‘—RF„Πnί(σR7ιψΎ.*FΊ“ΰPŒξ2Š(qΆaYH RΒ₯ZΪNŒB%N&Ψπ='oXŠžμ%¦ZD‚VPC BB($…£ΐ•q‚Z_20 /―;Έ£XΦΰΪ±0.οή\©aQzΆͺ*@ΰžŒ.JAΔΑζΟ šŠ0Ϋ1@Tμρ‰‡vœΟΊ’$*ˆAE‚Η΅Χ—(rΔa@±„ T­v€¨ˆσ*€AMΔ8D°΄κ·Δ£«:ό$FšΖ‚f ©Ρ@8ˁ† dg˜0¬Τ  t€Kβ™ ƒ-@Uιο/ώˆ$Τ δ`\V|‚€ξu‚?IBήΠ΄;Πτ^m¬&ΑW6!ξAp‡ηp9ΛΊs‰6IΔi4LOc΄Α+Y­$­‚ `Πςδˆ Θ]Ηο­γ€AΎ¨’0<ŽX₯?@€σ 0"|U‚,εWUJ1υ{έW ˆΖΊ†šuαaύ·’ϋβhμHΰ‚<ΘΞ(Œ€0yr%Dˆ…™Η‰u&Φ…±Rƒ Exάέω"¨š*ΐΖΠS θ$ΆNJsω@Απϊ ’ΐΌ?;βΦ’1€::(η…Ω}‚ƒ@ Ά;K * τΐA°Έ‘pbΧb(Š]oEψ@ŠͺA‘*IT9½KžeιΙ b@ΌόΠhν89~0Œ"L0€bΑγΞd0!ΑOŒΈ"ΐ&Π—d\b]œ­ίψ.€χθ@β"XF?Η'P,AX£ηFχωM^=ͺD^|_γυ|»]ω{A7¦μšΕ`—Όo_Ιƒ ΈίΎYhήv•-§ΰγ©KίΝG£nWƒίυ=Οwνφζ6>ΪΖΤώΞβVΠ•€_[=APΥdŒtŽ E£oښ¦―ί_Gweγ~ΤΒy*υ·~B6[—ωΊ:9‰m3ΑGΎvd‹ύϊσ²Q"Έ£ϋεν ω³ΗЦp_Œ½^«ί―œ½›vYQ°S‚'ϊ{ΏΧ“!ΐΫ]9ξx„Εzξ{€»’Σ1}Ζ\hδΛΧψφ½³CΚπζΕοΌD‘{‹—‘­BΚφ“ΓΞKF‘Β{ΐZ«θ>/AFn^ivΏcλ#’ϊ8v§ΔΫ‚kQIPυv'οAζ&œg{ Ίϊ~l~Λ‘tEΏžΐ°ιΣ·έ^_“κηέa9οψ½ι}§μ}ϋSeASsAȎΟξ‹ΰ^ls Ώ@Ϊo,χΛ9[l~ϊύPo˜ΖΖού’κ7xΆA"0ΎoΔήΦ’Ύ}Ώ?οzD/― œΑœμφρκΫ=οgΏΈ’'Γ j=oeXΒκ9–Ϋμš‹μΨν4―‘δ权۠Η€ω΅ί6τpn(P΄Ž·Αjΰυ)}V4.ŽΌΟοί·– έWΏ―θ‘££jPϊή—Εx η„½Zp°uΑΰΌΧˆΫ;ζεΕώžO^±Gt_}-o~ιάΟeyα'άbψΎό;Ώ’”$ΊU[rblί£ CTXλ/}p’,Ώ9Q ρ°όe3Οo -ЉpΪ}ρ[{‰ΗΗ±οο% (ύ)ΐΎιμxgŸΰ= yWΆ#„;ί’\³ΟŠGP·ω|ήnίυE qΨlΘyπpˆ\ip ϊ%BMDuCRΎε)Ё8₯δ)xCΠεΨyu€*ΗΩϋ·~@NͺcΧΥ<:‚“Nο«—ί‘79|οnŠ'Ρ/χ/ΐ“Η9•Žω₯άΈ„ϋ₯Χτ'ΨEΣΘ½“NL*(Ιξψ~ΈtAεRΦΖ α.€Aϊ΅.Β_{Ζ)9.Xι"―φr_77œ„_ „Ώ?§ο;'O9ϋλŸ@‚Ό“γΟϊ΄Sδ9Kϊ½…x EπœP·΅­ο}οL΅ Ε‹·3οά&q‰„l΄Ng*i(“ †Ε…\œζ-άΌq ‘ε£#aμ;QAΆύ œ— xuέ@§¨~πxε!„‚Χς{1ݞαAΆ»Ώ0²‡vd^~Όϋ9~α—A¨φnXΉξβ@™ΐŒ‘x(wqέ“ ²$υφS+IΠp%ϋ,~Ÿ|ŒCWΏΤuΎ]P‚ρέ)5Ψ+.Ύ}Ov‘+S•τit·γΨ9Iyάγ»&y'_Ρ:w©p:‚l@ΗΈξ 00ΰΗ΅ΣέuK€θΕΖ‡υ½}ξοϋχ>ϊG09=ΑŽ,υΣε{‰\™μg~οϋϋω(yŒ³+ηυqίο=ϋ΅iέ—ίVuxΩb³Rώ\ν+$©λύ~zί½ctW«ί―>zώωεγφ™ ΑυExuμΏά/Ώƒ+€ο΅`ΎnΧ1,oέΡ±"Ψ·Ώρλδσ‡ΎΏόϊσΟoιΫ…ˆ»ŸŸŸωσσσ}™ήέωIώχ©ΒλΝqέχϊύ·Χοφϋψοοώn_Ητ…τΩΩ—vΗΖGΰσήΟOόύΪMͺ‹;v χt<φξη~ ΉιήΩώσσήG3„j­}“;Ξ€όΊΖ¨C•Υ‹ƒŸoq›Α^ϋύώχOςΑ“ΧwΏ‰MΩςγο±ε'nτ'Ύ9ξλUκ4p}σ:ύv_{ωS΅€σδ˜νΉ‘}t\9ϊ‰έv¬swηΟχˆγϋϟŸσλΫί€sώgςΞ #bΩWΎύ}ούώuvƒCπυ qs§ –¦tŸ―ξφ¬ψ^ΧόΪ7κζ―8ΏΧ΅n“3‰έ―ςqΡH.ΉO°Ή%ΐΕb‰ίΗ゚ϋήσo³~8:†‹vφ©ΉωΦςη'ητ}7₯ΏξY_~H<Ω‘ήόΊϋηόηŸώσϏΗβ^_γΟ½cβ¬ΥΘφw½ίΏώΏώξί·Ώ{4ς„θ…π™χ^‡Ϋθω£>·Ÿ όΒγœ>8S ‚&Ψ5Aύρ~Ηΐ…‹ώχέΈ_Εο‡άίξζ_{6οζnεHŠ=ό\’τΑϋφ}‡ί ^;C\ωΏχύσηξ?џη½†P&ιΉ?{ΣqCzοχίίν5·6‘|]_ΧC恖Žw\ΏΧκζντώŽέΣ9œšχψΎ'^Δrη}J‹@”γΎoΏΔ{ΰOŸγοοήΟς7ήφ‡“σAqtξδ§{σEuzWΟߟ―3~―ξiνˍΑ;:nήlώήο{χΕ-Πξu ρGvbpuΌwςέυ㿏#MΦ»ύŸΦοχηωM€Π·ΗEΠ!ύΤυώυŠ/ξp œ·ΨχΙερκΫo܍9ώv– ΄Ύί¨έvΣ‚t_΅³λΰ§“9ƒN6Τ»ϋ>9α€υΖϊψΎωžo?χχωΒψ²~ΎγΪ»Ηη]χχΑΖ…Ÿ xyή5=ΛΊϋύφξ3¦@ ͺY–ZNυw`9Χ‡’’@'ι€;½ύ;ΌΟ}Ή–mŽ9Χ¬˜½²mκ‘3Άφ\ηJ’Ϋ1™ξ\g΅qΜΩɞ·³£K—ν^=Q9v§†ΩΜΞk{,i–ΝυΈΉΞΖP]½w»ιΥ>ά²tsέΕ4Y•½ΪΖ#lΟڍyάΩέ½jvtμΆέ$•Ν4¦½ŒλΚΩζθ‰59ΧΫλt[SΩMv΅)2S™^i“ΞΥέq_WξΨ»ΧγτLŸ―Vy·Ι™YΥ;i\έkχΔmΊδ;‡‡);+8s„Λ±mΧv!ΙIΆΉŽφŠΙυΪm›ξάMc$Η}Nš&φ–Άišsš\ΧdrŸ© du{š3fOΊΧΜ\9;/g¦$ι8³ϋ°Χφ’4»9»ͺWΫlsη±CMf6ΊϋΠ8fW’Σ“4IvζdΛάqζΪ”΄έ6»—š19ξ½τκΨ΄Ωt'“d—Φ€y΄}μ6£ΥνL;™φ­dτvέ+mr¨-Ω€ΑΖIήι}œΆΗ需:sΨ­VΉvΖ6έhΜxΘuη₯½£bη’λ:{κΚθ΄±]νLφ‘tF―hΤτΔύϊHǜx-6—kοΎ~ΆMuMΦ£m[šΛct_»ηψψτν/Ώόβη_όφΓ·/»@ (€P’ (@@K 
%%@oΧόΰ;φ“τ>ϋφέ•™+·Vuτ¨©5―ΫΧϋε<ίΏώςλŸκ«ί~σιε>%@” %P€(€T   €€ @ πέwoϊωΏψ³?ϊόG?ϊώμ#λΝΫλz•nW[š$—ƒžνžσϊυϋΏψΥοώωΧΏϋψτΪΠ” ”(@€€€·λσ~ο_ώδσ?ύ“?xϋΩ›ΉΟ²ΧDt'Ή:©•ΜhΫ½uοσϊzυίόμόΥϊΩΟώξ›χ(@¨PB   R T(PP@*B@σωη?ώ·ξίόΗπος§žΈΞιNΆRW$U=;[¦csίχϋo?όβΛ/ρλί~xzΩ-@ €J€€  • „ο}η͟αηϊΗόƒοοqjΫ}3ΧΙΥR–Κ£orΡ=ΐlΌžššξΥ΄£•3’yΘ½}u?¦iάsm«WλzMφ‘ΩC›T’Œ€φ²μ‹έm#Εζς°JšΆν½χΕγ’Λksbτž­€“ŠJv›ΣIN*2τ\y3™£mw[ιζZέiμΜ•ζμ½Jfv³«ΙΘi«i“•s£iΝ•Έ’Ωqš΄^²ΡTvΣΝΔυ(νf3Is™N ݞγΘ›i€»ͺ“¦'₯NsΊ{Ror;Ε4¬ι•υ6M%©λ€›ζ‚"‘™νa²q'xt§7»½zEe›F―ιτ΅χAmrΛJΟ4i²š‘\£zΒV€J›t™²mΫ$fξΎΡjy$}tνbΘλZΝ΅}Σ2ν˜ΧΘδκΰμyϋ³acΝFfO‹Μ-;‘nzΔΞD†ι£½ZS―=i£ΤdΖdο„!ʞMΫ7—wsθ¦Muœ™6£koSσHLΟN4½fήΞHz²­ZI= *mŠK&ΣφυœΦ5g³›Κ Ήχ\!G6+¦½6W“71:§ΫŽέά/‘lٌLδqφΜiH’ΖŽΤƒžα£k7Υθ¨JΥάΙΎήΓt«{­}ΈXšΩ™Έ²Φa¦ΡT:Qš™ΚkSIγ$χ•G]=Μ^Ι½Άι5μξΓλ#ξ]vˆξ0M6ΚLΪκ†V+Ӵ݈™LΫ» ζZRi²Νt†Ήͺ}νΚeχ©©G©ftεeς6Σ:ΝrεΜΜΎΚfΨκΡ ΧΛ$Q{’νLw—I6SΆ»Ν€ΦLtRΣ“˜±i·ΫvβzD½$—ž΄’’6sξζ’)ΊyŒ{GσHš΄IAt‰Ω΄ν½χ7Ÿ>ύβ‹ίόύ?ϊωε>»―ηΌάχ‡oŸ__χ§ς“ο½{«Χάi±Fi;-SΩΣ§ΧΧ/ΎψκoωλχŸžξ³mξsžž_ŸŸ_βόΡηŸπ‘Μcz™³KΓFΩνΆι}ξ―Ύώζώι7ΏψΝW――χΩx½ΟΛλύριεμωӟ~ώέΉδ*a*­φJ’LΪξφεεευύ/υΏύό—τττtΞ<Ώ<ύϋίώw_χ?§ύρΆΣ+*εt³Ε5§‰fχ>ηλοώ«_ύέΏ{y½Ο.ΰ>η<κΣλύ―~ϊΗ?ώяId$DšΆ0MΡ4³FΘ*pj©™ΞDœLVξΈJf£I4U*·&I!!’Γ$\ۊ)ν”Y±² G²vZ™IΪf `Β΄­Ρh++$£Ξ^™ mviχd«V%†šV·#9m‘fΥ4’&͚–μ΄’šb;Σ΄ͺ$ ›6Fš΄:-C*Ν,„¦œXlΗ’6mL]Ι€Σ΅=¦Mu1Mn5.‰Q»Ρ²³k²$Ι.-ŒˆmSΪ)Υ$’…hfc*Dι]$Σ’’iDΣՌq’i‰0Ά™–‚ˆŽ–ΩLU¬lIΝd₯NrsI24²‘a9‘L#“ӎ$©6<ΘΖ6»“HjφI₯›XΧ1© ™" #©Υp΅Ckj*™Z»±Ή²Ξέ(ΚiΫN6©ιJ₯œΓ8©4₯iIg%:ΆI:KZێΈ2ΦΆFHͺa’΄›€™j’©H΄Zθ΄v¬h¬FIFG€έΊy[A)¦iF©4“1¨XνI›hBΪδ΄a±Ίν0­‚ŒκξD'›€Β =νVL¦TIR‘jSƒM)Α bk’ 4ΜI …Ά)KΝtF₯RG’—Ω€‚tγͺ,QI˜$&‘M\3KS¬œNŽŒΨΞΩΜLz€d]ν\έL˜6QBRm―4ES!I’«'ν#ιFIL»it;ΫΦΎžΧ―ήσΛί|ωιιhϋzŸŸžώρ‹ίόπϋίύμρƒ+ο*i“ΪΘ.vΟή?½όγ_~σρΫϋ,°Ϋ§—Χ_}υυw?»~πέ·oi¦Œ HZ=ΊέΩ§ηΧ_ξ›_ύφλ§ηW@Ϋ—ϋόώΓ·ψΟ_ώθ‡ίϋμϋoηρ ΪjͺνmɘȢηΎ?|όπίη_ύόΏόψιΈοϋ›oΎω›ŸύνώψΗ?ύ“Ÿ<ΎσΓFΩ6Ώ€³ύφωυ‹ίώξϋίωμ{ίywΝ»L’m₯IkΆD;Iι’iΟ¦©¬Ά{*›d«Υf›L%DΡ¦•BiM%©΄+5’R±“œ½Ϊ©t$εΡ€m5"—£j2m“Σn«M‘ΥΩ“;©&-MMZΆ΄¬„΄›#fL΅f§Ν^R‰€‰ΖΞ‰„”΅Ž6·Μ&—D©ΘΠ&±Ά₯ɐΆι–mRӌe;ZΩ‘MΣΪdH¬ΣΣEͺQΠT"΄ΚΚ\FgΡΪ;©€I\»qd$j•€¦MyΣ]Š­l«=YΙ•«΅Π™¦ +•κΨv$ˆ+4LΊΚμfΫI2MT‚΄tͺlGJ»•6šζ^H"Zf’FC«`*RΪ¦IBΪΆλκ΄ aZr7έτΚ€Ρ$29'Δ΄΄’F“)‘ “j—‹I ²M7²!Υ6š(ΛΆx€tΣm‰lι4:›dšΚ44:•ΐ&“ΆurJ]βjJU„ŒŠθZ½"²’k»U™JEKν₯Ψ€•ͺ”6‘ΖΔ$aw»[S₯šl„‘A[Yi2—ΩΥΆ{͐©i’Ά7#a έ#mTdΖτΌ6!€šΆ₯Žέτ’₯M5šiͺR©F#“iΣ&"Μv³₯S‰f“θŒ[Z•’Ωμ6-ΕΚ}’d“I8€.Πœt¦U+Ν” £ΥέΩNε$W:"Σ€»ΣL«•€Q i'Ž<²ͺšŠ™dmΊxlJb$MXθφεωεΓ‡ο?=Ξφλχ?Ύπ£ο½ϋμρ.I¦{w3k œ}ωψαΣΧο?³€OΟ/_ψψιιΫwίy7WwUh-ˆΜ•άΫ§η§o>~ϊψτpŸσΥϋO?|ϊξ»ο~φζΝLfmΗTtgοΎΎ>½πώoώ===_}ύϋψω/ήψτξνgn¨HΘΔͺ!έέηΧηχ?½φψψνΛ7>>?=}φξmζΪ-;@šh‹H"κHGJ¨4WqFΥ6ͺ­6DΒ¦=JΣθœ=Ρ‹P¦˜Fw2ΩrDŒΪΝ Σ] ‘&'=ν#†ΦFH'jΠΙn²­Γ‰,έͺTSŠΝ " Υ4”JLΆ»Y“ι,›Vιt½:Ηl“–Άg˜­ΩfšΆmSM›ΪZ­Ά‚ΜfΩΆRBη>s‘ŒθžV6SΘ&c‘Z©K©Ρ$%-9\‘6ΪΨδHD+ΙΈ*i§]= e)M2i.ν*vˆ¬ž%˜T»Cεt\™vT+[E£AΥεdΪš€A›D¨ͺ«½¬–¬΄iK§IT΅6"“ŽΨlΫΩΤ€Jš"€v[;Sa²••“Ιάvs’₯=uš2ΩXMϊPΝj#r•Ϊ μθ‘۞dkΪξ‰mΩ&‘$ !@t„ϋœ΄q Ω¨mš5$ζih{ΒTΧ¦MΫF›Ah[V—΄‰J’ι©UFJwΪΜH€‘h»kM›0f5ͺTκ‚c4@5m₯r$1ͺΠqL™UŒŒΗv§’Ά]m.©])f&W₯›&”©΄έ YΉ»W—WeFV–‹΅2›š6E’ŠDˆΨBebΥ’r_ι–¦MI75΄­•2Ιhšn:G!446­du·…™vf­rΙ8Ή“M£[[M’NVkiV« Igͺ£Π(=[w,MbEMμtΨΣΧ—η§§η³ΈOŸžΏ=―·wMR ‘ξ½__>~ϊτzΆΆž_οoŸŸo‰Χœ«t4:WϊΌΟOOΟΟ/[ευμǏίώΑ_ίΌϋ,I«Ÿ 8K$‹v&ŸΘ†OΨ6ySWε΅‘l“Έ„΅[»υύύύύ_ρŸωZΰοίΏρυŸυοωΏύցδ&νrΒΦΏώώϋηΟ+ΰΝΏώωχί»νΛΜ#ΔdœνvΛ‘ΜΊlQ7±XlRΗν[“ ϊ½4k,dΛΣX–4.ω’eΆŽy“ν“%l)ΧXΌmσνε³`±I›f­MΞ·ά.Σuaωφ“ύFΖ»-"“|ςf]N&]"‰icΫ·-ΩV1™―ΒlŽ˜ΨΎ[Ÿ²άvŒn*5š!§…ΘΡτ>Ibά’ε€nΆ Ω›-ζψ"b‹-*€%{{c Ή;ΎE²ΈΔ]ΎlΆ@š…e£[β8λlL‹HB:]2ωš,Χ;ο.‘b,v—ΈζΫΆNLbΆΛΘbg‰}s=Ή˜ENΒΞz"ZΫ²œ»hfϋpωlέͺŸ½’Ό€?οv#ζ³-ϊύlΝv“n—ή©LvΫz·ψϊnI’λbpηN˜Οήh.;οΝΟ%g6*mEΒκiNξ.rΛψόiΎ&s}?ώξ]ο$™›O~][ε8o!3 “]ά\λu&άVKήew1πy¬ΉI–―ςw_ΉΙΝKX1~³οϊ±±˜+IF·ΉXΤ±μFšXκ–sχύδώFΒΒ>rϋΚV[ή¬ΦήΉϋξΞΫjϋ’ιR&ωΆλmNΟ|_²N–,Iά|bέ–ΧՍγϊΥ²ή’%»$ŽΠX²fOζ’/™όςΜή]ς%†MΏxέ€Ι²lρ³Ι‡Ρ»­{ύȌ})Χΐ6ιύ|ϋ{K–dn‹·|Ή$‘ύl‘·­ξάJ“ά·-Όm˜mΛ’K\$[<φ’—Οvν·.ύΝχE$υ{φ€I4xK3l™ϋd»Ι 
Kj&O$²dάz:·œε«lζw—νnI2rTήM|yΩΧΕάr2TκΞ()CΆ-ηlΧ;w§YΌo[φv$[dΉͺ.ΞΊΛK|χρι²],$θu›‹ε–4ΩWΙ­·ε»“šΚ.’γ§ΙthΦ»=I,ιOυֈŸ,χσp˞κtύνοoί+ΐϋ»Ώυ›δΫχυn·ά>kkεΕΊχ έϋί|έύT­O.ωMLΧθοoηάεvΔχ«ύΫmΧ­kχϋ  ϊϋ½©δ“[ΜOR]ξΉΏoο=hΫχς–gΛΟΟE†˜ύšΦ–.ζΞΕζ2YΎΣˆΣ₯s}η7&_ˆΪ=’gΛϊ}7C\r—;3‘ΚEΨ²œ»θ?Cφ%έ°Ώχ›._r§Ι’sω>"k^Ο}Ry…·ž&Ξtίύφ,Ωl]γŸΖ’ζΛΞfΝΟΨVΙK߀o·f,Ή`Λ&Ο–Ώ‹s±eΝϊqωž{t|½›u΅ίŸί-[©{?΅]cχ5·“ΈL»Ύάz™Έδς}ϋϊ΅mΩKζŸZβ$Lε·WΙ£=vΧ/»±λ΄ g¨Λv]Rιe[ˆ΄ιu†\n—}ύή—fcξϊυΦkQ{kό“}ϊrlχVrΙϊl'ΒΌ™n’έEŒ₯ΉθY,_τll_ήύέύœ‹n#5ΙN₯»»0Hœ»Oœ,“%SΛν„oύΩη³Ψ6/―δwωη—ΖΕ=^-‘χΫθ'“·,©Ϊ»H–I–Ϋξ{f]γ‘δg±˚›ΛΦϊό|·HJeΏύα)!Αδ-ίF³f%ωnοz'Ο=χ\βŽo:ο$»ΘΟΘ³ ύ–όZoί½|Λ|»,«–ζӟ4ΉΛ‘~όnYwy‘σYy½ί^Βo/–μ»Λz΅hvΩ²JxΩ,£ίΆE’άnβϊ©Λ­_ιUb1ϋUωμΛ;Όn#ωΉc|Υe[7۝K:±|4†;rώ4ζήKuί©­²eˆ}žKξ2L–Λ]ξΆIb"KΊΪχ‰oR’AxVΥε’ŸΛoΈ\γ·Θ{™Λ}ΛIٌήΩλY~~d76ΆkΉΫ6³°ΦΝΞۚ»S›m„ΕΦξυζ»`=+DrύΦk•μέΆ±l*ά·Ήf6Λ¦ΫΚβψ~―_ΩEΊ5·&/ϋ͚ΐθͺο{’μΓΓv›u.ιŒ$’dΡMέrίeAY,½k}•AN^Ο+• ΒΩOς&κžK“ξm7‰ελf‘t—`λφΞχd l’3‰|—Ζ#Cτ²y£DοεάΙΕμέ³|‹sYf•ζR–Y½d–€·²ωq\ΟΆH¦Θr›0{ςC˜“ψќμξk:’‹6.7 έΙΔΌ™ΛYΪn ‰$GfΫέ’Μe‘.{{„³™ΞΟ9",]’ζ[“ξΤ·έ§³έL$β\`†¬Ω΅ί—·Τ²₯"‰$Y&Žœnž6όD.»Ϊ«o͊D-..c–εrwR•Ϊ²€‘W»ΦmΆ¬Ό6»›…Ψ‰Δ*ΟeΎj±ƒ<η™Lˆc]‚l[Τ~* dΙ%ωςD8]6jqaΊlί՝7™Ϋ™'K4)ΙVJl—9‰[ξέ2ΙqΜzξV˜5"ŸLNφ-fΙΒUήυϋ-„ ‘ϋYP±…μ2έΈYm3ΑνFv‘\Ύ“·˜n§rΞΐŒœΘ–=ΙlYως»[}[²Δš—0‘@³EΜj»{[ 3ΆlY Ύ~υ„Ψ[^ž/YβΖΠύ¬EΔΆΙεD²Ο6Νε.‹LJ#š¨<Οu·Ν[,Y œ‘I2ήβIχζQΩ’•» l“ά©e΅›4."Š.Ή{Iη.ΕΆr–ύ|:όoΫul1ιr –9ΉY#Ή€kΧl»ξ’£Γ²K[½dd²ŸφΆ˜£ΩΚ³Œ]yr«ŒIΓΙ dΉξ²A²³ΣΞ‚ι°Ω ΗΫ€Kf#›%δ"$›²1‘,'kΔ2ί^κq»ˆ¬Y³]Ά»&$dΛςI˜vm―Ά–Λ溌pvΕg!ψδ»F6SYvmH€—eΩ’™•™!g‰H:™$!ΛΌ~)vv릲ΜdΘ†Κš{cψNšξ˜Ψρ4rI0s%If“»%OΟδ}^χΔς%Ύ6ω’HoΨ΄IΏfφβ%D»ΝφyΉpjλΨ–\’\"[yM7„ΉψΆΰ27l6»βςσ‰θ±1‰,βb!’ά^Ά1§VωΆ™³άf"’%·€χcΝβΉLqœlΫδΊ}Ϋl!™,ΙfR5‘ΘέšαΜΖκnln»uΆ=w–νL3IΪΦ]mΫ–lI]G||GΆΧΉdAψβ;›₯,ΙjKά’ΩmΩVΖΜx|.VΆέœγΝ’}-: sfΆ4ύ.Νή2Λίe/ΘMlλΌ#„ΙD\¨σωΤfΡ“uΩςΉœe‰ΟύF²±ΡΈ,²5«}I¬έvvgžH$|_œuΊLΌ’²ˆ]„$r°ιΘ–ž»άΙΛ–!–\2G„Ψ­›%ΖV·ΰ&ι2Yˆœ$σ# JŒD°Νu͖ɁγBΆtƒ$Ι²]ΓYΆm“LfgΡlcT~sgΧM"±ιv«­uΊi2bW²mΫ–‹\r–ΨΆ šaw‰ΐe·g[g0uΧ|ςπσ;μ"Ηΰ’$A ΐδΘ>ώ]Ψ))·φ*&e[–%_³ο&vzͺk \\ΪύηοΤΒ£˜μΩ#Ε.ΙΎo—Γνn‘`φm_όου''!~ϋˆ|»O“Κ›nΙ™Μ!Ήmιy•vΎEΔΥ]₯ΙEξΎny·fN抻Œ•Ζ…Ϋώ’% ·q•iτ}Kς]˜˜ΖMΦ†Δ[Ξ²ͺz›5’E‡γ—™ήφΨ”5Iώ»mν–,ώ²/ύΉ“@βmφ-7[ΆMω*ωg*α,Ζn¨/ΒEΈεόφζŠΤf³ςδm»ύc½W —_ΌΝfL—ήai3‹oΡλb—Ζuk1±ή.ΙbΆl·ό‘ΕζI~s‘uΌ‘,mblςΧό–_Υ–}ΉLζ ºﺌœΧoβl̈$Όu’δΧ±ΫδoχΪΘΆlnΏ›ΔˆΡεκ6ϋ–EHF—ξJξ—Ϋ–ΆYξ2©4q°5s’mQwg쨘3–ν­Ή[&ζΩΩ²„δΘ†Όν6«έλ-Ψηζw=Ύ)}λsΆ}Ιy1έ΄ή}Ωw oin»•Θ3΅Ώβ{s–%–θOI6Βκ’Θά’Λ2₯Ϋt"±dίλ9—ΠιvΩt$έ™&Ή»ΈMΆLv{mDBf6k’Ι™dοgoςYΊgΉg½e_²IΪμηφΜvœqϋ^/»δς&‹ζ“Ί€,K:Ήν}™'‘…\jι̐O{ξcHrqλ7έL–έr«ν[6ΛΠdκoοHθΆν%"•YΦϋ&ΩK³€7Μl­5©mλ–εvwΛYΙd9K’%Ν_n;›-re»4›]“g&1X“Έέ•Θoc=žl6Η’·ΧΌ€Ω"Ω₯»©ΦΆθ»T.5ŒlΗ"²0>‡―χ$’hΊη.?Άn wϋ,€ΕΤ>^Θζš4–Έ˜Ϋb—žΫφ.’Ω˜$€ ©|I^y“Ω;Ά ;ΝΠ2gΩώηΆY\ε 1@,s·± ט·Œ… OΎέ™ ωbΫϊ‘%]mΩw[ClΨ.ΧΕΖ&Βmˆ%™,έ2ΩΨ—/79Ξ’U67!‰c,dΩ"δ›?ΖΙΥ’ž³%ίI8r’¬m$b&YbHΰφδ-#!Q݁H.ƒ­4Ύ5_sYK€Y#3n&=ΛΔΫ –Ϋέ]³ω¦μςBF"ΆΤ-&n[nά%%­A–\kd4– }:)­Ϊ$ΩY" Λ6’s€ί”#­X–”Fή½ΉΪe9™ ΝpΣΏά²eΫΚ“H€ΨΨΊ!ΙvŒ1r‘HwK„”ΕKY7³νFΦ›YH4”­4yqΩ²“ΈΈ™±yσ‘±e>ν}οm{†1R·ε’„M$Λ²!Ιχε³rρ“Τ’Ε‹&NΊ½ΔeΆY’"7L$»Ε~Ν¬ϋ6»dH€Ώtν\“ZκzΉλ±†Θ–uAζΏε_,s&[²K"iΊ},.‘!Ν5!Λ “$dk ±„–5i’dgo«λθ&"–ΈΕΉu­Xf±eτO^\$؎uΎ“δ–TθέΨf, ύtηΊlΨp·ΩΠmΫ–Ό%˜ƒ,s‘“9!eΘwϊ}‘‰$«νd·np6¦4’D£•ΐΕm,Έω–ŒmΪ|n/Ών6”Œ.I.ŒΜ-“fΨάΧό³γ‘MηΕοیωlΫ]ΔgI–‰€›τD²Ψyν3œΤ… vϊςϊξ‚νVi³xIΖjΰ«/kζ–°mK_Œ^ϊRΩΘ\φŸ΄n»ξΛί’Ω–-ys6ϋλΒf“άωL“\"›,/Ή¬‰‘₯ΫφΉ_ΛY²±eΉΔ-c™Κ’k2–c°μ©eΛς–Ν–,O~όcΆu;žΘ¦š‘[μζ. 
¬Yo™ΝΦ·#σ8»-ιΫS 7ηv“pάθχwgoΞυœ™ΚΝωϊe‰Λ’ΰλ‘7Y·-xsϋ-ζΫ5™₯‘Τu%_,s#ΟVί8nΡ|KRI"cημκμ–$[%=γ·/l’ ζr£`·mΫ’IΊ-Ήη{Ϋ–]d±Μ²_v΅)ŸdοωΠωͺoΞmά2‘Λ:ι³m_ήf‹ΘqI“}v¦5vΣ­k’€^Ά%X\rg6Oξ&λ"ήΪ­’^Hμ2Ρ™H."M6YICnΑ,‹^³™'―鐷„‹?ΫΪέvη%UΫzΞ₯™ς%w‘™υ–&©;םƒ&ς½ΫήΦt@Lϊ}Ί\<;ά9ΩΎ9»ΝΆοόn9d»-ά«|šoχ]₯OΆ©4l‘ΉΝ˜όεΨ1™m_εŁΙ2·,IάXΘnϋY$’YΦ(·έŠΘF$’δΛώF$Y6|Ng˝Ύ5ΛΒΓ,ΙΜaiφΟβΝΦVίΣμ™Ϋ$OΎM/|nN‰$žLΊ¬φΣ•σY·ίu±I,²ε.’ΦρBLLβυϋwI/βζŠά‘d|ˍΙ5rKXš½ufΧΔαkΒ›ΕΗ¬Ϊήr·ΨΜL’­Š—8›e½₯7W‰[c±ίŠΫΨ’ΉΉfΛ’‰0“Ιr’]χμ_C–,ω¨˜TΆ«£ζ `Χ?YφŸΐ›loŽΨτΎΛƒM*ΐ¨mσq`ΛFo»Νc„@ˆY[‰Ε&Λ-oρe·…D"±dn{•ψλ4 X"‰IgŸΙΕH$!YFr"0’½±mf²œΟy_Δέβώ:d"sΙn&\šΩnΩn’©Ϋ2ι~Ϋ²α “¬±ΫnMΆ°Ϋ,—Όύnε‰,rd«uςŽYŸ 8ϋέυ<ΟΊϋύφφ”xˆνΨ±γΈIDΪ¦Q€RQ(¨ $ΞAό—p ”C$¨¨MiΣ6m’6σΰ!žϋ{ξ‹΅μ.{kG7Š$1a€ά)‰Πα@"‘`™h+IjvΆ—Ή7iš4*!†VKΛΨ‹4Ϋt’vC"bΣΆ₯‘$“Kf»ldh­Hš¦…΄© Mh‚΄Σ\iSMΪ ΄f³ΠFM“N*DO·΄"€ ™ζͺn–aFB½–€vO―M’B#šlνj˜€A"νH“D5‘AZM»±‘1Wo½w’f$štCB%1hΦ$b-i/ ΘΊΩδBeΝ6χYhˆtGgwΊFC§½ c'½GBB“dM«ͺ»­IGšΖΪΆCYŽV‘’@–S kWW0¨*ΛdȎ^6§Hgͺd2‘jƒ½l¬‘€+#TKβm2“1£V£©¬JAΣΒΥ„΄±jͺYN]DW³c„JΦ$T¦i£¦Φn£…¦ ¬K4[†‰‰&’ššΝΡ³έΣ+#BT2΄²›­4ΛƒINˆˆN“‰¨ΕdvTΫji½v³ŒiBΣ₯ €L’ͺhœWgeŠTΪLΉ•Κ-'&mΤLiμ ¦"κV«€²ΉΖ“6"ΦvΐvN―³W³Ω:I%&;Ά›4$™\\5khΊ1=€‚ΩΞΩ±B ,=zO&Χ”¦MΞΜ­ΝH)P–b›εš6iΡΪ ΅R!J\²ΊjœφŒkŒτH]9M« -‰ I;5±Z+“€ ΙΥm₯¦ιΆš+{ΝΆSc)MD$©Ξ•σ€m› `ŒζH[I:±qŒΝ:vΪΩLLNΪ΄±‰Ι­M©ΘΥ‘dRΉd’Άie«5»nrq“ΪĜ8’’&•ξ$³vEΒ$“³ˆmR³Bi)TΡ ’œvΜΊ’m»ƒΘξ΄Σ™žνΥΚ΄ͺD΅’Ι„\ šθ΄¨ΣΩν4„€N&™jM]ΝdOб¨€Ν ¦¦š6AΪΡέ(V*‰Μk"²Q΅‰˜’žng:ν>t―›‘›¬©kršJKI[bM t­$™D›‰Iβ:ΊνmΛ0ΧΉn4ͺ I ›Φ2I₯νΥ¨J›’\ΡX³Νn\9ΩΞΙ¬²£Ϊ8™Ξ\ΫiSˆΜ&r₯fdŒ¦+ΩV£cN<ΞLνΦ6I“;Ρ€Υ•™R%QΙΔ€“„ΪRM[£A3ΝjW&ΥέM§™•jΣ^ ;έiT’’6W…"Υ¨–I’@6tt΄ΝŠl4Ηfš„ΒΠ$™555Q₯cImVΚ–e4ͺΪθ6+‰ Q3STe M]uœš¦›£ΝήΗ‘•’ά7»±“l¦™t΅*’”ΘΘ$ΛΪv3Ν$g›θθ3Nn6”D€s5±χlMBaέΜiΪ4˜tτ>Σ-n£‘ζl¬+χ-γΔ¦Mh ’Π@΄D΅Ξlφ:»w[gνvm€Ά€ΆP9Νύ\g3¬š†Ινlμ<Ϊάj -I€Ά@LΡ:$ivNAS9έڐm“@Ϋ$@[$&½ΖD±ΜtΆuw΅iηξlΫm$m$ΠH@Θθd36"gcsΩ™γ€NΕQυh)«qΪJr₯=έΤμ&„”½ε2dRΡv·½Ζž¦pίH&™Ϋnβ>9χΡΫ\;ΧΆ{vΫkN3*[φžœs%I«€Ί_gχθΥmΫφΆ\zΝil²»Ϋ±Ι-i“&ngrΩNO…6k–vg{Υ,¦΄g’l›ξ¨‘H29,Ιeζšζ\sξ+›I’mνv½Ο­ΝU±±IFUΫΪM"MmJ­–ΜΩΫ¦Ηj³›nm§©&§F‡λΡΫ%Ωt›-mr27Ϋk˜\ΛΆ=2ΙHιΤΆΫjv―^Ϋ41bκνšKZΥ’ŽA›ZRι,ΫFMW…d˜Ω•J7Ϊt―TδzΨ<²“Ӱڝνζj¦Ik²§»Μ£hφhtšιI\;›Hr‰$ΙΥΣφ耕³Β6‡Θ\Ν΄g²Νιm2smœn»ΝΚφΊΊ+Ϋi―±΄;ANΞ^§;φj€šΆ©6GbΝΊll³›φJ&l˜“drm§§³θΤlnڏ¦Ρ–N{Ϋ&+ ‰ΙΓvrΉdo͜Νξ™[“imΞr—œΜ½uΖR‘dZψΥ―ύγ_ΎσOhσάkΏυ{_yιΥωΰg?ωι_ύθÏ+ TΜLΫ€ι<ι5ι!i¦τ™—ήzλϊφ  {ςπρGΎϋξ»?ϊι―~ώI·΄νγ/|ν«oόΞW>ϋθέ~χ»ρn+ΠΆ_ωέί~σλ―?ΫwρΓο}ο½' `Έ¦·ΈŽ»mQ‘™δμ9›³i΄Ν³/Ύύ₯/Ύυκσ_xφΡγτώΩ§ο束ώψηπώΓ'@Ή^ΖΏώϊ^yϊγΗσύ_ύͺI ΊχτpΫ„hK3‰4#zΫ½(ΰ™/ϋ_ώΡ‹?x΅μ>yψψƒχ~ρ“ονŸΩwΎσ³‡Ή=~λύΏϋύ―=υλΏωΣσΏώόϋΏωόΫΏχoΓόζ³~ό?ΣϋΏω“ί|ΐ3_gϊΝ?ΚΣΏώαwώδ?ΧΏΫSvΪ±Ωvd’]ΫέNͺ‡₯€Μν™7χ›ί~ερ3η½|Gϋγχή;I?Κ[o½υ­7ŸΎΞΗ?ψ³Ώψ«χ΄s=~ξΝoό“oΎμΙ/ώρ―τΞO><Šnτš[rΪ‰PΫ]I'1ΨVΫ»΅­Ži#iΊWšYifΪ«[z]•Υ+6M+νIGUyΠ‹1f6›œ±mΆ·“^7ƒ$RΆ“¬6›Κ&G£‰4h" MΕ΄έnIŒ6šI\.Υέ¦Ε‘KZνj#₯ml’[£‘ΦHGΣ­δφH9£SZJΟΙά[ΫΫΐ&‰‰Œ<šžτ$]NNν5&V[ΘNΜD[mΪtξθDDe3½2έλ˜μ¦½"ΙΚΉ'κ1§!s™{Ξ΅;Ia:jH=ΝِθΧύvΡi[i“φvΞ™μ¦GΘν$ih•H"³—N·UΉ€•Σ$‘&©†dΜΔvg%ͺΒ€'š:N€νnd:Σ^Σ½VVf:χӐ©t4β F€&‰±ΪΚ½M:Ν•™KΛќΞJf―!‰Π•Η&MλDΫ΄W¨&I‚ΤMδΪΆ½ws₯M+IbλτN‹ 3ΙΩΥM#XZ΄M’ν¦ ˆ\™ΜΩXΉ2·τ²W·*CΊ±sνφ ±"&Ή¦8Ρkο选KΪ--Ν5WΝVζ$[ i§ΧŒΞ•έΩ>š9ΛFΣΦι­’‘ΞΥI¦3kSιL»Ω“»D/«WšL·’5»β\ι=ΘUsB[…$Δ₯ΣέΆζ"Ζ½‘DiŸ$OΟLΠ©‰'"UC¦Zk§χϋvΣi#³S·k›5jΊWOn#i²ƒ6½ *Lδ΄tυ³φΦ\’Imςdj;χF:WG$“,Rj›M“Ϋͺ!€ˆ°ΧH¦Ί]’·¦« €U’$iSΩΪΆyϊ…Wήzγυ―½ρ…/>ΜηŸ~όψv]iΟύα³Οή|ν•7υΛώ㏾ϋΣOž$8ϋψ ―½ώφ—_ώγχž>ώβ'ο~tΏ&ζΊζJ’lgχʞ€Ι„‘IΫmΫtχΙ^·gžγK_|ΪέϋΓΓΓ'―½ςΦλ/πϋπ·Ώό䃇]Πφα>žύόk_zειw?}g?άσŒŒ@w?{r=σό ―ι…τύ_χγέgf7šc’”’IΩΡ΄ΪHϊΜ+ίόίϊΖ—^ϊβηž~φρuΣsξŸ~ϊ/?άwώ{Ώόθύ{hΫφ!Ο½ϊΚΛ_yn>ύi―'½PY[Ω^§se£Γ^)I›F’™$`χzκωΧΎόoύώ[hΟyψμ“σ―γkoΌψ_ώδOφδ“{δzκ΅oύ«?ϊƒίώκη>zιγύόώΟζΩίϊίφ žώΛώΤ€•Nž{ρυ·χΫΏχΉχρ„!ΐLIJU‘ltš& ‘-ν>ωμ³½³ό—^{κsσΡ»ούϊΧο~xΐφΡ³ΟΏτΦWήψκΛχ}η§ίyΓϋΓms%λΊ=~ρλ_Χ>ΗΣΌξo~υ›‡O3"ΣΜ–²΄6’Lfi‡F’išlφ SˊD6Y‘Ɋ”f©H“Άφ$ν¨½d4Χ¦š¬LDΥΥZg£“$mΣκΥH:έš$M”£š€.i* i %MY14¦F΄:m 
©Ζ±ΠάΪ6΄ΩŽή3]έtχΊe«Σφ²ΩήνμŽIb΄Υ¦{%sMΪΆ­šξθFc’°I9ΥΆ[Ϋ¨«TΕ΄)^@l5)Pj™4‘šF&¦νζ>›˜€Ψ΄ΝιiΥԝœ)I£4¬f€΅$’Υ;»r(JumJHh*G‘6$Σ$9‰Zi™βN₯’MGemΣ¨” Sν.Ϋ¨‰,݈LS82 SΪ6g―$S΄$ΩΡ΄I+†‘ي&ΐmΦ$₯)E“κNΣdΓ0€2iΆ&MΜ ;m[¦Iv+­i§]©αή¦έ͘$›F―΄Ϋ»RmjΔ€θ¦;˜™\=§«f;m"¦Ϋj·νš­«T₯©eΆ"ͺŠLŠΥ’JWBdZ΅Ϋ^4Iв•:ΆtvΊ‘& 윈ΝUΛκmP)Υ\™. ΘVB›J ΥΡΙH€Ιfzv™AW·m§iX’ΖSm ‰ ΡMd7™ζ&e›mM§­”’$ιl΅­mτJdΫTLGt¬’&&ΔV[CnuL**MŠš.Μs―ΌύφΫίϊκ«o~ξκ'ούμΏψεo>ώl―ΗΟΎπς«―~ρ₯WΏώόs/>3yςwυΞύ‘@ΫsςΑΗŸ=Ωγ³ϋ§<<<Θ%€hHd05ΆΣΚP€ΆmοχσπδTΟ“OυέύΏΨOξζφθ©η^xωΥΧΏόΖ+_}ρωŸžρƒΏώεGο?@Ϋϋ“σδμΡξύ<|ά}Κ$ €ΆOžάvWgοηα“ξSM’”’JΣ΄j[ΩFZΙγίό­oΎύϊ›?yηgίϋ럽σΑύφμ ―Ύώε7^!Oχ3χ'ν£$€Oœ5έzθAΎξη]ίϊΎ=Οښ%Λ–%ΛξφΨN›jCš I₯rDN(ψ/όŽ8Hq” ‘Šc25i’τΰ‘ΫΆdΛΦdiK²·Ό₯=}ΓZοss]Ϊ9ηρΩ:guΧυtξφIΪ©Tt˜±΄j QR•ΩmΫΉΞ“ύρΩέΧΏχ/_δl]—νΕΟΎτςΏτ«_Ώ~εθ·oΌύΟίΉσp·€Φœστdw6;Χ³uwΊίνζfξgaξΫUg$ΪΞ9ΧΩ©:ηΊoΡ€CUθL‹†$i νΊ?ΉσΛwίϋΝK·Ο]Ώzαζ•Ν[χΧuY€ΆλζΒ₯+—o_έ8ύμή[?yοΣ³“ν…lΠvΞυτττ³γύ<7ζγϋg§ΦuvI€ͺtdλTcT”&₯΅„΄4Ռ2š™!ν †9­2K2¦QM:Μ  M;“ ‰Y¦™"Mf!£C „$“YEš‘юdFh[ 1Η`ΝTvΤ’m₯’Tš€Uš–€HCdΦ€˜5J©DΊ²h§h‚¨ Ρˆj[“°tš£F;ˆΔ`*ˆY³ RI Σh£FuΔh¦6mšΞ ͺΥJ›ΤΠ΄₯23k$КΝ,‘€iHHΊV™kG’DΖLuΆ3ŒfUf4’³tŒJ'M΄1cΦΠ9FΜtͺbŒ3©”‘Ά @…€2$ΑšYI#] Œ&U™Q€ ©Πš Υ™ͺ₯C²v0“&e!LBDD%mkTΘi’ͺVŠ:ͺΝ JΝY™Υˆ€š‘0j’vΔ,ΥΙ(«±ˆR€*‘2ΫΞ‘DZΥFF:SC’0c™‘&RKjh9L‰ ͺ(‚d5KšcQUJ#†A͎YMƒ`&Ibκ¬ΦμLmfB»Ξ Υd¦Ξ‘BΥ”ΡD΅t”tR΄Γ¦kfI’Ii…AΣ©Z2"QΙh”¬™£I£Ν4f›&™R‰F£‘ƒΩTt0%fšν ΝXg—M£¨Ρ$A’0š1hhι ΥNHΡ9L©™N#mΩΤd!Ί¦νX’J$3Ϋ[Ο>κηn?{ižά}ﭟΏργ7ίwσË7Ÿ|ξΕ/½ςk/ήΊυμ‹ίZοίϋΑ‡w'€ΆHΪΆν<~χΝ7κψγ;㷟|όΡΗ'›ž ΄U#cΖh³–T!H-MPͺU¬»Ηw~τ/ΎFξelΟ]ΊqϋΩΟ}ι΅Wπ+/άxϊ…―œ=|tzv|χτΜmΫΆf΄΄mAΫΆ h[•Žv₯m4VλθLΦ€IΪ!ηŸ}ρφσλG?yύ‡?ό³Χ?ώέn{ρΖ³/ΌόΉΟoύΝχw[ ΄-˜m‘:«3Iš1“ΡšΡuv,Iu’QUP ϋGgόϋό?“_<ڝmΞ]yϊ΅θΏψo67ώΞΧ/]ς7_}κ_ΏχΑΩqΊξNήΑΏώώ•ΉύψέΎω«ί}J Z­€R-€€ƒ9¦™ΞA†Ž₯©…T€$#–½ώΡ£g―=uεκk—―|pχ“Ž$h{pιΚυ«—oμNοξ—ΏΈσ(·eσμδώGωηγΦάτζ;χφ'½0hΜdΥJT Y 1J4f«‰©ΙŒY h›6΅4RΡ”B΅Jʘ’Τ i ™Mێj32“Y©LEBˆ¦LKƚJ+™ΕH:‘ΙJ:—θ:$©Q:«•YKH4­eL3C%Ζ"±΄m»0‚Ξ)F°ΆM'*2HL)•Tfu˜]‘D#3)©N΅XCi1v’Ra¨€F’!’ΛΎ­&Ϊ€•Z’J‡¦–€©‘s‘‘NfšsY›4&+SΣu“TL!#3%ΦVmi₯#ͺ )™0—κΤY MΧ4I©Ζh•΄ˆŽΜ6fLΪ ‘j’S«I$i02΅k#‘ŠdΆH M»Hڌ6-Υ6ΪΩ€€"’4U$1SΕΤΆ–t‘@JKb45ΐŠΘͺ­E ,™Q#cF'%E%5š¨hΤf$14Υ΄ƒ΄mƒE΅j4K24©ΜJk3m#"Ν(ͺ“ͺfΨC"˜)SR2€¦M†tuΆ­€&£F#ιΝ Α”Y•4!Γ`MI f¬m:SAE’Μ‘±.mDIjVš0T;H‡IU:§ŽŽθ:jΜJ[fšΡ΄HΠ9“6‘E)’Ή7’ ‰¦s&Σ *“IAΫΜfZ:ͺmh΄•D‡’$#³ΚJMZ]IJ" ³C¦4€•Ξ’‘˜ΐŒYI—a&™I%cj£«dΒfj€PI €VΟ=ρΚ 7n_:pν·ώϊί}οίώόƒ\>:w>Ιρ½ήπ7οά}tΌύΞί~ρθκ‹/靏οœ“ƒ£s‡›±ξNONξ °lΆη6ݟ<~pF–νφΰp»9XΖΠΞu·;;>έο§²mύyvr|μπόαf3ζ~_Ζv“Ήž=xp²“‚,›ναα₯νθzϊι§στΑσW?{Γn{t~sξάH`lŽ6#tϋ³ΣΣγ³ΉZŽΞžΫnΖܝŸ<άŒνΉ£σΫΝ2w§§'Ο ΖφάΡΉνf3w§§'Ο ΠΆ³m 3iR!Ɛ€Ά’±\Ό~σ™KG뢝gg§Όσ‹_xowι‰?yιθΪ3OΏψρ½ί}ϊψΞ.IdlΆΫsΫΝΑ2ΪΉίοΞNώχ­;Έ9θώμμταΙ:ΑrxώΒαfkzvϊθd`9<ρpsέιΙΩγ]6ηΞ]ήfwόψΤfsp°]λώτδδρYK”’ ²Ωl·›Γƒe“DΧuΏ;;;Ω­» ΛΑφh»ΩnΖ’θάοw''§'+€Œe³έm—M2ΧέΑΙζp ΤΨm7ΛΪ9χ»³“ΣέΩT¦SΪH«B*ΜJ£Ι@eφθϊε±,>ϋθέ;Ώ½χ`\:Ό°yτΩGώ―~ώWcsύΦη/Ϋ$$Λζθπΰπ`Ω$:ΧuvΆ;Ω­kŒΝζπΰΰπ`YF’sݟœœœμ;«YΆΫKη6Λ<»hn–1w'§gΞ¦,ΫνΉνζ`I΄sξvg''§V Ȳٝ»rιv³ΔܟŸξΟΦ‘Œ k,ζhuΜΜHKH–Ν…«ΧΝ3Ωϊή/ήxγ‡όν―Ώ66W._Yl/]»|ωό2έδήg{ppρϊ΅K—Ξ/σψΡύ{χσήΟ쟽ώ—c·žœ―Λc98›OCΫΩΝυk—ŸΈzαh“OύΖΗ—·ΛφθάΡφθ`YζΊίŸžœο[šΝααφβΡ°ί=x<·ηΆ‡#ϋ³Η§gΗϋ,›ƒσGΫe sξwg'gϋγΩατή―~τόταωΓΝζό•qΈΙΨlΟl—1BηΊξΟΞΎΟζ6‡ŽΆηΖΊΫνŽηζόΡΑfΠΉξw§§»“έœΐΨl6#ΡΉξw»³γΣu_DΣIMf3JAJ“‘3Z%mš9L•"BЌ‘*Νl2T£Q`F#ӌ&‘#‚)S½Œ!#£²ͺ1Σ±$F ³VFE΄QTΣΘhΞ$PŠb4“&ιΤ1gZB*5ͺΥJ£‰ŽQ)•ŠΆRKƒ6΅TŠtŒl$3K΅sΆ3#Ίκ0d$"Mb΄ͺYΕ~H“UfŽ)S*A“tŽI"mfZDJC*IƒH ζ°ΆT›ΜZLm*‘c1›9%5&5Gƒ"њi#Γ&ΓHg:«©Tb.£1‚iύ¦cΦ ŠT ΄†ͺŠHΗBJ«l B3Ijj)iG‘™BD #ΛΪiJ“4£15MPM$#:I¦&³Γ0–Œ²νĐ ³’2Ai€”$³•#:˜‘„©!2₯“hͺm4™Ieiθ˜Md$mk4€ΥΠJ'K3Œ$©fši΄™:YΖHΓIDCΝ¦ŽZ3ŠvLc&!“v”€)k"TF©1B4™© viU+’Φθͺ‘$FιœIΘ”Άi…hΪ£ššaH²Hζ^©DjK"¦뎎U›’’€Ά£ŠDT΅ R"Ρ„HCZQJ5‘Œ*‘‘Μ0Χ!’iT₯#MšŒ ’ΖL›5±dΘ’uΜ¬t`$ÜM¦Ά’I˜"Δl‡ 
"b¦`κF&6ϋΉΜŒθ’©,cŽŒ1Ϊ‰Μ9:€Ά²½ύΜ³·—Σ;oβΏ|ώ•ηo_Ό8–sΞK''χ?»χW?|λ΅ηο™εΖσO_ψλϋGΟΏϊΚΧ_ΎyαΑGoώδGίλDΆΛ…gΏπΕoΎφΤ§ŸόκGώ½³η―Ύψωη^}φΦSWŽŽ²ξέπΓwπϊνOηΈψδσ_ύΚ+_=πƒŸύθ‡_όζK7o=ώΝ'λtώΕΫy|χέο}'ο­c/­ν•Ϋ_|υ•?~ι0Ÿώκψή/oΌφ?zυΙ—Άwυ‹7ε>ώ”ΚΡ΅§Ύτω§ΏψδΥ[ΆKwοςώΫoΰWŸ|²»τΉΧ^ϋΖo]yπΡ›?ωΡύΦ šσ/Ύϊ•o|ቛΗΎωϊί|ο—Η₯9zφ΅―ΑK7o>|ουŸύτ_ΌsšŒddΜdΧUΧhFƚ,ΓP΅Μ±Ω@²9<lXΎτϋόκ­g|τϊΟ~ω.Ο|σoύ—Ο/wώζ‡o'žxβζsW·έ=όμξΫΏψωΏ{볇Θ8Όzϋι—?χτΛ·―ά8Ϊ,sΰώ'οΏϋξOήϋδΞƒ³}1ΖζθΖσ/ώή O<{ύΒ•Γ8;Ύϋ›ώΖ/τρι*ƒ‹Χžxι₯~οΉk7Ξe|Χοάsω`‘J+.>χ…Οωω[O_9:Κ<;ΎΫ;w~ςϊ[Ώz8w @"ƒ΄SΪ$šFG₯cŸ$m;»vέUλόΝ§o?uφԘgc3ηΌvσΩŒ1ˆe{ώΪ_~ε…Wn_Ή|46ϋΣϋχ~σ«w~ύ³wξή=#I<ρΜΛ/<υωΫWŸίl»{p“7φƏο<ότdv{εΙ?Ÿ}λΦ•‡ο~ο{τΔ«Ο}α©ΛGήιoώι['ΛΉ«/ΎόWž»ρδεΓνXΟέν‡ΌρΛw~τΡ @Ά—n½πκ ΟΌςΜ“—ΞmΦ‡w?xϋ‡―πΦ'Ǐ§$€Œ‘XH+3ι6ΛYΧΑڌŒ@6›Νζ@eYΖφπ ₯s>ϊμαn?Ÿώξίoδο}ϋΚύΏώ·τϊGτγyλώήχ'υ·n>ψ›?ύgθω'Ώ><χΝώόΎωκΝίώε?ώ_·ύ?Ό™2Ž<ω₯oύ§ωίύΓ/>w}3οτ˟ήΩέΈtγΒ“―}χο|χΫτε瞺΄ιΙ½ή{ύΟΎώσ}τρƒ}*«Μ،€£tŒ5M»ΣΕΊ˜Iξ½χΑ/έzξΦ­K—=yλπoޞs\yβϊ₯›—7λƒΟ>{ο½;Λ•νζθΪS/ΌφΣ/=qωΪΡ’έρgΏϋψoώκΗw?:›σπϊ ―~ώ?~νζwοΟ½ψυ—ΎpσΒς»_ύελοόθξrσ™ώΰ‹Ο<{εάαΨοŽ|όα―Ώ}獏NΆΧžωΑwΏ}#ήαΏωιϋ?ύΝΛφόg^ψΖηŸ|ξΪ… ™»γOχΫwή}ο―ίΎϋΩΞdΉωΉoε…―]~tηΞ‡?z|σΫ/?qγό’ύγOϋΑλoώϊgΏώ쳕Œqξϊ+_zρ•g?qα`3w|ϊΑ―ίώρ}|fŒŒ%ΩΜΉ¬Ίf$:$sKη:ΪΜ՜dcΞn±fM–Ρ₯-2F2μηH›2#›Ž³ΝYYu€Μ)³c!m;χΡ1ΗμX£ΛΨ$cLΊNϋ5£€fZ“}Ο’Œ$Iš93ΣΖ2dLƒtj­fΪe†›‘.³ϋ‘UCΒbXΝΑHšVg[s£ƒ$€m§h₯Λ`5’2±L£ΤΪ m»Μu4YhG’±d¬£;™mΦN5²PsNϋdΩw‘‘”vΥ1d޲ο΄ξΣ&IGΖ°„Κ:Θ5+id5ζΊn5S¦`³tdέk2d™ƒM»φ##s˜j•΅Ζ4Θ²ιœΙœΙ.6Ζ’&ΥΆΚ²n²v©ƒŒiԐŒΞ™6†hi³Œf¦sν*Mfζ˜MbŒŒ͘Νά§ν³.I3Χ‘ΕbME’d™ν:s0kΜd Z+ccmVI:΄νΎK$ff'됬c—±.]–e™Φu¦ΣœlFχ³£MΫ΅λ]2’D:RsVš‘ΜqЊVχͺϋ4CΪ1njŒf[Sf³$Ρa¨ΚάkZ-{λF’6sZJ̀ɘslΜ&₯cRd4λ:»΄9R–ΞŽtΙb4φλlΛ4G–‘vm3ΛΪeŒ³•Ž sμ­sξ‡’‹%šΙΜL¦Ά•0*$kΣ6sfΜύ›}ΗRΛh†Ωu—,ΩLƒκΪξF ¬ΓΊ»ŽL£ΙH–t¬s]2;²"IΖΘ¬Z5ζƒMΖڝ6Q©QbΚ~cd€SXdŒΤήμ\©$†¦ζ2Ζ²s¦Ζ\ǜk²ΟXΧυ@€3"›.Υ&ΙHι s?³9θ˜E†tμͺ:RνΪQ £λ`Y³7ζ0F–,˜iη>s&#-5—Y«uj$’ΔHuΆ­!ΝRŒ&έ§k’H₯tΨtιfŠMW›μ;Ž!Ι°t]ΟΖ2Y™`V»άΈvωh³gώξ³GΏ;ΎxώβΕƒƒ­Λθ87ζq{|—χ½}γΖνK7―l·ϋ‡gΛrΆ½rωΉ‹Gˑ퍫G—ΧϋŸΝn\Ύzi{αθρ½χξΌή‡s½2–% sΜύfν˜#›t˜λ~n¬#MR]ΧΉ«IF$Ι²Ωd€™gχξώϊρΌu~{ωβ…KGΫ1ηφςΝ—Ώφϋί}ξΒΡΑΨ?z°Λrpώ‰η^ΌzλΪν υΟφιοόζω›—ž}κΰβ₯ƒστQΧyώκΥσŽ6›£K—nζμΑ<\ηα•«ŽΆ›‡Ώώΰ£χίΎ{ϊμΕ“²<ωΚWoΡύnξχϋΝαε·Ώτ΅ƒωψί‡Χ‡g¨*GOΏό₯o½όδηn­ϋ“GχO6^{β‹WάΈςσόόύŸύfΏΉpωΉ/ν?ιβΑ°ί―k²½τδ³—.nζŸώΥΟ.ΗλΈτΤ³_~υsίxφβ6'χΟ Ο½|ωΰ`ΩŠf,GΟ|ετωΛΧw|zχll/\yφΕΓΛϋ?όλGŸν*J‡.©ŒV;C" YF—t ϋuοέ;»/nΉϊάWΦεgŸπγ·ο|ςήǟήΫ–$θζό§ŸφΧΏπΕKΓι§ΏϋΝΩztρυg…›—~ρ/πρ½Ή<ωΪWΏσςΝg―l—9ΧužmŽ\κkίZΖψρ?ΈηtχψtΏΛrωΖ7Ύυά΅+ΫΓνΨ}zά³ΗΫ 7_ωΦΧΎϋμων2vǏν²Ω^yξs—.¬ŸώφŸ)<ύόž|v]ΫύΙΩzxξβ/ΌςνυΜΟ?ψΩέ΅5vζι>g–­ŽF–ΉμΖ*Ζ™f­™FΐφζΏπΚWΏσ؝<όεΏω³7vΈξηœf»›»³Ξ-v»u7;ΝΞύܝΝu³ξηlY»ξ»Z¨qν _Oώ«ΏχΏσΜ…ΝXέ}λ/}γφΡΡfσ>₯ν•―ύέψώπo}ώΚrόπ“;·—Ÿzυ;θΑΗwΎχgξή€ŽΆνŒΔ¨ΕlG7ν†9£)@’Νfsq~ς«ί<~ώφόε ·n_ΏυφGΏ΅νœΛΝO\9γΰμ³ϋχήzηsΟ_{εόεΫO_ΪμήΏϋρΩά^zβΙηΰΖΥ«ρώΧί?έŸξχΛΑΕλ·ΎύνnέΨn—q²>\³άxϊ₯ο~γΕ§}φ›Ξ]Ήψδ‹Ο›g>yλ­Σ³G;³rφpžΟul.ίxϊ ―ύΙ—―_Ψ §ΗΫkO=wεΚ•gώτ{ωρ'»υαρξt7— 7ž~ιΪΜ“GΗΫsGη=υβο§λγχ½ΥΑίƒ―~σφΉ+=~πιƒΗ=8ωΦηΏxΰήGϊ‘G{ ³³ζ’ΉŒυtnΖΎ#:H–aι\Οƈ9ΩΛC7#³™³ΖnQI†¬YχsίlΉ.šŽuέΜ±™c™΅ ]’aΜu,“9Η f-s΄ΙΡ=smfcΣeΤj‘€ΤX±Ίf­Ρ±t$λ:ΧΙh,±${FΗf˜IcΞX‰hŒ)S͊°h€–)tŒŽΩe¦Ι\F:ιά¨΄t]–ΙH‡9Z³ϋμw]Œ¨Ρ K›α ƒ΄έΟ5c—‘„.K³mΗΒ’:ΧedfΩMC#ΛΑΪt{sŒ&ΆΪ¬;Γ:23fνlΘ°Ž¬ΝfsΞ¬S$ιΊ.έtY`vΜ1ΖΪΡΉhX“}lΦΉιΑA»Ω¬sνΎ$’‘vλš™‘M4*Ζ²f]ζΥcdέμ7FΝ™ŠeΖm;ΧΡ%›aNkι’΄§g»Ν’MdΜύΜΪeμ׌414³cΔΊgoΠ͈dNΝ:vKe€Φ}ΦΥΠ$σliΧύfŽΕH²¦λN3ΊΔhΪ1²¬3‘aΆc&Θ²Žμgζ‰uΜ,££ΊΪοΗ2¦tΞLι"Φμ*΅iF:­λ\ǜIlb•Θ")ζ3±'52m6­&BR$Λhf:Α2£™6²Ϋ;θή(4¦Εœctι4»κZ³K†‘2HD–ŽN΅_§ξΗ’ ‘aΡa?³!:t&­μ³ι:m:X*sΆΝšΩΚ~•΍u 2Φ©FΜa&™#s¬»έΒIiŒΉ9Θ:—id!«ΉJχ›š±O–΅Cm6:*Ωνφ$Œ$’iξ:—ŽΜt"™!§Λ:ΧΔι˜‘₯cΝl:Λ’•9g:’QλΤΉ$•uνμ~Y–mΦӘV#νF–1³κ:†¬Υ½±κ’iŒ9G33vKšŒ1t·ξΦ,jiΗάt0G’Ζό ΒΣ&MΣΔ<¬;ΧύΌo.•YϋΦΥΥλLΟ " ˆ2I‰€f„‚vΨaK@ΏΗΐvHβΣ’IΨAHΔh f0k/ΥΥ[Ue­Y[f>χ₯sj=KΖθ¦I3ed;Ν 
2mη˜#cΜαŒy&―;7s³tC«Md:³‘Œ$ιiΚ₯Y4YΧ9'£±ΔHΞ:Ά#)•΅Γ«9GΨdˆ,•TšL«žmlNgΧ•)€LΫ½ύνρςΕιYΟv6›mΖ$ƒνšœ>|ΆΊ6rξάώ²}qξ­Ώyσ`κεwχξόΝιXηΈpωΒεση6ΟΎxrχ“;σΰζ·ΎυνkηŸβ?ώ»ο‡Ÿ~q4o|πOώΩω;Χήϋφ{ž½xΈφl›νώΕKΫΗώθΧ_>zvτٝγ“σοχ½―ύΑ•sο½yα_½\»ξ^ΈpxλόΙΙ“―~ω³{§λL-mη:gΖζςoώζ{o]>wzηΟώψίψ=}Ή{εκϋίϋΏό³ί»ώώ;_»sτύGΏzx£W·ϋ—oδ«»Σξ₯σΧφwΞGvφφ.]xcsχρš^Έpmoηπυύ{ŽΎzΖ՚:I“f&νΪ9›dfΜ.SI0Ζ²έd{ϊψeOW‡;cwc»»ώΦ;ΏχφΑΉΝρ/ΝΏϊγΏΊϋΙ“qώ­oοχξ?ωφΕ·ΏύΝoώύtώ“›Oo_ά;Έpνΰμ“η;λΑΥΫ·η7’ƒƒύ 7ΟύϊΡΞΩζΒΝσ›s›'ŽŽŽŽ^ΜCgFŸόό?ό»χ£>|²χΖ7~ηΏψ/;‡—Ύvλ܏ξŸ8)΄–koσν«·/ΜΗύ―?ψ³ψo?~y:.|πŸύWτwίΌqϋ­―={ώπΡέO_ΎψτΛ'Ο―>ώՏΎησΩg'ίξούγΏΫο^φΫ»ΏϊωιρΈςζ›7ήγάxώω―ώςOωŸ~ςΜξ₯ώΰ½ίψ­·Ο‘–ζς;·Ομ―_|ψ‡ΏϊΙΡΨ=ΈpγΦε Η_=ΙεΉμŒ€Κμ˜1ΝΐˆP­ΦJ₯’$³λρώμό­οά:wpρϊ»η.έxλυo??ΎΥη?ϋπΛ/žŸ½ZΗώ₯+oΏ{ϋληNηη?όοşψd}ΉΉφνίύ;ΩίώΖoάώΞ•Οδ«³Οο?}tυτΥ/~ω£β'χνίϊΓι}σΰΪ»o|ςθψξγΉΞ9-c\Ό~ιυƒ~όΙ½'ΎxpΊ=Φ»ί»}no9ώωύαύψξ§ΟΊwε7o]Ύή―>~ž³+…œω³―ρovτl\ύξώŸύWίΌpιϊ₯kŸάϊΜ’±nΊŒ1FSMFΫΆbΠΦΆm @ΫV Ά‡»οώƒΫχ―¦Ή9wxιΚΝ«ηŽ~τoψϊώ坯ΆλU­Ξvm‹9g[0gΧΩvNΠvΆ³Pν<χ΅―ΖχώΣοέΪ?}pηOώ_χρ―>Ύ\ϊΝτOι?ψGί»Υyξύο|γφΛ>ωΣ?όWκψχ_œξ\zχΫοŸύυOο>{ΆΆcmf­UZ΄m[ɘ#•Ξ1ΧΡ dŒ±»»ήύόΑ·/γύ η.έxϋΒέ{O7s.Χ―_ΎrΈΏσϊήΣ‡ŸϊψΒΈω΅ίϋ֍›ηηύΏψΧϊƒŸύψήΙrώΖ;Ώσ―ΰφϋ_ΏωρΡλ{Χuvf»ΩœΏqιΥ½Ÿύψγ£η|ϊ`sγΪ—om^ž}ϊƒΗηwOƒ«o\Ϊ쏳£ΫΉ?Ο&θΪΉγπΚΥ7~ηƒΛη7g~τ‡οχαǏ^/Χ>ψνίύΫψ?Ή}ννoώ'Ÿί7wϞ­s5ΦΣ'Ÿ}ψƒ?ϋ~Σ‡λ…―οώΙώΫοΎwρΒ΅+‡ξ<~|xυ½k{‡›{?ϊύρΏρ»λώΉσηίΎΉχκαρσν…ŽMΖ@f¬i‘!Ι¨hΖLXυl±œ­&‘M3dιΖLGh³–΅]g›udj՘‰tTͺ«™t3Φ‘š1[£ΝŒYλ\2¦Tj3Y™έԘΣh†d-3YkΝ νT”31’SifJ+³)λθ0—©5™4m›€mJK2dΜ€Ι”1št4sR]š•i“h¦ SfΗ:“Ω¨ΞL΅PH«Zζ4Ζh2dSΡj•Œ)3Ν`vΆ’&k†t΄΅vtXζ I•†h$ ©ΐ6Φ₯kΪ΅)c¦:ͺ΅κ4d„ 1η°Ξ,mͺ†±ImζΒhf˜-5ΜΩJi%aΆ]#‘iv†d¬ f¦Š K²΄S’¦kH—³3d‘4£LMΊ€Y%sv­™i$f׌™4’£23W]2³ι04ڦ먈³†,i0f–uŽ0³QιHβ΄Ι:²ŠfY:‹΄f5F’£M3“fΕΜ(lΦqΆL™•ιΜ ŒˆΠšE"Kc6:²Fš1ζHζΤ)ι2©œu‰ΦL›Š,ΥΜfŽ«1Λ4)B4ΪjΧ,à Ɍ6f΅’L™kͺ¬gΝԘYf:ژ RcŽ˜Π":Β’daŒž-sΚ͘iZΜ¬JŒ$‰˜:‡Yڊ,!3Ί1Ξ†0ΥΤθh;™Υ!‚Ωv&£Ξ:U1‡*CX$H3fΪgk»ŠlšE†Ρ”€#Υdmœ­f§ͺ¬ci‚Q¦95ζ²9€%]Øι:3›±˜d4™MgFm4fS!³ΞΦ1Φ€ΝhmΫ¦”6ΙH™3•‘ƒ•usšu€Ν‘4š¦mG cm+ΝR”9 aNYlΆH²lΖX²`ΙfπδΑύϋ>yυέsΧή½ΎωΩη]sώζ•Γ+η=ϋκθΞέ£W»ίxοΛφ<ύθξƒ'/^οξdημτψ'Ÿ>ϋ­οΎqyοp7GmΡ“³ηύΫωGωπμ%λΊ™}Ά{χΡί½~ώΒ[7―ό“γ“5ηΟ_½tώςϊμιύO~ρtgξn$…2›Γ+_»ΉnyςρgχŸ9w~»Œ—Οόκa_»ώΖενξΡ££ΗΟδΖΑΑε›λέγύ+W.ξ<>:9[Ηαώε›—ότž‹W.œίέzόπργ'm3FΖ¬©“©:Fd¦@ c»ΝœZ ΅»»ύζ΅+ΫφήOϊW_~ώτμΕάΎΈχΰ'?ϋτ[_ν―ήϊϊ½ΏΎδήγ—χ^ήx{χΰΪΕ1ŸΝέ+Wξ.Οο?x΅Ωμ~圣φΚ•›ϋΛώΛ»Ÿ<;z΅q>-8ϊΕΟύٝG'ΗgωςΡ£_ά;ω­σΛω ;ηΖ\Zθ\.ίΈ~ύόώήΛ;ΏΎσαOο>{ΡέυμυOκWΌsωπKΧ/_ΈuΑ‡_<{φσο󏞝>}|τόμx=Ύ{οα/ο―。½~q»ν«εόΕλ―n_<ύμΞO~όΙΓ³eM?ϋβΡ—Ο^}½ηΠ™³³±έŽd잿tαβ…½g/_½ϊό«£'c»œ#I©Ni•Δ¨Μa¦$Ω,Φ‡ώαΙΗ_{λ7ήΏύξK/ξ_8<ΌxώπΚωέώτξ‡GγόαΕ[Χ6gξώϊ—η6ηφ·žφθμύλ‡o]ٌ/^=Ήσ“?ϊόεςκΙ“γ—Η'›GχώβΣ“χΎ±½xΈs°1ΦΩ*]Ϟώμυχοά}Ί6o~πΝΧ,gσ‹Ώώ«_|ώΕρϊͺ›ΧOŸΌ>yy΄9;Y+pφψήέ;ΏψΥWG―—ΧΛ‹~τπή=Έt°°»μΟ³gΛP2”j΅Š±ŽfFьͺΆ-`ΩY.ΌϋΫΏϋV16Ϋνf;Ξ^=||ιςΕ‹‡{Λ) ……’Π* -Z€²ϋΖ[oΏϋΞΧ_?Ώϋ7όω“Ÿ|τόιi>]ύΑwλoΞ΅=₯³ΩΩέί.Λvΰΰpg=ώτξΓ/=ύb>=έ^»}ύΰςΑςκΑΡύ/Ύz΄ΉppσΦ{ηwχ_~όωύGO–ƒΓf<ψμήέ“·ίΏrωΪώ§ϋY[jž<τΧΛΏόσΟοΏNuΉuλΚXΆcιή…Χ/ή»ςΙ«—_eoΫΝώBΆsfάΕ«Wί9˜^έω«Ώψπ‹''Οη²ύβγ»o~νφί½|α½ΫηπΕΡƒ–Z_<;Ίϋ“ΏϊτήI^ΟWσΩγίxοχήΪέ;·{°žάkv6²μ^ΊpαβΑςx}ϊόΕΗ_œl7{›ν H-c£•#3‘΄sΤbY)²&MbhΝΜhPΜΓ‘H₯hDieΖθH[”–3CkΘ0“fh˜§CΝtΚ\RX+©tPm+±$‰©ΥJP₯!)₯Υ*Y•†4&$sF₯M;šiˆΆΡ0d”$‚9ΜΡN&HŠ ΪREt RI#ͺM“,JΝ #3 ÜT[³’d fLdΆJΪZbŽF 0*Ϊ9i*Hev@g& Τ#€“΄«jR4)¦‘Νμ¬16JΫZζ¬Dƒ £c†–¦³νX4‚4š•IΫ@“€IP€kGš‘Mh£T”j5Ϊd͈tΦ:ƒΜš3i€U•Μ…`dŒQ:΅ ‚L$K 4TKM̘νP€ΪY΅© ƒ93Ε@5³#c¦"©…D£”‚Ω%%…Ά€i:DͺU -¨¦ΣΤ ¦“A’΄`Vf“1–ijŠ6ŠjTC"™’Uf«m“$R$H«ΜL-‚1Π¦Ϊ©*34£¦JIΫ©ΖX6Ν„I›v³š²i ICf::ӁVΓ؈Š„ΕΪV“¦4΄€!h’‘@Ϋ”ΩDffRm€:uΖ$ Œ$3ΓXb5'€”™1­"*Πj’ŒMK“Zm“I‘Ν`¦³ƒΡΘMΗ"ΙΘH ν<}υzΞΙΞξvgggΌx Ωl7»‡{Χ/OησυΓG?ΉςΑ‡·n_>όόΙΙαε[Ο]Ι“άy˜\Ήtσβf3Ί½τΞ·ΏwύΝΉ“d5rqَegoٌjαμΥzτΡOξ>~8νξmv6''―|~ο«ίΎ|ϋ[o]όδσW›ΓΛ]Ϊοσ£{wξ<‡Ζ&ΪŽΡƒσΧw²› ·Ώυ½ _ϋφΨi5›K»–qξ`wμ―O?yςΩρ›ίΩΉψΖ•Ρηηo]=w0ξάyqμΪο~γπληςεrγΚαώΞ|ϊΩΓGŸŸnΞo€ŠJf†$2H³6Υ@’±Ω;·›e˜§§'§²έΏ|qo™§―ξί9Zw·—.^Ϋl_ŒWΗO>;ΞϋWχ^ά;Ώ}vΡΣ{NΎ~qοςεΓνυڍK›η_ύϊ“Χ—ίωΰπβ• =~}ύΚωΝzο«GΗ/³;2―žΌ˜;;_]6cϋόεΛ²M 
@­sοΕƒ½e=zψμψΕ‹½kWtφρρ㯎OΟΆΧφv/žΫ,}=NžφδΕvχpοΚώώ;ϋΫη/ΟΒΞv,λΊ{n`ogχμαΛgξΏά»xσŽΧΝ¬³ZΠ³³“Η~vό―ŸΏψΞw~w7<½θ郇ξ―–M€@Z3 h̘£K›RH²Ωl.μ½~ψΥέΏ|ztχγ__Ώ|ρζΝ[οΌώ7o^Ύ½;ž?8zπ’»{»—΍¬{·Ύσχ.οwlΪ9w/^;\ΖXΞ-#6gΗGΗ/ΗΨξœ?m³΅³τωιμv»Ι&M«θœΟΏψυ/?{8ŸtoΩ9Έ°wρΚ…½e}uόΥΗζήΞ₯ΛW·;s]Χ“±»Ωl0_Ώ>YOsxώΖ…ΛΓΧ―Χ9»ct™«Q Κ”bΞ&Z&2hΣIΪΆ Ξ^žήϋρό―~|τz=Λvχπκ­―σ[ίΈύ΅ίύ‡Η―ώΕ> ΝαΥ+W―^|>wΞννŒΣ³³Χ'''΅m_}ϊΡέϋί½ώΦ΅Ύϋχώλζκo}τιgŸάύθWOZ@iZŌ™Ž4¦]iΝ*d,Λξϊ䫣ǟ>ΊρΖ₯ƒ«o\Ώφσ;χ―άxλβή…εΕΡΡΡg_―{W/^8·dΜƒ›_ξοΏ±Ξe;η˜φΟ/Λώώvξd₯¬§'O?ϋε/ξ>έ9Yφ·Ϋν|~ztτδΑΧn_Ήόώίώϋηή~ψμήγ'χ>yτόυ‹ΉAj»ΩΩ=8{ψςφ•ƒoΎ±σ€Χ―\;άέ>ϋταƒ{_žλf»»HΊμ?<Ώ]ΞΪ“/?ϊuη½ϋO_ΎξΆ°}y|vxύ`χ\6ευΙΛΗώζΑ7nΏyυνϋ?~Άwεᡃ³γ/~|χΩά½9F€Ι:6»€ΛφάΕσ}½'¨zπλ_=˜O<;9{ώτΩ³Οœώφν½λΧw]»uqgϋμήgŸέίί|π­7/]Ώva3o]ήΩ_ŽΏ|tόπΩΛ­6iBF«1"TΪ€²Œƒ‹7΍₯―ŽŸΏ>]Ά;ہΣW―ΗήΉνf/cdΨn½8ν6;›ωμΡγŸ]ί9Έzυςζε;ΧχwNύεgwŽΗή{Wo_Ύϊζξρλv7gξ?|ςόδls°c³»nχlœe۝%:'Teνfg;–‘yzVٜ;»»m§“ΥιjY²Y²,Λαε«·ή~ηβΑώΑξΞf³l―άΌ4Xiηά,Ϋe,ΞNηΊ{vvw[;s»,I@Φyφπ§?όλk'·Ώρζ•k·?xγ­ΣO}ρΥ—Ώόυ§=YO šVFHH‰hH$cμ;ΈΊ,/Ÿίωψ«_ύΪώΕ7?8κώρΫ{o^ΪμΞŒν.Ϋƒ«—·™ κd>ΎϋΛ―žΎΈtZφ/^ΉyλβαώαήξξΞvΩξν__vT«³}ύμυ²wαβωμOϋ۝½Ρφυ«ΣμΫvol6˜sjgE@–νΞΞΉƒέέiμmΖHh;kΠQ™ˆ€Ί.ΖθL1ΣΞ^œ|ωƒρόηΏz~φ:Ϋέ 7ίύ[πΧμΏύύίψ;—Η―~ψ³/Ϊ±Ώ»Ώ·»λμιϊβψYχ²Ω–e €δυ'ρ§tεtύή_Ώυ›οέοώΑ³/?ϊυΟ~πgζO~tχσ''g€B’E₯F$B€“΄J…ΙξφμθθΡέ/ŸώΞυσϋWo½{x7o^»²Ώ³}yηώΡƒΟŸ-Λεν²Ωμμ\Έ8N+΄Ξž}ό«§}ςε“―Φνa1gOŸ½ή\ΊΆ·μ―?ϋθWΊwφέ·άxλƒ[oŸ?{ϊΥW_|ψι—Ώόβω±h%cY6ζ‰W―ζώωMφΗ²c΅]N_Ÿ±;ˎuQ`,›έƒΓέ½vξmΗf Τ<™OΎψσόlωΰζϋ7.Ώωυo――ž>:ϊμσ/~ςα_½΄"!JG;):TΣiT;kš’6£’‘‘fΧΆ‚hmΪHBS’†ΡΩ€i΄F‘©!³³₯’1΅Z£Α0Ρ‘cφ,f¦f4‘΄΄2»$­ΘLd†¨˜ͺEIPBΥ@"mζXΖ)…Ž™fJG¦ŠFΪ6νF₯)&‹.¦t2£©$32΄42Π62–H2e6L’*…ͺ3ƒtF«’ŒHΝΖ•N:ΔHΗ€i23)5Σ’U¬΅4#νΠ΄:30Ϋ’*Ma)š6sd€R™Υbi†΄΄1!šΦΤ֌²h"mCQ“DFš!M ³k5«D“Κ2€sRM0•¦# a‘TtVa&“1Θh'3Mۚ5Σκ€F’CΊ΄Ρκš]GGš€1kΤf6•Ž5Γ 35Ϊ"³- 5€R‚†4Ϊ¦2ut˜*HΫ©™1–Œ΅:$ΥF›˜#“†42b¦ιL‡V$Ι’L©₯€³‰€ji3­ ‰ͺ3”ŠFuΙs†ΚŒvΖμͺfGG₯i†9ΫΜ)Œ0ΪVI«‘4!iDΣȘs¬%IͺTZΜ4˜©1kmΧhŒ41’ZŠˆ„FC £" νΤ$iΤX†¨΅k”HΚμL“D[t‘΄hCG&‰f¦©Qm«kΊΆM’‘F3eΆ:uši"š‘‰Σ Eš3fζhK”–Ha3E+͘Cν˜3‰Q i²~qχ‹‡ί:ΈvpωΚΥ«·n}ωUOPc{ξό΅Wί>ίφυ½»_>y=ηf·/ŽέΏΙɍo]xγνΛwvήΌxΈŸgέκή£γΝυέΣ³—ΣμϊπΓ}_ά9‰VηΤuYŸΏΘΑζb ΙΨμμνmΆΖ’έٜžœ>ύ›žόύ7―]Ήqνς“ƒ7.œ»ΨG_}qηΙξζόvΞhu§―_Ÿ²Χ§wώϊ?ώψΑΓ9ΠꜫΣqvϊ:»/_άΏψτσ―_»φτϊ½ΌΌϋπΡύ―ξ_Ίώιλ·Ώ{νΖνΛ§onv^ή{ψδΥ£“έœihFΗ0›£CŒ˜Ί2;»—ίΉωζώ²ΫηŸ?xxτψψδςϊϊ¬Xvφ·Λ٘£I–e»έΩέΐΩΙI9}vόθρ³‡Ήyώς7/ΏqδΛΟ=πΙzύκ£ε½K7ίΎώψυ΅νΦύΎxςJ–%d$I’$@ @;ΧΣΧ§s²ΩΩlw7ΛΛfww3–XΧ³³³³lΞ]~γΝoΏχζ;Wφ–˜³ΛΞΞώ†UΫ9ΧuΞYcΩnΆΫdΠ$ #˘Ο>ϊα>ΈϋΡΧήώΦ;7nίΌzύςΝo^Ίxkyόπ/Ÿ½ξL€JCΊH£M-SGΫ(ΙΨξnw–±»»sxαβιΙΙση―~υ“Ÿτχ~σϊN–±:;=99{ݞ½|rχ/ψOΎ§3hΫΉfžŽωϊtοφ…k7?xηΝχoœΏ²·Œv69·³]B΅H²lχ6ΫέΕ¦gΦ9OΞ*ΩμΫnΦ±Ž$c΄Νœ’‘Œ‘Q##UˆF4„1€ιά+Υ6΅Τ$cspρ…y"sϊρ―γ_ϊώƒλ;ο½wsspR0ΧΩ9I2Ζ²ŒDJH@$Ι8ϋτΟώ§ρΣφέί[Ώω­―Ώϋξ­ίώ;ο|ύΦ|r~v|Ρ‰‘,cd°LΥjKu„F›6Ιf»9~ϊδήύ£―N―άΪΏρήν«Ϋ7ϟΫΝσ;ŸέπψqΞE^Ώ:«]Ο?ϋ›ώμo>{v’ νœ'²§γxη5dΩΩί;²»λλ'_~ψΏ|φι§ίz7ί»υΦ΅+o\½ςώ7φ6닇O~φͺθΩΊžžN‰νξΞv»ΜΡ1Ζ’e³έέYhΟNNΧuΞ‚$#IΪ$ƒ€&έ.―?ώώxrχΦ·ΏφφΧn_σΖεkΧίώΞ• η^άϋןžŸ’Π˜3cf‘h₯#3*¦Ρ5IjTs–Œ1GfΖX$3λθ¬ͺ€u6νhFζ2Ϋƒ‘DušlΦLeΆ’TfuŽΝΜ¨jgY,²’iG7‚ΰ¬Ωϋ<οσο~ώο»ΦΪcο½»wΟ#ΊΑΖ N†@Ρ²,*Ε€d—«βς™ §*9ΝA>‰Ψͺ’]’:")R0!‚I ΡθyΪCοyXγϋξ\W–΄₯€ΒŠάI(3ˆ@H#c $‘B ²@¦€-'²@²₯*+I…ΔFTQQq"Ι†βt" `δΔ@€ „!6–ΐ[²Œ%…#ΐrΪDrE ‚eKΈ#’4Υ"(AX8,#cS-Q#e '8]T‘3D",XdbIXƒ…%0E*81aŠ„ŽL»‰(D–lŒ…‰,€…‘S²$Λ$Ά-“V₯1"DλRƒjΫF2R’Ωe€D€,Š"›‘‚[6–e)Β(2Jbμ΄!‰°:H‹,NCJp)ΒDΊ$™ΖD ƒd#!2–aaY%mΩ(#I`œV€μ@’₯16A !76`D![8l €L0Ζ ˆ™ŒYΆ°„A(‘!¬ „«°@QεD"±X$i,$„Β€‚Hl*H)JœΞ@‘„‘I‘-Ω "p‘e–P±,B"Bv#l§%–$[I̝…¬PՈ΄AΖΖR Šœ’-’ƒml¬ŠJ1"δβ"δ°3‘@6N£$H!¬ BH'6E‘`₯m#‘TΤF€Š%c‰ƒ%Ψr-iL'uΈ l€$²mIBBr"Œ ¨A–XΆ‘k8a–΅€$‘DχςΡύKη–Ο^ΈxυΞdΊ?|ώd¨jΫ(’4 ―\zχφω5ΟΊΣ§χξLΟD[ϊu<<ΩΏχ*ίzγμ΅Λ7ζ.Ξ-ΔΞ‹έΝ}·E9:ή>ΙΫgz =O&'»™ IŠθ•hΫΕ(ƒž !„d(Mq7Ϋ~ς|λγs—Φ/ήΝ]μΗώ“Ν­f‘a„kΧμUΟχs1 _žŠ(Ρ+jϋ‹₯iNvχ^³zεμΕwΖk+Νι㽓Ρx8žŸoϋΫ·ΟίΈ3<ߏ|Ύ΅{:>Ž^#I(­\¨R ((RUιp RH’₯?Ώrα{ο^\mƒγ§Ο6φ·¦“ωΡΞΑŒ΅θ―^XκoοwL‘’?,―/€λώΑါmwzrrςςτκϋ 
goΌ9©ΟΞΦΞi7έŽ·vF:ΏzυΪεΙ’šγ—―Ί“Y?z€ΨΐΚγΧ“ρ4›ω₯ωΕεω8=PτVΞ.6σmކΓΓaΧ_ΉϊΡΗΎ·δ“χ<{²y8žΏrνέοώθ:`Θγαp4Ή΄σ‹Λeχ€R’ Im―wξΒϊπhοσΈΛŸΕ…;ώσ?ϋηq§=³ΎΌ’½C3!$H(ˆe pZ™%s¦©τV/¬Ν χ†ΣαΜ]―ί„ϊkσ=‚<Ž''G'‹ΓΓ±.E»ΤŸξμOΊ0RD‰θ5₯iךΑΕχΏύΑ‡ηzΗO=Ύχτυώ΄·πޟ˻s P„OGγαξq§•²xώΒ™ώλ“3£P‰λ€€A@†²T‡­ΐ%-ΞNΰP„Υ$«ε@₯išΦV Ξ,―œ™o1žΝΖ³:­™ΆQšVPΣ6₯„Τ£Σγ£αΠν 9³~i.žNUMi›¦) ©?ΏΔlλή/ύζύOσ—ίύαΏϊwϋςα•»·Ούτχs― I`α"΅²!3:INͺP€Β’ "yrx°»»viξΚ­»½sν Œ^lξl·ͺΪνξξΝκό`Πkg£αΑώVI₯)½₯?ηJ…$!Iκυ«QΖ§Gί|ϊ‹O2\Όρύω/ώψwΟ-.Μ―ΟΟΎπh<>8>©±€3ληζ6wGt5ssƒ³gϊΞΩdo`Z§= EΔ 7wιrs΄ϋμgϋ·±όΖύ‹ΰ»+σWΞ΅½S;I€qŠΕ*R'IXΆPΦΐ0qm²΄ΡH˜¬(–»D5Τ8p$ΗΒ ΩΘ8•Gΰ°eΛXͺŠ"γš5IS£Ÿ`ͺJJ.FgI;K ΒαΜbG%zQ W;q'B8JTΙ5ι*‰KΤP:Ιͺ,RΘRΘΣeΘՍ„“F2‘ ddƒμPΊΠeF *Ξ”-PE–C)[¨:2M¦‹-)3]ν,κla)ƒ΄K'' "#z5³ΖDK³p”€βB’±*R eΦ¨¨Κ \Ϊp₯Ά²U)ˆ‰jQ6,I( 8J€”3{fΒn•΅-]b5"E–N™°°”MΰHlE HΛΥVFΐ]©–DqΧ5ΕQdA # c‡:§Σ=zŠ&Ι$ΝLX`H‹ͺ(R„°α$g-KΖαvΨ²-2Q"3]ΣυRB‰ͺΒΛAΆ‰;Ωͺ"²DuTΤJE4 »FV[’#ΒQͺνΞͺιPWcg’edHQδ2²Θ¦€„ΣdάI`Ω8‚BXΈE:Θ”μp%2—3°Μd!ƒ8ΒJ₯%Yv:«€H\TΓN”„iΒ$²’HM15M¦Bͺ²ΓΒΩ‰Tˆ° .АŒ©Σθ v%P„Ω p(k45ΐšŽEJ!…Š\" Œ±L¨Rj^Π ‡«&„ΨR’΅2)QV&*(„­.ͺ…IQFΨMdΕ!8l©3ΥY(­ΛΥ™t"™ΨRF”ˆβ€b't$δ@Λ„Qgΐ–"ŒΣ™i#ŠΛ\bTC.P„j3U΅ΨŠT©ΩZ΅‘ €«³FβpDA%%ΧJWA5‚Ξ™*FHΘR#7‚¦ΰΆ-‘›ΞE`c'NΫ …ωxύΛ_?^Ÿλ­ά\Ήxσν?[YτtλιώhœΡ[XΉxωΝ +ηη5½ψΥO>;ξΝύ&’iσttϊΰΡ«ρ›«oή^ ΄ω|gx;’ΝΩtϋ³/χΏσ½Υ΅7ΏύαΈ7λm½–Ξ―\ΎϊΑ%o>|ρloT€mcšœN^~Ίω_XΏ}½Χ4γΝ―vžo©wΙ6³q·ϋς7›ž½΄|σύχtOjΫ–―άΊy«ΏσΝγνγΩl2νχ―ŸΏώ=}Έ}0;½ΙdΌΉy¨»Wήzs©ΧηΩΦλ£αΘ͊ V,»QFΑR*Γι™Td“acΫ6ΨΆmc0 š΄M”φζ—V.]ΌrχΛ·Vš’Ρ£ίόφήζΡ^3Ÿ§ΗŸnŽξ\»ςξ{ΧFΓ'G―†š[[½vης›sUέΖΓgϋΓρ i&ΗGΟΆ§ίΏ»zϋ[‹½ΨίΨަAχφŽϊg/ΎωΖ¬‰ρΛ—―Ηέiτ6Ά `+¦ΏΪΉ>ωβεΛΧNοξ?ψl'«ϋλoέ|σμάJ<ή}ύb/ΫΫηξ,Σ‡?ϋωΙζιA,ήZZΉ› 8Ω988ŽΫ3K+Wn_~ωd£šή…;—­.žG€‰hW\~s<χrgiχh4“_μΟn―΄γ£ΡdeK Ϋ2#‘l ƒ…M¦LHΫΐ₯?8wηΟpλ<Η›Οžί±»qRsnυʍ›―ͺŸ‡7φNŽφ_χ^?|~ϊώ›‹WΏχ£?}ώΕNwΪ5sgΞ^½Όz±?|tσΕκ…+½E6>ϋΕOϊ«ož—…ώ₯;wk΅°mcΐΆΑ–καρΡ£g[γ›—η.½ϋνk8zvΌ5royν…3—ςυ'χφΐ`0ŒΑ€1Ϋ#PUJn6Υ‘ΤβJ¦p©!@m4m ΡΦnΌσGşܞχŒΣ―ο?lŽσϊςς₯λ7Wx5’,Ύωή·_XGθpγυφΦΞIϋΖβωχώιχWώρ““i.άx­wξ\> 4ω½·ΧG/_<~Ή:9ύκΛηυΓωz:M³3Ζi:ͺΘT:.EΨ’ @Ά©mσπθψ›§{zνb{εΪ%IG_oΎ>ΩφΛ|™M§‡Ožέ]zwωζ»ί9΅OΎΨOΤλ/έ~σΚϊΙΣ?Ό<~>0`lƒ 4ƒ••΅sΧΧΆŸΟ½N†Σ½ΣιΡ¬.ΝΖγαiζY@γύ£­—ΫΟ?8{cώίήx}οpγΤ±ΈvυκεχΟ«v§Ύ~|<›―ΫΖ"ΪΕΛo¬je~seοxšέδΩaύΞ™t3œ­J± °#kC'˜r"rηŽ"ŠΣVB₯8€ΖΨ),[YΑ‡±‡E–θŒkvnΥZ2ˆLάehfہ‹Ά2B!ƒ±©Š»ŒTΑ ŽΤ¬m’šY*ΥΩuY€’†0iθJ‹”²qgΤLŒdŒ… Ά29$œ8ƒ,JT­0`Šœb6ΆΑQƒ;;WˆP„εtʎHaSP²X‰dp: Ζ)T„1–»! bK)ΛΥF –S’6"$2XF΅ a£jAY‹,Ω(­4²",h2BΚ'™d”‚AΚΥ€…‹L'E8Œ²μTvrc!)l°Αng€pΕ•,iJATJΨ Υ‘€°$Ή©Y«”δ€βθYuΈKEˆP©ͺ&p’iΉ£TΩ’fΨJP*BΨΆHŠ6£#š \₯.dw΅ZΈ%M­Υ¨QDΘ–jη"K)‚ 3VJ€lΙV¦ΐ–‹jj†!"#dΛ .ԈκhΆ hδ@ΰYΙΞv’›grJΆm+CiB”TΖ€ΤΩͺv©Ψ‚Δ‘$Hͺ-[ΕB)c“™–E42€²J@ig$₯ΤiΧ@ Χ°§²Α’,» ‚$u*ΩYΞ(.RMθ)vP’ΖT’Q°©–±²*mEΨ²"23Tl FVh&H\m0@ƒœI€‘6Υͺ4„šΖΰ”%"Mf­!Ζ TC©2³+ …ŠEΚ8ΘΞΈF$ΨΒ‡•5”αΐˆ\mͺΕ&SI!£‘­vDJ²fΪnδb”ΞΪΡ@Dƒ! 
Σ,Q“€*Œ]²dhSŠHΩ‘rSll…bnnnΌω»Ώϋε΄›ΎύOn―.¬]Ί»Έ~«¦Ji›Άm=9xφΰӟΧ<ž.]hšVŠˆR&“ρζζΧ§·ήŸ΄1ΫxΉωϊ`Τυ–JSΊiχόΧύΧ—ψGWΦο|πΡ…Ϋ£ΡΤ.ƒΕ…ώ ½νG―  •B0όβΛW?\]?;ΧΔΙζώξώΖp,@PΒςρ'?ύυΏψΰΝ•›ίώhύφ{“Ρ,Κ`niΎΧ«‹O&³IνF“γ§―ŽΈ°άk5ά|Ύ7™ŽΫ&³ΣέύηυκΝA«ϊbso|<*j ™Τ” „dGΕ2AJ‰lPΣ_Ίυ'ζ|I E)mΣφ{³έΏόλφϋ­­ΊΨλ¦γγύ‡_ύνυ³ρΖ™›ώρΩ;£ΡLџ_Zμirπτ“Ώώύ>§ΡkΪ2wΆχNίZ\hν?ή:eμAρπδψρA\:Ϋ΄ŒŸm쎦•RjJγ—_}ziypε+o}΄rύαtͺΑβςΒς\|σΕΧž<<^^<8ζΪ«ώθG+ϋ³ιάΚΉυσΧΟχ`„"_>|yας…ΥwΧΌσΗΎ~0œΆKgzσsM¦ Ž^,ίόΑχο^i»ΡααήιΜύ₯υυfGώπυφdnm†Δ‰€tΙ¬(%¬HlΫΐvf֚3G̝ΉςΖόΉkέ,A₯ν΅}u»Ώϋ»Ο_ξΦήπυφύ?άϋββ·ί_ΉωGΆώφιdšMo0˜D=}έΫΩΈΏ{t0Λ ‹λ·>ό£??{g/—ΟžΏuΉΏΠ ΐ` "tzΈϋθ›Ώ»yξΗΧVo|ηγ•Ϋ£ΣZšΑ`©ΧΝφ½ρpϋ0€‘*&²-"Œ0`ƒ6ύ•Ή»ϊί_?žKQzƒωΕ₯E͎7ώπό‡_l Ž>|όβαƝvωώΫmύγρβεkWΞ]X›ο1πΦ½ϋ_έωδΓΫ?Ίpλ£υψ?tϋ€Y;wvemu!;(.WιΏϊŸ§·Χ–G[›[ϋΉ΄zνΞΝππσψόΥώΦΔ2Ξ’£‘%H!„ J£“γ͍£‹oΟIpπόΙζΙπΈ”(Ε΅>ύΫ_^<χΓk—―ΌσƒΥ›οŽf“,σσssƒVϋΣƒΓρξΨ€ΐŽ₯΅‹oί½ύσΎ>O›εΛWΟΦνg[_Ϋσ@R/φ6žόδΣsφγυ΅»WF'SΤ,-Ξ-Lχόβηf£^‘`Σφη½υ§ί=Ώ”oŽέ[ZΉΈΞlΌωε½­ατ¬£HH€Α„lHJa…(A„±ƒ œV&B"›$SΒ€C!Λ`I K(±Α`©²q‡ BvΨΫi# % !°3 lΛ6Ά…ΆSD¨Xi Θ0ͺΖ€‘ ,[¦ͺ”tΨ€ $ŒŒ“”’BI$V&Ά‘–δ0KΣ™ΨH– v8%›°”&,ƒ%…%'A␃aGb› CBΆΛ&”-90J„]"R²vΰ‚%;ΐa–SXTΙ„Β&USUΘ)c€P$&„(6€@²m Ι"HR‚P#@ΆŒ°p%Rr˜,Κ΄Β)ΖD œvb [²«%!A‰Š 6€°Œ,‘D. ‘I( 6$ d” ;Σ‘£aXΖ&mHΨΥ–ˆ°A,€4Ζ δΜp¦A₯DBB!Q@`₯+FBDZ)Eν”NΘ@ °K‘"μt: F`c‡°lD YΨ’FD`aPMԐDΨ ₯ € @lɈ€°",”V&J!pΚ–daPbDZ‰R1²3Ui: :e)„%%(…Β$μ,Θ*- $N!°q 5ͺΒ p#Ιv‚‹;S`P Ω&!q°ΖΆŠP`θPbƒX€…δPmŒΘΆ`Œ\JcΖ’D@`°‘m0€ νmΨ@ €¨ΘΨΠΨ T%Na$Jš0€°„ €€¦ν- f‡―ξτ§[\yΞ΅kVWϊͺέxx°ύςΥ³‡>z°ur[τ!ˆP³n²ϋΫΗ'οΌ³ΠL6^lοž*ΪFR[ΤοΆφ“ŸοΏsϋΓ7.^]]]κttzπόwχΏΌlo{T΄œ`Ψ‘R½θΙΛο- Ϊn{c{gχ¨ Š0ŠˆVyΊ}ο―ώζτΫοΌρφυσ+ kξf“Σύ—ί|vοήƒΧ“£Ϊi:™μlξ²ΌμΡΖ«ν£qGΣoλxv²χ`»ήΌTκξσΧ§έiΆ!ΨN9@a‡,*€Œc€€TϊKk}Θn6n=ρμώ½{OΆ΄XζζKΫB7<έώδ'?½{χƒ;―­¬œi˜MN^|σω½?|φυΞas6ΪΆH1žξνΏ˜\»ΫvOŸμN=-½Ζέptτxγθ£³ Νdcγυlψμλί}ϊ‡“ΣΑBτΑΫΘH²TMΆLΘ$aΙX6ΨΞιpιχ|τφ·nή½yρςκ™ΉΘιπ`ϋωύϋΏϋνWO·Ίyχζϊ9Ω}ώΝ_ύ—γνΏυήΥΥ•΅₯pžΌ~όμαΓ‡_Ώ8ͺΣΩΟώ{Sί½~ϋμΥ»g֏ŽΆ··ΟηψލImΜF―ώώoώΏξ»wΏ}λβϊκΒ™œw7ξ?Όw›g'₯[±1Ψ` €±±Α€M·Ξβ*a…BBΆœ…” *1·vωϊΤιπtλΙ§χ>δ?ύ‡ΗÝ<ψυ/ώ¦ΥΰGίyοΚνΞνξm|σ³OΏΎώα›ο\ξ€@EέτΫΏΛVεΗ}捻ƒΕΧΫΟΏόΥ―~³tηΦwΏ-ŒkΞ^α³/>ψήνKwήΏ^\g£νχ—?ύ«ŸΉy2¦„νΘLe2Xαƒc€ €VγριΞίώΦ€ά}φlηΰ4Υ΄ŠhΘA›―Ώϊδ?·Ύϋφ­o]Z;»ΦΘΣιΙαΖύoΎώϊΑ“}Ζ€mΫέΑλ­₯,λΝυ³7W 9μ?ωέύίωθΥv]’ΪFέhηΑ§?Η·?zλΖ΅Υ•‹ΛΚξτhϋ›ϋί|υΕ—·YR4’`w“ΡξΣGŸ-tο^___Ώx>ΘιιαΣ/>ύo?Ϋ‰ι\(ΐΐΒ–-LI:…Α8DΘnlHHEΪNŠ1B@’0 „…±e;m£+@Yͺ)02%RP!3$YX","’₯tšL EΖ)ΐŠͺ,Ά%+$ +!l§‘…H9H‘ΫBH°%Β)')δH@ Β²­*²p‚ΐ"- 0gJ•b%Ψ€lY€ ΩΒ’ͺH nͺ»€™Ά­D– …μΐF2"ν¨a₯eŒq€­4"Λdš ²Œl‚”0N#ΐ Œlƒ­j…J–Tε*.i BvΖΆmd$dΙ#$l[ΨBuŠ e [id9 ™#Ι’AJ,`aY²3M@…• γ"l!R&%K€d‘@H†΄’ CM„D6‰"Iac‘ AȐ8eX²¬X’±d0Β B‰ΓΖ€edl9‰ r`! 
Λμ„t„(Jc @‰…ΘΆp@N ”¨`I²]$ccp•###dv([eͺΒ8* Ψ °qΦΖ8HR8%d!;ddΛΨ€•¦:Š –‚Ξ¨1Ά¬ŒDΒiΛ)Ω$##I–-lIF“``ƒqJITSΙ ŒB ΘΨNM@±’°9‘ΐ`Θp‚2HBΒ2HVIgb Pš’ͺ&œŠPΚ€@Ζvš€DŠZƒY9l ΠφΦΎqF )l+-₯'“ρζλΧŸoΎά=j׍†§'£™šφμςάς ΄ΡΨδl:'ΓfyqωLΣ4Ψ]7žε΄·όΦς¬ΗΑώξθ$η<· EfΞ&γ½£ΙάΒάΉ₯ώr?Ϊ€:M†G‡‡§,Έδ`ianp΅=-“§'‹ξυ2³λκΑ —.Ν­6.§›G£ξ0VΫ^θfέ€YΌ0ο΅8Ζ―GύiΗΓαΡΨΛKsk‹½ΕV=ͺλt2>Ω?Nϋg’?Šq§™››ηΛΌ'£gΝBV¦νςΑ΄ln—q,D―/ Έ}yυΞ•«kgW›~A .’'Κ.꦳ƒύύϞ=zu6Ψvζ°–~―ΉΈθu` ΩΝf“ΡΙΙώαΙ€=3·΄άφϊ’2λt2=:–ωΕσΛ½3ύ‹tNFΗΗ‡G{ua~ωLΣφ€ρΈ¦ΚκΉΑ9&ΎΨœ-wΝ|βq ΪΑ3΅υΡήζpΨ.e―οdF0Xxc0ιO·7N{C LΜ(Ρ›Ώ9?j»έW±ί-Ξ-Ξ.z0Žv΄šΞ“γQfiqpn‘/ uέdxrΈ0‰Ϊ[,m:Λ‰Ϊ««ύ3=5ΤΩδdwr8ν_ΈΎr‘Ϋ|~4Ί9wŽrφΜόΉΉθ1NN·7† ΦΦΟϊψδπθΥl‘‹vm‘wfPϊεt:<:>:ά«‹sKgJΫJ`mqξΝ+η―]½:Ώ8ψ ‚“-[ΧσL¨σyΏ΅’ήΥ)$Ω²-)Π E›[€‘ Έ@`$— :928IK–Uœsv{ΗϊΏ‡9Σm―nΥΜΫή³Ώ|~ώέοώυŸώεϊπ@Ϋγ8žίϊςπϊξέΓυ««ΣυΤ~yωςψω§Ώόεεζ|φt}έ½Ÿž??oήΎϊΩύΊ=Ν8φεΛσηOŸ?{0σδϊϋΧΧίή­λΩϋεωΣǏείζη³ŸΟ>ν?~½››»_ΌsΧ§§?ψΣΌ>ΦU’}ΟΟ_>~zΎ}σκ―ΞY½/OŸ?όιΣΧ—›wWίόύk―OŸž>=x:}ΉΎ9vίο»Ώ³ΏΟž>>―/ΧwΙpšω‡Ÿ½ϋυΏϋΝ›·―ΟkM₯Ε¬Όμ4M΄yωzyӏψώΣϊ§§'Ϊξ}zψωίύέ―φWw—*€//Oϊσυ_ώεϋ”σΥU’―ΗΥύ7ίϊWύ7oonςςτιOΏϋ§?<Ύϋ۟υ»WΏ―ω·Ώ}ΌΉ|σίώΝΫon?ύεŸώωύ/?όxϋέwΏωοώϋ_d½ΟΧϊΣ>Ό<υ|υκΫίόζoυνύη§ώ—ώέ‡λŸχwΏϊεόειόΏ8οϋ_όςίύυw―ίޝΞ._ίτoόΫώρsΊΞIΐΏϋυ―ώηπ?ύϊoέένΥZ'“¦;[œφŽ>~ϊύν·ΏΓΏώψ @ΫΛΛΛσ…›ΧπκΛΚΣ‡ϋαSξ.Χ―fΗεςιΓ‡gWίΌΊύζξ|{κ8φΛΧΗǟ~ϊψrάΌ~™W7wΧ?{έ›ΛγΣ?ώxϊ¦sjϋελε²½Ί=Ώ½YΧ§ΜώzyώτώύΗO—ΣΎ}Ϋ\]½}ύWη/7ΟϊΛσϊ°o^.—ΗΗ§—Ήω«w·oΧΥͺύυλΣ§?ώψ΄ί~“YΉώn~vσt<ώρ'O·έϋC―~7ίέ>_ž>π——?χφtuϊζξόκz§½<yόπΣO>_}w}wΏΦJΐΟήήύ/ώ·υΧχ7ΧmqdŒΜŽVφNv’t³;™ξ•Ζ4Ω«ΡFv{‰&³2:]¦MkοΦξΪ1ΩtC&‰ΠE’ݝDfοέcχhΞΖjΣhΊ9ΊΟέ[d “΄UimiΊt]ι€4’γX]›έΣMφΜi§;[Ί:ΣTσFWΒt7v&‘\˜$­Ϋή9_gΫGΫf%KΔqΩΘ`·ν$3έKY_#¬έ$JχƘ:.ηδRtΆlΊ€Νμ5LZΊAŽΜ%{m'3’mγκ}²'φΨβ #:/M³―HZέ%ιNvgjRM²FEΓ6—Tšt€=Υn'š\Dέ]vc‰IιN6iΫ΅t΅­Ά’#3³“]ΪtgZ£Ιnl‘ΔξaŽEš”—φHfeb${΄Yέφ±«{ΆH'=¨N2’™švwNk·ϋ²[δ”§J7ΫNwΨ9-™μ€UMkΛ&³O—¦1kOEZ’£S1»½dŸZΥ$&ΫξtjΊ.ΫΦ‘6φ1K€Ί₯Ι Ηξ±E֚9ΉμΛΡ,³’ΔήνήI€t“5L&—™ƒS› „Fkο}JjUi2—ξU³ΩIrΔ‘}š=“š½w”μδ zΪ9M²ΣvΗ΄ΣΎ\#ΩΡ•i¬ζb=ι€ΡM₯ϋ”šΪfϋ΄μ(‘²ε’½¦J‹L9˜μšmdDWz$htΞέΪje'bζ(;v&[φ–šΜξŽ=ΔμΙ^iνφˆΞZk8&eΪtw·Νξ4tλ–!‘=%΄*±ΦρrqٍL&Σ={v’;έΣΦΘZιδP₯Ν–M₯σΜΪ+;)TTβ8½y}—ύΣŸΫ•$€mZŽ•~ύςό‡?ώρ·υΏϋαΚ>Ž—―_žžŸ>z~z:./e­uuu}sw{w}s»N§ ₯{ύςόα§>Ύp:­Χί|{κΥωt%Ρξ½Ώ~ωςψιΓγηO_Ώ|iχ¬uu}sπϊφώa­υόψωΓϋ_Ύ|Ή½xχνχ§σ93‘hχ>žŸž~ψσ_Ύ|ΉΎ½{ύφέέύΓΜΰ8Žοότώύξ~xυϊυΫoΦι΄χώςότιγ‡ηΗΟ//_ΥιtΊΎ½½υζζξξ΄NΈΌΌ|ϊψα§ώLίΌϋζαυ›ΣωΊ{y~ϊι/z~zΊΎΉyϋΝw7wχk­$ψχΏ|σοζοΎύζέιΌjcfφ₯{-3ΓεελO?ώπό_ϋϋχm(έϋλ—ηοϊψӏ—Λ‹V‚$kΞWW7wwwχ――oΦι”hχ>Ύ~ύϊψργγη_Ώ~Ω{―uΊΉΉ½{xuχπp:3£½\^>ϊψώ‡Ώ—ΛΓλ7―ί}suuMŸŸž>ότγηNησ»οΎΏ½{8NmΏ}ΊΉ»{ϋξΫλΫ;ρυωωΓO?|ϊψαϊζφν7ίΞ¬Oί?~όΈΞηwί|w{wor\.OŸ>>}ώόυλ—}3λκζζώαΥέΓ«σωjfΪΗεΛΣΣ§ž?—‹8Ξ7wwχ―nnορωγ‡Oίύςησωϊζv­ΣηΟΦ:½zσφ|Ύzzόόα§Θ7ί}{jN‘μγψϊεωΣ‡χOŸ^^.α|uu{ϊtuυτωΣϋŸ~ΨΗ~υϊυ«7οNησήϋρΣǟ~ψσήϋαυ›Woޝ―&ΰ΄ζώκν―ξWoήΎ=Ν)›έ€¦G§“™ΤΌ|}yώ§όώρϋίγΣσ3Ϊέ~ύςεΛσγεεE LfΦι|>_]ΟW™Α>Ž—―_Ύ~ωr¬΅ΦιΌχήΗ±Nλϊζv­ΣΛΛΧηΗΗΆΧΧΧηλ›Ά_žŸ^Ύ~Y§σΝέέiΪ^^^Ύ~yΎ\^°Φιt>΅½\.‘›Ϋ»u>—ΛΛΧ/——―{o5kΞWWΧW§Σ9‰όζΧχΏό‡ρ7ΏϊΝένύd‘Σ=Ε:’Δξγγγοψoτ»?όα§ΟΚ>Ž/ΟOοόαΣΗχkή~σνΓ«7§««$Π^.—§ΗOŸ?~|~zΌ\^"η««Ϋ»ϋϋ‡WW77Ηqωτώύ§οgΦ»οΎΏ½{˜΅θρryzόόιγ‡/OΗ>&su}}χπκξώΥ:^^Ύώψη?}y~Ό½{xύφέΝν.//Ÿ?}ώψαλ—ηc3λϊϊζώαΥέΓΓικͺνσγγ‡Ÿ~ψϊό|swξΫοΞWΧmŸ?ψρ‡―_žoξξ^Ώύ&3?<~ώτςς΅»k­λΫΫϋWoξξοΧ:IίΏ½χΏόΕίώΥ/ξn―·cwΙ iιǞe'M’ιN#“Φ>„‘EdΪα₯Ά˜HΖ^ς²g’ιΆΫέΥtQvvƒ IΡƒ$1έν>ͺ9uwΝN¨nΪcΪ#ae²²§[g'•-etφ4Ωv aeŸ²Žέ—ΆD§5¦Σ6fgšD/ϋH;©YvfBͺ«IZ=΄ΝtΦaοV3IΨΝ0»έ:fΩ£{ζ«`5©¬Ά=JΕjN‡—«cφ€jo“Ω»;c­ΘΨΊ7έμd·«ˆ©TLΧξ^έb'MšhH­CΪ}"Ή΄{›Ξ¨ΩšΩ#³Ν •ChvSQ9h3mb2“vΊΫ4΅"±Η‘dοξ9‘΅·΄k&Ω₯MwΣZΩYμt§I&ΨG΅I–„©P^ΆΜah³Νš=v·všD#[w*!‘ΕDΣ]3«»=vΥΤΛι€ΥMmέmwΞ“YΩckjvZ³ ³A²“¦μt6IWΊrΉ/Ι΄‰DŽ΅;ΝΞμIΨ»»{μLX9šI’­[ΙjdΊ›1+ΖemΖLRέZ$m·ΖLŒ£ΙΕ¬!DΕ£]¦Y»v̞ΪΫN’vΣ΅’‰NMΛΦ =U&m;ϊrJŽd'¨ΔΨ±.ΦΦΥΖζ¨4Σ$Η^4ι€iΣέ}Ϊ1вΓ4GSΡd³+MΊ“™IMw£Rιͺ5φδ­Mηά]ΗAM$€mΊi3:CΨٍ$“}©Β’%Ϊ uΤNduφΉm³`έΊS#†rd ‰œcλ&’Lc·RƒΙžhu«ξj¬‰εH1;)[{ŽI3M«Z¦LφωνλΫΣ]œŽζBΆdvœF25mf­«›ΫΣΥυύΓλ½w!333³’$ 2su}σ½~χ]’uZ3KIfooΞWWoίuo™Μ¬Y 
·―oοΊχ¬™uΚ $™usw³Ώώ›ξΞΜZ+3ΜZ―ήΌ»{xMg­΅N˜΅nξξ―oφ>ΪB233kf’ΰt>Ώ~ϋξξαΦiΝ¬$Yλζξξϋ«ΏΩ{g²Φif$Ϊ$‘F“ž4]i8ƒ2sΎΎyϋνw―ίΎk €$’™™Y+I@’YWΧ7§σΥΓΫ·έ»Dffff­$¬Σωαυ›Ϋ»‡ΆkΝZ§Μΰϊφξ›«λ·ί~Ÿ8ΟΙH’\έά~ϋύ/φ·{fΦZ™ΑΥΝν7ίόΝ7ίg²Φ o―ίΌϋ–œN§ΜΰtΎΊuΊ½»ίέm1™™™΅fη™7§o^Ώm‹L&ƒ7{g²Φ)Ι¬Y§·w―φ.2™™Άέ[²Φ)ΙΓωννΓλξMΙLfΦ¬•lvvΣhθ”Ϊ²KΘΪl-U€dΦΊΉ½»ΊΊ~υζ›v—™5kΦJ‚$+ηΫ‡u}{{μ­E’ΜΜ¬™ΑΓ›·w―φήH’™ππζmbΦJζtu}χπZœΦiΦJ‚0k]ίޝ―ίμoΫ"™Y3³pκυΝέ=fΝZ‹ΜZw―^]ίήΡYkf‘Pφ8rθΦv·š8Ί›=FO‰I& Π€IΞ§S[$ΙΜ `­5Χ7η«λv#™$Ϊ*™™$WWΧ§ΣY›$3Xχχϋφ6™™I‚™9ΟνF2Ih 3“dΞηΣιΤήΆ%!3I’@ T£Γ’;’¦Z=d=Ϋ£˜΅oοΎ=_½ύξϋΘ:­5+ Y§ΣέΓλ›Ϋϋ½w5HΦ¬Y“dΦιΝ7η‡7ο’œN§Μ$!λ|Ύ{υϊζξ~ο]Μdfe&Ι¬υύ/ώjοΞΜ:­™…σΜ«σιξΥ«ξR2“™5k!q{wu}³»gζt:‘$7wχWΧΧ»Μ¬•δtΎzxϋ{#IfΦ¬ΜHP’±ΗΦ–搣4»³cM՝½+Η$‡HWŽTφ‰RνˆD²E’/Ϋ™e³UdŽΐLMν:#™Ωv©΅Hχδ:ΥhR;mt’iΗΖ)ӌdΊ³wχdfKνv:m(•΅ΟΩ‘dΛ±Ϋ„T·VF―t;I2cΛ$’@’,QΩΔͺ•©T;ΪδΘμΡh“₯ £ιql•d†΅I΄“†θXšF—N6]Ϋ#œ2q°j’¬œzΪ΅©Α0ν‘ΆmΣMχ±§U!Mš€¦4G»K;![°ΕžiυΨΩ“cο™Z»1VφI$I²m­Ž5kV[΅“[―4G΅‰™ιž–‰ˆ„ιrδX‘H΅'§Ζ娽s5Υ#mb ιœtλήΉDF&»΅5z4’cOŽ9N;aT΅³+4Ϊ΄“ŽΜd›€³ww’Ω™n»’Ω³TK“˜’lΩ₯Σt³Σ-aΩΛnv“d¦QΥd ƒVΫ0 leWšiΫ΄‰mφ0Ί51&ΝΠh΄u”™a‘h‹ΙlMW*M'M—cEυΌννΐ$“κΛ6Ϋ$’$§#­θ +­κ@¬F[mZ•$;+•ΚΞέ½›9v4[ΪΈ$=vvtφ.•le';=#JkwΗ,M’h»“T§N»yaν4ĚθμέμΙΔΔ–=ΩΩsI’-I¦φ%“γHk±μ8Θ4Ω!a8κ=ΦδΆΪ)jμΨνκ>ομιjΫ΄ΩΡ€{R΅²š‘ΞnΪμ•uͺ½΅©4έi.1mz²―Fc«ΞNš4K£φNwIΦZ3’™•σ:AHBfeΦ$ΑZkf€$€$IΞη+ €$³Φ¬$°N§εHdf’«΅€$@²r Ψ³“Ξ2+Žv;ΦiΝΧξvGΧ₯³cf&9;’’$I²Φ$d­ΣΜ’3“Δι„$f&η3H’œΧ ’΄N@`­53€$@’YkΦ,d­™YIΪΜ,@ ΆG I’Υ.NTχJ32 $!³2 €$ΙJfH‚™™ΐId­$$I’εIΠdΦB3k’»c •Ξœ.ΦrŒfjvΗfΖL[’2³hI΄M‚$3ƒ$@²’I ΙZ«$Π6 ’$aΪIμ¦=‘έ£IXk;ΣΜ¬™Ι039ŸOΞ@H233H‚D’u:I€$k­™€$@’œ―€$@’™5³I3§ 033W@$Yk’Q₯'=ǎ¦[6]ΛΜ±“κ!Y‹°³k'ΓκNΪ0»±Ώšι,ΒN/;έ&ΝιF—$±wΆ1IλpθDB[«ϋ΄3;Β΄™’˜lζ²3*v‡iIi΅Y‘²+!ϋxYA ΄+ϋΌ{œ2ΣmοΓΩdζ˜4lΪ&IΖδ¨μ’$‰ΥžŠμf·ΒL²ͺmƒFcΗ6SΠ iW›τˆf¦ΒŒrT9N™Œ–Ckwe6{οt/*mi‡Iφ ³:­φθaMŽI#€M/³³›Ne';ΊuΟμ¬θj³Ϋ&SS޽»;ΓΘ4:ϊrΨ‘H„Ι>eνY3ŽΩ;½Δ)©ΩI9mO«£3Q0Ω+ΔN+•N*»Iγhw&34mv“ΜtΥ™65i½]’«I7›£mDβ΄I›έLš&;Ϊ΅S­#m·4V"v7ΫywŽΑž6H˜4ΆζΨ²›Ω3-;šˆF›i–ιή’ˆέζ%IBiWzj€Η$»fοfMŽYMΊΩ:I“ξ-m›)¦]²Νή­ λ$Ϊ]Hv4M’.Eˆμc΄d&»k‚}؍νXSrΤΆ3Έ΄m§΄E£Ωk-+6­Lf.kŸΆθμKΝΡt:sIw){VΝnwfλnwkΛbvbμ½{φœ’K’ Y/¦ŽΥ­—ιI’΄Ω΅§;G[ “HZϋ μm·³kΤnμΆ’‘6›&‘ιHD›˜ξnGsڌˢw+!BN5έθJμ€{vΫ‘KΫnMLΪdMΆVvW»Žδ˜9΅Ρ‰d§i]vbgœ¨CZMŠI[25Mw3ΣKτ€™#dG–½s‘K²€$ ’ $ €$ Ψ²›ΆΪjiΪ£I΄;ΩPI’$ €$I’ RΩνξ‘œ€;έΩ“N2΅λhŽ(’@€$@$IHΩέμD2™΅μu$­]ΆΣ@’’HH@@@ΐΈθ%ΗH2¨ΝžI/cG31@$@€$IH€$$$Pm›6»ϋ΄vL#iΓΙήΩ% «†#Ά]‰B';Y»SΊ ιΫ¨]ι0‰Ί4M 4’Š£6eh4&{zIg¦;UνVέs€'H‘Œqμd›΄[›N:Ηnu&Π[ΪΩύ:+ΙlΩvutY¨Κ–NgοΥLΖJͺGΪ1‘ΊKŒΨLG³»΅h “¨Q!Z-νtw˜foB¦MΊyΩi²šA[mμι΄’(Ρ}΄‘„Ωm 2“h³'Θ¦“=ifwg³»έ΅Ά”dCš8™ΓtΆJF§mw™Ι1#‘Υ’έ IΝι–˜™Ξ¬ξμΜjv³cΟCw’sμYέ²ΫV²­V֎dK2ΗNGdjh4ιfVšθ֎ڻ’²›v]“!qΘ.!ThGmμͺ„ΔKLvΊ3ΥΜτRm΅έZ's$v‘I₯[ΊU;ΒΪ»ΥΔ${©μ¦έ3Η)k&GΫ4zŠXΊw·idŽ₯ce1Ωǘμ*λHΣ΄iΥΞ*›F&i“&ΡΆΚhμΪμvLχΠ8κΨ‘œwd7έΙnt4ΣV ΅[‘HpμDΕ$Ee―SD›4#ΊΦΎθ‘»tνDΪT›ˆaν΄Ά™IT ‰{&U»₯ύ ‚I’δ` sNώΫϋΫΠ­½εοΘ.Ι’$mζςΎ˜ε†\Ν*Ύ}[–ξBš­ίξ/§ί"Λ[4ςˆ[nID:±l9Ε"Φnμ‚mYΤ'7ΔK7ί²-1λζΫ"&ϋΟν.’XlΆΫΧMβ‰t–‹vΟNΏΕn¦«½χ₯Rpj³χΝω“_ο‹Ό'ίάwΙ΅—ΉΉν6Y¬ξ»JK>wΊΖf`γb kσqœΙ²μ[n6c6ΨΊuΣKVθΗΫjΉN.w˜1Hˆ«ί–¦˜mιMΖl˜Ιι/χ―ϋOίΪνΈΩ’₯Ά›ϊbρθ'ŽΣ$ΜΖ–,Ωjϋ[bA‚«oU‚%‹tΙ{ΛΩz{Y»ΫΆdI:Ηf7£6]HNΊ5ϋιQ)›εϊr—-ѐe'Η_$ΖgΫύΪ»Μ²SxŒl½ΝΎLϊ»έζ/„m;λ\/ΙARΩφέ]ΪHζαΞfΛш₯«cί',wΜΊΞ ³5χ-išΛŽXr‘.ΜδH―Y–“nvΫ™,_. ’%GvΙR―™}Ϋα₯Ίš±l>Ι<ΙnΩz^_\‚Z&λδzλχ&«ΌΛ]>iςNFΠ³eλ-ύΏ%Λ7“]\Ί.Ÿl±/Ύκy—nΊm·N#φΕw•Ό-;έbr!ΙΈε’Δ6–ΚιE¦L–»ΊΧM·n‰HΏΞΩ6„‘\2ΡΛnΙO3‚Θ—ρΉv’ϋ’4c±8πβ‹ΟΈIΩ±Ω$2λ–mφ{9Ά-λ_rΉlΘ'N^Δlw“¦ΩΆΕf_Β-Ky‹³›[,ί’›ŒΡY>ܘ¨Ω>}‘χΛ „ŒΙδλΕ’ψrkΆΏog6g"G“,=qyΉΎ΅―ξΗχIΣ "n3©<³]ζ5D.Ίe‹X/ΦcΉT―ύφ“D₯Ϋf2ύr ΙΣ·δ6n\Ί˜υ[6Ο9οOΊΉ›₯c·άI–MˆΕ ΦάεKRƒ%"oρM‰œιV{Ήο²‰ˆuηsΉI€,ΘΊϋp’H’³ΛϚU,[š—eν± –ŽνφΣwξέ²ΔW_Hβ^:[6·$Y€δZ’ΩγΩήdΣοο/½GΠ‹{•Χ5ξξΎ{IXŒ{φΙ™†ςδ?%·τΌύE_4Ι–έlΐbYͺzžŒf±ΔrΏKχ2“K0½$ώΊ.κλœ`=._€‰δΜΦH€6? 
GΗvqMždŸXέάu^+Φ}[½#•-S½E4“\,»Λ/9V"ί-.Ιχνi·°δώ–cVώ$JšΟΫK{ίρΘnϊΉ›'.ΩΝgBzMΛ₯IΨ’έΆ‹-_Ξn„p%ίμηοO–~ μΪ[:ϋ,’)/ί»“LΒ’{ 9ϋ|‘x©fΫξ4νεΐeς%³ΕΥ*YwOώυο[ςΙaΉο’΄ο—ΞΔ2Mtω–[*"±­[‡’tν5ϋ}ί±μtodŸ›Ό‰δD_vΡΘrλ!‘™r»±Χsœ.*σxέ;IξΩMX&Λζώς',€y5~³ΕmrΝ^›οΨκζΎά½ΌΧόλ>η{I,έdBoΊ8Ϋω©Ψ&^ει·c σΩ΅Ω{‘ζfΦΟώxXζInϊυΙe—xμwœ£ρΔYΆΈ³~'Λk’%“ΖΙd;Nή§ίώm3ΕάάΌθ“_#aΉ+‹ίΎD—lΙ5{Χ/!‰%‰7;γOτΝν;I’τΛd1KΞ–ύ’uΙj-Ώχ|Λ₯Kč]ς€³…†ςΙώ5%έ²Uξ^³Fήξvw]"ΛΦάίD>f@I"ΗΩΊ»7$ΪέmΆ΅Ω§'bλςβUPΧδ;{σ-γ’H―Ω]&Ρίb–‰%‡]]“’ϋ–\άμ2ού₯ϋύm_+‘‘. %² ΉlίΎeΝm!‘jδ·οΆ]"5/½gΙ_b―ΛΗε’ΩμΣ·X <–œ.‘₯–ΝHŸ»»2„Νχ­IΙbμΞ:Ή/;»Ϊ²«ΎmfϋύΌh:½FE:B^ΣVνfq™Œ•t²Ω€±‹Kn}»—sρΛd6ύ΅έΒ&–f’½n’/»ΉΛ*—¬Ό%rιgέμƒΖ„Dn7'δT¦^ΦΕݝۜ½ΐB·lRΝfλ]H:³ζ’₯Ι2ίWuY‚Dš›ύv4R/Ωz.βl7iBηd/z33ΩΛη^ [ξrk³YgΫqΊ%s™hEΒ'^šq»;‹˜ΣZ-ˆεo–LI-Γθwλ屴[ξvc>τ(‘ΌέλώΐeΛMΆsΉάΧΟΗ*ΓΠ|ۈH&»ξŠDFέΝnsI4έϊE’λv|+έΜΆ΅—DJ³Z—Kf7'‘w[ΑΆΤΦI—ΔeŸ,3YNε{Ϋ[³ŒΉY~ίΐ$š%·»]sΙΙ%}ι­^τ»»έ6–­q‘;–t©Ψ\"—&X·f zϊ—#wΉ%oΛ$‘7χ}_žj;ϋf™μ—ΫΌ$ιf–κΙ9Ι½mΧζβύ.§‰F·Ώ9›LΩeIBlϊ1šDl»έŠdΛynoΒϊ\ψ’%²„Ξ›E]Άϋώ[Ÿ'ϊφ»ίΐΰΨ»¬SΤΧάλu–±a3w'ηή²^|ŸU"±άA–lΙ2lΖΎOOλ—$‰ϊ.nω’ۘqΟo©=σΕza»Μš-†εeΪI6–}‰}ίz Φε]’ƒΉ#„D¬M6sΛdrši―]&ίο·mΫH$†ήi’·Ζά"ρ¬K–˜.}qgW‘'K^3αOr°ξzΝ"–f_&" t2KΆπuΙ’ΩνΞΐ2%›Ÿ4fΛ}Εl«t‰‰Ω†%–εKTͺkVδ α–­£1GK&J€LOΎ₯qΧϋzΊ.–$ٜh]Ά/°³U6‰ΕwΣ{Φi,™rξ“Θ£n’fŠΩ²Ψd ‰ΆΦ»ν4ΆK#„›d Η”-Ή•YrΣiστΫY’ΣcQ2‹pCdά5“ΞΆ!Λ©Μζ.d±RΊά3έY·Μδ$d*M·ά0€΅mΫΨ(ΙΖ%"‘Ι\²ΕΆžΔ— dΝμΫE€eΉμνz·!Λ.ίΤ"Ή`ιφηڞ,›m6ΙΙmΞΒΩμ₯dI2Ωlv}!³άΆ-–šεK<{V~Ϋe’ΏF7saΙ1Ώ~ν*$Λ27K'66Γ˜}“ι;SΡ bg§%φЈ­ N&#K[™~Ϋ}³*‘Ψˆ„ιb±‘“nε-αΖog$ΨΡί₯%0[οkΝ0d2ς$[8w!b±’₯σɐ­»νm''£BtιBΟf±€ΚΆΓωc“5]Ωr©-Ωb‹‹A,ζ>ωK* Έw—ƒΕάΨJY²=χ^,X6Ϋ8ΆεL˜ F—¦³Ω%’X2ܚcΫ,™$§χ$Λ%{Sϋ-ξ’&ιτΫ…Ιλύή5͘Δn€iwΛN7;dΆŒ€χέL¬ΦisΙ[vR©½l‘„Ϊψ–1`!Iš-Ηχ₯68"I5~“H§Λ–`λ€γ›g]ώψ,tλbYό»dŒΣH|OΦI$‹evΦΫ\|/Δ'›£€D’ζٌ ³;η΅€R €ή^εYY’—mΝι“osί]“-Ϋ€^χΣwߟμžζΎεΟί4{·l}‰§ω.·-šάξ;½mίΎ~sοΙΣdΫn²ζΏ»sŒ#WEί-ν£ΆΏύ·ύΛ’}½yζίnΗ9§—nύfέT’οFχK₯KΏε·¬™¬‰Ϋšί³Ω„dΙOϊWknΟ$ΥGξeοu½uΩχένήϋζ2·\’ψχ¬šŠyσ²Ώ<9ω27·K\_Ύά†%=$}"³‘φ}ΞϋίΙ*ιΪŞο&ΙwΥ³Ϋι™χ’oyή[tϋΆ΅/!–Υ:š/[«7ηboίΞ—eχΣάrŸ]¦R‡7ϋςεδ›Ήεί²»/ρ-*Ώ³”ΜE Γlk“χοστ;‹οeςΎωσW3'™ϊώvIvΚ}—ΘΩ_π­ν|‰χΫCΆW"ΟΞ%KΒaIΩUAΡ~έΦ_ΦεΫ₯yΛΥ_ϋά’]–DΌn_λ^zΗ;—o³δ{ΏtYν5'Ÿ½έΎμx’œηϋ;•HΞφνϋ>qσβ?KφrξέΞ·ο²z™—Ίm»~nΘρΞΫΎeχ·άφΫ&ΛΟύΝΓΏ»YΣέN'v©ΨΕΩ“oοsK_Ύ{©“m‰ύˆ,+Ύ_žΖ\’Ψηώε|ϋڭ͚<ψ²ΟπΟ½dͺ±ΫΡΝΧχΆ/Y2ζΏ½q“f"–Qo$½+ϊE%ρN2H€66Ήζߚ·%ίέN–χ·|ώΏΙ²΄l·Χ]o·5ΙKάb™ύ΅ν‹\œ±΄±_7ρ.7Ο]™LƒΡ4„}Ωm_m•δφχ]›»׏έύz[’%Kϋu_½τ.ήyWtšΫ²/}½T―7?όQˊ»ΜΩώ―gοKz›`{ηΌΈεϊμ\Ώ'ΧuΔΉŒdΡι_.φ\nχoΏKώησοΟ `{Mn»ξβŽΧζΆάΛ€μoώrχΫΔ…Ό\&Ω{KHε·fόρΛ=±I`ΛρK/uο‹N6qή’l·lη>o9η₯ν:³d?χοσΥ1·™lςνlύΫ.}!gσΏ‘KΘ’%.5σ–ύ}ΙΙ°9k"·e$OϋμK.Ή‹ΜζΧ­šJ—Uβ…κzAΎνϋ5Ωun•I"’œνnΏΣHώƒΙ—w²«Γο}|sΉs6—όžχ|=Ϊ4Cζώk˜$Ψ’~Ρχ~ ‹Šμό—![Ά]–{Ϋ_&^vΛΎ›·m»eRΛυ"ΆΤςΥ"νmξwYX²ΌΟνvΫσΕΗ—Ρ'Ώό>χ9ω6‹Ι·V“ύr—·ΏεώvζΎe‹Όλ~λbΙeΛχ7ν—δώ5oo—Ώ|qο]ξ[ς7’Μϋ€š’\ΞφmΫ³ννnΙe¬Ϋl<ω›―Ί ™ύ]-Λ­ΣΪ\ϋρύNή±δ.•ά}¬»ωυα/=›ΐj™wΩmhn²&ύΟϋ]«lΧuΫu·oy~'Ÿv IΫΏGzέfηK’]±IίΏ%Y>JSΫ\ξ$7ω–w_”Hrηλψ½x©τΛόχeY"a%\’`ٚ}wωϊo 4ψ²r–‘ΉKήιw{zΆ·ήf3‘f²»ΝbΫ|ω/MΏ qΉ«mξοžοLώ{l§??ωm_}ROΪΨdNŒΏtΎΉδ—fΎΌΏωΥmζΒ[I’Λχι½%wςΎ»ΌΛ“Λ“ξυϋN.ΌΪΜ&rοlυηV^]/ngύl›^ςΫύϟZ9Jρšψ«UΊt»τw–ψγΏΘ‰d"qωΆΜλύΆ^–&k5[²\vŸΎ4·Ω—ί; ή·Ν2ι’ήΟο?έRbΙηϋήή_c~ι“v+‡›mΙ{wΏw/Vy€΅qΉ82›‰Μ_s„LΌ»ίo­„ Έ]Ά³Ό²:ζzφ>:BΘv₯«“΄S•Κύ_O~v₯m§1`0’Ξ»fΨUΙ43₯[[Θ9Gsζ„ξ•9@kχžsD΅ιg(tT²srnχ‡η§ηOŸ>ο.n·ΫγγcζL›^ιΜd±ŠIo2΄›΄•dnΝi'B·»:΄±‰²IΫM€ λ41ΧtΣmΗ&) ™šX@²Σξ%=’ig‘NTš0ZiΞΪΪQ"یμφ4Ϋ&΄νv»3;‰tΫv†‘jS­θΩF§Χuέt’ιΦT‘™ΡΠh[šH›+›.©°Pνt+3ΪT:r΅K€₯BνΆΣ£Ω4&iouIUG-λSAj‹ ©tbR»νhUΣM*RU‰Dc’ml’Mš‘θ΄r%ΓΤV¦‰΅Ϋ0»Fš„€ΖΥ³†ΖF›^ ‰‰€™$rBνΥn;Β¬$’B»_’•ΧdM†\UΫ‘ Νib馫ioii!an₯ΙΖ*8΄³Ρ0²“l€iOWf΅66Α4»=m£©$cHKνnηΊ ±Ω6©‘Υ¨jwΦΨimΊgΚ.Φ2 ™ΡFU5‘š’W΄Σ&MІˆ0έ–稦$h-IKΫU]fΝΞ₯IDBκ &«llΚ0UΡR] !+Υ΄tΣΌži’J›lg"“τʈtZ.9™Ψ͘t·mΆM›XjΉzΦnw4Ί!“$¬I™iΔΥ/W―Σajš6£΄««3‘ͺr%₯I¦¬vF`Φiš³γB[€U‰:hA'«ν&FΦl$™$Ϋ΄C£nkcƒΔL7Ϋ›^T’IFu·Ϊέ3ΨiΝΤiΥh£sυΆ«άT“EKc…cMoηφxx8'½z³·ϋΓΉO¦ze₯˜•$‘•8ηαα~²―`‡“ϋΓ-3MΆ₯‘&&½’9χϋύαδθ’7}|x89ŒΡ±ΉΆ4νĜsΞΓΣγΗ?όπλoΏ}ϊτ ///οίΏΈ?©Π€I; 
ΤpΏϋmNvLφ~›ϋύ>‰†HW΄ˆR£Œd΄νκˆΕH’Ν¦i$Υ†fš3 k*2jo™Ρ6Li·IdvτTV©±EnšΆJ„Œ"΄RqΝnjNT›•6•J€R’;ΩHA[ΥIμ°!²i­”t«b£­0ZKT±J³’©¬J€6‘šv»›`*Fj“VHd¨Τ-VΪ΄iΊέ9•²©,™r%‘εB-•tfVe1M«³Ι‘4₯¦Σζjζš‰€έ366ΙL³&έ.ŽR• …†HTv3z©Žž‰`£ΊΆ-§μΞ­HK“έ€QQ©1AΈΊ‰$*"Ί&JCeC7T:“LςΪlfL΄£“IΫU"a²•.d΄Zœn«Ga΄,Hics5’Ρ΄»Ω$IΟ¦k“š5‘ͺhfUQP“^)±l j£«;Um:€Έ˜Vιͺ&brUΣ€)M©m[S¦Wš‘Ά.SΧiQ!Mb›6M««œΫRm* Ω •‹e„*ΞtF€΄ir’=θ΄ΣmΞ ».ιdL,WF*4Z"’D+’-Π’6ΦνΥi'’Θ†twΫ­T·4Σ Δ’Mʊ&€™$ΡέV'I΄ ’a"²‘ΖμŠuL€q™ΝL™v’΄­šF5“ΚΩ‘TΚ‘[ϋΩ&ӌκκ$š0©H₯₯…^iC&‘-ΝΖ†nZ͐˰τF₯ŠTS4"fζαώψΥσ›—ΗΫOΏ}Z3η$€vΫ½Ξxχςψζεωαα6Ψͺi’ΌΞV·νΈέΞΛΣΣ7/ωί_―fN&ΊΧ₯ϋςxϊ«§ηηΗΫm"Ρ°I‘lM;3ηιιιλ—§ϊω—OŸΦœΫ! »΅ΧΓ™w/o_ήΜύ$ΓΔΖΥueŽ­vΪ‰ζvΟσΛσŸνώόΛ?ώϊΧΏ}ώόΉ-`fžŸžώτΗ?όλΏόΛσΣΣνœΞDK©θ`¬vδααα«—η—§σσ―ŸΦΜ9I΄έλ}yΎ½}σττpŸΑΪqšͺTU4"!:ZUmΠ€ΪΆ‚#œΆQ¦νκΨ“θ$&ΩΆi;ΆQTΆ‰³“Nͺ²Uφδ¬T‘Ζ©E©Š“Fg»ζj¦F‹kν\ ‘0@š4¦[Σ†ΑDz΅TCM“΅–‘Θτ΅fͺ²D£ΡFέ­9)––¦©“νt&œ\šW³έ*l4’V-mšΘ•{§Υ­m£ΩŽ–1ƒ΄§•&εΦ@©l"Ψ΄I ›n[“T€fKdδd_†Fe3²Ϊ5Hi·:I+έ0CΣ^%Ρ„²‘m«i h€ ’šTۈ$ŠhΆA܌ΆvrͺŠ0m™”LT+š΄΄XkvμL΄Qe'’±¦6r…v¦±ΣΆΆ“5ŠμMΝBR %$6ik•™±ΆmМ΅–@FνUM₯ Ζa΄*™tΫVwJG’“MZseΔQΊ’amIμΦ6Σ™¦m·©ΪFΕΘΘΙvZ©4†#m*•ͺΆ'F«Ϋ*I€5\sDΊ4“˜jN^© $ΩΈ²6Y’v2‰ξΦ2Ι4Tjμ…€ Υ΄ΝΔθ(ΫΞV‚f—&κ­4Dm'Σ&κd"AšΆ*ΩF71Mw"©6m6Œ4*―Hͺ(—NLν:mFΗΆzMVSs Ν!n¬©€"©pΜγΓγ»―ίύπν/ΏσίωωK{[ΪB  ‘‘”(@%ThIh§Χ›ΗϋπώέΧ_έοχK1‰ˆ€Yν4χΫΫ7ΟϊπώΣοώΛηΟ»―ΝlQ   €P@  (@@€ Q”™μλλγΙ·oΏώώ»o^^ήάn3έNœ$D,[i;ζιιιύwίώγΧ_Ώύ~ΩήZU(  €B@ € €‚@ftχθΛσσ~xόφeNηΘ$›nΫλ5Ϋ€1άοηνWoώόoγχίΏ?ώψγη/_ΪjJ(€ P ™δριιγώχϊσΧφςΥ›σpΛLΌκκ΄tTgβš™‡Η§oΎyχα·_?ύφ—~ώήΆͺP@(„(( J„ˆ$\―χϋ|Ν7ίσξωιρL([A*νΘ¦ΡPv£’Q’Ρ΄iWk$“k―–’Ζ*HiΪ4MΥm³‚Q­ΝlθΖ šYš,‘œMΣΤθH;ΙnN9&Ί₯θ`’¦+c&“ΩE'A“«-§]0q΅lvΪCXe“[₯½΄iH‡&Σͺ„h֞ŠA³MSa4ι&VimL§š¦£Dc«!!™UYvΔvz!B&Σl§’I+ΥKŠHr.iΔ&FΙ,m­T“­)S'%F4RS$$&^γ\›QΣM’QΪJΟi“¦i5R`2H₯…Τ. £ΣΆέ¨©ˆΈΊ’TTW‡Π¨jØll›f”J₯Mš©κF“h²Ζ&K*SaƒeΪi"Λζ΅IηΘI·v₯Ν”LB’™hw;1IˆδJ›f+’:£Ϋj²šφT¨J Σ.!E*Mš`TΫ4“0¬TΒ0H'i£*΄²IBRŽ(Λ†L²l&i³);Ω+šfd’θuΪh‚¬lve‰)ν˜ “V¦ZU,Y詉₯š*5\qI™hlΉ68ٝHͺ±Π€'i“¦©!MZ‚‘’v:UDKιͺL‡@ΪiΫ4šlΗ4[Z¨–6]@Vw:ic£ΫliNšn1š΄τ•€l€IΣ%ΡΤ†”p*ME^›ν`$μ­‘Πhmf’ά:MΥ€†ι€½ζ’JnχσυΧ/όγΗϋνόΏψλΟ?τϋ§O―ϋͺΝ"-©hK©H)”ΖQ€Ѝ"@‘(iS‰Žͺ€P„$IθVJ@”%έ"Σ-L΄Θ΄dR&¨TcC©’ i4eP’άξ·η7ΟίΎ{χΓΗχ?~ψκ«·3%•FΪnέΉ™ŒΞΘνφςΥΣΗ>€ωΛ_φγOυŸΏΌΎΎΆ ji•±•FAˆ΅S‘RJ–!  "Ϊ΄Al΄)(i‘₯HC •M¨΄©΄•F"Z±RiZΤ@ "‘T!DͺQ QE("E„‚ͺ³2ΔRΔ9σπππΝΫ·Ύ{ρ‡ί~χώαε9·©½r›#5›vΊ―έ+½=>ΎύξΫ?υυvΛ_ώϊχŸ~ωωϊόy―ΆΥ ΄•‚”DTιPAͺ ©@6•*P‰ΆE$@ @&i)‘UMJ"€V KDC΄²!TTiˆ,%‹˜sξχΫΛ›7ί}σξ‡ίαύσΫηάg'ΣΡ³½’Ξ—+“žtd“υp»½ϋκΝ~ψαόŸωψαγώύΗίώωΫ—Ο_ΪZMD§Ym€iK'Q„6D*’‚ΆJJšRXBB¨¦…iAR 5 E› e[4D„œδααρνΧ_ύ7?|έχί=Ό<»e6“¦[RWΫζ5“Iξξ½Ώ}ϋξ?\ζ/ω?ύςσ§OŸ^w[m₯4²’J‘¦›*’*¨A"š2΄J*4ZΡD%‘i! 
ͺ‘h© RΠ¦4Q*#JI£$΄Šh¨,*e*4J"BΪΆ‚ΐM—MZ%‘£•fB“σψψπφέ»οxρΓχοή}sΏ?7Gκή¬jšpd:―σehw΅Ω‰“γΚiι²™=έnάΠΙTšλjŽLν«KyΉ₯‰΄Ρ$I’­φΤ΄ΨL£3k'DΈJ3†TUlgΪ‘ςκHoΎθ΅νto›-Y‘9·ψςΊ¦I*ušτšXΉšΚ%;s|±is΅ΣΆΓm¬lcΣ¦¬d“<€²“ΪΈμk2›΄²‰3Ρ\iΒt¦yέ֚Hv]9dεjΚ}·štjνr2 mkν=cšΙ$΅ϋ₯Ή’‘FŽNkΌžΩ!f&·ζj―©νΖ+“½u’^έ4s2f7Χ²4M€i'Χ$«ΝU“ž4r™‘΄[MgD³“έ}¨mV„lj¦Χ4ΣΉ²kΗlwm“Ι¦ΛQΥvε:VΣNEλͺ¦*KΟ©yu΅u%$’$6M"I@Νξ\ WL΅)―'G$KWdr.MEE₯†K6χ'―zΥSΩ—6;3“Έj―ΉG‡!ΫΨdΫV.ΩdŽΙngWl;ι-i5ιΖNΫk€™N&9ٝ=‡ΧτR›„Ω™i.:€Ρ4­ήμE›ˆΦgΩv$IbUΝγΘ‘9μ…διώhVε„nJ‘]χ|Ζ…Ιiηaˆ»6͌#`΄4-+ir€]ζKXM =σΠΜψΑ·ndθΩφωαΐDΎ­σΜ‘βnmΆGfιΨT ¬7„‘—‰Ρ± 6f’w›:ρv‚HP0Ξλ]ƒb·Ε¦†€j₯™{ξή=5%LΓΆ“£χ‹m³ρ £¬!8’eUwTΧ AΕfYΒa€§”ΕΈψr>ξαλεr™Α\Ή.γ™Πό―{sw„™΅‘Ήξ4Ϋάέ―χη?χοΏ~ώύοώχίϊλ}Ώn»°λH[RB±²ΘξΘΠ#M$eΨξL ΘΉ;™eΰjJ”‹tQG"O91 f±ΫŠF°€Žξ•…μδ8ΩΆdΉΕ6Α§€bΧ™UΕΛFJc3ƒΎMI4Kƒe-Ur½’˜§˜B;ŸηΫ·οΏ|ρΫ―Ώώώλ―Ώ~~σƒ.¦::ςϊ&w]χΏόγΟύρןύσΟΟξύg―)Δ=,z„›U‘ Ł•°²=Η}%›χάY,(ΐ‰ΔAܝ šΐt΅0ά+Ίžγ@‘@xZNŒ$΄K1Ÿη|όψνχίΧοΏώΛ―Ο·o€έ#_ƒΓrw—ΞΫήχޟ_όόϋίρ―ρΧ_?ίχξέl1Ί.…Ψ¨3 /² g[,*8ΞW„Y΄’FΦak0Υ\XU‰ς;°L&½!xPΈE‰0ΌΗ½NœIJΏR―++!j;°MΠ€²°‡g>ί>ί~όςΛοΏφΫοΏ~ϋώΑΎ‰τΖΧΈΟω¬gbCzΏώωωσ_ώυ―ρ―?ϊϋλg{½Λeνq‰2Ζλ>k€³Pw>θΈ° οJ»‘0πFRHc4{›ΥPVbg΅²œ@…fWά “”p±¬ Ε\’&œ8‰εVKβΐ­83?ΞηϋoΏώφϋ―Ώφ돿|žη™ύπqΩΪ²όbΟW§₯――ώώϋηŸώϋύχύωΧΫOͺΈn…Π!t!8]rA)·œn2Šs-™˜hh ΉwΫι¨Pλ¨εF!δrPη&αΐa–nχ4`+1½>§±₯‹‘^ζπJJΆθŠμ¬q$Έh4» ! NŸΊρj3ΰ‰ΨP†!‹™ησνϋ/?~ωίώγώώΫ·οq¦ο½nƒHƒμΉ³“u—λΩσ9ΎY>ήΨu”m7žιΖ€Έq©™}χ™Δ½€3πĝb-Ωth˜‰Ξ½SɞegΟ¬..ξ½Š3 KxbŒΟκχΆδƒo-ΰqέsΰ-·¬ƒΓΣμέK–·Ψ΍O 45 K§yšΞ—w» φ|;κn wΪ:—=MM»΄ΫtœqήαgqΪ‰}άζέν.πι³³y!ΕQe}k“nΟΞ™³Μέό0ΑΫΣ/χψ"ΧΟYεg¬α"τψ8Š{'―δpΜπ3\6m»Χ¦…2ώ‘―93ž< α8χ.† +;œγω§ έΓZ·igΞ5m‚Δf;ΫevΎ9Žoάfΰψυ:#[[ŠφΖ·άΈ²ZΈ_ŸsήPGηΠΰ;K;WΫ¦u‚Αa†ϋ>²–ΔάΗb7[θϊˆ(ΕCžεΙ¦Ό·­Dσν:>cF{χ‘wβ8θ²[47vw^ζ,a)έw°sv4όιk‹C‡ζ8g‚Ώρi§%;›±Ή·h†3gυkJ?oNL14Φστnχ-†ΞΞKΧAu$w½ΤJ5ΧΟ˜7‘yπΓόέ₯a‚ϊϊ6ϋs&?³3{ρgβλ$―,ͺ–»8―χ€;0²ςζgeZΪ"Ω.tdθΒ—ή3σ\„eΣρ0,Ϋ;Γ²²Η³ΜΟΎ†o^k£Π{φ,f6u.ĝ§σŒWή4Ž›»žα~-J$œόY[Ψ=Ν½;ž™#Mξ΄μ,nΈΝΰΰHsίYο Y]M /–E­β&ŠπΤΜς„ΪάΪ·Μθ’~g˜σ?~ΕσŸΥ@G’ΨiυΉ‹l—χ²χφώόzΏήŠ6S*c³Ϋ€…&Λ γΩε©‘•KΩΒ$R³w g $ΑK.‚b”Έ;Β‰q" –ΆXΨ9Œ@E*γΊσ%‹Λ>’˜²₯Κ°γ..3ιξ‡Ψ]fΟΞΕXΝΡ―:gd (—ΓqŽ}žσ<ηΜ9ηαΨ 3=Οξ;χ«]›i`ξώΉ_χύηήχvχέϋZd‚ΥΈNοΡ‹E3s½Aπΐa ζŽqέ-β|άͺΤ’]8‘³3’_ΛγTΝι€ϋleθ.‚c#—$±h›Σδ›1#²GΘ*$f˜’Κ /†Š# ·οvεZ°Ξ²JMw & „ρb5‹'3IeŸŒ IΠ™ωψωvΎ>Ο<Œ΅*‡€ν|ζf[owοWοΧΧΫmΫ[lθˆ,²θ2δ”νœY·b₯ΩƒŸw_fU7“sΖ*VEA²ͺTŠ™ΞτzΏκœ¦c³πΆγžθN‘€ξΞY&:μzΫΕ9šμά€@0@ξ9M3Ν<σωφύω|;ΫΞdc!θα|iΌkxΰξξώΌχηΟ―Ÿο^‹wZ/λΨ  0άξ7HcηeŽH#eQ8ŒΝΆ’¬Τ$3ΒΟ6ΣdKΑ€ΨΪ†‘rπbA N^‘•0“žD ’…Še:Ÿσρσνωζθ΅ΘΠ©†u6γ}Ž7’0>ΣΰžK…2$gΛΉk—ey>³΅„άXhηžΩqΐw5sI6Ά›ύv+C Jέ3^’ŒΩΝ³Ϋl3’[pd`ל¨‚y8EνZ’ˆ¬Φ Ž8ά>•\Λ.dΞά―SθbL‰0,nΥΙ “l5xκ0Θkm\§uΒ„Z@…±©νΌ'K_°=ˆ’°Λ:6U\Η iN؝3Νήrm§ΟmγΞэ†9Ηtɍ$p(ͺΡ¦5pΰ<+μλ}«ιΜΞ}ηΒmeΟΐ‰―ΩΑvΟΉ Œ{&ρf΄‡Ž2\7 sε ŒηυN·­fτxw°…€avζΞ8ψ.1ƒe‡έV;:M{–λqWBiΉΌ‡!7tGΪΉ‰s\ͺ‘2LΫ  `9»»–!7Ρτ΄œmμΚ²Ω…“dh{φWaΒHιΡ‹Τ€ζ 6­. =!' 
½νήfL[hή^wΝLZ•^ΫΚ²ιχΝ·~ΧΕϋφΥΤ6IΆuΫ+­Eσxoχ[ŽγγΝ?ΓΖΝ’š²οσ»mTΕfiίsΌ\έΆΩ=οΊlμΪΫm“––m5«ΪlXyό.μΙVdί”½ο»ϋm“Šd{)f'·½‡ ¨ΫχΉ`ΖφΥυYΙΦήj2ΪΥ&[VΫλαv/ε°½y·Ÿ&„aΎ½`Vͺ^Ϋ­o[₯ΠΆζ]m€UΝμ-c$ƒ»ήΨΪf+a·lΫvwο½+›ZžΞšifuΕ6]ΝlΫ[—h­ά·gχϋύ«μ TΫ,7d­Ϊ6σ=ΕTˆΪha˜¨Ζγ¬|vBΩΆQΝ@X›ύŠŒ΅°ΊίA΅Mx˜Λ`Ν^π¨sΫf―ΊŽ¬©–Ά[‘υ˜ξЁ†γ­"Μ5Λ]f¦…!ζAΝe+…Ψή#¦{ύNνέ5«Zς m–$·mΜͺ­Λέy lσΩ­FΦuO·‘0Άqo“`Νvkm2%—3λ½Uͺ B ι {Χt–MΝφ^[wx¦(΄½ Ω¬}σc2 m›Ά•2Κ6ΛΦ± ½m-™ν‘βeB«λf€6¦Ά’ζΆ—UΣFΜ6Ήk›%<4ε6³ D΅¦Νή„γ(«1+Ο(•mfΥL€τΌλ˜±›ΣK ξ™ †8a5€Υ™ΩSu`Úςrή ΦcκΒ@Η–&\³tΒ8™Ϋγιψ±©Όο)£­Kνm۝YE¬XοO·5u³Ν¬Ϊ*uΖsw›ωμd‚aκ¦ rπ6,φn­MHMwžΩ €uKΐ¦¬Ωc•΅ yο΅Υ‘Βφl{›₯ymάXή(Υφ,SœmΥΫ&Ρ’Ω^*@{Kλ₯%₯νLm…tΫμύjW‚Ϋ›»Œ% ·U7ΆetmPθ½AΝ )‹ΧKΜ¦`κžΗ4cχ\-]ΥFcΨΚ,NAͺμΌν†*6³Ά«GΝή"γΦ•b£=LpL¨΅&WΖ½Μ€Άνuζ°LUϊyoΖ,WیM*ΌoΏ3£BZzΊ –ͺ93«%|ξnlc4Α0iξΨ6Βp²lο$6er]ΦlΕΊΚL›²ΨX±6-ΩΦ$NΙφͺP°ν­š΅qΛF­Ψx]θΪz΄Ξφͺm“’hήχZI ΪΦ—–dFvJmμ]‰ SmοQΑMifγ•1JC*Pο Jl˜R=!“Y²1‚ΦΨiΌ&-Λu¨Ά₯1"žΕi`…§νΝXuRo―ύΆj o›š£mcuΏ²© sl)7"³ό~Mƒ!ΆW[‡‘’sQήΆΉg'ˆ΅Xwfίλ(€φ€(ά¦`•»σtΑ6£iά0ŠθΆΡ±mD3W[fiΤ€Š›Ν,W³B˜6e0sθφ΄dΦD§m/SIlΫ(c“έ̚lΑΦŠyl¦Ÿν•m·Ϋ½οΔ*dΣ—F•›εΨbΊ­ν;”J›«ν ‰m–²*Ϋfή΅Ζ²MΆITaŒ·EΡΠfdo똴`yά²–Β[ΥΎ9 ΅ Ln±υΞέφ6R¬€ f›ΙͺΝΦl£U‘MXQΆ}γj›²Mk(XΫ–Ϋ« 3“;“bˆΩ³τμ6‚a«YΦMΨ^ ΚV[΅κlΚfkt•ζΆŠMŒY([do™PfγM%MnΟέ3ΒV=€°a'°e6Σ¨‚žΥlΘ6ΛmkPiFΫUHΛμΰ©mΆυ›e΅«-ΨΊ`zIacvΰmυΛΆƒ*Ω:6[,mb  %€iΫγlfΫͺΙΖtΨͺbΆΌ­€±ρ¦Nhφ^Άm’>β†šUΦLXΥθqmZ±PΆ Ϋͺi³m±u5e†‘ΛΆo€@k΅γ-DΆ/·§bφr‡Μλ₯YC“­f¬γ’±½,΄ͺQΝ2υ^›R {ΩΊl“°ΨΆ6‘Μ°©³lΛm7 ΄₯1μ±u °ΩnΫ4RΘΫj ˜%cP“†M%^Ζ5#lο­{–ΥΚ9c9hA*&£Ε63LχΛ6 L•l’±PF1 LΩ²*ήl²ΪΫΥΐΫξ eͺΩΪΆΝIi™-,W’ŒqΨΆ‘z<ϋ V±V5LΒΫfw,”ΆΡF€™m\G ύ²νPΆu[Θ,Ξ¦&φh“0σφrA(›ΧΠlˆΡZdώ‘i›­΄"\ΆC–l‚tΔΆJ³f,6&ΘlOQmfPκv[Ά₯ΉΆ 41†)¨6ΜmΡEήδI3 λy„­¦h R³¬Ά]ρήδ²—•ϋΥΪ8HޜΊμ1š΄™™qΡ0 T²I1yξaΘm)Œ„’jΫ³’ήΪ^†©Ρ€l“Κ¦@[ήΜβj-3˜y­μ₯ ^ΠΪt΄(νoϋMΠ$m„U “°νΩ‘bΟbΆ­ŒΜΦl›Ί˜ι²yW³j+Β΄›©Ρφΐy9ΓΫ"AΥ ˜Ν€f# [Ν²ώIΣΜ[IMHΊFΏle³ΙΚ§ί1cvμqf{ 43ΨάέLΜ°·™βρΧs[Ϋπ΄Ά«lHΚ6 φtΙΣL°Q­šj›‘ϋ­oΘ޲q{³ν*[eζ΅λΪmΊͺžbϋ¦Φc΅Ε½mZέFz½4WΠΞzάωBYw{½ω1[©ŸΫΥΆ6³ψܞwvE½7λ4šz―Ÿϋ³Ζ(ΫώΩΥf6›Ρ/γnl΅Ήo―λ₯Θf†zfΟ،ΰ]Ϋae›©πΪTlλΒΊ1½Ω6ϊ΅±±A³lzбΌ΄;yρf4Ο7υ{ΛN<ΆΫζχμΣΆr―»•a”ή²oS²Ν‰¨υΨ»³ξ^›έY%ΣzΤ>fs«ͺƒn‹G―i§ρl#Ζ–Ϋ:’`AŸ{±P»-ΕΜ6N€-)œiSα™Ή³»΄΄™)μύ^7kβ=χQ`ks»Κ–†Ό»ΧΩάΆžJΟm'4ρ~γν[μ٘ΆMRfςάΗ™ν}·W`ΐV«ŸY&c[œ[:1k_=Τ›eΆP½~ξΪijΫVϝNο΄Ϋά”Ž Ο)lΆ•lΘ}{©Π ΥC–Ν&xΪΞLΪξΌΒkƒΚϊΒΪΘ¨»£_£±MO± όξ>yxνΩΑΝσM½·gΛ·χμn›Yέ­\ΝdΡ[vΫ4˜𬇢ζ3wΏ6[#¬Gν ³uUmk^U{oŸFΖ6ΪnΏξΦHc@:+ ―zζvΑ€Ψ6ˆΨ₯sΪ¨ˆΆ“χμ#άΆΧc^¦mkςž;dDwέΎΚ–°Δ [έΊέφZύ”qvŠΞϋ7Ϊ1rmΧ»-AyΰδΉ]¬Y΄w7ΖΨhυ,Ϋή³s[B₯ϋK¨hiσJΟpYνV―mo+W;sSFyτ| cc{mΘΆ¨zk†’{n6šν¦΅έαOkMasίσ»·νT¬φt¬^c0›ΉωW³•8˜O³€U±%3³—ΉYzEχ#Ο-Η67Ο˘Άν[6¦»]΅›Ηb ^μΎπšp›΅φiK2Ϋ΄ς΄‡1LVa,ΜΆ™υrρ ―n6›°Ν€oήέmL@Ε†˜ΒδΡφ1βΆͺ³m)Œ%*mΟ&rΖ%Η{ŸΆΗμT…©8ξV₯Αz•ί5Ά ³[¨5ٞÝ,£p4΅³9d΅ZλΝf+όž»bΝΨv72lΙ#&†MkΓή2:6›37K&[)€ζ”5eΪΠk›ͺ\άY‘Ξͺ`Ϋ}ΟS3 kΏΧ±2lΜl^e3/ΛΜi†ΖΤCl±•YsφΒ7θWΥwg³d6ΑaΫ€Χ.·Άν[ΆQΠ·”›X6€_μ.Ϋ+a»[« «^›!mI΄›ζω]^3ΊUΐ`²΅[/ΛBUlv3Ν΄ςΞ³±ab*7΅/«q,ΈM½Ω6PΈ@΄E,.ZΌU6ΫcLΌžH³)&›§zgšv˜έ‚τŸ 80δ‚ DԞόwXΣωΐd.Λ XΘΦ)Ή`³ύ¬ΧJ ²ϋ2A¨n1FHγΪf•έ ΘY)[PlY폩ΡΨ[£³νb†Άm{½κτVΑZko]έλ±VnonΎ‰°mΝ]“”mίvΞΣrdΩA˜Yn½΄αΡΫήήUc­βM«TΨΌυ_―κνν]΅₯aΊ© oΦ5η-C]ΝmήΣ…YΤ6sέΪσŽ'Ψ5ƒ9b«tyυΰν™;F%³ΖX 1ŒΨ,oέΔΫ'lΛ*ΠpƒaμΪΊ=)ΖΠςMΉfΣMΎ=ŒˆΛΆ’@€b$βMšGd30 AΪΖr˜Ω³%Ν6± ΫφΫ]1΄0Φ2ε1³Rg§ΐl.Ά­²‘}λΝ ζΪΥlRΞφΜfΫ›ΡνmΫ„PΙ;WΆ–­ΆŽν"d±mΆ‹6SοωBΜφ¦κΆfvZΓΘlΏξk±mO7c+’‘U½ifH _}›Bj·­.ڌmo›*Lh»wφ”Ru³j!›Qe{ϊ*0ΘšΨ‰3žfΗiTsήͺ moΫΊΟΫPλ–MbƒΆUΝ[³ΆΚΚXS‰k›M%i†š‘Μ3υ½^FTΩHC ·&±˜qΜƌŒ$ΥήX’ΩφΪB™ALf{‹"`-`V#Ϋ6«T–ˆΝζΨf”ΛΘϋΦΓ¬ΜY…±bΈΆΩπfΫS·[26ΈPΙ£ΛΨZυΦ±]„f›­ ¦F6ΓV¨Yo/Q°Ωv,`›Ϋ~έ+”Ω{ڌMֺؚk3’Π³³-Ξ–Ζ°mηn6£Π$ΝX―F¨rφΤAυfS•½u…ΐΚkn#>™1ž5©(Ά :οχ66 ΝξFπ8±-Κ6kVσ΄­c«ˆ±ζδΘ`€¨m¦ͺyΫ»ΎΑ#ͺ³GΒ\αiχn"ccΩ–CRm’y{m‘°LΦφ[«Δ–­ΝΣΐŒ&IQ΅=›lSV#»›Υ‚aUyφΊ³q<³y{“–*ΪήμŠ„5‘ήZΧ›)ΘmΆΩJΕͺΑΆΆu½­mΕ›½Ε’Zc#ϋUΊ΄¨y{f¦ΆΖR%›Ά‘€ ™Ε1`ΨΆά·mT΄j›­W0r6‰«mUl$XYΦvš―<oxίN€Ψx:uήΫήTρΆΝάNƒΕΤc+΅aΕΌesΙdMΊ8ΆξΘ0ZPΝΫVΖ¨>° ΪΦλu³Iλq,fVΛF‚΄=-gmΟΫ„23=%&ChQW“ΨπFΚ”Ekk΅S«[οΉ3Τΰ™Νφή"§Ά½·» XvΌ₯L”¬ƒά†mΛ 
«††‘Ϋ{]Μ3,Υm±ΩΦ¬©ΦΨζΆ§ι‹EΝΆm˜­MOc©R›f–ώ6adŠσV’Y–oΏsΫTΈ…™ΝΫjγΦτφxΧ)οΝ³ Έgλυr*Ϋ6Ο»wΎ°6₯Ϋ~°Π­xΝγ† ή₯m–Mͺ]ύ>#`εΊqέ›¬’v³§Ϋ«b{“»zupc-΅χγών]]ΙLm+ΩΆΦκΊνi΄mϋωξv±j7Ql›₯ρΆ²™QۚυΆEU†žΪέ+:ƒ΄lk_ύΆΜqΪ{έ• Κl›™‡ͺnV7ΟΥμ²7ihZ«θœμν|ά‚M€ΩDg$-“–Ψh[ ׊٠To«ΝρΆχήwιΌ7›’{¬5ΫΤΩΆmfΩ9μΥ§ΫWλx1zΣι‘·‘ΩΦέ.οΑlήχύc³©UνfΓV`Ϋψ»φ†ΦΛέήoΖίUj›†Ϋk­κμ±uΨοη»TmE’³1mΟ°…ΜVm3Ϊφ6—²Τ6¨Y š![_Ω†Œ«+lΏsά²΅u—Ή”ΩƘυJέ¬n›) ƒ™ eΉ'Άfέ½Ν~ϊμν€~λ¦γςfS@:{οχ;ŸkfΓΆ»<՞ɍwΞFμdδΊέ»ll*ΙΫκ¨ΈΫvϋΈΕΘVœMt+1­!φ@o»…K‰Ν3TZXΗψχ~ί•`›ZnZ΅OφVg›m»εμΆ»Z!#τ¦thlkm―»•wm=σ»ϋ#²νΡ»jΗ,TΫΖw5vοφΉΫ~Ωγ·ύuWO²-ΔΫK«tμmE{Ο-Gc}ί^š)Λ&ΊνYΒ£I³U6c›χζK™4Ωμνξf-£±ΊU“­«ΜμŒ‚ν}ΫΏs΄lmέ…ˆΩπ˜Χ―Τ­YΙV[ΈΌάΔΆ‰ΪΦwo³κ£»·κ·L—Ν6© ½χΊSloέήΫ]Άbm\σrΘΫξn½²Zκν₯fγξmν-Ψ$6Q\[φφΞ½ν–anaž! SΖ­ρΫ‹ Άy{WΛMΌYoΎφVfΟόξ};΅A'Ϋƒ•9aζq£Mq±τΆ[Ϋ«”κqφlοΊjοΡ«Ϊ7fԞ;›q-QaχΪΥ±νΡΏ½Ώξ4j[qπΆκΊ½ίϊΰmMΧΒΊΫ‹)ošΪ›ΖΫX‚­Yο½wu θ©χήέ1Β2οj…λbΫ€HoοOšŸα Ώwίwˆ"³·y­WΝκΫήea°Μ<}yinΓ^OmλΌφκτzl5ŌQ ν=ψNφ^·χVe¦Ϊ/_σ’aΔϋ΄­rΪΨFZ΄~‘¬cγn[²S 6adŠσV-’xϋΓ_ΫbΤΆσ,%QΥl―Χ«€Σά{:6-εΪΥλV±»π€†Ϋυ΅T3γ7/}ŸFƒ·ω=ΎΜ@Δ“f`ΣvΡΣ΅Ζ0mΛOΥmFΨθ9χb k+"l}—±1¦ς―o‘­mφφΓϋw»jd{½ι놨½HΖcζΫνε(a―Η¬“΅zΦζ]ίƒIζg¨Μ`­¬χtƒRΠuΏ-3fYΛ%h€’&³Ν€0„Œ wmΧ,3QοηξdΨΫ}½M΄Ηβϋε¬-ΰμ.ήIρ°½v f“ξ6U3C`[C³Fͺƒν΅ΟcΫ Xκ’Φ3F«&₯·ηUBΕφΡήo±­:a……›χU&U‹ΨžΡΥ6†0#l²σ΄=vNΖγζZ1l3ή  ΠΆm—j0ΙmcUΪBTcm€ΖΤˆΪ ·gύJBΠΨ†M5΄njƒ‘nΝ–‘±ΆΥΫn!6’ڌ&Ζ¦PΔΆ%UΤfΣ“©cflΨ”…ΠfΔΆΫή‰°mΫh*&^`4 jj›YM© ±§ F§5ΩΦ‚bΫB‘X ˜©½PΒ3ŒhӚΕfέ!’mlͺZΥΆ1λ=§ ²±vWΨd5KT˜BΜΆmΉMr*Α3ήΥ€`Ιl#f‰ŒΫζ₯»Ϋ6Ž΄mΖ"6oο¨0P· ’¬«1c SάdD SνY―FQ΄=`fTkŽŒi0Τi j³GΟnF¦6›ˆa Άm¨S—7›L‘lΩcσ†m₯¨”η€cσ’ Ϋ#’a’Qbša3Υ0 XΪ6XΡ—΄ΜJΓ&‘Ϊ•  Λ”aΨjE±!ΔFMcΫH–j›mU΅ΐήΜξ½ξŠm³JΪx£@’2yΟΫϋv>*ΪήΦK€`ΤΜ6‘`ΤΫΨΧ›¨!3cM@Έo/R™!™mB¦%eΪπšLΡdƒƒmdρfUSΤΖ€mUσά\‹5 κ΄ΖΩx­χ?Ap`IrA QΓχί`έv °©AΫ$Ι0Ρ@`žI±ή i˜J€f6ol«B#*˜1e›;{’ΜσΩS‰­%ΫPmiΆ43¬¦ Ša†*3!ΐΆ ›δPm2€lΝ3r5ΓBlfŒΣ³05Ψζ‘ T@3›Z΅€Ν΄χj•P±½ŒR1ο ;3ΨΤ#˜Νoοœ₯)—ͺ½mD £ΨFΨV–΄ŒmΆ»cd™Αc·ͺx{ΫͺmΣ*/B5¬U4Α8±AkΫzUΒ0³M5Θ΄1σNΥΜ±±ζ5c‘–d³9Α@˜ηιZuh³'i ™bۘΑΤτ8œ€›†Τ°Œ Œ1cL€aΖlFJ €„E0ΘΚΪl AVŒa!ΖX‘@Q#ΐΓ°H@™dΨ°mcΜ06ΖŠ0”316c”B ΚΔ €e3[cΑ@)‰hf3 „ )C$Ɛ”l€ͺ°1  3HΒΆU€t™m,£ΒΔf32&0 ™‘’)ΐ ΫfBΩ4•’!4@0° 1X˜  ²!Kf0FΤ±M@‘0I6ƌ0DƘa’™e2I$‘D*J0XxΨΆΝ&cƒ”`2lcE©IX™m›ΝJ@40Ψl" €ŠΖ„Z 0@9Μ ΖŠ6 Ψ@`I16HΰΒ6Ά,d£0Δf`Rj ‘ΜX•ˆBjΜ@†‰J…@Dΐ†Mcafb`ˆ6£Œ εŒ1!DRh’hΩ0¨Ψlca›-₯"‰2ΐ°1€’†,¨yΆ06°@•ΨΆi‰laΘ°Θj²EJ( ΐΨΖΦ$ Ϊ°¬@’¦! 
@'3V0–ͺ0lmaTΑl›‰„α*6³ ²Qj,ŒΝDΊ)c f(„Γ#D QX@0ΐ"CV Ɗ c*04BD˜`Θd`fΫ6l›e™"$Έ 1Ψ $"€ 3›Ω c06ΖΐaΆ’ Γf#¦”%`ΨlΫΆH@R¨ Ν\ d”ΤAGl2 mΆΪ0k 0‚6ΫL0³-C£cmΆ DΉ‰1AC’E°al¨`!ΰoΣΩ,lƒ.;J5bc{˜ͺ`ͺ†‘m¬Νˆ e«YΆ)€˜ji‹At‘° #CΓ`^Ξ¨T6b€ν±MIΕΆΡ&»aZS6ρp/η@df3nΚ‚mήΆΉ»œŽΩΜ““1ΣY%fšg#f΅”«ΙΆ₯WΩFeΓ²fAΫ΄V ­$0Ζ{ξ-K Ά%”6Ζ@™ΝlέΝΘ@Dƒ]ΡfS`ΫΣg‚Ν6ΦΔŽ.ZΥF­έmX1S£•ΨŒΙ’dcΆA)ƒ¬3Ε&ΫΌΉRXƜ `©NkΫΠZΆ)TΑFZ‚eE ΫΖȐ…Αxή­’0Ω°I…–bΦHhd %0nžΛ†aLη^RF„m3šήΫζjΕ7k3kŒRc0ζ¬`Vb¦™=š΅Ή’={ά€FκΞ6ΒΖδ5 ²6 InΨΫjΔͺε–%4Σ.ΨΤTΫDΆlFi@νm6,3DκPa0ΨI™ s΅™EσΌά 3°β.5€aXT6X¬l›hΦ@y4Ϋ \^μ¬C`ΫΜo»Ž΄…ΒΜΩ@›ͺͺmF mKΜγR,fȁ :$ΆmdΑm[ΪΝ’‚MΨ`›λЈ56¬‘$Œ˜llIX!#KΆΝhBΫΌͺ΅τ1›šaΜΦ°ΐ( Œφ¦Yj‘꫍g VwΆ02›HΆQΓ¦RΝΪ66ΈΓ’sΛ‚`Ϊa Sd‘f΅-‹ PσΆU ΦΆ>θ’$O ”’66ΕΜ" ήΕΖ¬θ*4Υΰj1,* 8Ν›‰l`:cΆκ²l(iΔ6σ* ²Ν,¦TPήB΄MY,m%muGb›‘lΘΆ–,T±Mΐ†½u£¦ E˜VF‚Ε&A΄δΘ’ΝŒq#‚m{Uο–N 3kZΖ`βΜH<Ϊ›Z€|ΧΖlΪu»XΓ ›ŒAeζIŒ C73ΡΫX]^«!‘…1€13₯HSm­Ωb@5Ϋ¦Υ M%*š1’θŒq1³ι³…Ά ΜΐŠThU (΅A– ”x3ΘB¦³YΨ0]– ₯±‘Ω ΩΆ±ZM£eΫΪDv“V«JΆm›­” DΐjAfΓ†*a¨X†¨"3¦ΥΩ …=eΆ„² 5Š ΫϋJ™ΨΒ JuM31 €Š{)f#-°msUΒ΄9 )b lc²M*²1.AνŠ&Λ ΫJ…Ν&;3fΪ‚`ϋI%3Ϋ±Œ‚ ŽΑΖ"E3­š ²Θ₯SW0˜M&Ϋ6ΠX©U΅keLΓXXΣlR { ‚­m«°3Ωƈš–VΫf3’-­VŠ6oΨ’‚-«0²@˜΅Dc)ΐT΅Œ6”0­Ξ2+Ρf#cH@Q±L³ͺΨ@)ˆΐž PΫb ’ ’ʘ4™ͺܞM(I€iΤφ6WUΐ M"₯klf›lP©mŒE)ΤB5f&‹ «„mp{½,,˜Η”²mϋ"c€Β@6”Ϊ¬)Γ$5Y6ͺ"V7€Άm’‘Μ•z1m ,Μc(’Μc’Ά΅mJ6f°Ζ6C¦Σx{kVΛZ΄Z{m‘ks… x[K ”*FΥ`ͺR00Σ"RςlΦ–1b(ͺ`5«ŠaH‘ΆgFmIJΙ¦9Hi(i―=Q‘4€@4jΆIW‘Ζ’`‘‚ma³3 R˜1΅h—P0³1€Αdm–΅`6e{[‘e6¨0&XΖUm›Œ•¦-#lT₯FΜLa²ΩQmUj1&Œ­€dL…VΜfšͺ6³mZ5ΖΆm,ΣZΆ­M­-h΅ͺdΫΆ!ˆe`Hb[#š&©bŠbΤE ΐ’³mΘl‰!D²u#J0PHˆmφƒQ ffJΉ6MXΠd J΅έ{‹+*Αˆ!c]EΨl,S΄ˆR ›ΐ6†mΨΥΨΐ†ΪΡJJͺ5› ˜…mΫβ63,K€yͺ/6Λ.ΛlPb6YlBΖY(5›ii6 +² TΆQ˜±Ι–ΝfMͺ6U΅³Ά¬!šmU€ΔΆ§­Š™ΝX5ΖfΩl°mc΅šFΛΆ΅α―R±ΩΊ{?KjΆž₯KlΟΪΛg­T³·fF&b³Φ6KΊN6f΅ύϋνοΟκ›­½ΦΆ΅RŒ5»ηΚζmωqCF”j»M]΅mž™έγ’τΆY‘υ{•LΘΆλysΧ7l6{νΡ“HΊm—jΙμ₯Ύ:΅ΙΌΩέqο΅u_Ϊφͺf›ΡήL [EύήΟ^ϋκtۚ٠Œ°v.¦α½φΒ:½ΩμJ΄ΜtΣ'λ΄·­Ωzοά2™Ψιόyo¦΄y›-jmή{ebqε΄χsc:πθιΣΆ¨f3½΄ΒaX]‹i,έΒo―»‰§Μϋ­Λa.kf›s<yΣJ bνξΪMN6[w{˜,³\υL_Ν³ΆΔY•zΏwΩfΖΌ^ΫCώΊ!0ΫΦΩΏίξΈ­yΟ΄Ά­‘kvΫJkz[~άP@νΊΝ¦N°½νζέ£xo…fλιΘVm»ΨV]mΫΆ:³ήfs‹€l‚j-Ο«―κΒ¦·νNΨb―χλΎάΆ±Τf΄Ν°²UΤΫΫϋΥu_έ›0S(KδΨπ[ύbϋNτΆ·NgƒΫά'JήδΌν6ZZ=³ςΧΆ=Θz›-g7§7Ϋb†ΆξΎΣφfz+βGS²αjmΠΖκZšMΓ·ΓΫk°ϊ~Ώ—€Vo»m0«Yφm‹³ΧΚΠΞΩlΥ΄§&³Ίf/<‹ΊyΨ Ξ]ν½fŽΧc^k^ƒΖ coλμχ°;κΦΌkΨΦʍ5ΆSυής#jΊήυmm¨[υήΫnΦŠο½BςΌU²U›Z_mΫΆΚ°g›-5XδvΛ,ΉΆυloVΥ­l;φμt΄mUo€gΖΤΆ+κνν=τuwoφ„αΟ ”Λ^ό–§q­Σοmu–Qχ~ξSB³MεΌi―QKλžέΊ]›mΘz›-Χ…ΪΎ ΅m“α­Ϋ«ϋ‚ΩΦfξΣΟ „νΊ΅™=I«hct²xzψvxΫRmρκ{o~9’©·΅QQΖS6“ΝΈΥΫ‹ΠT]ΫΖΪΙf„φΣF–±žΕ_7cžΙY«Ί½Ωbf§Χ˜Ω6nΌ|•4V1ΆMμν·}ίΦ}3› ­mk₯Π³oUΫΆi4dͺw}[ΨtW½χΆP”ή¦³ΧΛΩ*ΓK6ίΥΆ½—„­½ie―€έ˜ι"ΉΆυlWΥιμνΨlQ—ήΆΚΨ ν™ΪV‘ΩΫσ^]ΝΝ ΑRŒ[Ξe/ήμε%λτΆ§XžΥνΡΊe₯½§’½z·V³”nl3Yo.χΎΦΖ{§ΫJ%φžΫL6›ή»nm›δ₯U΄1R-¦‡[ΥμΡ΅ίρΈ±·.αΉΆ¨ŽΗ#fX6νξyΊΆ ν$6[wο§³`i5[Ο>ϊίΏ—„mI³ή& *ήοΉ2•m{Rg$Φήφ¦ά²:ƒyσΞ·wz;U³ρ^;myΣτΦ}z›VšΧVΆ΅œα­nfo\ύt—mΦͺΆMe{΅T7{o·ΊΆmI±mvα,ΆΆ2GΆmͺς{8ŠL㰍D6=έ( lSΕ63-—m ›νέ±^K+iΖ,s΅Ά·TΝΆε¬EΫ–«™Aο=[wλlΌ­C«Άf pΧΫ42…δχΆvukΟkΊ?ο)οmv§6_μύwݚχΛ₯εmγlνσmz[93΄ΝZJΜΖΥΜΚ)Ω6΅5 ΩΞ­²fΫ6­ϋF{PVmiϋ)΅AŒyw·amKšυFSWφΫ‚¬Ϊή]#yΏ§΄šΚy[›_Ξn«›½’hνYοz―NΦ6š]šε‘ΒΆ–3 ͺν·η§ ³Άλή6fO]Ηή›uΧΨΫt—½Ν’²3λν2G˜Ι½­*ού6Ί¦¦qΆ©³ι©)d4±m*ΚΆ·w;—Mφ‚ΰΐ@’#‚Qsώ;¬ίNu6Ώίϋ-¦5뀃…x*ΝfκΖφΛ­`°Vυή–*f¦ή{6₯Πή[‡Vm±m ³«‘ΌνΝε΄η5υ†ύΆ/έ›μm£λ[σ^½l;Φ;gٚ]kh›%(μΗW3 υ,%Ϋ€ή,Εήi]d{Ϋ›[wSO·έυ–Ά'XmŽ1»ΫΎΠΆ€YγmSWφΆΝΥVmošΚΩ€<ο=΄¨ΟΫΪό’}ΠΫέΝΖ–Ή,·Ÿυj£jm£Ω%6k+ξφ^KcΊΫών©VRm³}έΆ ΪφΚυ±·™T½·ι.Ϋ6Yασ¬·Blοά› Όί{υ55Ϋ¦Ξ¦a…2ΨuΩφφZ•²-u6ο­–Ζμv³N 0 1°Νuc{ixv«Ϊ3“jfͺςή6Έ›nΏ·’FΥf{…fΪΦ…δΝ6νλόΌΆ:²)ύ~WmΕ΅mΏ§―Ώ5{ ΡςΫ>ΦNδiΫ΅†6Μ’Β κg§d¨=/ΕIeΆχΖΊSΫΪsmw™1<΅Ϊ˜€ίφΪ†€m½­c*b{s΅UΫΖ¨Ξ&eφΆMiyέη™ρΫ;χΫ]{c;·g–§ιήhVΒ¬­θf&dxλΎν·-ΤtΧΫlW΄m*Ϋ―ΤΕf[κzo[₯Ψ6»p^kΪΫΛ™Q±ίϋW_K+ l›Κ0M7Κh€mΤeΫ6$e[«3ή»cχzχEŒYˆi”Άί»ϋΖφ[ΰΩ-±ΆI53Ψ{θξ©=ƒLκ6Ϋ4(j›ΆU‚ΖΫΦΎκυlTŸχ\ώ½wάMΝΕήΏ§λ³₯τςΆΘδά¦ίφ΅΄ΝZ.쁫™wέΟθ Ϋ¦ΫσRl‘VgΆ½ §nτ~ν 
ΫS«Mμξ~ο]°%Νz›€ͺxΏηΊλΟ`,bSΏ­TmLήφZ£@ͺyοΡw²d4Nf‰V6ΥΦΖΡΖΚιΜeDβnΧΙάIΗ6‘κΩu=0–Η96Y5H˜„ΆMΙΦ¦­’aΆ9AΛ±AVw;UΩfΫΆ“™œŒJδΨNifΩΡM³»±ΞΆ6klλvN§‹5·ΤΜvJ»ΫvΜ½4«m³‡;ηT1cΧLΞ6ΒZv΄ΉAsXΫΤ΅80Ά«Θ!jΙ²QΝl=Og²ΆeNt7Ϋ9I«œΣl'sw#38ΞΨ².Ά3;μΊ©šε°Ψ¦ΞmeZΉ-Y’΅ΉyH›6M-Ξ£e›΄ —“h,G#¦₯ΕvοU›΄ΖΉwQ²f Ϊh›ΚάM[νx¬ Ψv•Ί §κΆέ’Υ9Ηf–βR1Ap₯ΞΨΥV°KA#k¦Ν΅Ϊ”†νΞCbΧ 1*$6H§£M ³kΩΡ1»Έλ°έUΨφ\Κ•Ί₯ΩNιή™ΆβœΜf Ξ΅ΩΓsͺ“ΨΜΆΡZ ­±£ΝIQ»SΧ²¨Ά˜ν‰“1΅0[ΩΦσt&Λ6’ΞΆmE±rΞ™Ρζn³c†%Ϊ&±s›»3K0›»{:H—œbΓΑLnν:“0ΤΡ½φ ›VΜΤ -ΔΪ­Cf9 ˜U+ΫξP4i³»M=³™d΄ŽX­Ϊ}²:F:v7‘±y¨{w”ΰZσˆg=:Ωΐͺ»U€d2«3Œ-uZΜ.‡(Πξœ κUχŽέ«t˜4›m¨δΒΨ°¨Κ°f8ŽŒͺuο؊ژ΅νΉΞ©Vfθ–f‹˜»šσΘlFΙY»vά‘”l³m΄°cΦ:3[’«άΙq²ΕΨ0TGΪUC3Z’™[Ž3άΆΫYΪ6KjiΞ93Ϊl»vΜ ΫT;kΫΪ i°»nͺεŒΤ»eΩD“°:vgΰ.ηκ̜35C ΅κ\ fVf»¦TΡ@h³'₯{F (³£Ϋ±ΔΊsNΫ-‘2ۚc³y¨ \s½8\œΞbΨ©»U`Ί3[°Ξb6LHΛٝXm;ΣέΪΆsXi6RΖ΅B;3±aHEΆfˆjS₯{g[­l\ΛvΧ9J λv½Έχ9“sθ4ŒSχt9ζnœf§¬™ξΦ©²’Ν(›NFδθΡΦφΌηœ»Ωσttvα²έ8eξζ‘ΰ?ύ‘χίΏψ}θ‹χΟύΥΏυwώ‹χύΉοz1ε ϋ­ψΏwΏπρ7―όΘώίνχ½η»δ#μn»νάΣ9³A9ΩV/}πώ{Ώψλο=ίω£οϋϋοόΗίεtμΒ6χΎ¨ΞCΫ6Ϋ–TϜ­±$Ή9Ϋ4fŒάέΞ1 mΩΨ〆&ν₯5˜ι)jccUhξ FγΊvάK›άNvo3(εœ“\ΫσVjN;gs’€±νΖmMΜΪ¦3χ²urrw_ήU'“agkχNΜpbΝξcm­a›ΫιΡξ5,Mm₯ݜιllΪ‘ξ]ZlΆ˜ΗΞ­ ΄­S8͘Dβ‘]RԌuIb›ΐΆv{šœ%₯²\ΫΆΥ6΄ς85Œ’V£{ΗΩƒΪθnU1”yr²©2rJ―΄΅έ›ƒ»[{q^ΉW\ΆΛͺΚΊ›”j³Νε[‚©s[­΅Ή†Β<ο=k'a›'Ϋ -Žsλ:νb[Ί›{₯*mσά3GμΨΚ¬­S–6’ΉŒάέ D›ά±Se0ΗF{*Β1Σ`™m¬ αΜκœγš±Ωζtoj›ΝŒΚΞ9Yαή;ΫΡuΞN›Cαξ&nlgF{NmgΆ©Ξi»Ο»*@Άμρt”Ψκ`ΝξY5Ϋ½Σγœf†εΞΓÚι`χξμH»[U΅Νs–BΜΩn§³΅œsF*Ζ“ 2 ξΜ잧I2ε<Ϊ2›YfΆvΪγœLuZ]χ‚ZΣκlΣέͺ¨,ΦΣ#’^ik»ξnžσΚ•Αέμvͺ¬»‰ڝ΄ͺmun+mxQeξΦ₯lVƒJ†έΞw+έΝvP•ΆΉ»9jikm2JΫξ€mrΚΠ&Ϋ¨QζΨh—ι yΔ¬6,Ϊ½¬ αΪhVηdm»σθήNm63«c§ΙφΌΟ=tΥœδfX;׎v98³K·Η£έέ‘"ΣΉΛΞ΅ eγk&;’kΫέ휚ْ¦cΓ1“¦Η}ޝ΅΄ΩV§lΆ ΅–csΆ[iάέ΅œsδZN]cΖνT&Α@\3mPZrN5³m·άΫN;=Δ9έΊœm΅Άξ£Η6] ©μd]XŒΚ%ρΨξΞ9ΟϋΜ=ηAΦΈμ>Ο9•‘Η Ωv ΊΞ9Ϋ­f!Ά;— έ«Vηάέ%­©N‡ΥΈNΩέJΫlmκρώώ/πC_ωΞψwφ?ϋωο{g™Oύπό£Ο}ω=ο{ίOύτΟώΐ·Œk»Υ¬-³νœζΏkΏόώίόρωΖ·ΰ_όkΙυSοΩ£q ΉΩ<Ώρ₯Οξ'>ς‘ύΞg>σ―Ώςζο|χ·ώιχώωο±Ώτcρ‡Ώχ;^Ν²=m§zύσŸώΘ―ύ/λϋΰΐŒΧΎν=ύΏω/Ϊwώ©o}$‘ηξ‹Έ§ψζη>ϊώ_ύ‡Ώϊαίyύ[_ϋžŸωoλπ½―ΎςJ³Yw9Ψ¦fρ|σ«ψωύΣύζ'>σ/Ύψ₯?ώζΛσκ«ίφgώμw}ύθώψŸy΅“DkqΜξ*zωΥύϋŸόθo|δ·~η³ψG_ύε‹WΏεOόιοώΎψџψK?ψo}Ο·ΏCqμvAQχΦq:³ΪΫ/ΏρΉηϊε³ίψΚΛσ/͟©ŸϋΙο{Ε㜻S3Άωκ?WΏρ‹χϋΨ ΰρ=υ?ϊωŸωι{oΉΥΨ4Ι8 ›ΡqœMέ=4lžΥv‚ٍt'›DͺΗΨ΄€Φmm£RΫm]±$r7«6Ϋ.˜τ8‹k»E͎MΧε$xZ;ηS §0lΞ`.Ψ΅VΛ)˜ν:YU€-[Δ΅]Ρiœ 3uΊn†VΆ‡ν^η$›μΆΣˆ8Ν°‚‰ŽAR\Ά»ZΆkK§€-»ŒšΩ²ͺv·GηΐΦ*znXb»ΧΚ]TΉέgldΰ$«VΧR‰©&k» Δv:±f’₯`s'pNχ©S۝!•eؘάέΗ9°mBΧPPΫv-ε„Œ‘»!)EΛΠ²­YΗΡT[k&βzV[ζˆέϋ,ιbΓ eΣ³ΨΪ°)Ž:gn;f$©ξ2³Ρ5ιœΕμnM͎1pδιΆSυ y8“]8±{ΙΪZͺˆmŽ© p» ŽmۘN#iƒζρxјaknَΩΤͺMΆ΅©ΆS#6»’ccΞαjΫ‚ΉΫπ(ΐbl9³ Σ»›I±-fkUzξ–³@Žέke"Eλξ:k θΜNΒ:mΩΙz Λ.Λ‘c[Eˆe»‡–Π6›ηΘ9νͺœΆ±%bΧ`WέέγU³έMh&΅mΧR‚ΓΩ ’K)†“­MGR΅5ΣΊΚܘ¬#ά{k„Αξ):3²³vš›Θ6Τݎ:gf¬©­N΅m€mkέ†N«αnχ©f\£SžΦJ·γαЌλv/1¨•δD³ :ΝGΆXvμΞQ(³9σ8cΦFw·l»cη4°‰aˎqΆjΐŒdWΠΔ9g&ΫΖφd§@ξmgCUqw·εTΑ[«έΖλΔαnά{tm]Ά„m:³ aJ.#@‘;–γ:Ω*BYΆΫœ2ΚΨϋ™O}ςγύΘχΰ_ω~φg~ς/Όk₯shήzύ_όηŸωτηž/ήύ'ϋϊ}ygΑpƒͺ—_ϊgδWωW~νΓΏυΩ/|υ|Η»_ϋκχސΖR±m o~ισŸψ>πώώ“O~ώ _ϊΚΧΎωΖ›o½ΌΗ+ΏΩOύΞ'?φθΓ?ώΧζΟύΔ{ίυξw,ΝdΩΫπ›ώΐ―ΰ7~ϋSπ₯?ϊκΧΏωζΫΟ{―Όγ3Ÿύτ'?ϊ›ωώΏόWώύχύτΏσ]―m92fm6˜†μΝ?ώWŸώΥ_ψ??τ۟ωΚ[oyω_ώΖ›+mq―K-ΌύΝ―}α3Ÿψδ§?ύοό–ούϊ›o³QΔέέ¦γrfθ΄j6ά'‡y,aΛͺMΪl΄šΞΖ0C6M™ΛΫ²Œ¬‘Ά«›GΫ6+‡•έ₯Ξ‘±έ qgv(2f›¨ΜZRšΝ’A-ƒ53'S܍‘’™5ΛsJ–!3<Ζ0(8P!ΫF+ξ‚‘6Υ+@ΕΨPGfN\kšΥ‚Ξ$)%Έλl+6“Ln±Έ=j]“Δ΄(ΆiS6hœ°±±MΆέV°°v¦m`dμ:•Ι˜ΕV6$ΛΘ\μ6h(Φ)hΚ–­Ήά)ˆ6•a‚m«έK€bl”©ΚέΆ«γΠdŽSΩξ3xĚt‘h‹‘f6΅Υ63£•«Μ…†­Ψ=2SŠΡέt£ŽΩfΛae[tRc»˜ΉvμT‘†Ω.œΒH ₯1·„²lZΓFp­ΦJΧ¬ZΫάy¨“ Hm–V¦aP"P₯mƒ³Υ¦b…fITjζ€a3²BH@UΈ:w+p— 7‚D 9[!ΦlGf[£όΑωστaθηυώ|u‘έBβF€1`.ccΌ$γΈ9œ&ι±Σέm§?tvgφ‡ύeg—i'›¦l“6vβυmll@ŒΉB·„$tKίοϋ΅Οͺ³•iΥVζ(ΪU­’–V"(I5΄‘!&Ai« ™F“hT%€m(Τ,BZJŠΆMΪ©!D4Z­v%" #I©vΞ(I₯f %hC‚T΅h$Ϊ¨hΥD˜”RQIuF Œ”ΩΚJ:dsVG%QI2©&0ΫΡ&!QQ΄­‘¨‚@©™HΡ (Z™M‚e₯Υ‘ͺR4š(‘RΡItT#I€ΪBΫJ”$­D“L I *’*RhfSD@“$bΦ(H˜(& ĈPՌ**‘¦*I”Ά©Bu’9I₯jކ ΐhUKͺh…$J‚h+ E"&Q­©iCjDI`Π†m›!@E)ŠjA»Β(‰¦- 0‚Ξ$ιŒDͺHΥͺΉΒΘ¨R#”KJi‘€UMAΐΊ-7άtύ »Ά¬F΅ζ\Ύrρμιγǎ<ώΡρ£‡8xμά\σπΩ›6―[¨–TPE%DΝ 
h›€­±ύϊ[φήuΧϋ'φŸ·uοCŸΏyΓΊU€@’­BDΠ9Η‘hΡ ’ ‘UHˆ ²rμΐGGŸΎzΝu›·οάΆ]镏½pώͺkΦ_³~γš’’ˆŠ4sω™γ―=ύύώδηΏώπθ§——kmTšŒB „ΔΚ©7_xβ?yϊείψδΒXνξ½χξήΊiU–/ώδπΑΓΗχφΔρ£'Ο^˜«ε?ύάΞΥcU«6lΪuλ|nΓ₯%@ `ω©ӟœσκύΣW―NFH$IΫΠ’ˆ^ΊtιΘΡO–ΕΖέ·μΪp͚%€¬ZέΓχήΌ{k$‘JΡ P•PI)h‘(­΄ Iې€ €V"ŠVi›H ZJЍ‚šIP$DZDP ΝDBRh’m($ͺf¨P(IR³3’‘ZTŠI€Uh%©ˆ”h’Π$¦"’j+„”Q€! ­ΪJE(%A$Z%€FRIŠ DJͺ­$‘Š„R%ΥV@T΅£©*I#M›e&‹ Ρ”ΩŽ4%„&Ρ’F”Π’‚„J(-¨I$Аj2+•@#Z&©DD"4J’•FTADTƒ$’™E₯M ‘’ˆ΄1ڊˆYZ‰DKIFΠ„šmRQQEUPJ4E(’’¨"I+₯ι˜" Ρ$‘‚H4Υ(‘P΄4‰¨" Š "RZΥ6 ΡBkDAR3E’4m… €0)’( tΆ!  •(‘"΄Jˆ”BՈ6m‘!R΄E:’M#ͺ­¨΄1ΡFJBh4ΣΚΘSg³ΘL#„B¨*E))@@‘hi ’ΡͺˆTi(-i€•h©j€   ˆ’T'C*T!£h‹$P2@%‚hU%hIF( E2΄ ­@ M€NE*€„€¨‘ *I[hd‚„‚¨ !ΡT)iC[! ν‰%’…¨’@‚6Ո$QT§Œκ ”tΆΖXΠY© M*1HΪΡ‚€ •TB"ˆΠ*(LmURI …’X€‚h #PD Π΄D„V‘QDF­H•ΩŒt ˆDQ‰’MmPP$Z•”$ΡΆŠΆRFR"Sێ%)A)­A ’E’€ @'a„¦­DΠ""€΄…€f (₯E"HZΥD‘΄3–ζ$‰Υ’υ»ξψό—ψ³›΄3 σκε Ÿ~rτΠώ·ήzγΧί9ςιΑןύαζ[οΨ΅ιž=;Χ 2ΪF (!ΆEΠ) Ɛ-7άύ…ΗΗΪ-ϋΟuΣ->rύ†΅C‹ BŒ$MΥ ( i«‰B0#Š*ΡB„‹Ž|rαμευΫ·lέΉk]#N~xθτεssλυΫΆμΨΎ€J uυς™£^ΕΣ?ψώ^=|C‹”Π@ŠJΚΩ^όΙOφβ―?Ύ°ΨΎχή{ξ{ΰž;o½aϋ¦ΥYΉψΙ‰ίϋυ}ϋήxo‡ϋžύњλnΏωwlάΌv)X΅iΫΝ|ν›[ξΉΌLJZ"RΝΚωCoΌψ«O_΅~νΞΟ>ΌwγΊΥCΪBθΥ gφş}{Oξ{οΔΕ+],ΖΚ T¨‰’ŒZώt+/<ύδSΏzη¨ΝΧ}φsŸψή»nΎqΗ¦΅«\ΎpςΰGοΎΎοΥ_ϊέ―?σύkvίΊεkχ|fΧΪ…DXΡ3ο=ƒ'žώεk‡.-ΆίvΧέχ=tοgφ^ΏeΓΌpκπGο½ρΪΎΧήϊΰΠώ}OΐΖέ{ώδΑk―‰θœ)WΟ~ςαkΏxς©ηχŸ[΅icΞ_n h€ΈzιΩ£ΗΞcύm>ώ;7νΩΌ¨–EΣ±zύŽ[nή³ύΪ‘mF$m’΄D"Q-ΡBD΅Δœi²ˆΠ*D$% ‘$­) Υ"¨fHU’ €ιB[-!IAAͺPM¨QJ΄D „†6€©@›(H‘0$PC$5AT•hΫHhJR-ePBiB¨$’* $J«•„F• •` ID5Uh3Y”„FΫ"­ (!΄‚ ͺΡ™1ΜΆˆT5!QIP$MS©šT£ @(M­T IΡI%ΪH B4‘‚H[AP ΄DͺMF£©RH’€"C ‘$tΆƒ @:)‰h!’T£MI¨@4%"M#J[H1€H€@¨¦-Œ¨a„T‘*0J Υ΄E‘¨DE5 ΪD ’H€ f ˆCj–@ B΅$T%©€6‰DEBSU%Pi΅IBΣ*ΐ$’*ΠH h¨’m!C… ΄UBTΣΤ$iH΄€Άˆ$* P1TG‘H % *₯$ ‘L3)AΪ«Ÿ>xθθΡ“g>=wiyΕX½ξšk·ξάsӍΧm\»X "­„€Dj6€©V!‰VI+ CˆΡ9Ȉ«—|ωηο\έrΗ=·ξήΆy΅ DB«QΪH U Κ¨†"B@Z‰D«-€"rωτ‘φΏδτ5·=όΠΝλ„v¬,ςζ3o^^Ϊsχή›vοήΈ‹ € τκΉσ'>xω΅ηΦέvƒ·μά΄v©"-„Š©H(­€‰Θ¬Π“οΎόΦΑΛΧξΌε–Ο^ΏCšRU0—/Ÿ=}p?}αJΦ¬έ΄}ηžoΨ½ύΪ5h‘B€©ζΚΕ³§τώΓ§/\iΦή°uΗξλoΨ½}σΪE5ΐΥ³§ŽsόπΑ;ρι₯+s±fΓ–7ήzλžΝΦ,DZ%P‰T # ­B”6ΡB(Ε0' I €¨– ()Y‚f’ˆP(IRBtjdiÎ=7έyχέ›ιŒa ]>΅­Ύί3ίώπ•Λ‡^zι7_Ώλϊ;w­’΄D R3’‰‚$‘(kvμ½{ηή»ΏR35(H€hS­&4$ΪΆŒ6QJWX i£ͺTšJΠ6Ίrό£cg/žΫoί²eΧ0{υΐ‡G/^ΎΈφ†m[7οڈJ0+‘W.œϊψ½WώΤχΎƒΧNυšm7ά΄iωψΙS'ΟΜ‚Ζˆ9“h»re~ϊ›§~ςΫ‡O_έ°χ/ώΑ7ψwΏ|Σ ­>ߝ7ξ\ϋo=ω›γ_ωΑo?΄ϋΎ k––4V―Ϋ|γgΉρ3A ͺ₯MFGFg—?yν©χ_ΌtayΥΖ{όΚwk Ρ._ωττG―ΏόΔ»ΏsψςͺΧέΈΓΕK'/TuBJ£m—?y…η^ύΝ‡¬ίqΗƒ_ωgκŸ?vΓΖ,ZξΏοswί΅gέΚωΡ+'ΎόΤsχfηξν7nY„˜W–½ςďωξΡO—7άzΗΓΏ‡τ»ν½vuΤ¨>τΠέ·_·ώ{ί{ςωwOΩχ“ο½πΰM_»yνͺ!B’AK\½pμ­Χ_όω_8u^”HΪ‚pρΕONœΈ’¬ήuχ£όΞ·m_­ͺU²hŠTJT()"J+RͺMc…’DQΪD©‚šTD’t(-Œ$ZE”Dš©hΣ# %₯Cšj•• ͺS’¦4T(¨DEΪΩ •TiŒNB"J5%) ƒjΫ„„FH2hK›$J%€•DŠYLB€$₯1’%I@5₯- Ρ†HΠ6šh%•š΄‘F@% Ϊ Κ"’Ά %ƒΆ΄ ΥΠD„TΥlGJBBvΪ€šͺV"J[:"UΔ(¨D¨†hCC3(1Ϋ@h©ŒP‘FΡ֜Š’’ΠŒhA"TIΠP‘MigŒ‰ †BT‰Vi5Iˆh EˆH)’F"Uš&US %423UJΠ&E'’΄šΠ”–4H;Ϋ„F΅F¨ŠΠH€M:«’•ˆAUI%4*ƒ""4m“,RA¦$ͺ­šD’ m5‚" š*mιH Z %"ZtD+A R₯MΣH¨ΡC+C€ͺ’AiKŠ@[JI„Ά MB΄‘Šj“6ƒ$i…j(m£ %š`€:seωβ…Γϋίzν…—φ½χώαOΞ^Ό²Όb,­½fӎέw=τΘ—ϊά­;·^³:iKΝ‘T‘FΡΦ¬– $4ƒ„„ΆI ZΠ&QdIPΜΛη/μϋ»Ώϊ›σχό/Η?ΫΌmΛ*ZQUJ«Υ$!B—?=τα©Ή~νζ;6.Zi΄$%„V‡€jrώΨΫΟψΏ?χζ?»ύ‘›Χ1‰EW\όπΙΏό«§Ξϋυ_όιW·ά»m]*#Υ€šyωτΗο>ϋwρ―_>ΆηΟ―·_»iΝ"”¦ Z4Œa΄Ρ6¨4ƒ­0Υ/}χ}ςԝ|σOοΎ~## “m΅sεβ‰cοξ{αιη_ώπτω+WζXZZ»mχήϋϊ#_ψμM£ΙU…Άb@—ΟŸ<ςξ«/<σά ούτΚ2KcνΆ=w>π…/?ςΕ»oΨΌ:-WOŸxoί―^xι₯ί|tτΒ²ŽΕκ ;ο~μˏ>tί-;·\³Hjωςω#oΌτ³§^xσΐα3Μ,VmΪ΄kο}υρ/άΊmνRθ₯³'ήλεžzώ͏?9wa₯KΧμΌσž‡ύΚ#χά±mέ‚i¦‘”’­."m•\ΊrϊΠώ}?ωϋΏύΙΎKχγίώωΦ=›Χ΄FVΞϋθυ_ΌψΎΧί?~ζΚ¬Εκ ;οόβWΏόπέ{woΪ°*¨t6‘T{υάρ#οΌςβsΟΏψWt,Φ^»ηφϋΎψΕ/άχ͛׍qυΜ‘·_ϊΕ ―ώϊ½£Ÿ\ΌΊΜͺ₯ ›oyψ±ίϋύ·mίΊv)4Zj&)„Π€Y2[³” šP@θ(ΠJ‚f’ˆP¨$RB”6IΑRΫΪ&©†YRm1$CΝt`ZΊφζ[οωΧώΥ ί90?πΡΡ³gn·k Ϊ¦ š΄­J’1ZiZ„΄U"]$³’€j‹F«$‹‚AMQ­R€Jš¨ͺ€L) ΄ež;°ψ… —6μέ΄yλφ₯fš++Ηψτς•ΉuηΆΝΧn­$i₯eεΚ§ί~ξGίϋΏžώπβbέζλξωڟύώΪw~ϊσ_œ:sžT‚’ dεΚ₯‹o<ύκΑ³§/m>ςΕGύςΝ$m#mY³υΞΟ?|φΤ‘wήϊφώ G^|ώƒoήΎsΛΊΝ!!Υ$m2b0‘™2ͺWOώζΕ7~ϋΦώ 
λ·^Ϋ£Γ}cΡ–ΑΚ•sŸ|πς³ίω›Ώ~β£K‹΅ΧμΎϋΡ?zdΧι·ίψя_"Ά|ϊζΎίϊψψε₯νχάρω―ώΩWnΨ8-kΆήtΫηΰ_μίχ›ο~|ργ—{ΰ±Οά~γ–kkΜ•Kg=σ—_8½Όjϋݏ~ι+Ώσ•½›–’hU»ΨpΛηΎτψ₯³§?<πτ‡—νϋρs<ΌsσΊUh5ITXΉpτν_>Μ“ΏϊΨφ]wαψ;ϋΓ{NOΙΤQišbωά₯σŸ?=’u»vo[΅zb‚Hͺ"–6‹”΄QT΄‰J#U„Ω&©’j4+m")MΖb¨¦H™ν`$+†•$ιœM5I˜US#‰΄mc4-B‘A[iŒd Q­FRνLFK’Y Κ (%’•4 mKYa€ -DYƒ*a6eTI”ΩŽŒ΄MK%FΜ"Υ„dL(H‘ιJ!!Q-ΙSZΪtdT΄Z‘&5c΄­$ š hΪRΥa&iΝFšR#:2§hD4h+2΄m3ΖΜ”ˆΞΩ2F„QEZ%ZL‘ˆΆI’΄’mh’@[MRUΙ@ H΅$URM’mQ³Fζ6SK"Z3†DΫQD:™ΝRΠ6"Њ6QΥFu¨6 *©P£™„¨TΖb Š”Ω†‘T΄hHF笂@c0«*"Img: -ΠJͺ­ˆ€³Bf€T#£ζhg2D ΡjA#M(J€mBR΄…“U-DV¨Šv6e΄AFημΘ Ϊ¦5‚Ci•¨2“Ρ‘Ϊf€³ͺ"‚ɘΥ4­.$Eΐl‡4©­$JS!¦¦V’m„0™¨ΠΩΕB‡6mEŠH%ΥVˆ$ΡΞf€ΥH΄m§,‚ΐ’EG­”TUJFΪ™$P₯$Ϊ†& Tg³HΥ$A―^<{θΝ—ώώ?ώ§gN\{ύΝwήσπ ;·¬qωΜ‘ί}συ'ώϊνcωχς±‡n[½ZEŒ6±2’6Si3F΄“‘D5‘–ΩŒ-"’%€j:ƒCΡ$LŠ"€J”&‰„šζ0ΜO_ώσμ•»o{μOβsΧT5DZ5‘Ζ 5"‘ŽΩPΡ6)&*‰±HrεΠ«Οϊφ={oύς]U’Ε<μΰ;Ο>ως‘eD«’€‰ˆ*i’$¨ )Zˆh«`Τ”‚RUIΫJZT$m3k@dτΚι^}ρ;νΏΎ΅ρ‘oόιvσΊ«Ηή}ριη_όžYΪω―ί1P•(-A§D/Ÿ~WΟγϋΫί,έφΕΗΏ~Χ΍+Η>ψυ /⻇Ž[μψŸ~†₯•9―Ϋχ³oλgο\^sϋC}υφλ—Ξ|υηO>ρŸžΊ΄όGΏϋψ]ΫΦdως™o}ϋώ«g/mώΜ#>~Ϋυ.Ÿϊθ΅W~ωδύνΑΉύό§{7m^cžύθΝηψ·πς‘mχήΧΎqΣ¦εKχ½τΛOΣ3+ώΧ?Ήkƒ©¨hK$‰ΆΤΚε‹'φπΛόεyς£s—Ξo*%4M―ž~σ‡ίωΦΟ_;}νΝχ=φ‡wνZsρΰ›ΟώψΩΏϋπΘΕσΝςπ»Χ. Z’5/œxλΩ'Ώο~ούυw|ιk_½kΗ…χή|ι…_θΘ‘γκχά1ΘΥ“/ΰοΏυό±M»xόΈeGNyν©όΑώ›s ώΨc·o] •Ζ¨0“jΪΤ¬1’-bˆͺˆ”Ξ&ιmD΄hh’Th›€Š4³–f„˜©ΜΞ%$#]Τ I“”ΆQΦnάpύM7sΰηΟ½zε2’£m‰4!ͺi’ΆUDH+©A΅HhFjhH( ͺmƒXa1Ζ SͺQ! ’$„‚ΡHR³m2’TR‰Θ(‘b`:φΡΑ /-mΫΆeλζ­‹\>ψαΗW._ήtΫ-Χn]GItEڌ“Ώύι?όγ?ότΉΓ—λΆn}θώί·―ή³όάΙ§AˆdΤ¬h:!£ΛΛWχΏρζ©+W–Η[nΏε¦[ΆT;;+£Μ&ΓΖ;oϊά=7|ϋύNΏύώ±‹χ\Χ-k£„dN­Π ι4“ ΈϊΑΛ―ΌύΦ{§ζ¦;wήωποέΎzdj’θΚ§οώβΉo§Ώ|ϊΨ•ŒU»ϊ“χοΎ~‘'φο›1m„”N‘ε{πΜΩ3+‹Ν{nΊω{ΆvΑœ]Ι€ŠZ·ρΪΫΎψΐŽ>Έ|βγ§OΌ<]ΕΥ‹?υΜ›g/ικΟ|ξώΟάϋΩν«FΪΆ+Ιh"ΖΦΫoΎλGο|a――\έΜ+}ύ–m›7¬‡”VVWŽόκgΟώό/[·yο½ίό·xϋ‰°: J§BFRνΤsη.žόδΨςRVνΎαΊ₯5««QRf22MmjˆP‘Ά‰Ξ*T32₯©DE’ΦLeΆ (’‘Ž™Ιli02f’΄­•1₯s$’θ$£m‰4D΄š&h«-J*­YI’IjhE™ Z4‘VXŒ1h©T“TΘ ‰@‚ΰ΄Yσ0σu?ο98Ψ €X άQ$EŠ€F‹Yrœ¨NR·i§MβΤΝδC—i:ΣεCgϊ1? ΣΜ΄3M“t2$M"GeYΆ6вΈJ$HŠϋpA€ΨΧσ>w―«5F3“΄₯‘ŒQ#•0•Œ*1UڈšIGFa& €M#aκl’ C;FTSY‘IR-0Σ&£F:E1₯1’s9KPZŒŒŽ%2f΅΄fΜΜ$#4Π4Ι 235H€*sΎTΪ ιœ•Y$P!‰θR$BF«‹aΆS*ΤlFΒhuJΝh’Ym#„VΗ­vŠd”Bk™±J%e •a6-νh“¦:ΫEkDF«f ‰Πκl,E;SΣ CΑˆ m›vˆP­ΠvVFηΜbQiZH S₯JTGF:ͺPΪ¦ŒŒ)QAiηP•V;1η$Ρ–@:j­¦"5Ϋj &©©YIB‚Li;Cg¦ΜvhjΙHF’j₯š€‚!HȈΉœΖhŠDMS‹©ΠRYT‰ &S£MΣdŽŒB3h†Μ¦’JΪ$ ΥΜ4+Q#Iͺ΄¦ΛD%EEΛLF΄sV#J›,H"©VΣ&1I ΥXΆ‰ˆjFΖiJ5‰΄•ΆM›QI»朒*ΑŒΜf aJƒ©ŒVΖh™33ΩŒ„΄Ρšθ"3#eΆ‘¦9Ζhι‰θTˌ#e@uΰς―ΏόGψϋΑ{›ξεόΞ#·μΨΎ2ΪΔμϊωχŸϊ³—7?rσυΫV‘1¨Ξ2Ρ’m¦0C ˆΆ:‡EJ5Ρε”ΡΩ M*M%©$L³₯€Δ@Σ4™£Œ€‘D˜sφψΗ§.ŸE«s$†Ήœ$E ΤΒh5%I‚9[DH R­B’ZT¬¬,ΞΎςΒ‘έ|Χ]_ΪƒΩ₯•«ŸΌφξ‹ώΣ7׍ΕH‡$$ΡYΘh$©₯Ι€B—²R…T§6U@ζ2›’@§΄I†^yέ·^zκυΕΎ―ύνώ?ΉΓτs7mΫΎϊ‡ζW/=ώˏΏφΫΪt&©΄’e&‰œσ΅—ωσ7ΧnϊΦίϋ_~ލΙͺΜμΞΏϋ7ΟΏόΨΣώΦ‘’K―ώπOžzύς/ύ‡νχΎυωeζ_?ψώΧ穟s碞όΙ‘§^yπ–ΫnΪuxIΫVkDh[K f¨i’LӜΓT₯m†ΞI΄ΕH₯©$ ΐLev&(‰ͺi$ΛZXI›BE’Π& ­™”Tgg‘i(€†°Άame1΄M.Ÿ|ϋυού£ψΨ©uΎόŸνίϊβ]·lE¨‰j?yκ_|‡?ύρ›«;o|ΰwώΫ?ψΝλ³Rτό+?ϊɟώρwž;·qΗm_―‡Ώ|cΆ―hZ$$`f΄Χ:η‰WžώΥ3ΟyνχNž½΄\Yέyπ¦»ωΝo~iχ΅ J™‘ΞV#¦’€jΤΠ ηί|ώρ}χί?υφ…u\;wβτΕεϊΩϋύɟό³ΥΠΞ+gNŸ½άyδ;όΝ?ωΞΪbuηα›ΎώΣ·Fv8tΓΑƒ;Ά\έvΫΧοούG_:΄}mνdKQͺ³M £”Με}ξΒΕΛ«³ ΡΩ΄JT*AΚ¬(EuŽR™-`ΦΘ % Υ™ΦΖΟάwΓ'ηΞΏόΚsΟΌpοοάΏ-LRΙςΤ±—_|φ©cΧvάϊΩG_]hT;"Ν5›₯&:;uda‘©ΤB ͺ"hΪ6IE Υ)IbΞ₯$„j†Y‹dΆΑΕσ—Ν»΄Ίq‘ƒ«ΚLtηm»v]·ώΡΉSηtΏΔX‚$‰hi’ΉΌΎaΓήCŸ»ιžίΈ{«‘4Œ=ϋ―ί³ηϊkΗΟ~όρΥΉoυΒσΏzεψ靟ڝwά³£#IzέΓ_δoώ䝷ŽΎρξΉΟή^—W7ίω΅o~λswm] 2ΆξΨzγ-7/žyρψG―έ4Νυ΅­»ξzδ/μϊWXBmΪ½wχυ»6Όuωμω‹›e€ν,‘4 ΥΊςΦkΟ?ώgOžέϊεί―ώΣŽύ³gί=΄tH—W_μΘϋ6έρ;wίuηήΥ$²XνΦϋΏώε;^ψή{o½σα›Ÿά³g―6­ζγΧήxϋνOΗώΎτ•;6ΟΩ4ΝbοCwέυκkoyο₯η_ώΛ·ά·ζΤ‰O―.vνΪuέ-‹…ˆŒ-oή½φβ…‹—/_YΚT“F$­HΖJc¦Ρ’4ZiΥˆ1‚†6"ΡΆiIEh’ P$E+Z‘A«2Ζ i+‚& @IBšN‰€W.\ϊ䣨νΫ²}‹@—λWΟ~όα‡'uριΕΛW;Qi¦υ‹gΟ|ςΡ¬^έxϊΒ²5(ζ•ση?=ώΑg7_ΩqζͺY €@"i•aύΪŏ^ώώΏώ£'ήxλύO>=αςΥυe“>>~읷_ωžΝοΎn!š–4’$₯…h+ –'}τΖK/Όuμμ²,/œ»zαά§ΐεsg.Ÿ;3Φ[»ρΎ=;BFFΖ»Ύς­μΆϋΟ­μ<|σΝ;7―(P•V…DE«sΩeΡ9gηD%Ad¨βڜ—―^[Βr9ΫRB•M$iD]}ηι'~ύφgΦ·έ~θφϋΎcΣ"M&Yέyϋ_ω›έOξΊυŽ[χ]·ie €AM 
!³b–šΛεΌΆ^H#’i+9]Ήv­ͺsΉœ³•^Ύxωύ·ί»bY»ή³kηΖ ‘™t½‰”dΓΝ»nΎιΐx坹ώΡ»GϜ»y}ίΪjMζ0Τ…7~ώƒΗžyιέs[oΏο‘ίψφ—nYR@«m©†Vh―œ?wφδΙΛ‹Υ•έφ9wρψΩ3/^]ZlάΊmΗΞ½ΫΧ$5£’ Y-$MR‘f‹T$i΅²θh+’j‚LB’΄€ι2‹’(%I"I΄Ε McFL€HP‚’j’@§ΐ@"ͺ‘$•iDΫB«‘d-*m’I$FB2gIΜ’ ¨h³ͺͺ¦©ͺ3³  ΪΠ9Qˊ٨hJdΌQͺ©ΆΙˆ0›QI)‘΄‰VΣΜ)T2IΪ" ʜΛ1©"!iuΩ„4‰ T’JMA0K%i4mfA΄*B(*©FftΩ€ˆhi€m‹˜ΓΡDQ#4ŠNhͺZA‘Ά“D"Υ&‘6!dVHEik’B³&’`ΆΣ™Ρ$UA ’ L2$ΥR fCi ₯2HΪͺΚdDΡΠ6m!‚hNE#J’6I›1@:(5‡$­jhιL4iPΝ"Τ”˜Ι mΖ@g©h‘Ω˜ IΫ₯ -MH΄A΅h™‘eSh) ’#m΅+†ΆD’N•VT*­PLDšh›€‘€-4ιΜΉLFŠe‰4!€Z‰‚νlJ"˜ΠA›%#€$Υ¦m€‘ΡHD©Œ©‘†Ω44eͺ­FTUcvbύδΏωκ›W7έπ~λΑv^·:!EͺΝΚ†Ν+I¨ζκιO޽ςτ“Ο½ψφ‡gXέzύ[ο}ΰϋξΉmί–EΗςjŽώπŸ~χΨφΟ~fχΚΉ_~顏/Ω~ψ3_«ίΊνκGGžzζΕWί=s-‹m»oΌϋ‘―~ωžwnΩ0DΧ/ž:ρκŸψη/Ό}κΒΥ±sΟ-7o9wςR‘‘₯D#‘Z!›ozψ‘+Ηήzσζή»~σΖ i]=φk/ΎτΚΥέ7}ιKχ}πΟίΌPH\;χΙ±Χ_|ξWGή|ΣσΛ±eΟΑ{ωβCwίy`ϋͺεε3όκίώγŸoόφ·ξΌzκ_yύψϊξ»ξϋΝoλΆ >}ηΩ'žxξ΅£|zy}±eΧ ·έύΐΎςΰΑ΅Vι΅ Η_{ξΟ~φδσo}xvΉΨ~Γν|υΡϋo9ΌkS*dŒ„Zέ°ΊaΣΚϊ'ηΟ^`η”Μζςε+—―^Ϋ°iίξλ΄1%ŠΒ0Kη0Ϊv˝ξήσπΚΆ½k#sͺΔ\.Χ—K‹ kk‹9}εS—7άzhίΎέ›†M²ι–;οzβO>όπψ‰Λχή~ΧoόΝΏwuΫ ΧοάΈ`VΣ₯υυ«K«k7,F²Ίλ–‡έ}ϊΪΞ‹FdκςκΥKWΧ­nήΊm“$νlJ@&bh™’ ϋίχΝΏΎλKέtηΑqjΓ ΠˆΞή|ύ“KχΪ³ο–E@’±zΰŽ›φlyϋŽτρ™σ=ώςΏύwΆ~ωχώγGnςρργ'Ξ­lΉύΠ-ϋWG-΅MΖΆ}ϋφΈ~Ύrς­ΧŽη³‡mέ²qe^ΊzωςΥυZAYΏpζάrΓξλΆlά²Ϊ­”κε£OθιW>9·ϋήo>ω‹Η_xϋγΣ—³yΗ‘ϋxψσŸΏη†λVΤΜXF4η{ρ‰ω“Oόλ_/?ωΪG?ΎΌvέ­>τΘη?ΏσΣφΣ'ήψΰ“‹cγž;ξ{ψα‡ξΏmίFc4€­€&Θd$XQD’h‚”V© ’h# ΥKΗ?<ϊλ?ώΓυ,{ψΒ=φν\-Z-’ŽTKR@£aDQ„@ΫJ ΄P]^9Ρ»OόΫύ'ώβ{η/―μSΣ†{φnΩ²e­6#b¬mέΌcίލ޽θβ‰g.]Ύ"B SΟΎρΣ?zμΩ׏^άyΣΏπ΅―}«]^%€$ΙH•’ž?ρ““g:Χ—§~υ½ϋƒ syνκ΅ε”•΅M[―Ϋ΅ο¦[ξyπswνήΉiΓi‘ιLF€Zh%"A«H­‘’B’’fjBƒTm Ρ΄#DiQ­ m[I‚F$Ϊh’‚&Z’­€”  @I@΅ #$ ’Jm+†h‹DBΣΠ€)A’Ά­$Bi… ΄- „jBCeH-ňʁT“˜%4њŠT¨¨ (ͺ€’Ά*¦– £­D ΪΙH ΄#’ΤBE¨b6 ITΫ …’m;’!j΄Ρ‰$"Š0„B„’F‰ED‘‚©-$ f4iŠTt6¦ŒJ)ƒBg I΄CC₯¨dΠ@U£ ©( !‘9;# ©΄ƒje΄΄0ŒH›¨(ŠJ •¦"€Άh%΄@„)a F:B"0§ΐ­κˆ€„θ$ 5ΠF…$AL’D 4"‰$@B«ΖΠJ)Ihf*ˆP₯ͺBIͺΥH@Jj%ΪV"mUB΄TZюhI JΫ AiIΪATI‰D«‘M‹ΆIiT;e΄­DUBfEP$Iš O:ρώΗWΦΏή[―Ϋ΄²2Ϊ ”HcΉzβέ#O<φ“§_ψdeǁοΌpβύ_ύθΓχ>ϊπ+_ω‹έΈΥΜ…ήxρΙOΎΣm7Ψ~π¦Υ“'޽τγοτςm—N_ήΈΊvΰπ‘³'?|ο…Ÿ|tr}λρ;·οΪ΄zντG_~κŸlϋθΓ=mΨΆ{χκιώϊη?όδό…ΛΏρΝGoΩZ-B˜O{ω™ύψ±·―\ΰΐ ׏±~ρτΗ/>ω£ΗΟώΕΏς₯ΫφίqσO^ύ£‹φμΏω3χμΫΊqϋΑλW;zκ•Ηώδ±ηŽžXnήΉπΕrύςΩχžόξžΉό[_άgn_=χώϋ/>φέ?~β•s;ή·wΛΪβΜΡχΎύΞY9 ‘PΪj”ΒΨrΓηξΪξκ‰_ΏόΤSG>wθα]‘Έv⭏Όπζ…qπ‘―}ωΆΕ 3Ήψρ«O=ρψΟοΦ]ΧοΩ|υόΗoΌπ“g―Μω΅GξΫΣε΅σΏφτ//m\ΎzξΕσŸ\^\Ώι•υ«ΧNΎϊ£ϋορζωΉύϊ{ltuύΜ»/<΅ρН7άΎ)X?ωΪΛ/\ΆnΨΆ{οΪُήxφΟOœYχ»_ψξΓ[‰ΦΪΎ}oΊsο[O>χΣίϋ۟;ΌsΛςΤϋΟΏπϊΛ­·?πξΩ‘1(m©Ω"IU¬mΫΎwϋφ*$ Ξ½wμύN^ήΎηξΫχ₯½tβΔ§WΖζΫ·mέ„€tσΎ]Ϋ7mxοτΉΣ§/δφλ·οΏu«$tVδΚιΣ½ώζ{γΊ;>shΣΖ΅t¬mΏ~Ο6$QΝ• '½πτ ο|0φίrο=7oT DšΆ €DΆμ8pλΦ½Λ±i±θ™T!‰e―ϊθΔ•υλvμΨΌ}KR€”Υ={wlZΉvζτΩΣ'/mόπηρΤ«Ϋw}υ۟=pεΜ™σηΦ7oΊnמ5”$bμΈnλυ;V׏Ÿ9~bΊq±οή?‹_}αΉCϋvάs`›s'ί~ςg―ΪuοΓw>°k!h$΅Όpςέ7Ÿ{κ׎½qΣΨ°yλυ#'޽σ«τάΕ~γ·ξί³A4 Zrυόŏ_ϋΥ“Ώ~^Όύ†=;ήΌ8ρξ―=ρƒχή}†«οžήΊχΐνΫ.|πΞ;Ο>~ζβ₯Ήφν/Ϊ$­VΤ4c1²£DdΠ‚€€ΠεΥΛ—/ž»°aΜJΝ/―\<χιGΎύβsΟ<ύΜ{Χ6ξ»ηαoόφοΨsύζΡV΅јhC+)€Ά(”&@!Zͺ3ΈvζΜ±ητǏύςέ ΛMοωΒ—ΏπΘύχά²χΦ΅qνΉOŽΎ~δ?ώμΉ3λD!4IZ4„€ζΊΓ·πθo~λ‘»6sρΔωψ#‹CwαλίόΚg―Ž1JZQŒ±VZ„‰ "HͺUΥd±aÁ;οΪω“3§ΟŸ}ΧήxύŽ{vί΄s΅ŠJ’WNΎϋφΛGžy㌠kϋφμ\έΈFE ŠJΧ畏ŽότΙ7OœΏ°vγ=wίqί]{6G[!ZJ’ΕXYl˜%hQŠ€iEE’Υ·άpέΣΧ­œ:σρΡw_yρΝvί~έB«Bb^9}κΨ―ΕλŸΟeΆμΪ΅}ΫΦ5ΙςκϊΉOΟ\C6mΫΎemmE P‘jΐΨ°ΊqΫΞ-\2ϝ9wυΪΆ a^½tξ͟ώΰϞzυψrσ<όε/ϋΰ=·ξέζΜ‡Οό»υ½—ŽΌpοαΓ7Ύec€U¬lίsΫ½}ρ½£φΤwέ₯·^·y~ϊώ[otΓmήϋΩ֚ΤT’(‰­JΘϊι7_~φΘ«Η;ξΈο‹wνΠυK.]‹-7lΨ°ˆDΆl\[lX^YΏ|εJ₯š *–—>ώΰυžϊΥGΛ›Ώψ΅ϋχnΫΈŠDS5―]ΎψΦΟφ‡Ηήzν­OΖΆ;zτ‘Ϋφ,(•‰HII"­•±²2Fg+Ξ.Χ/Ώ2mάΈΆΊa…&‰ͺΨΌuΣΚJΟ_Ή|ωΪΪΆ½7?τ΅oΨψ™½k›Ηι+—―]™ΆmΪ4’2ΔκΖ΅k«Y^ΉpώJ»iΛν>ϊ…ώτΩמώγσ§^Ω·Υ…O½ψβ…έwο-ϋwm‘‘ΆDBͺσωΛΛΛΧφ}α+χέzύVΧ>yυ™Ÿ 8mΦΔ,ΜΔ|έΟϋž>§»OoκnukC»„‹  f5xΌŒΗ•ρΜ—T₯ςς5_’ͺ$•ͺŒ3™ΚT*3žqΚc[ΨΖμ @H€v΄wkνE½ŸsήηΞu}σ?yφ±Όαχ½ymT[₯έ™;N_ΊκΖ»?ώΑ›φ/O?ωπ?}χ?xψ;§nϋπΧΎψΫ·ήά>ρΔ?όε7ςρŸΏοίΈώ¦5€P%Km’j‰ Κ,Mτ©WŸzς!ηφ.ΪFV«+[ΞΌsς…ηŸ{ωΥSέ}ΣoόΦ‡?σ΅―|ψ¦C»’€RDK ¦@)-D’ ( 
UέzοΔλΏψΡwŸΉ°3wΉσSŸύσξc—ΎφζΫn»nχΕwϟϋΥΣ§J΅ι¨™JSS4RΛ,:ΜΛ'^gkgkΟρ#‡φή»Ü«σ'NœΪ™«#Ǐξί<΄kŒΠRΥZΠΞJ•¨¦UDΤXΫ}πί{ΝSοœϋέ~φψφέχ‰{―»κΰžυ΅ΡΥφεσgή~εΙούΰ‡ώςν•Εξ΅γxί፽KigI(Πν‹η_Ι·;yϊΒά{ϋ=wή~Ϋm‡Εl"@!‰₯@’M5cŒΎλξΫ}φε·NΏϋΪ+?ΑίΏοΠοήsΓΥφn¬%sλΚ…χN½φΤ/ωΦ?=yjgΕξλ―?~θͺKμ¬V/\„±Ύ±1ΦΦ*d€ΪR€Ιb¬νή³[pιβ坹ΥT­.]|σ™‡φoΎλs—χίϋ©ίϊ­ξΉρπZΥACi“Ξ&ѝΕΖ恫oΎin­–{φmξήΨ΅km«ν+ϝ=σΞΫoΏόό_~ύ΅w/fωΕί|Νw…† E!h+0§HPMIPΪT“„4TJ i !4QΚH΄¨ "@…ŠVc@€‘H •ΦH5‰AJ+%Ib6mK„€m‚@jj*$A©Žš"’`h;k$”"΄Ih+‰D1ΣH‰†΄3"HUQ‰jZEDΫ$DΠ‚6aJˆ6C’‚* H΄ͺ‰6Ձ‚΄+†-•(H+©¦‘EBSM2€LhUB!­()­D³F„’BFh[E‘’#ˆj4hšΑl€fƒ΄‘I•išJ›΄(€SιΥˆ Pˆ†BDgζ"‘JŠ$ΒlZ ”T#-Ζ’€$U•ͺ’…*” ME%DΫ΄FAaΖ JZ­V$4mU#D5hˆΤŒ $‘©BK Ε°h§Ά*I€Ά@Σ ­$‰’4a¦¨¦‘-@‰ͺ4J„*HhΚH€4Uƒ†PUHΪ¦i!ΠBΪ "AT›€ šjR 2‚PDͺ$i'T’i+Z‘@«hG4:›$ ˜²PΥ΄RtUΝ RS2v-B)T t4Υ 'ž}κ©ηNΟλ>σ…ίϋτRΣ Χ]»ϋάΩӍ~υΘsŸώθα=ZΊεώ»ούΠ]7¬ξήΊmγοΏΥΏύ·ΰΦγ=WNέwγ·ώρΟΎtnηΆƒΛΧάzίgΏόΩΫΦ§.ά|υκ΅'Ώωθλ/=ύκεϋο0€e^xγΕ—~υδsη~βθKοΌό»%ζb}\ΊpβΉ§ίΙ‡nέ³s}}9Λυ½ϋφΨCuΎϋσόβΥχ–wo¬ηΒ;―½πNunο>ΈoœzώΥ“§ί:³gΧ«/όόΩ‹k·~ωΏό±λv/–½αΰ<}β₯gz‰Π¦E“$@³χΪ;οΌη₯gŸώcίύώ3όΙ]{ί{ρ§tχΥl½wβWίϋώ£―δεη>χΙ{=Ί6qύΝοΏ¨²8pσϋοδW~ηΝΕθά~ίώΧϋρ―Ÿxσ­wί9ηζ $ΥtnΟ±{―:ΌρΤ+/=ρψk‹Ή}ωΚjΧα­­‹§^ϋ‘c»Cš TΡd¬.ΎωλŸ~λ‡OΌqώΐέψw^dggΞ©I’ h MTgTZtλΜ›ΟύδαG~ϊδΉk?ςε/~δϊ½k M4ιΞφΦ‰_>ώσΧNΎ}n΅οΖc{voΨΩjΦ’BuŒ1ΪU[‚†–(@«©N’Œ­¨ŒE’Άk›GoψπΧnΈύβb߁υ…7Ϊj$Q’”Qc‘ ]ΝΉS½rel<Έoν΅Χ_φΙΣ//»ΪΎt~{οΡΛ§ή:σήΡ£{7w/ •ΖΎCΗοωΤWΏπ±}‘nΎnΟΩWίyγΡg^zξω³ŸΈιˆkλΗ?όŏάuδΨΎΡ«ξΌε…§ψ•—Φοόμoήvύ°tπž;oxϊٟόκ­Χ_½θ¦ύDeM’$+–M«$Β€’$¨f@K θ{/?φΝ—ϋ&{Ύοή»~σKτ™;Žξ].ΣdC% Db΄€M*ѐ‚&$„”i‹*$†D·ΞΌy⩟ύβΤ–Χ|賟ώΐW%1Ϊξ½εΫoxθ—/=}j‚T!- ₯’H#‘Ά;Ϋ;oΎς깝U―Ήξͺύ,h―l]~εΥ[s΅~μš#{χξKTΫH€™•€PZj¨ͺ41[SD"%‹ύΨg~λ―œΏ|ρ΅ΧŸώΡΧί=ωςsΉήχΩ\τΩ/=υΨOŸxφω7ΞνΘbמγΏρΐM›{χ&-RΪ"ιεσοόϊϋίzςΤΦΕόΠοΎγ–λ6 ₯ ’šΡjΫ$H2+ ΜYm5#UB‚Ε χ}τΓ/œxχδO^~χεΗΎωοήxνcΏύρ»ήwdίbl}χΥ§~υσ'κδ™mdγΪ{o½ώͺ«7‰ΩΉ³ΪΛεbŒQ‚Ζl“‘––c¬―-ΑφεΥ\΅ΥΦιW^~δoŸοΎvnΉγ7Ώπ©ϋo»ύΠB[N Z:[bv–A²yνύάξΓ7ΎvϊςήnΎρψΡƒl,\9ΞΙž~βG?όώρΤ…Χτ­:~δΐ‘cίζ’#ΥQΥJ%ΖΠΩJZΕ0’ν$I€’‘E’TUEHΣ-š$I’%BθPIgBhHE“V"2@ͺΥ43©”Ά“(‘$A)i˜³‰΄ESHhZF©Šͺ™vD™BΣ4I€” ˆDΛlBΣ0‚V6"ΡFΒ€₯’Ϊ2#1΄Zš$JΞ€4@C’ASšΑˆJ†θ, "˜š6mkŒ9§D ΄Qν@Z393B’΄ZI+ ΄J) ’ZŒ62΅ ©U‘Q!‘¨m(D  ImuΆƒ€I€¨V*ƒ΄¦ͺHiKΕμ,©H$i;kΜšRUiT[#Hb,¦Ž*• c¨MZQm!QM5C«’ΆΙHiPm+’騈Ά£šHh’bFJ*‘¨ Ιl#IBH©‘™1H₯h h% ‘­• ­f)FUΫ€ιdBζμθΡhΣ €C*­ H’©:›¨ ‰h%Υ”Pb0iiƒθμ 1H`VΚH   ΙАh©¨fQc¨ˆ& Ϊ–‘s,†˜-UF™Ιˆ5“ΩΥLRE#­Ρ&J!ˆh“HQE¦’†YeH’(‰U*5Šf„š ’­d$ΡΆΥ*©DŠ.w-vνσ οί™sΡEΒT₯,2[ͺοΌωΦ»o^Ϊ·š›ίΏΏ©DΫυλ?zΝ‘§ΞΎς[ύΨM¦Φb9ƈvvȞuΩΏίΖb#ΘbΉά΅{wW—/\šsv„±\ίΨ8ppWӚ5φ]sόΘU›γ₯σoΏ{.–QHX=}ζ­'Ο=σΨ_ύΫ' μΪν]«*€J[έ:ρβ‰ —ή;σψ·O=Π€Ϊ»ΉgΟϊς»ο~λνΥζ‘;ξΎ~cŒŒ6i±\‰€QfŒHνςπ5·|ΰ7>τ«Ÿ}γ{ωαΟΉύρ‡~ϊς›»ξϊθ‡ξ½οϊ]ήm*’ΕšΥ•­χή9{εςΦΜστω흝­­+[WΨ…Δή;>xχυΧΩ­ΥK—.½ρΜK—WΧέsη±½‡Φ£i‘ŒΉ ƞΝέ{χξR²°ΰζΪΪΌrykk+(i5CΫyξδ―ώζƒϋΘ3{?τ»όcΈzϊκμ›Ο>ςΘΓ?{θΑwήΨΎκ–βΞ ’ I‘Ϊٝχήψε?ύέΧϊβ{>φιΟ|ξώkΧ“fμΪX_ŒΥφΦj΅CFΫΩΉ σςΞΦj•εbΉ\Άa’n]<ύλΗϊΦC½°Όφ_ϋ³ΟίΆm5ΞFX¬ο=ψΐŸύΛ[N½υκSπ{}oί>»ϊWυοά±7%AH1t&‘€ΘΠj΅"‹±Ύ{‘nomοlΟΩ΅HSƒΉuy{΅ΚΪrΉk­YŽυ΅ƒ»ζDw­νZ,[s{kkΥ=¦.”ΖφΞj{5ΗήυεΘ…“ώρožz{σΞϋΏόΐύ·έΣσο½ϊψχΏρνοΥ©­ρgŸ½οŽcΆeQ‘,7Φφμ?΄‡ν½ϊͺΕgN½ύvηaA“$£‰H’Ε%ΖΕΪϊڞ΅Εζώ£#ͺˍ΅]‹ν+/―HT3 %Tΐ2U4•*IΆ(ΘX,‹’ -hη•s―<φ½3ΏόΤόΡ~βφƒ‡φ„6mΫͺmK‚΄a₯šF5K D΅auϊΜ©Χ_xρ²±ξπύχίΆπQ3dΠdͺD€Αl‘$Υdvga˜Ϋ;—^yιδΞΞjσΨ±›ϋvKmmoyνΔ»s:rόψζή½C€sV%„BF΄$PΪ‘Lښi#{ξψ½υϋέΞ·φδgή~ϊ‡>υƒ‘$c,#Σ‚Ή±ΎοζΟ~ξΞƒ›λLJ*f $ vϋμΩ·ρ­o?wq›cωπέ7ή~ν.ƒv,Ϊ‘v’Ζjv9ΠQ:4¨6 ΅ͺ±ϋ/~αμΞί?ψθkο~ι‰―Ώψσ!Y,’±`%w~β£·?²©­D‘dŽŽ¦S†Ρ%ΐPιΌςφ«O?ςWι‘7v–w|ι«Ώ{Ο]7ξ]˜ibLΠΆm€cjͺ•±kρξ9φΎ{΅£$«4έuθψΎ\sλθνλτΏθυΛηžyδΙηnΏυξξίί’Z‚©vfJ€@ͺFQ$Αμ mg2FΖΚD%iI# e5;†Ρj'Ι(£ "Y˜«™ŒR%)ΥV†¦4BJζœc ƒDVΝSD‡AG¦ 4Ι,Dh©&6iW’ašJ ΄ ’†ˆ’$i*:K%ŠŒ€)dD†²ͺΡF@hΝHu U4Rf³ RI΄ΜŠ Ϊ]Υ€$IΜΩ‹™T32kΉh!$΄•±=»hHΣj§$‹hK*Q™νVm ΪhͺL23*UE2ŒͺΆ"Q‘a†2“TLF%JD1†T5£@Wi†1Sš$‘ζœc€cŒ©*Πθ”±H™MuΤ‘h₯‘sΞ’¨ΆQ€i› ΠP‘*Uh’0;I„€TΫ™$mjFP’Y΅Hg'Ι(! $’‘©‘L‰¦Ρ΄%€4™dvŽΔZ ΩiF*JRν‚‹jІŽd6@š2;GFΪΖl£Ι mƒh§$$ m“$I[CK! 
EF YPfa ƒ’‘ΠV›€΄ AR³ΝHZZ"mΥT ³ IF: ­fdΦb*IŒΞ•ŒvH₯!s$ @%H[$E΄4U*™dʈ@«jΞ‰‘EU[’θ 3A¦6Y˜Œ  s2 ©isΎ#Gχυωw^xۏgΉ”RIF";s΅©Λ—··Ά,χm¬―·Fζl2²Ά±±Ύk½[ΫΟm'Λ„H’$CΣ6P%P@[DΫBH‰]»Φ–ΛΕΞΞΞ•+΅ ¬ΆWΫWΖα«οωΚ?όρ­ ΛΠ†XΝ9@5ͺv.mmΟύχρOψ«ŸΎλΨζŠ$υφ£ί»rιΚryυΎM2Y)0U£i+ˆ* Π²~Ν­·>π‰ύψόλόυ[ο|δΡ=ώιίwΟmϋνœ@I»uξέ—ŸώΩΓ?~μ©^zύέ³—.\Ί΄΅sΥΗnΦ7ΦF††™νyξά•,χξί‹5-R :›ˆ„¦­,Φ’@ ₯2BqαΕ'ωӟύ*·όξŸwβCk‹©Μέ~Ν‘=ώύžyψ'Oύρ]\iD#UΥ&iTΤ ΫηΟΏψυσύάξίώ|εσΉιΰ’l:°wΉsαό… Ά:Χ ‘Œaητ™s[—7677G"1ΪΥ•7ωΗΏώΫοΎ΄yγ§~ΟπΓΗΦΚθœΨ™­Ή‹Œ1φΉvσΘ57άφώ£k›ύυ―ςΗί{ζ“wάΏ– m •YÌ-U‘‘*•€€Y;pθΐr\:w~ϋβε1ΦΆSFœzηΒ•+cssΟ榦šΞdΑΑ}{vo:³uξ½s3ι rώΒ…3η.΅#‡.Ίύ‹ο<ψθK;|ρΛ_ϋκ§nΫ›jΗ½Ύοθ₯ρ_πι§^|wήq`ί iی΄mI:vEem}Χϊڜ—wΖ i«…jΆCE€‚aTIΪΪΆΣL ˆ₯aT«tΡA§$IGούςρ˟Όο#˜mVW.ŸyλΥηž|τ'?τψ‹'ϊwζ…_Ύτ/ώΫίϋθ­7\„(Mͺ‘MνͺΥθH›RΑͺMˆŽ Ρ΄ΆΞ]zμΨΖξέ%!PE5M4‘DSm•BiWmd P*MFζΖuό£ζκόμρG~όγ'žyώδΩs[Ω³χπ΅·ά}׍=υsO>ωΚ₯ύ‡―ύΔ?ϋό΅‹}Λ9–Q†(Ϊ΄l:ωϊOΎΠ»Ωž»oϋΔΗn»αΨΑ…ȜS F†€$ZΙlWLQ@… chW’ λGούς?;zϋξΡχ~τΔ“/Όφξ…Λsmο‘c·ά~ϋυϋη»/<ϊπ³;λkΧ}εw?rέuGΦ#j1kΛ5°½³3ηͺD«$ C΄«nowηβ6Xί³X[.rωΝ§{θΑόΙΩυ±yηWμ³ΈφΠ¦ šΞ !Ρt6I€Ϊ˜‹ΜŽI©Ž±ΉvυGτ ί{ώ?ύκβ©7NΎzςΥW/έwΟn 13’†R³‘Π΄hΞ4M₯*Ɉ$€₯ iΫX€ ‘ E›ΜŽΖν0ΪIͺDR£šΡ4θœΜYD›hCΪV‡h„N- iR«UΗŒ΄;ΝXΝ™$H[νl#a’F¨$™­4‚fΆM‡&‘2U 2Ɯm–0m m4ZmΫJB»’ ₯Tf2t–’•fJZmΪV@£"ΡC‡Θ(T;€tEF"΄³Σ IΛVIΫd” Ί£"@™΄€¦’Ej1$C)FC-"Ρ€΄U Veˆ$R+e,D’І):'†$ƒΞVͺMg“DZFΪ¨‘θ¨Nͺ2 ©jg2’AY5‰Ω$BΠΤ,B΄4Ϊ΄νΘH$ˆFESmŒJ’€‹j₯³f„…ζΘhg 4$2f›Iˆ¨vκͺ# 0ZΣjLm›Aš2gcˆ*‹tΞΖ”Ά‰ΠΆm‹0U3B%1 ‰C£‚H€TW³ EfΝvt.‹9+IΫIΜi,@[šκ€’$-muN‘ͺ’4A•ŠDk6™³#Ν\$ͺ chۊ’"‘h‘D*i‡΄Iš Ν"‘™mΫΤ c!QŠΦΤ™±œι0P)!Υ1F;A’jΝhIΥ €ΤbDLΔH§‰H£m[%5K$Βs5'‹ Œ6TΔΪΥΧ^}ΛmΧ}ηαΎϋΟ|ςΏΊ{m±{ Τ̈Fχμ^[_wiλςΦεŒ5Œ΄qeλΚ•­+‹΅=»χ,t₯΄Z-I$…Θ ‚˜Ri‚ΔˆΡE΅\ΊxyλΚφΪUλ»7΅…ΕΖϊϊž΅nŸyηδΕqσή&ŠI Ί*ιXΫ·oΟςι³η.œΫκ±uEb,ΪΩΖbΧX/W.]^uΆM3H@­i†ˆFΚN[„QlΊϊφίωόίωΧσWol;δKwίrη‘Ήv"ŠK/?ώΏών§_ίχoξ_~ν¦c‡χ½χ³χ/ώαy‰4‰†.—cχξœ?{n{{«]Β ΥU;Ϊ€Β謘U m1ہ:wξ½³ονμήsτΖΦ΄³"rθΰ£Χ\5~uιόΩ+ΊhPˆ©NI§¦£‘—ί~λ©oώΫ_>½ηwώθOΏς‰{―?Έš‘]7έ~ΓήGώΦΙSgN―ΗΧθμΏ>yφ\ί{τψρΝ!ν\ν¬ήψξ_ώΫτΠWίυΉ/ώξ?ΆfN 2†νΣ―=σάλο-7oϋΰ=W/ζ\ΕbκΪζζήΝυΕκΒωχ.Ι©&Aͺ‘Ϊ0˜€ c‘Œ¨ayύ·ψΞco8}κΤ*ϋΧH£§^~εδΉn^δΨΡύ!a$:ηζ‘c‡νyωωwήxρ¬/,Œ6Όϋφ;o½³³ηΰu·ίgNΎ}ngyέ΅Wν?°·M#»oΈικ=½}ρ• —uS©L$‘ˆ.&SΟ_ΌrώΜΪ}{+ƒ΄D"€$f ΙΜQ’6FtT+I ι–mΝΐ 霣νP") ΄U0ΦvoξΏκΘαCm„‡_uμ†n½εΞ[όΏΓcοΌϋΒχξ[ΧΩ}ྏJi*M©‰¦"IH˜Z( I@*•\ΉΌ}ρΒΕ)Λ±ϋΠ‘έ‹Ε2!hΫ6#ŒB‘΄JC2ΪζΚs=ψο=τμΫ]νl]xχd/­v^ώυΏψa=μl]:ϋΦ{έξ•ηόŸ—ξή³ΆΨ}έέwς+ϊΡΓ#‘΄P΄$‰‘d$IuM4Rs6cΉΎηπuw}τΐuοΏοσ—.]ΩYΙb±Ψ•SΏψζƒΏ>ρΦΕ]Gήόρ?όέw!­jT«)R‚ΜΣo½φ̏τά….zΥoόΦ}Χίpd=-0UdΆA(B£4# $‘Π6B«fM’]ϋ½ύŽ]σΑΟ}υβ•νNΛ]στ3?όνG^»ΈkΧζMŸϊ“Οίú½ T,—csίΜΛ/ΆΆƒ¦‘AΤθŽν —.‚έ{–‹}β‡ίύΦSοeΟ5όƒ?άGΪ³HՈ‘³c ΪΆ(ζ£J’4$cμΪsόžχϋ_Ώqϊς™ΣgΜ~odO[*ͺŠ"­Άm…RŒ1Ϋ1+ €hg$’”‘€”ƒVHC“Q@΅!•ΒTZŒŒ¦eΚPBΫV’Δ M“H:S$$‘cHh’Υ6!iΡ’hH ‘3$Ρ6‘ Œ ZͺL‰hΪ’1RTΑœ‰*’AΫ1† b‚Π!5[’TJ%ι@ΪΩΞd$$ͺZ‚NΕ(‘6’V5E ΥB A;C£Ϊ„ ‰!΄ ‚Άͺm‚¨&iKTD-%ˆR΄E(MS#£"Π€‚H'šQΡκl‰ˆ4Jt€ͺˆ4†ΩŽ$‰ͺj1“!mahΣXT#ΚHΪR@#IZΥ΄šΕh›V5*UΙHFΣ(BΛ$€ £€$‘Ι¨Ph1P‘ZEc-ΪΆ’$HK"†‘mΪJˆ BK’d΄%HbΡ6’A΅%Ρ (ΡP‰!šΆΖ0€‰…κlG„6MΜvPՌD΄­ %Ai›’JLΚ‚9€ZΐH&i‘ΖOœ6ϋ}ζaΎξη6ΰ`%V @μ$H‚;HJβ"KŽc7UNœL“N^tiϋ6™~…~vΖI³Τž&NmYV$Q–DJ)ξHqΈAμ8ηόξ^W%jκ”Œ4Υ M'Ε(R’h΅’šTS§H  ’ ‚fD†‘ ΄m’h!HͺIΪB!UI15Ϊ€(΄ˆ™4Ι0΄jtˆHKeT΄:‰Ω†}·ή~Ο7οyω|α'φίέϊΏχθι}+›fPΦ_|ν‡?τΐι»wνά΅cΟο?χΖΧ=Έyͺθtγ£χ>;ΩՍ{NάΎo AA΄m‹ 0bΔ@¦©k«mk «ŸΏχώǟݘmέuλώΎž˜mΩqΛΎ}›Φž?χμ―>}θΫf‘f}}mZ_ΝΒ¬₯ ₯B3πΨα-Ο½ωήΫ½ςδ‘c›¨–΅ΧΜΟΖάς–M[·oλG>zχ’][“!ΦVoήX½v‰©H‚‘A)IΆμΩswΎσ£ΧώκγΫο?{ζΤ‘]ΛsΝΠ@’qγ£7ήzσύO—nψΏχν›7nXX˜›m^œŸ'H€dγ†εGΝύδέ_όΡύ{Άν^P’Y0U‘€ΆjdˆΆ‹––6δΖ—Ώδ“λΣΆΕYŠK—―~υΕ•ΜoΩΆgAΒΔ΄M22Qhzυόϋ/ύδ/ώόGo,>ϊ½ϊ'ŸΪ»}iŒ4vfσ=wψ«·^~νΝ·OΌ}|χ©ΝdβΚλ?{ξ·_,ν>{ΫΡΫΆ€¬^Ήyώ™ύ?Ο|Ίλτί}κ[§o]™ŸiΪiˆθŏί|φGΏϊjœιξ|ίφΙΠ/?ώτγ/­.μήΆk£°~νκΥ.,.Μ/ŒV:ƘΪHS“΄v6[<ρθ©]―όόάΛoΏsΫιΗΆΞ°–kοόβ™·?]ίχ‘[μ±ήΥ›—―Οmά8Kf»=|ΫΫoΌωφ―ž}ο‘?:΄4¨΅ ―ΎυφΉs7oΩuί™S+£Χ·¬,Ž΅ίqρΚ•«½eγ0€φροo¬/lή²qΣr”$‘m«­NΦΧW•DGΎώψwŸ||alΊuί‘IjύϊΥ«[\œŸŸ•Ά ͺΙI@ΫijΪ)&@ΡΆΧ―\ιΒβΒΒŒ@;E2g‚j*‘ "€ €M΄Υ€mΖάβ†Ν‹ϋ–O/ΈςΑ―_ωΟοάΌψή 
o½χΰΙ#ΆoI¨©6ΝΊD m[(#ˆPͺ¦΅imuΘΒΒ\FΡB†IJΓD£i€₯RΚ E4Φ~υΙϋοΌώΚkzνΒϋο_¬_ώδΛŸ ΛW6n:y3R’ νΤ*DˆΆ€E‘ic’Ρ&c~yσΆεΝ[T5ωϊϊ“·ήών_uσm'οκΫμί8Q*@¨ΆF+In|ώΑ{―ύζW]Οάς­=qϊΦ[FΡ’€S„T•HΑΊΆPB‡€F2hPζ—6ί²gσ-’YΊςΫ§ίψν―ΌχΥκξύΡίΏwηςάh[bΜ/¬lί>磡^ϋϊλ―oάΈΩ.–$04bΊ±zνχ_\њmΩΎyαϊ…s―ΏϊκΉO―ZόύΗ/όηχΡόά,!θzΎxσΓ―O½πφ/ώΛύρ―·ΞΖό†­·η|κp–η!ddκ:!!Ρ6Ζό-;WζζζΉqσζΝ7Th„ͺΆ’f’D'DΣI$$J‰NE’j’ͺv¨Μh›$ͺA΄IΥ’€Πͺͺ!‰H΅ΡV) D$ˆV›ABͺ%:’Ά¨$ZI*’Άm€ MΫ R€Mˆ„A›Vb€6iETR‘΄MBZB iM΄"BUC…–2ͺ™dL’‘ˆh₯BΛ„$(Dˆ–V@)Ρv"HK‹’V%©*©$B"m« ΡF› •D€v2΅$ƒT%AΫ©΄IΪΆm„!i Ι4Ι,P ͺš¨4#•‘΄Ϊ$ΪAی2bR•Π$C[JC«iRF(’šPΥFΫ‰5…FΣ©‘€Dθ$U Ϊ2Α”6 ΥHP$Ϊ$Ϊ‚$Δ€*͈HU΄”j$II!„ΖΤ€’JJ΅ F‚Ά•$m₯’ͺT«BͺMΤ€C#€h4Π¦m£ ’Ν@΄ ’Um1$:΅’’*ZSΥ$c5"!Κ@0΅ )%*ΠH#…*IKUKΪT’’m:$ ΄mB΄TBBt"Š„Φd*d$‘’@;Uš’ͺΆ!B«2:1"TŠh“Ρ’¦(M΄M"1[ήzΰΞΎσ‡_ό˟ώκ/ΓΧη^9vθΐξ­+ ]»ϊΥgΏϋνΉΧ^ϋtΫw·άΆϋψΡ“'πώΛΏϊώb{<Άweξϊ—οΎφ‹_όϊόό-Gο>{|…iHŠˆ( (m«½|αΓןωΑΆ?tϋξειβο^ϋՏ_zϋΪΞwŸΊgߜHI26μ9|θτ]wΎρτΛωgΎφ3‡w―Œ«_ž?wξ½/―,έώΤχ90K+Ksσ7>{ύ7ύ›l]½>wΫ}'·žώΦύGχτ»?ϋΙΦ§―O½eΙΥΟή{σ₯Χ>ΩzΧγί}lΧξ=‡OμyώνWψ—?ΫώΨ±›¦‹Ÿ½ύ⯞{ύƒλ₯ZI‰TTΜoΨΈηξοόrοΕ[>r`σΒ 1K’dZ[»yσΪͺΞmΨΊ²8»ϊεΉ_=ύ›w?ϋμϊΑ=”e~yeΟ=?rθίόςϋ³xωώ»nΫ°φε§όφί­ϊ£οΩ:(­D"ˆΝϋo?|μΰŽž{ζύσm—ξ=²cγόtιΒ/=χσί{Ž}λ•_ώό…/6ν>}Οƒfn~ψ“σΧ/]Z<ωψΓχέy|ηB"0iΫiνΖΝkWά\λtιςΥi}νΪ•‹_~ώι§YX\ΪΈyγΒΘΚ‰ΗΎqχ?|ηΕU}vΧΑΝ‹7>ϋνKOπ­΅νχœ½ϋδα]ΦΎώψάΛυ―~ϊΡΚ™ού“³·­lέuη©»ήοό3/ύνψ·ΣcwίΆuαϊ§Ώ}ιηΟΎϊυΏωΘ±­σ‹wή{~ωΪs?]«gNξίΎ©Χ.~φφσ?|ώƒιΐ™nί»ΤΆ:’J#!«_|ρώ ϋύ=ΣΙ}›sω‹χ~υ³gί8ΏΈθέw[6­_θ'φΧ/]Z<ωΤΩϋN˜+@k΄Z@Z…VͺΣ΄~νßώ›ΏzωκΖ;Ÿ:{ί©γ·Μ'΄D+™ƒ ’”€i1‚BRΥV#Β„σ++;n?sxε―ήϋΟ>ΏπυW—&[fΠ’ I$ m H€UΪ¦T32fƒκϊϊz[ΚH΄mI[Π ͺσ»=πğl:φε₯/Ώ:ςžϋx}Γα{9upηΚ"Σ₯Ο?νo~ςΖE[N>ςπ‘½[7Μg,l;xΰΘ¦Θ€) ΐ”(€IΣFH‘ ))iυς»?ύρŸ}ι½/nn=qΗ™GžΈΆ­s--„"Τϊ₯Oί}σΥίϊduqΆνψΩoœά»ui>TŠ©΄•F E€@EΫc€Ρ& ΥRIθτΥΫ/>σ³_ΌπΞοnnΎεψ™?ψξΓΗ·-Μ†VTΕΕε½{wδ₯ ]»πΙ§__Ί2eqΦ¦&‘ικ嫟}rώ†²²gߖ即՛«7o¬›άΈψιΛ?ωV?{χεΟή}™Ω–ύ7ξύηί8hy CŠNSΑlΜΖ΄ ₯Μ”…ˆ„Θ”6P…„¨‘*A’€4‚I‘T’PI¨h“H!0‘E’…P„I”€D‘*F[P"ͺPΥκHPZIΡ MΠB’hEցJ(U"m€ͺ ” ͺ %QP" %"‘PT@ A(d„ΠΆI B@ eH΄JΛ$3IT˜”$”T΄H1‘©Bh€Π–„Nν@ ΠM΄€Ά‘(J!!”­(Z"RθΔΠH‚J‰v‚„¨V„j%‚D+Edj%ΠJJD*4R¨I’%RA+ŠBT‘PͺH–„$ͺ I4IhL50‘jmABKR-J(E IhͺΪ6I(@€Vͺ „DA‰€V# P- ‘ `P€D+‘„@[ !Z%ZZ&†Πm“ hA€QRTh !L5C%σwξ=ύψ­Ν-ς₯7>xύ₯ΟΞ½Ί΄°0—1M«kΣΨtβτ郻Ά,oYΉγΜcΧφΉίψ»|φκΖ…ΩΪυ‹/Οo9ρ轏ή{rΗΜ΄.‘(PH€(ΠD+Κ°vυΚ§/?σƒΧ¦υk—ΎϊβΛ«[œ9ϋθC'φoδͺwίzϊΡoώZŸ}η7?½ςΡKΛK37o\»9ΫΆηδΒΊrϋργ‡ή{ξ£wŸύΑυχnή~dӝ§Άο<}φ©oίόΩ ―ψ܏Ύ|η₯σ£k—/_Ι-;277ΖςΆƒ'ούΖοΧמ›τΩΛ[·./̍Ήυ₯-ϋvά8OE !“‚ A;ζζ·Ό©ΣΒΒΜl΄ΠΉGϋν»ΟόζγΏkσόlmuZή²²uma@ͺ"!QsΛ[φέϋχώ›ο¬ύμΝ7^όρωw7oZλλ7;->>cDP€jZAXΨqψδΑεŸ?σΞ‹?ώο^Ω0›uυΪεKΧ²ηΨ£<φπ‘m/όΖsΟΌhοΒύwΩ1'm“ωκύW_|ζι_Όςα₯ωm[ΞΏςσσ/ 0ΏcΗα·yrΏΩφc~λ©›γ—oΌσOΎnyqnύΪΕ―Φv?pφ±Η8ΆgΫ‚«Ÿ~zξΩΏώώ3oή­>χ•gΞΏ4ΘΌ•{ΡΏxκŽ}[φœxπμ₯λ—φϊΉgώφΛ77Ξg}νϊΧW¦•=wί{φ±{nίΜzΏώθ•ŸΏ°a鎣w4(Q₯:‘›_~ψΞK?ϊ­KνΝσο_ωϊλήxφ‡Χ?|~ΗΓ·?π'Ž,vl9τΐ·Ώ}}ώ/τΪίύΝϋηη§ΏΏxyεξ'ύΦ}wξίΊdυς• η_ϋεί½Ύ}λγέ™ύ,οάΗΩo^½±φμΫ―<ύύΆΜΟΦn\Ύεƒ<όθ#χZ˜ν<ωΔ=ιΩ—>8χόΟΏ΅iΓBΧo^ϊχWn½λ}γžC·ŒjQBH¦ΥΛίyφGοvύζΥΛ_\ψ:ϋ>όθ·ξ=ΈufZ›.}τςožrγιc§NtEBPZ@ˆ$D_ϊπ₯ίόϊβζMw?5QH0D""2™:‘Jƒ D₯5¨’#σΛΛs΅7VWoN:+F@[Σ€ 5j6" U€ƒI!  
U­J*!1Ώ8Ώ΄ach―^ΎxcZ›‘¦"DT«‘ {>ΈχθύΣΝΟή<χ“ώφΉOΖφ»ΎυχΫGNνΩTW?xα…ςφOήΊ2·ομwώΑ“χάΆu)³Œ1₯5 !L%„€‰΄E³‚j€Q]Ώ±zρά³ίώ/ήψμ‹υm‡ξxΰ‘‡ΟœΪžvŠ™h‰ˆJJΥ/ί}ηΝΧ^zχ«,νΌεΤ·;ΆyσP)"dhI$¨i*Ν Ϊ ¦©Š4"IuυΛχŸΫ§ŸyεΝOΦ6ήzη]>ώԝΫζ“"„ΚςβήCΗ6»xΙΧΎΙ_~=mί>˜jLΝ(‘vύϊοΏόμ·ο_hš {o?°²iηΓwάσΘΕο_ŸJ ΄0υβϋ―πϋ+7Άξέ»wοŽM³ΩβΖέwά2fB%b0IQ‘ΣϊΪϊ…O/ή”εεεεΛ¨jB„ˆBC)AD$LSi“P!…h6 ‰PU–jT‘@’6-dΤ$ΆI£„¦ Eu*š’miA‚€-D™’1S’I΅­(ΪFV­ ­VRR BQ !!EΠHKD :T’€ Dю¨$„Φ€ebDD%I)Ii h ‘¦CMFΆ ΄* ͺm#ˆθ€DIšv’€’QI4BˆΠΆ₯@Ij€ E!$  θ€U$΄D†@55† TD‰΄T’¦vDD‘Ά¨’T„Υ4Q*ŒhΫΆ „J(mGB…$&’%*M΅mA•©Z#5B"‘mFU€E€i”Pm[΄I$M‘-ͺŠ$Ϊ0iΪ$%D’j5 EΫ„H΅m’TZtH#(₯1ΒdŠ„HIA0£•D D52…ŠFš$5jŠΆ%1DI’‚(AšŠD ‘‘#R%Υj›TB Υ&Ϊ)‚*AšΖ΄5TT‚@ˆ -P”©†6e@ΣVΡ’…jΜζ7ν9τθ?Έεΰ‘W_}σݏ>ΏpιϊΝΞζ7lήΊkαgξ9Ύceanfεΐ~wΫΞ]ΟΏόΖG_\ΊΉ6·i‰S'O>qβЎM™ŒaΛ‘{Ύzγΰ"t6·ΈγΘΓO|sγα]›CknyΫ-Gzβ[γΘΦ…ΕYV‘ [wξ?qΧ‘ Όσ»―ΦΆ9sίι{Μ<ΊΫB:…ω[ο;ϋΨΝύϋV6ΝW7ν:~ζ»+›wόκ…·Ο_ΈtmmlάΌΠΑc'ξ8ylοά ]:xΧ£―.½ϊϊω+λΛ[vνΫ΅¬#+οκ6οήϋκ›oŸβςΝ΅iΓφƒG8uχ]Ηφn]uˁ#ώύο-lξ­ί}yγκυ₯-·ΈλΆ»άυΡΉWΧφ-§£QF˜2ζΆδμόήC·¬, Θlqi& EfK›o=σψ7―ή½iq~ϋ‘S]οό‹/ΏΕΗη―,lέsδ³χZΩσqΖήm caΛώ{žόΖ₯ΆnXHD΅³Εω§žψσ[ž{αυχ>ωόΚκϊάΦ[ά~ττ™»ΆΉlΏντ^ή}ϋξe•BΗφΓgΊ9Ίmη‘S„hηΆμ:ϊΐ›wήςΒ‹oΌιWΧn¬Εmo;}δŽ;N?Ίk#[n=yxΧω/–FΓ€¨&iΦΧ6ν:|χ£·…HΫFZtΆae1"φέρΘ¬μΨχΛoφΣKW,lέϊΔ=ž9Ίϋ–•Lζ²ΈγΤ£Ož*AA 3‹³Œ΄cqΗΡ“oΪΈuί ―½{ώΛ―o¬/μΊνδm'οωԞ§w-nœ+Yή}μŽonΪrΛo~ύΚ{Ώ»xušνΌυΤΡSwέyςφ=[ζۊn9ϊΝ'—nΩχϊkoΌχρη—n¬Y\ή}κΨΩ;ο?s|ίφ ‹£šHm“ ,lΩΌνφ3wnό΅.¬·ήuμΔ=§ο8rlΟ’4c~ΗρGΉqyιψ­»Vf]ΨΌyιGΎΉmηΎ₯₯ωY’ŽΕΝ{œ~¨;wνž“(w:zχΝΉqxί†Yζ–Άxτ‘Υ«OξΫ΅2ͺ‘J…bRT4Q“THT«- TͺA ¬―_»πε•©•ΉΕ…………!’ΜΑΝλΧΧVΧ”f$:Ε΄ήišŠjΫM₯΄S3ˈυš € XΪΈ΄yΛφEοޘn|φρ7WΤr HT£ΣΤ©Ϊj‚0’’5hΖX[_ϋύ'Ÿ―Ε†έϋvmάΈyn~Φ΅›7―_ψτˌ±²oŽ6ΜΝ IDθ,i E!H  mud’PT¦v θΪΪεσός/ώόιχ>ΉdΣ‘ϋxθώ‡NnŸ΅U •ΥIλ_Όυς―Ÿ{οΪΒςΎ}gž<{xΓάbT@«Ι-@j JBΣ$JP4¨†` jΊωυΟώΰož}υ½/V—Όλ‘§Ώkϋ€$”Ps›Ά»χΨς+/_»ώΙο|pϋ§oΫΊm>€`ύς…O>|υnd– ‡ο8Άkλʎ[ΎωΤmχ=tιΪZ5†DΫ€ΒϊΝυΧώμψ³ηΞ}ΎνΘƒίύΓ?:{|iŒΉ ΫvΜζB Œ&QI˜R΄¦΅›—χϊΉΟήΈiyŽν;v$iUΠ†v$ͺP Q…†­TJD§‚ ‰ Φ… θ‘˜Zš”V‰ΆF( ’š©š ŠΙHU§IYŸ2’ˆA E‚΄D•(@F:΅SB«D BA‚‘ΆRAšΧˆT)A„Žh!ZL@@ν‘PΚT!@I5‚ͺ$νdŠ!U 1MI $©j%Q€D§NΙJ4Z4’"­(ΥDD4Ρ*‚hZ*AD B#mKV@$ %QU@#CK”ͺEΫf$T΄t’4 ’(£I£D‘vjG‚j+2bRΠ†v$š*Hh‹)Τ(‘tB+Rj’Dλ…ˆ *‰J*A'%Š$A*Ši$ iH„j©ˆtm2F‰QB•-mA(”€‘Ά)‰Π¦D‹Ee QhJR‘IΧΫY„R"­$Q‘QIQƒ‚m’DE$i΅I !‰€R•ΠΆ ‰˜¦$ ! 
m‰(@’“)I’%΄₯D‘ŒV*’$P‚)"$’€EC %I@‰Ά(ZˆΠ4 Q ©B#Ci I7Όχ‘ƒg%•Ά΄EΔΒ–m‡xςπOD&IP΄šŽωxςOεΣ4% K›nβϊΧOΞF§V°°νΰ±o‹ύν ι—‰daeΟmχύρίιι” ‰ΦdX\Ωψΰ?ϋ_BΫ’ωΕ-·έρδΑ;―$‰΄©†€ΩΦcΊλΔ#Oj"#2΅A+ ›vΎη»‡Ξό‘©@«ΉχμYΡ) €jΪ±΄αφ?ό_η΅ΙH•%ŠbΆΌu}δ_έ+ΊtθΜc‡Ξ<ڌˆLšή~’EΕζύχ“ύ~h !™'ψΓ %©δπ#πΆ‡;EB΅bωζŸωVD«CT‚VŠΉε•}§Ξξ;υH h΄$ΕΨ~δžΣw|υυβώ½{–˜jD°σΞΗήιΗ0ΡΞeΆή©t*Γ,iI—vοΏ{χ»Ÿ,4‘ mwίώΐ―ˆ6I₯ M4ΆΩ–[>ΈχΘƒΝŒ)IŠ6₯cnΟΩ?ύggˆ©iˆ$Λ{Oέϋ'§ξύ“‰Ρ¨„ I ]Ψqψδc‡O>ΦUI*-ζ6ξΩΌoΧςΒܘ¦ζκΥk_|ςΩ•‘»χοΩ°4?"`"S'³šHL…ΐ4m D2’9„©΄’–κP“’!Ίϊϋ ούκ?ώΩΣ^²ΎtδΜίzπώ·,€ΔlDΫvκ€ Ρ4LΧ~ϋΚΛ―Ώ~ξΒ΄΄oΛώϋŸ|pλόB§!!‘dhΫ¦tͺ6#IFΣ‘ΪEͺZEJ“`ͺ0˜VW―ΕψχΏ8χω…Υ₯ύχήΐ7½w[Z­$–6,νπ±»ώσ;ΏΉτΥ―ΏτΪΡΓGŸέΏ<ːV2¦imυΒΫΏ}γεgΟέΘάΖεΫΎΰβφ₯2·΄qΛΖΝ’Ά³ŒfjC΄#νκ|ΊΌa6†ΉΕM[Άοέ»w‰΄:M©šVWo­­™-,ΜΟΟ i€Ίzυϊ—―ΰG―~yω† §άwhίBΥhθz$"h¦‘If™‚ΒP¦i}ŒΩάΠReΥv#SURu?Axφ=^ΨχyΟλσω1ƒΐμy€mΦ†0’,!Ω‘μ4±][Yœδδτ²O.’‹œΣή€½h›€'‰›ΣΤIΫΔΕΪlΙFFFZ€‘ΨΡ 0 3ίΟ»ΟΣ ±™DΞΞv‰ΩpN±IΧ‚m8w:٘eν,KUΦLξφpΞ½7h6Μ9έ»4dκ°΄ Ε©f0kκli3Π½9ΣΜqZr†“ΦΦ*—ξ3²c3ά#7¦d3{•a3fιh9Ήl;:ηάΉέ0—`Yːμ”Ξΐ±ΗϋνΟόζgφΟyο_ωΐΫ²Μu9GN—±»σΠ]³:‡²νξq7+ΙZlΣΓ–Μr΅»N+jΆΉ»ΠQю™aA1›…‹ χz8*§œ»–qiΨ؜’@mŠšlElκ<ή[ΩLΣΩ6‚M…y¨Ϋ#›#f“ΗT‘m»7.ŒaΜ.χxΨXe­Ν\η΄bbάvφ˜m4VΣιq˜ΘέΆσπΤi³«θp·k§sp4­«ΞφhSNγξή;D³%hΑ^ώΖη>ρ›πΏϋΥ―ψξυΤχ}θGήξηήΜ<υΜ3o{ώςS=νΥΟκŸΣ?ϋφ˜Sμxε Ώρί£ώξοόΙkQU-χf+€Ξl­ήόŽwΌοGμ=φΊW>ωΟ~γμK/slξuNίωδ~νΣΏΕW rzhχL.w›r°mΫ΅ΩK/Ώόυ/~ωυ[Ήί~ι;ίόςΧOη{ήσž·?yxΊ;ξ\\b:œ;nu&€ξfά±έ6κΔρκ7žΤ―ώwνo|έwοΣίϋ‘_ωϊ‘ΏςΦs¦…Ω†κœγœΗ˜‡xαΏυ‰?xώs/=Όιέοϋ‘_ψ₯Ÿx9υχ €i1.PΩ6Λ­Ϋ=νΜλ/~ύOώηΓύwΎφ•ο<>yοΟμG>ςK|Kg·iΆ“;wΆ'ΟΎλ―ΚGžϋž·=ρΒ§~ύ7?ϊΡίg_`]λ[ŸϋψGΙϋωψ—_κΙ3οόω_ωεχΏιMo¨ΤβμΦ=c6»ΠΦ}΄»\'ι»φρίψ‡Ητ?ϋ?ύ7ΏφGίzmw»Ήlzύ₯Ÿέόώ/?Ÿξ·Ύλm?ω³?ώΓ?π7lΆ£S™Σ'0φ¨Ά»M­0{Ό‡6³΅m-@­Ϋμ’Λ#v:΅vQΉ™S,άλގ'ytηNιqc–;Vτl§κΨlεtBgD–χͺN5.†rzˆ¦mξ6εΠfΫέ½ΉΗΥ9η°νz̍{έrU©:wΫΝYg’Ω£MΠΆΝΦξ†μΆ%N:v:³έikΆ³΅ΐrN=<Ό²±,kܜͺ±Mέ{ο½ΐ,Σb\]Ν¦#–ΥcfΣmξξl6wξ½ημ6ΝfδqΨ―«aΊ‹M4pœtIw :«Ξnέ³uu‡qοE 6[έG»ΛU’eQv6&Λ¦%ΥΈq–»{νξθ”±ν:QMλΨ#0¨evwgΘlvΆ--ζ‘5›±u¦žcG±ΩΜp:Ξff»eη!!*΅]νtBΆ3mηΐ&Jγq\”*ΪmΈΫ”ƒΩl»Ω±¨ͺs Ξ)lηΖ½Λ₯΄ΣSΫv‘Φ!<Ίc0m¬έ Ω2‹‡tΆΣΑξΨΙνΞΞΦΆY9§‡‡‡{Œ8·–5.μ'Ϊ¦ξv·mrIΐ4†Λ£ΖH±¬nΖl'dΆΝ½ΜΦ㽝­­iΆ“Χ‡Ήwc2ηήΨ³'TlΫ€Ξͺc΅Ά.wwΐ…»ΊΧs€nmΥ¦%]ηάέkW+ΰ.jWŽΠ΄cφhsΞ’έm†3vνl[ ΡbŽiΝ¦‡)cΓ©Zχφΐ²€Δa―ΏŽ’΅λΪ8g3‹–{kλ(§κlW«NΒ9γlm'ΨL§ŽΌΞΖJuΤξM‰°m»›eY$υπPPΩvΧλ‡{=j2NgΪΐYgŠkƒ˜ξF{ζΉ_ώOώφ_Πίύ΄]uG˜ΩNέsgYcΩ&ηtΞY]β„έ¬fZtO=2ΐέξέ©kAΐ΄Έ<2Κv‰eYέΨξCr΅νξq\[χŽΥΦ4Ϋɝ™Ν½¦qξΝ†‰'UEζn9₯:΄e]ξΫf wugΧξ˜s€Vƒΐ잢Z"tΉΗ9wξξΨͺ#[d•(£iΑͺΆG›rΫ½w8pρT€Λ6“κά0SM;9ά—Ύρ§ŸωΤΏxγγ›Ψlλρρ»/½π₯?ϋόύαgπσφΥ—wφ†χώΒίώΕύώwΏ±ήπζ'ίΣΏπώελŸωΕ/όζGŸ―|ηΛ?ϋ3?ςžΏττkίώκη>υ;Ώυ±O>—^νΩ7x Ά‡»΄¦ ΖζήzάR`ΦΟΎσΉόΜ_«Ώώίώή·Ώψ›Ν?μkίψ7Ϊ‡>πΞΏt^ωΦWΎπϋυόO~χω^yυ©§=2›»ν°maMΊχ1tάoΏςWΎόΒƒ‡w<χžοyζ™Sλε_}αλ_}υισΜχΎοΉ§ίπμqΜL³3­;'άΧ^}νεW^}]γ‘Χ_όΞΛ―=^ξγk/ΏςΒΧ_ψ‹§ΨΓΣΟ<στ“'Fn§XμΫΟβΏϊϊGϊ[―{xκ½λού[?ρ#ο}ΣΓ¦jγ^§,§‡τΪσςcŸώόŸ~συ§ίύ½οΡ_ό™w>΅ξ₯lw“ͺ9I UχήW^ωΦ+―!Ή/Όός+cΧw_zω/Ύυβ3O? O½αι'o|ϊΑ©NjΫΛ_ϊςgώ§ςόΜΧΎσϊγ“χδ—~αηϊ‡ίώΤC9λήϋμ°Β9O{ӏύθ—όϊ«ϋά ϊ/ηαkτٟϊΠOώΠ;ίϊΤ}ρ«όΩO~ςSŸωΒΏρϊSoyξύΏψŸό‡?σφ·<σΐT‘νΦιρΜ§Έs7²¬Ξ­³υψ΅Ο~μ·λ£τwΏΦοα'ε?ώΐώψΏϋήρ=Oο΅o}ν+ΟΑg?ύι?όΣ―Ώτϊž<χKφ/Μ‡πO;Ε²ˆœp[ξv•3΄κΘ 9M%meQΉ[{€’Ω&Νε°!h]ˆ³ 6»[YΗf::m‡ΈΆΩ†S­ 2Ϊ΅νœ[Ή7jΨμ@-ΓmJηͺ-l£΄q†άk«ΤJΉ &ι”m«κ Ζ<4VΤμœYF²ξ̎i›Α΄Šd₯3{ ΫΐέΖΡ9»l(±bNGΥΖ–m›³m­N#elœrΈ€¨!ηPΫ€Y'ξϊΕ—@έWΏϊΕηΏςyόζσόΡΛώι76πύς/άίψΠso¨Ιhjρ™OΞ――τΎφΚΓΓίϋ7ώ½ν‡π=o{ΓΓ Œ–γLsχŸϊυύαŸΩ·^γϋ>π?ραŸ|ΫCi“I f˜ΩΘzυ›Ÿγφ?όΧΏύ {ε/Ύόg_z‰ϋνWδWoΕgž=…‡w~ΰΓ?χoώύόΰSμQNJ½φ_ώ㏯γ―ώλ―ΎςΪ}Γs?οόό‡ό‡ίώ†Γ6«Ϊ.­€ΞYίσΞώ½_ωwώΏΏρ;πε?£υβΧώδ³ΏσΜΞΎϋς‹ρΝΎύ}κν?τc?·Α―όμϋίόΤS›MΒ²)Qe` ²έuήτžώρϋΙ?ω“ίϊύΟ}ω /~γk_ύβgίτ=Ο<}vΏϋΚKί~ρΕΏύͺgίώW~ρίω{η—>τώούž‡Blc1aΫ.M5! 
[mδ0 ’₯6l[И٠j›­ͺ³-Άf₯"w‚FCk[ΆM™%N…kB!m…m΄¦™J0Ω&<Ψe—“$Ζ6’³eK†efΥ­c †„ŠΥεXμΤfΣΫ6UΫPm ¬³Ι$A,ι؍•Ψh–΅M6“˜1ŠΑlIq 3%3ΨŒp·4i†ΫcΩU°νΜ¦b0˜ΨΦζΘ¦“`‹‘@@V #k΄ €mK“±-†Ω6Js «Δ6Z‹­el€ΫξΦT³QΘΤGd³LΩ° 7gΜfYZΨvΊulΐnκviΨbV!1ێ)³QΩ¨˜5Κ¦ΚΒ)Ά‘,άΞƒΒΨ–¨Γœ³k%…Γξη*[²wS* š5IŒ…3XƒΝŠŠΩhͺΥΖV*K³PθnY…™γΨΨf¨Ν@4cm`˜φ 3IζRu·X T΅έ"Ρ±šk Ν`f0ζZjs 2tF°’ifΛ4Œ”1«b]ΨΫ6΅8d«Δ6šΕ–IΤΜ@ΖΆko|Ϋ»ήψφμšPΓf‰2·"1ΩΦpΣΔ#%.NΑ`7uΛ& ["Lfq46*…Œ[Gλ„mΩ΅N$06κ0΅0ΓIr° U«ŒPŠmJ(3K“ln͞šlF[vοV P™»žzκk2ZE€Ί°fΰΈs˜ΜLj³aΆe’bΆ1ΝΕ)άUŒͺmUmΚμκ˜Ψ†@ Φ0d»$¨ Σ& HaJΖ`νΐLΑ(!،ΓH³m[΅ηΩ·ΌϋΩ·H±A™­9G—¨aZhΆMZuΗΗ°‚ΚΆ[ͺΆ5ЧξŠ8`vοŒ%2^ι›_yι›_§ž<σμ›ήϊξϊ‘ΏόΎό+?ϊc?υ3υήρμ“§Ξ¦θ<<ύ–η~φοώ{ίzϊ]ς“Ÿώܟε+Ÿϋƒ??ΟΌω/½σ½?ψσσ§ΪΟ~ψm_ψέςՏ޷ΑΤŒrށ`άΞ {ζoΐOύ­χπŽίώWΏΩηΏϊ₯?όσηΟSΟΎεmίϋΎ|πο|ψη~ζ§ίυΗηkφΉ?z G’Œ Œ¦L{ύ;ίωΦWΏώ‡7Ύω>ψƒo}ςΖγ‘ΗΩ“'η]Ο=χΜyΓa0²&&Γλ/}ύ‹Ÿϋ½ίώ­Οxω…ΏψΒούσ/ΌοΝ?ψ“?ρΊž°ΩΦζ΅―ύΡΗΩ?ύ͏ώ—_{κ-ίχώŸϋϋη#xξΝOž$ɝ6ΆVΡ@{}―}ωΏώ±Ο}ι…WίψΎϋρύμ‡ώ·>eΆJr“Β‚πϊK/~ύσνώ%ί}ύΕηοcΟήϋς[ί‘ΧΧkdzν…?ύΜ'~ύΧ~ν“_y₯gίϊCΏπ·ώζΟ}πϋίρ¦§ΫF…8ΫDl«₯7ΏGώΪΏ}ήψΞη>ρΙOΡσ_όςΧΏόό+―<>τ‘ώΥοΛΣUk3ŒΖγœ)a5'ƒ„ΦlηΩwόΐ‡?ςwίτ–χ~κ3Ÿύ£?όΒΏςΝo~υ›/ΏϊxΞΣo|Σ[ήώ—β'ίχ?ϊΑŸό«ώΰχΏλMoxr*#3iR›Mˆmn₯Œ£±]¨Ά]·•f¬–ΥΝšΖC3Ά2 Σ h6³M΄fS4ΛI3Da³Αδ”…B:…&χΚΦΊ†RΗαn© Ω¦Œ%S³P c˜ΐ g &3“ 3b³’VΖ(3`1,L΄&Κγ²›tΑYQζή[Hc-˜(΄•™ FΒ6 [›kΐfv2lcQ •‚c«€ m2a[γΨ0© ZΑTk1ƒqǜ£4V5δΞ‘Πڈjj:»4–jQPΜ`)` £:³;ǎΫ0« νΚ+ΫυΨJΒj­j±9γ Άf§¨Ωt©΄Ή“’ν’Bδ°‘΄03h§`$΅r/¬ΩΞ¬²*±aΘ&Sۊ3†)Ε°d†Φ4€mΦAΜ’ν’"%L†f5Šl‹TξΪ–Aγ¨)³»Y4°!”6#°;‘»Ψ0Ήš,† ΥVi–4ΫPYs‘Ϊ΄ Ϋ\l3MΛ8“μΜwp¬SXBˆΨΈ5C©3‡en"LΘJA%Μ`©€`…βN‘mfV—§1Μd”ΝάY;2C[5‰s§”mΜΦ8(›Ν΅d±Ή“Ts-Εp6…Ά-Z  hΆm₯Γl 9Θ°Α¦)S6 ˜M#a°Φ4 sζ")ΐ 0k‡1PX€κΞ&ckIM™mۊ  dΣ e–h„Γ°FfE`[RΆ’’F0†?Aπγσ{ΦwυΌΟ—h;Ϊ±Ά”Ÿ…ƒmDη4Ί,³l$ώύšhΠd‰™qη2„HŸϋν9*KΫ&˜UcζqΩf2š–Ρ$\¦Ζ΄-ξ„B4[{S-VшΩhA0“g>©b˜Y’˜° Υ½‰80{oΖ’‹Ν$}C Ά7)0Lϊόχ~λ?Wνξ?ύΐΖέ/}ϋWΏσχ~πλΰgΏυ;?£ίύΡ―|6Ά°Ο7ύ½ίηέ/ϊoώόώ—ϋοώοτ·_wΏςύόύίώγϊηπγο|χΏςΧώ·zπ‹όώo²Οoϋ₯ώφΟ³ΏψΛ_ϋ›o}χΗςƒϋφ ψΥύθώ—κίόμο>Ÿίόέο~χΫ€_ϊΥοόδΟβΏΡoόΞπ?ύ―ξψύ½Ο7Ώϊƒ_ιοόα?ϊ§ό“oϋίϊΗμ―χσύτο{Ω•ΩV`ΤΎυύ_ϋν?‹ΏόΣούαρ{ίύφ·"}χΗ?ω“ϊίόνΏ»ίό“λσK`°Yͺ6Γ>ίϋρΟτ_όεΧ32K0ν‡ς‡?ύώ70*iϋϊΕη‡ΰημ_ώΦ7ίϊώO~ŸΛ?ύΡ·Ύu€)4ΘΆχυ͏ώτŸύ‹ίϋ³οόξ“?Η?ϋ1 dli1ί|η‡?ώΣΏψΛ_ό?ΐh– σωαΟώΡοόθ€c`ο}σ+?ψό?ω'Ÿ_ωαώ7ρg?ώο}c°­ ,d°M±oύπwμΏϊΡO~οα_ύΥΏύίώύυζoΎ>Ώό«ίϋώ~ϊ³ίϋωΟη§?ύ΅om`6©m5hΉ‘ύΩ?—Ώό³ψίϋƒŸόΰ±qΏό£ŸώΓώΖoύџύΩό?ώΥΏύίύψύών»Ο·Ύϋ½οθοζΟ~χώπώώ―^ €ζ’Ϊ0i¦Y@šgցaB6 °43’3Β "H²)6E e3³28l4 # `ΑF3„Δ΅αΥ‡ m7-a0F$ΖCˆ±ΝBlF`ƒh2dΫd«εfˆYECΒď@`±Ψ0΅™Ί7e' s…Άχτ‘ Γ`ΓBa–m0Σ6Rš›¦l[‘ mX<.bΪj›4Ω&lV S‹ΝΓ–VΩ† ³fΆ&“-ˆFΫδ:Μ &Ϋ¬BΰA*l«m)0 x–Ϊ˜Κ¦Ά˜¨4!ή¬`lœΦb³joeΓRΪ&¨6d&]6V—y Ψ ‰PΨZ6‘mH3Δ¨q hh1@F롞1£ΆŠ،„ΜV•4»°i_Δ6Άi§mi%)ΫΘX’mΕ e+Δ’65k0£lKiΆM¦C˜1³*fŒ0ΔB ΫΚ6,†q DΌΡ0ΐΔ‚ΐ (Ψ”!`0Ά‘a ‰mΚ6`D Œΐ4›Yΐ’b#μq ΜԌ`Φ(€­ΌvwΫ†™₯Ί·5² `  ›ΝYΕmΩ€X­··fBTΖ]¬e6³TmSz#6ΜτζAko}š y{°Uh° 0(΅13–[{•bͺκ­4ΨuιλΩv„£ Ω.c!k7ow=‹H‚mPρZ"Œ©q>σφ&½Sl'{Ζ¨jΜe΅6H³ξ½₯bΫ ¨W°m[-[Mχ΅έ–*Qα-7ˆΩβ²lӊŠ½TafTΫj–˜lzφQΗΦ‚gν½»h`šΩΓ07XΓΆJΫξΜGΩΕ{―…jcΆfe‹™MefE*―3T±΅'5R3±φζΝ!#… f£!TΐΦζqUΫjΖ6Ω kΥ6fyz†ΟΠΪ[Q@²m 6γ,6FσtΉΈ666/Ÿ•±χ±ηͺš·…Ν]υy_ή–E)2!CYο歐ΑfVU@XZΪζšR€ή{[nq@)΅χX2ΆFλvŒeXšΥVκ*l ΅ξΡΆ%V[=yξσ6ά¨J%οΩΠΦΦΫzΒ¦*•lΫvέ65£¨=υ lΤ³Ά5Υΐ‚5Ϊck>,#ΨT΅·NHRyOS…m[3‰aΫ[ΡD’±DC£ΨژbΥ`΄78[¬£ ΨΜ[Us…2fΣ‘Μ2Ϋ*#»W·adzΫΈIkoE&ŽGΪf΅­³5lΙzsΝΣQ)1Ω³©ή³ωζγ™₯mλήkf‹θΣ&Κμu­΅‘©yηˆΞήή{έ§‚ KΚή΄… δΎή—΅δΘφXJφΪ¨k1S^fvLυfoŸΦΰŸ{Ά7jcαγ)β ―cή«j{Τ30Νzͺ·αΆ·›ΧVOΦΌωμkγ>rΩ{“z†Μ3οέe¦r3ΫΦΎvgj0²Ή‹ nΐϋzŸn}΅ζVmχΆ―νΉΪ6{ϋšƒΨh=‰NΞΰ}ε„0Ί=―³V5[τή{oΆξŒ=³wέΚjdο}M-•Ο'#ΪΫ—]b>΅Ό΄’™ΝΘΫ°v yΫ½GFHΫ¬Χj(υuΧ6[­[Ξx[ƒΚφΊpΫ@oͺ›©β†χT.#k{o‹΄a‹ν+d„ΝΦΥlF©³=W³yo·ζ™ Ϋ3΄dΆ½[nΆΣcΊ(4ξΕ³Β©[ΕΌΊGWΆmΣβ…³Bδ}xέ°‡ͺΪήΤ3ΐ½^_c1ξνi΄gΣ{}Cνeξ#εή›Κ4Γ½χxw }x[ۊ’Ά΅Ά©jXY+_ΏΨ…Εάάι³νk;9ΤbΫfζ΅Ο“˜”}ξΑ^KuΫݞב΅Oή‹{{ΫφΦ©OnΝΆW]7Χγμ½7’₯Ο7y 
›7Ό«ωΤ²4Τ4ΟΘ@;ΚΧv›RΩΆ^+Ω—ϊJ§k›·g9γmΆ[kO—m₯Ψτ¦C…M6«ΛΪ½=[Eio»Ω)3+Ά.k0l―]Ϋ$ct/ςž6aΫΆs²mDΫZ;5Ϋv“ͺμσN©κ„Ή{ρ¬TνZŸ˜WMRήΆi±Lv¦#οΓτΒσVΥml4ξυz%t¬ΌιξΆ±αΩτυϊ†³±­}ΝΗ]±PΟpο=ιJkŠ7φͺUάmkm›ͺagε}½(σΪηu»mvεκkˆΝl“ΜΌv“nΊΆΧδΊmn3±Φ>y/ΪΆm{p—ΫΖfΫβΊu=ŠmΫΤθ>)ΑΪφΌOΝΥΨ5švΝƒY6Πξtήf³‘Qe³^Φ2<.΅ΫTtολέι–6m›έkν‘Œv•θ΅fuxι&›εAk³M"··έl³‘Όω+ΉXfσΪuΊΩγξEΆg2±·wŽfˆΆ΅vbΦ³mΥ΅έKγQu%S―lֺ쬋Ωp·Ήz{›†±β<YLζ}Uu6Œžωn­‡‹e€bΆζέθkfΫξύ’ϋθ*|φ&υ mϋΪ:•yiΕ†—Φ5Χ¦1p‡ΝYj{σήuλνσκtμmΫ>εκ aήφζ$³Φ1”. ›-ΧmkŒΪ¬σΪ'Μμ½Ω»Ο§f6σήη>KΛΘ{οΝ5ΊΛ1’χφeŸpYε¦ΥΜ†± kΚ‹χΪȐ‹™υZ7<Κ5vŸ{_/:HΖΧ{ŸΧΪ ²½ξ²ˆ†­jV‡±ΤΖΧσωά^XγYDΪ6²gƒΌ9μ‘«5f³V%3Χν‹k%6O{[*6¨Ά΅–ŒyΫϊͺ.»—McΊ+™’‡YΗ>Ν…6κήΉzΆ7jcαγ)²cΌŽyοsŸoΔ i–νυ>w³ΩF]φ5Ι¦4–mmΊ4ΜΚi²½}Ήn7Δ-oΓ•χΕU[ͺΝΆy•»ΫήΜN[³’©›5Ϋ$©m‘ή21žΧgŸ>ψ¬˜€Š΅m69­ °ήtš=Vή,—Sͺχhπ ΨΫΤ=‹Ύ6œTš΄m½7]#…‘εΆwwoΆ!k'X¬Φ¬}5 jήΩ ­fmtΧΆlj̞kk\!Mg˜Ο΅jΟ›ξumΕx»pΛΒhΛΝVΖ›ΈΤήγΚ’νs‡MΪ°.oξV¦±ν 9m4EΆΪ%†Ο–q1o³’UΦΆΆ•“ΆΩ2¨ΖΩΤ,3«+ bΝjϋjΜΫ¨;ϋšq°•Φ6m€bo―ΆGγΨ{_»ΞmXισ6vΨ³Έ-W_3μ±>ŸφΉνYFΫu³4ΆUΥΆˆνX¨χžΫέYfdo/I’Ω6”χ$­„MϊzξΔ„­Ό V·½54›.οmΤΚάΫΖΕRE„΅ή›j€“–ΫVαmΆ³°τn­³ρ¬—zήΩ$½Ÿ·Υ]Ϋ²Ι]cήSΣ¨Lg˜Oq|}ιξZ™μkwh‰€»§Ϋ 3·5—ΩΦΧ–Ϋ]i3ΕΫξΪ–§°9±½‘RΫΨ•l`φ>“ΗYmσ^YefήƒOΉν^¨ΖΩhΦlS&IbX³lK­Μ{#•7γdv4Ϋdΰή{ 4Γ{_’Μ•ΝΌ,k8ΌχΞɌνΝλσK'o0Πv5F³mΉz‚6mέ}mzΧ‘΄jo/ %όΑaA!jNωGμί6όφHŒΕδʞ΄‘h6a+cΘS«Άxbφt1t›žνι$ ­Χ)œ³Ρ’»ί›·Ξ`’`iΝ4XσͺžΕX™vηmUΉ±X_ΟΤ«ͺϋ²q€tnSx£:Eݝν·CΦ©ΫΠυbΌuΆ±Άί£[t68χφ*“˜nΨ†%…η}}²IΜΆ™=ΚΧ6οuκ­ήj‹™¬Ϊ80lΪ[ΐp%ρ6νΩΆ₯4{“ΪΦvΪkmF—ΆaKšGΓ{kΊΫΝ”o3‡ύκ“m–d³mϋυ}έΩΆ6‘­lfΫRΜPx»Ϋ\ΫΦ+_‡Ώ]½%©ΜΫ›ŠΩJΪέycmF§ΩO± €#¦F΄qλ1ŠmΫΤύt³7%‘άY4ΛΌαnv2P°ωΎοχ³·"Λμ‹Ε΄²Χ,Μ²΅ ½»Ά•–­λΛμ‡][Λw— ζοnŠy³ιξ»ιξΞφΦnΧm  YέΌ•m@ινχ¨Θ¨ƒΝiSٌήιιΐ6›ͺΤφκ Ζl븍ΩΧΜΦtXυVο1εJΫlyu₯mj[{«6Δ i–νυώξf³:φ›4ύ½=Λ0WM±ŒXEΣ–šQΧΆm¦Δ& ™J¦mۈt3K+_’ML5 ¬²φ―J™ša›'ΫΫοWηŠΝVl«c4Λ6U4BQh4σ&+@!>κβ­·ΨJΕӊb+°1T@ΧΫXF›ΥΫ1€Z1Γ6;ύ˜m#«Ϊ,Œ‘m[°WΡ‚jcπΨΊ©c”"oϊ YЦΜcφ›‘*¦(k0LSΝ6`Κ6υ&‰λψ½gΈmσ¬vΜ0…θ₯i`α2Ζt–±‰4f4ΙI&*XSιΩ –[ΕG˜ΦƒŠ†5-¦ŠbΖ&—½Β€Š’’mΕ­YZωŠ{Δ2Μ*»½ρT‚ΜΤΜΘfΪπtΪ €1j{€$4¨p†Ί΅ν- PˆPX–Σ^o6‘:ƒ&T{A„m† ¨† ›₯= qι–Γ@ΫΒΆ(iyήΦ­j,,³-°φtkQ γ©η+ ΒS­#oڐ₯KΣ,¬₯Ν‹θ2α{ΪΖ‚ž΄Μ`SFΫΊ!»{οMΖΜ~ϋ΅HNoLI—±j»Ψ–0Γ$'™8ζ€Νͺτ 3Λ­‹#¨ΥL₯6f’s•κΕζMΆR›ΜΫ«Εν85φ¬ £h{Ώ΄·«k93‘mΪκcΓoΫs«m D…‘ν)‚œ‰RΑ€zΣ6[$8//ŸΆή aQ­ΘΖnT[›EΨΨ¨d›Δ6³Yl­ͺν$F:Φ·­SΫZήή[G5°΄'°fΏϊ^C‘ 6OΝά[5‘ΝΫ2v*MšmkiΌΉ^ŒmUeŠ"kof‘¦΅°‡M™΅%«·Ν,ΛήΟ―`ΰͺk0+&]+²eS4^}’ θ€ΝJšf3ΛVιc#E₯&Ϊ’GͺJΆy΄˜Ζ E0φ6£€†₯Wώ²™Α(”Ω{¬–‘[1μεύv_hΜΆŠF6Ϋ”Š3±€(f Ωζmq*šͺŒ‘ΪΆ{ΐRΤΨZ₯-SSl†A1©Ά,oZ΄ί’%αq›6°eιρl“R΅Y2Ffφt΅Ω<™C&€7"Ώ'γ₯ΚDΌ­% –±m•:FΕY{`nΤl˜¬zΩBΉšmΫpΫ›€akf5΄Iƒ"›MI³WGEΪ,ˆflγζͺ£‘Ζ«hΪβfq%λ …‡*…"26ͺDj„²Ωf Sͺ‹Q΅ ‘-dc TƒΥDmφ†d»†•ΔΚl03κ̌aΫ…5HFJΔTΒΐYŒΑڌ’Šf@ H`ΥV1&•ΥΐlΨΆaLXX­l+f6c2+ΫΆEΒΆ&˜h PͺVΟΦ`hm9Uΐ‚3˜ΪΆ ±±ΐβf¬9l3–‘&–*{#Ζ`vœ²m›1ΆΜXΉ¬§)UΑ@‹ΘΨΆŠ’₯ ‚m&Γ4Υ¨Έ΄šmk€hή0ˆ’½ΩR[’ZB6fΜΆ© 06*±›Υ(ɘ ››5E`¬΅mˆ ƒΤΪ+SŠ=‘XV ΄a€m† ΒFhέHml³e LfΫ³Ui›4Ν ,S•5Θ@ ˜½6 %*J5,TΨ ˆΝΫFΨΨ.ͺΆsΨ6³ Y1퍔`, u4ΆΗ„y lq΅{š<ͺ’+y3ˆ±΄±DΒΆ!mؘ& ΊŠ¨ΊΨBΒ6’.ƒ‰.νm[‰Έ@²²ΩlΖL• (l`³Y U5یeP)› S=–6(Œ°LΨJ’²ί"…†Mcΐ Ϋ€ +0Œχ”Ϊ ˜Ωμχl‘Œ”f šŒTaΐVγ‹$UΥ°PΙΆοmSΒˆ`VέΦn±Ν³ ΠĊ‹ΆA‰™ f8ΒΫ›yΫƒY\ikBα‘‚CDΆ km‚ΜΆ…™iΦT%Pu/D¦°†Κ@Α £mΫ"e'„a2lC•™1P° U˜Ζ2ͺΨfPa &ͺ₯Η€ „« “lH[Ε€ŠVφ4 Μ6Μbl0 &VΕ=MY4lmL²ί³Ε)Li[¦‘ ©ˆ€ LΨ³1 –εTE ]6fcπήΆe€Ψ4X•΅aZgf–1hbU—ΪΆM”…13œͺΆΝ³νg™•+YO(lσΜ’em³‰Rlf©ΑΉ ΖΔQΨΨ`F–#b€ bj x\l  UV~Ϋ aΥV@b°…(6ΚhΫ†ͺνΩ ΐ€6e³χζΥ©‰!MΝ΄Π&ΚfDσX•mΨΆ„›M,hεŒ 3€ ec›€lN dΫψΘ„Œ1ΆaΘ”ΑDHCE›φ”bΐ$#Μ¦ΣŒ+σ6d`"6!«ŒΆ.±mΐdΰ²!emΟ@)6 ­ΐ(6* lC3°F–D @€Ε¬Ψ† Ζ(­]WΨ64EX €Ν&‘¦m m6΅UD00 Ζ ˆΕΨφΖdHdΣ\~•&KZŒΝXΫΒ•Ω ³l/ ­š£€&x%Ζμ„ CΥ8ΖhSS€YNΐf΄aa[&dΚKE0€ 6(˜b`!tgSΌ§ΔPΝήv΄ŒhƒŠ4k«ΔΜθ5Μ‚H₯ J±™΅€Α@%°ΖmS‘46dkj",X‘˜AΫϊ±?ψ/}τ£ΒsχΎυΖΧΎψΫΏφχ>σΧδύϊηΜ'?ρ‰οϋΠKc›£{ΦFθ€νιI§·_ςk_ύχ•gίϊεΧήςΑ°Υ69Η½ΥΪflw»Κ 
mΘ²½εςλΎι©+ηnήςτΣwέvλn·C[ˆ΄-‰B‚j€m€„ͺ" €ͺ" €vΦHEΡ$JΠ‘IΪ"QA[H‚ D‰P΄ TUh΄D΄• E› …†6 ZJ(Ϊ$€Ά"T΄­Ž  Eh-h΄EDTM"IΡ† E΄’D΅ΝΠJ€-DP•@5Eͺ$HΡJΪ&  ­(šΠ4RM‘"TR’ @[„† DD5JΪ Ϊ„&B΅Σ$€’šZ‚Γs‡Λjeuυ‘§ξNΖ²€zρΕώα–΄h΄…ˆ@[ˆ€ŠB’šFͺ$ͺI¨*’@+©*’@+©Ά’EΡ$ RU‘IΠ6QF΄…A+‘D‰‚TQTUh‚΄D΄• E›@im#J m’BΆH΄­Ž„ h‘@ͺ  -’T’ m¨€"”΄’‚M@DUEP@“@5Eͺ$ΪB’@‘*δΡGξ½rγwέqΛaTiEΠΠBƒ„Fš*P‚€ͺΆHhI’-H@ƒθ,ˆD sί BT $TΠF%JBι, Π €hEU‚’(’@@C%4TK$R(T 툢EΠ-!€-4(P‘DB” U’igͺQT’4hRͺI  T#PB)B@C E(D%©‚@ M’v’hˆ’ͺ* •DPAu@΄b2 @%…D[Išͺ(€­ A‘D%44#"³A FLRM A€"‘m!QΡR4ΥΆΘHU¨†&¨* D“‚T#‘tB[š$‚D Th(• -€!‘!Tˆh[‘(‘‘΄’‰9 Q%%‘΄( $‚(P‰‚$"m# εӟγο[ž}V ΰρΗϋΜ3σ=?^(U€Φˆ–REHˆ‚¨€F’HΡJ„ΦdI’ΆŠ’hˆ HΡ$ͺΠJ” Q Bv&A•¨ln»φΐ7ΏλO{εβ£Oίwρβ %”ΘΈϊΤ»ΎϋŽ w=xη%ιrxξΚCOΏγπόςΉΏόκ+7Ο¬Ξ]Έ|χ΅ϋξψuWGfG3.?τΤΫΎϋά­_Ώtο›οήΚ¬ΖΕλO>ύΞνα7lοxΓλn]-’΄΄•8|ΰ­ίϋGnΏχ+ηh1ΖΈxί7Ηwn>ύΉ—ϊΥ³¬/\ΉσΪ=^κΪκž\Kςl³ͺuΝ¦κY|F½m0ΫͺΗΆcIρΆΣ`•"χYEΫ’:ηΌlX-nΔΔΚ„l 6@©M› Ψ^m‹+ΨPoν Ψ@u›a6iΝT&τ² 4˜­ήλ{χ™‘Π»«0z΅]X‡”-Οwπή&GŒf6’&ΐ†Ύ9Ψf °*6C6₯ۊ€ΞΥ›Ω¬€ΡΆgG5i6 ΥΚͺ{s$™Uέf“Y<KͺlF,­Χn3ƒ$§¬`cͺΪ”™± λϋ΅³F™ΪΚ΄†<6 iO›Ju›)D3f “Mς°»^­΅Τ³ΈνzocBcσ2KΑn–„‡9Xς"w£d―Ξl©03ν±YΠ¨›‡l 6bJΓΨ^m˜\5ΆjiΫ€ΪlcRo·΄f”aBΕLΜV½Ύ·mΗ*Μ »Q ΝΌΪd΄YΚΖ άf½·ΙaΈS@Α–²C/ƒ™S€…ŠΝ²)m˜”«7³YI£mF5 Ν†‘’νΥ9¨™zΫΆΙȞΖB…ά4{ZΩΜ`’ llυjSΖΆΆAπ^Ψ‡m)S[ΰHkH؈‘€Ψ6π^·™B΄M30‚Ι&‰Xά]EΪF*Ύφ±©aB8‚E^ξζ’π°M³€ΔΆM=l{5ΫΥh7ym3ΔTŒlKŒ`[`’l“«ΖζοώnΏό²ΏόKτ‡θΟόύ»»0KlU6`‚]•G·«@d›UC3IΪh3λe6¨ΚΤ6 3 FCTw7/8@Α3Ε6O“fΫδek#Af$k1^lJ6%-Γ‘ΠζeδΝΘ*'ΚšΈΦUhjΩb˜*΄ΥDλ˜Ω⠡ɐ©•cσhV@ ₯ΩΆΆΫ.m“) fbA…ŽρdΈdΨZ€ΝΘdƒ₯±©ΤZƒ'nPh°œE˜iΝ –™²lLKU³ΑΤ 8Π€Ά13²zέέVUζ6y΅΅ %Ζm6‰Q/Υ6Ζ€jΫSΩ6μ>E΅ †ΑΔPu·F―`›΅m―Μ€΄Ψmx5Ά‰  ΨFYΛ깏RΩ°U5ΓT›W“!²υΝ‹[œ‘΅–BΆM&ΓζUh€±b«hZ›ΑΒ6•†mc*ˆΥD#bG- ”Ζ,ˆi§nΧEΑ›‘baΨ,˜YΐnςΐŒL°‘φΪ„ΐRkaLlF€ƒ±°ΜК₯μ1fAb«ΐ°κš6Š-ΚΖ°1½ll«ΗΊMͺ ™ιΕ–m»‘lλ}“ΆΝ€ˆe0¦² μDYΩL `€ΪmXm΅[•l8˜Izۏa…°M6ΚΨ(’‰™εyφY$ΝlF`sc=;I,0SΣc 2lٍT΄1Α–m©‡6šΦVlͺښνΝ° Άφ_χαoώύλγΟ~ύώδO6“΅„ bΪ© ³H·QM°ΑΨιaK#³€Ϋ[Ϊ0وΕ@`% Ζn ŒeφΜΠC”{vv…Υo~ώζώαώϊoώϊŸώο?€#€ΐ,€1ΰχΥορροώκW°}ώ¬ΗΩς6σ³~ΤmΜΆιύœ+£χ΄‘ڝι=/ŸΟ6=Ρΰ<[½eX5ν¦νi¨κΝm"»ž B(Ή5,UΆ™l―”mΫFV_m›ΕΛκΞΣ‘Φ9-m¬³sV` Τ” ν–ΆY΅9ΎmšΠ.'ο&c™uυΆ Kdflή7elΫ¬žUΞΝ’•Ξ*„m΅νμ)ΚΌ­ηΚθΆyοΙΜ£j«Ϋ’m¨τ|nU˜Φvϊz{Κn;½m_]΄*­{΄NΜ6Όη–S3EΌ·q›U*ξξvߟ?Ϋ¦”]Ϊn2‰ze·Σ3Ϋπšνγ}·+‰rSΧΉ σ^;Λλm·­2Χ^ SΑξy·ΝΟΊ:vKΊΫΤwMΒΌσωj#²m«όόϊ|v§ΘܧcŠ[Ϊk7&b΅Q5Ϋ–χά§gƒ€JnG ’²Νd‹JΉ»‘ΥW³mρ²Ίi ‰l΄Ξ4ΦΪ*el5!cB»%6«VmŽLdŸΗ•z›YΖήϊT› $3ΓτκΩj»Ν*{ΒέΦΣ•f«”΅]m›Ρ#Τν=WV3,έv{ο©Ω¦±ιΥVΆΛσrFaΆtm§'/±ν£·νiΡ6»zήΪ³ΦAvο±6ΆxσΎ›U―}>·υ}―œmςΪξiΫ6e…χžm·Ή΅·}ΣκYΉ©΅x6›οkgy…»«škΥ€UΆΟΧwΫω–|΄[m›^5Χkη{ξa ΪηΣ{ΎΟέ6£Δ›ΣΒ Zvi΅±y[ZmTΉMΖ{Άl1•l›QΓ^Ο6Ϊ*εξFΖ«t;|³ΪXΈ<²ΡŒ5­k«”±™υ8BSΖ„Ά™ΨVνΥf 6μž­¦xΫrμ­O…mayfδ½ΆΪn³Š‡Ϊη§e±©bo»Ϊ6Xz¨ΫΛ^+ΆtΫ–W-³ΖM―6-Ά°]3Ζ«ξnOk7x―φΥνN6ΊΤέηυ΄daήξΖ mL­ρ¦7Ϋ¬^―έݝΧχ}Ϋξ&―ma³­¬Π{™z>7jmΣΤvοeίάhm[Χk³yγ•ξ>UsM0υΆΟΆ7―>―ϋ,iΫFυζzνΌ‘K©}.ω>ζn›žh¦…±υή²ΕB»i{Zmΰ•Ή›ŒΛdƒPΙl5¬j0“ν•²mۈϊκ³Γ ¦ΝΣ'lΐΜΌum‰ΝY1ϋ«ΏΊ_~ρ· €ώτOϋυ―ϋ£?ςΪyd—ΡJΎ·εXΪ¬ήvaΙ Όo[mΫ•½ήέMK†fυX›·Νžj2YΉbKΫNΤ“³ΖFT[έφ°-τΰΆή{w·„ΩθyϋΚφΕ₯ΆC…­ή6›”-§Φh^Όα6«WmΫ>γύόωnw“b,νne…ͺr·y?έmΛΚfj»χ²Η`šs}Ϋl^Α,ίχξkΥ0υΆΛς6σ­OέΦΘl›ήΟΉέύ ΤγΆυjΏ5›¦›s{―Ϋ(βΩ §E¦ϋœEfΆE&Ν¦Ί6KΙ+λΣ1ŠI2g¬Wμan_ύΈΥS―ϋ\go‰IήΜΒ·ΖmΪβϋ}ΧΫ›Ο9α±Ά½¬¦6ζΤρΐ"βcΡԌ6Ϙ₯a₯8Λ65΅ε#ΣΎΉΩZ/άη­kD Ÿ<Υ΄[u£¨f°ηΫέΕ¦Όήζγή:· 6«έ‹^9ƒe/n{Λ`ΒΩ£ΊMZΫnχz½k‘ν¬ϊjξhfΫ*z_ (pŸM―G‰νN½Όά„zwλΥ~ŒAιιΓ|l‰·sƊyοσγ–—a–@Ϧκ₯Ϊέͺή›™φζlš2Ρψ"’}ΪXƒgŸJΆaςr03)ΝΙm±ν”7·ƒ@”Ύ«†%Υ[Ÿ6"•3ζλi{φc_έ¦WΝXγ΅››¬΅ty3XOΥgΣ-zO-λφ[ΒcΉiΥ΄9…‡ˆƒE[™Œ‘a“χεlh³Χ>2νe|υΒ΅,„y|pՊΠέͺ τJ {jΝίowο|ŒmΜΥξ…DfΝφε³9Ϊ ­Ξ"i›4ϋόψΌ^A'ξx{h›­m·½’RΝLŠϋlJ ±»©k½η>«ž›­‘υ³>nΦFυξφVΚfd™zσήέΒΘ΅MδπΩ{•mΖ7ΪξνΝΩdoV.(“μcGΝΰΩi`˜ΌŒ‘Εz:'·‰`Ÿ•7ۍ’μτVΩά“ͺ9΄₯Uf3Kο=mΟ>ήΘζυSΜm½%]ήΜ΄βΫϋ±ik*½ΥήgLvΈW^» G2‡ˆ³){[l†aΦb“cmλΛ΅Σ°ΪΛηcUΩetž™h†S—ξu·4PιϋϋΌ’RȀ؍ΤS€έM%Τ£χήn©–³+ΧW³Ήmζ½χΉ½Υž ς‘Vν0yά΅έϋvρY«ΗmΣkί·ϋ΄b6«y5α Κ$;FlG ζ1VmΓ΄akΦΣLΨπ&ϋ,ά6Κ’lj usOJϋΟeΏό§ύ€χοƒ?ϋ΅?ψƒwk}n―οΰX’»λcίU 
y3+^οlΖVήϋžφ½ύXSΔΪηJΕ§ŽΗ€ΐYφΆXaΨ¬ΕΆ8«mλ!ΧN¦ύ̏CdΛ΅Ά»F°ρ©·t΅Ϋ,m*UJέ.m{Ρ»ΩΦ:·Y\₯Κ9²μ«»;ΛF4XΌϊμžΖΆΩSο|^Ά|gvrn§’κi™یΚ#μ)e{ ο=»m½ΪΩjλAηfnουa?σ³΅mΖzΉΟsͺΌ΅Ϋ5 ¦L=f'ε4ucˆ|γϊ`˜Roζn•f ςjΫd’n―ΝƒΩΆS­QkδΖQdƒAΛτš3$Ϊ† †[#…ΡƐ¬=m ŒhΆνυB;K΄m&ή³uKΛ.ΆΗWΫo¦Α’Ϊ>€—²5γnx₯fή»}Ά*·Yy5ngΝzΆl†yUΞL%n[υΐ-Hb‡ϊnμ Έ^–mΝT[u=Έ;• CΎ³ήM&Ψf<φ`½Κ>{―ΝYͺΜΆ)XψlHΛv}Ώf›²eηυj·M5EξHΚlgλ›…%ΪΖέͺΧs³1eTΨ‘η4΅ΩyiέlΌχΦάΝΜΛΟ+s†a΅Ό-ΐ³m»ͺ) ΅Fnl€³1@Π2½f6ZjΓ*`[έDΓΥμΥΒFήlΫSΕ.%Ϊ†5ο1·F-¬{l±QmΗƒ&Ϋ^©ν ς²5c·‘ͺ΅ζϋ}·;’Ν6Ύ΅lΫΝ mXΫ{fΓ€Υ³m+[CγΥΫ} 8Μ‹άΦLΒx)Ÿ Τ°Ωλ碚ŒΫ–7‹l―χ΅Οfγm‡ΨΤ·ΤHυΩ½m3’ά²y*³[5E6υšΫ™ΠRΊMΫπzΈΆ]‹  ΔΗ”ΥdڍŒοΣΊ6μ½―Ά#ΪξΥιΫf“•Ψή”ym·­kod³Ν¦DΟ0@ΠB’s­Ρ6ΨV7‘`“3*&Φ(6‘a£Wρi%Βnk^βΦ(YλžMf–*ίΫΗiβΌνJ½™τή읱mυφΦy―Ω™υ΄έxUnΫFŒ1Α΄νm‹^l˜΄<Ζ±™„b6ͺŸΫ£ΫͺW?vo† u·οSnΤ°Ωσte˜˜FΔά­χή³[И½’m #6υΐΩ&5Rέ—rΆΑΛΦ6ιi}>χj½Θf£jmΫζ kρΌΫ΄ UήuΫ‚F¦Ύ|l"ΣΦΫmjΌη±λ0l―§ζlSΪΒPͺmcX”i{ml‡bzΝZΛΖέͺθ›Z@uΫ¨§Ή-[H°ΙoώίύςΛ~ωΕί=€ίώmΏώυΟ?ϋύΛymtΘΓΆ™—²u#YkΟhΓM―ηm· bΎs½ν€ێτήΪ;ΈΩυΦ"υάnλΥξΖK΅νΜbD@dk[›¨΄aZ{cκΝΨ v¨ίl·ΡχυΩΨ'Κ¬Ρ2 Φ»NΟaQ 2 `ΰΏΔ yΔA€‚8€' θύQ%f΄½«Ν¦ 6SΉyσZΫe2(dτ OφφΌν’fΫTB΅­Μl€ {1³ΤΪ{ƒ%ργyχΉa/¦ι²·΅*ΡήάΟ‹\aΫΌvχΉ/m°bi»Ϋͺ2ΗΑΒΠLMŒ­Ρ ŒO‰-­0 φ6]ˆ1³ΉΒTœm$Eν½.koU‘•J Άe”yUlHγq^ZK†­ k¦mZψU&V4c–²eTFLΆG[Η€*,MV6^Z˜‘ Š΅-Ξi[fΑ’μ5―LAΠΆ΅#‘Νidي°mœ33$6©ΑR‘Ν„†‘PdΆ-ΩF±¨ΑHiΩTmbΣΥ{‹ &ΙΔ‚J<{035}Ϊΐΰ)Ϊ–ΞΆ=a€«™Yΐ<5@Α6¬2³‘±Ν–Mc3hιJlP†€‡€XΜ³ΉΒ5X³Q©jo(6[šfKU†IEζUΙLkσrV6jf–ΖΪΝ΄Η-0Bφ^—4c–ΚΪT `„˜X%[‹“Η–†jc«°I²± kηΔΫΦΤΩΐ`5nS΅FΊŽmSlζ•i «Μ2 $°eK[ΰq³ξ(fVΑ¦Q]σ\m5 CUΚ†%zΟ…°a£FŒTΤΌ¦^;zd₯Y&k³›i“ΑΌxA v… ΜbbΆTPk[59ƒP΄ύpCλΆU,Ψ`Ήj`#Ξi^1mceSš1pΫ*6@†Υv6RΪ6R5f‘O6UdL‹™ΝJFͺ±)yoηΨP@‘Η°Œήfk§±νJφTΐ’qΖ¦½i«8ƒΥrͺΩ~6ΫHUΪ&ΐސ,mΘhnm h6©`Αfρ¦QηΨκ¦…ΕΨ¨c1›Α6ZB€„mKι=₯¬ˆmlTΨ&I5₯΄G4fQdΨώOίyίίώώwΏϋέ~ύΪoΡ?ώƒχŽ‚FΝΪt,C•­ˆ-›6g3Rΐ©ΞΜ¬ŒΪ{ΉΚή„!—QŒΝϊI›B°™ΨΝh“—­ˆΩ(g{–*Ν:ΛfeΐΤZ*I˜Q²-Ω6 £ͺfo%SیΑNε½]ΤΖTΨYPΑΛ¦ D}ژxŒ°©z63šΕΥ6m°bi»Ϋͺ2Ηπ…λΆΝFͺYCΨHbΫ£ Σ݌,  2Œ©0 ›™‰5…Αl―Ra!Ά U,Ά%d Νh–Uφ<>6Θ›d01Νdτ0νl͌Ρ$1ˆ†F‘Šellœν½΅±•ΪΨΜRΜ3fl+£²iΫ0YckΔ`ˆšν- I4³‰-$Ρ°-9›R©4f°˜½Š‚Ν4ΐB,’‘aΞ{АΤfΆX0Β 4 fm“‚m%–BH± ΧΝΆ!@Z3D6$6Ά7ην]76q™lΜΖ6άb^oOUΕΘHBΕΆiΓΣ™ 3ΐtl6"¬Mή$Œ”Y"6†'C#ΪDŒΕ¦€X°HΖ$Ϋb ΫπˆΕFa6Ϋe΄m˜¬²E#b‹ΜŒ% Ά[΅ejcΦRΪl4U°φζdl¬JjΆ ΡTF°*T£$ˆlPPXNΆΩ˜4¦ΆXjΣΖΦΑΆθ TΨ†€PΜ@bc{sφ¦Sˆ Ν`ΑΐΫΠ°jΛή«ήuΡZ/ (ͺmΑ`yΆ…f€­²Ω&e֛󦁀1L$™1L‘ΡƐ@el(f[γ’me·]#0ʌ5ΫP7Ν0M‡…U0°Φœ™Ω”X³xb6 FΐΡφ΄T£fƒ ³UΗj†ΩT6PB@’`-ΐ†A™Τ`Ž`$Œ)d‰ ΑΖ&5ƒ¦€cΖͺ΄½)Κl (Γφ&Γt=r„ ²!ŒΪf²…bΝVlΫκΤΜJ Υ6ƒfm,ͺˆi ΫRfFή1ΠΐXL›Ώύνώτ§ΎΏύόψύοί―_ϋν—J£eŒ K‰mλ=w˜YEdΫTb›c&0μρ)‚™ΨΤf²0,6Μ< ή„ΑVPY,‰Ά ͺ Ωfi˜½*Ɍ-h4BΤ’4T2"² αlΟ²%ΝͺΤ¬!‚‘06 `!Ψ6R†­tžUŒRlO]·m–ͺY3a# Ύͺ!1L‘xS¨m3‹m"ˆA˜Β6eΓ†iXyλΪΜ FDLm"ΠΆΝ4cŒYj`³) ΪΖdλΪ[SZ;mΓ°ΐΆΐ­–FSΫl•­b$ΖΪPΜ¬mJΫ¦l›a΄vš7C0“€ν™ Γ4 ·°80’–ΐΨ@φvAΥ% Μ6Š% -ΫKB€4²‘l•˜™¦FlΙ:ήΆD`±±–#Κΐ£₯1‘6Β$€6LFPlƒμ-ΫΔ†1…m+Ψ 6Xe Ff›HεΝ<1 „Y„±Π†i[1†ΚΈ62ΫΰΝiΫ›΅kΉΔ€6TΫΆ bfFΩ‚ 6‚m¬²c)ο !ΐ² ΐφf͐l΄ΫΕk±IKfΫz˜΅D12JΕ1F°eoŠ΅PI{[2 Ά9 ΘͺU#fšl„%ΪVbˆΆUXb Α`Œ K &μQk H#h˜RmΫ†fΩRb€2&λΌiμ‘`l X…@Kd›ͺ΄·Ά‘`Sΐ,±Ml[{$Ϋ£Δh6ρh ˜ΐfv- 0šΕ¬©Ά 0Γ 13£Q›ͺd@Όƒ Ϋ8Ά”MΨ ³Ά=•Βf–*F[CΦf Θ¬!ΓΜ6•­ˆ΅=$ˆ²Κ6(6@XΠrZ3° Ψ&`°–¬ΔžΡ™Q`˜²°1£ΰΨR!6΄LGMΘUΆ%mƒ‘Aʈ)c2›Ψ ‘²‘mm •"ΘΘ0Li•eΐ Α0K `AΫΨBΩ[U h΄‰Mύ_χύݟ τΟϋν·ύϋΏm›Ω€©fΫ0Ε„mΣ(‹4Om2F0KΩ°ΜN`ΐΐ–φφ¨’6ˆΚ 1P@fmOΞ ©Ϊ¨¦B6Μκ60Ψ0 diΝlJ¦d›0F΄DΔ»³g#@€#$°Y K°˜ΜTj›b  ήπυ,(•±aL{³… kŠξΩΖμJσΆ‰n‚ΐLˆ5Φ‚ΨfŠQΜFd`›L¦πΆΣ±hΕ0.ζqbήδϊΨS’bAΒΆΔvΫD,qξνmT\›­Ωδ ΝΛ[°ΩΤRrr-eΊ,Œ-Ι„‚A£Ε²l,ΫΫΟ|J3‹ƒ`“΅Μn³#^Ν»U1m:6l›ΞXZ2άυvlΓΆ™Κ,6°ŠxkΝiΆ1blTTmKζ5\afUΩhˆΝ,…m&©™Ψ@ σ~τqQΆg)Ε²±1m³%Rγ.†‘³'†9Œ‚A5ΣΤ&6”l\˜¦ΆΩd+SΥΟ{ιQ#FŒ޽:SP<\ ,Ρ¬ΣˆΪΗύΜ6εΌ5#†$4# Ν̐ lU’ΚXb+ΐ€ˆžm‘’°-1%ΖΞ–λ6ο­“-SΩDΡΆ¬eΦΥΪ^Z`«Hοι€AΌ½Ι£JΆ g33쩚μmΒb‹Mnή–ρF`zΓf³ 3S0BΓ0ZTŠ FήΣ­RΆ·UιXΜ0LΡΖ›&}zΊ€ νj³ ³9hC=Σ”Νtb Ν†Ή6Γ–­Π q5bc”Ψ[„Β 1ΥΤ=―Ε΄4΅σ™νm¦ΚyΪ°½2€e±3Τ6l»"©³Y’Κ@Ί.šMᑊ!ΑΆJΔk«½Ν•΅·εΘ²ULΫ€ ±™ UoŠΨ°ςήΫ(c€ŽέFmΫΜΆΤΒhήΦI`KΫΒΖhξΌ‡ d 7ΫΑΆV93TIƒ™Α˜0ZJξΘ0l°{΄ x[f…˜ΨΪS˜TS[±ΩέΜΆ0–%Bhh2 3%‘aΫ\f€aΛ›O4C‹–ΝγΌ©:{ A 
V&73CΛΠ_ϊωώΎΏόϋΓόρο_ώ΅Μ€…f–Α-3»JCr-E Ξ‘aΆd*Čj[±φΚX:½·ε”Ρ$AΓ–ΦXΝk*¦plΆ]Ά7ΑXkΧΩ*βΨdی“g§™‘ΒLۘ„Ω›—­V¨±MKΫf‘0kΙ ³f@3­bf…λΐLlΤΨf:o+”ΚΨ0&„½ΩBπ5ΠVΣ² gτ6φ53=ͺh‘$l“dή{ζ@ŞΧ*°aV76F€-MlνΌν6S{‘°L•χƒ%­I₯H{O)1±M½=T1£msm5•bΪΤ2ΖU0Βl–2‹yοu-8ξ{°ιTe+˜5zΫ§έ,IEΫ–4cΩF]Ν6έ}~Ά{±ysŸσσjΨΆΧ}˜%’l6ωΠz+―1j<ΪCjlKۘΩe#›t±Ÿχ2n#bZγd›ΒΖu­™’Άš½KeU?oΩm/ӛ՝½y΅Dc,65°mήTην™6—’«Ε[³―aŠ A"0«Μ{kΫ₯zΊγΝ imf[ ή”f1¦Ά­MΆUEΫ²γεΚ{3Ρ΍@q΅χH'VlSΟlW8£·!e’Z«MΖ uβfΒΒbΆΙ2}Ωݍ½ΖΜ4<}IeƒΜ ›mk#₯Z3¨m Μt§ν©>ΛhšοΉ ΆX°·½:0RκήltρΦ΅ςΦΌ¦ŒU{δΤZ›j°Xm‹χ2MΗl&»>x{νΡd9¦5"m*{€sΫΔ]oΒ‹+2,=ېμτ–aš±Ί/’ΪŒΕ†y{ιΤ6›šή|’iΫ,½ΧΌ£aRQΓ€³YΒ{Ϋ›κκ©΄™±J›™©4¬·̈j¦ΫΆ–5[ΨΆ1[$ΌRm6ˆΜ*Q(«ΌAΉ6­m2fΥΗVΧ aYFχυœBb H΅m‚4c¦frmΐeT€°€α?ονήWφhΫ ΤRkšBΛP#`ΆvXΜ6¦&φΉ¦Νΐh˜P₯™5Ο<5₯mm€T‹msU3fX΄s³ΚΉΆ¦ΗνYΉΌWdlΫ+œΝR”y6΅žyg0ΥτκΙΆUƒA½½«χeDΖΆV{έ•νΩK“εΨZ΄ωπVe”kΦVMΖΖhΫ(;M›YΡZc•γUj3΄­ή&Gl{9n«†šm΄O½gΦφΡ6­ŽΜ`( ³„½½·θs ΤΌY«²Ω†TΓΪ&kΦi¦6³ΆQΆU Ψ"α±m%mJνMΉ6f›˜ύ―ωωϋ?ϋ‡ΐώφoχϋοϋΟ₯ΦΖr F˜ΝR‹±ΝΊ¦ΩU΅1cI}lΩhΣΆKΫΪΤuc{’c`’.ά=υ΄ΨzͺΪjbf{™>6 ’ΌΩδ3Ω`Φ4j@2s'›UYέΫχΨ³­Ϊ^²uUΫΆop«υψα Η ΉfMΕF¦ΆwwΙ@£mΖΩikΩ–ΥšΈšω^'[3,65°m”+o zΓjZΆαŒή–}f¦Η秉™½«ΦkV|$οVΆ0›ΕΨTcΛΡnͺjΓlkη³mκΠΖ[-fΥPρή»Ο͚νmPΉO΅7­F² 4%Επ‹3Ϋ6*γξΝ±7h5έάΕΜhΌ{}USΦΜ‡5£dΆυψΙbλΡΦvwmcωzχfΩVΘx»ή3ͺ#›Μ΅±‚4fφn§ΆyS›lz)§­¬rrΩΫχϋ=#Ά6ϋόˆΡ2cV>₯φφή’λΆe3'¨<’"ooέ)TzΫ{ν„2ΆάΕΆ²ητ^χ™6ېyϋV­ ‚J₯΅ Pφ6.‰lε½ΥY„½mk―»Yšυ© CY₯ΣΪ ΟΌWέZ‘υAνV6fΫZŒM’ΩήK΄Ϋ₯₯Κ6ον–3P]{ΌΥ¦VŸοχu-h›½·ϋœ;ƒ΄-ΐbΨfηlΡ¨[;ΎΰφΆM΅G·Ω^©―>SσύΌž4Ah›cΝ`—ΆΧγ±ν-λΪkS&ΦΎvο ³.ΛΫ΅ΩTq{―»YSͺ1³Χ’Υή;m²ιm%΅ΣցUΊ³ν½Η;‘Αkχ³ΣΓτΰYω”ΪΆχΎ_wέ&–v‚"l6*ςΆε”’>ΫΆΖPα½E.&&§7ΕΩΆ&ž·ΧΥ ¦X]eΝήΣΩ6€Ψy+{K©Ε„½mk+ξζ=;ΝΊ˜»RU(Σyζ­*֚בfPΫΆ­ΕΨH{/ΡZ§”j{φfuiΫrw{lΑb^υκ²7QΟΪζϋήΊΊ#€½uΐ VΆY2 Q·–Ρfo*Ϊ˜ζ½Χ©§ζRμ!σξ™T&‘™c0γZΫkΘΩc[ίumν‘·wΎΆ‡Α,ΫΊ,O=3£»Οϋ~»Έmm(ΥφΒZν½»6ΑΖψT;ΆfVωά™½ΝήŸ FΡ ΟΚ cο}ΉO7Ϊ½ν€`ΛlTdΫ ΣQ·ν-θΣχ»#ρ©³­ΌαjKu›­‰Ω{ͺΣbΒTΫ3Λ3“‹ε]hYφ°Ά°›χμ4+6σ“”ΚŽ·Ο οͺυΚλυ¦lm[ ŒM5φ^uϋ€ͺΆΩƒά>{ΫGΧ–7S‹yΥͺΨ6]ΟΪfΫHΉσT{k–ν²­%0D£nxdή›ͺ»ΫάΌο+υϊοßφϋouΏύΎτ!Φ<.ν±…ΛΪήΪͺaq]oο·ΌΝ[,”5bj:l[•”m{P“gV²-wgΟXΌ• ‹χΈ‚mΆJΙφήφΦpχ#&±Ά7r†ιΛφΥ­‘Ά ^S©·‘Gu™Οφτω\ڌmΆ»F͏ΩΟ¬Yٳkx3έΉ7šMnΡΪΆ’Θ›·…Ή6¦’b±)ζLΛμ=sήνƒ2›₯Φ{Ε `Ϋ&gΫ<―$UΫF@˜―ήφ“fmcκ:{;χξ%fΫ*w½χΨX]kmι±±Ίf₯ ζa΄13#«·ϋ΄ΔΆiφ„ŒuέΰUΓ@ Owƒ fcV%«-ΟΗή~½ΉK©ΦzU«ΩΆήvQֈΩ暒Ν(UΖΎC&Λ¬m©Β›n="›Ή ‹χˆΚΜ[₯0mΨήw έ§2-™IodΗτ‹ν©Υ„sΫ¬-u}74¨ΤΆ΅ή{Ί» Όχύ܍ τψΪ_̚UŒΗ 2ΩΌΥ9k™Nή^!eσ݊η2›p‹ΔζιΌ΅\{οωΨΞvTΖσiΟ,S2ΐΖfέΩΩ63-Q·m„yϊ΅ύδfΝlͺξή{'Ρ`{sΊOΫΫ6¦»KτΜ«kΜΓ:iƒm»ήΊ@ρ6ΝΖ”Ωϊtƒ 4τT+6ΐ³Q’Υ΄―mίmκ=΅Φ«R·wo+•5½Ή¦h3SW{3¬,³vΆ₯ ›ςžZ#²y»Οa0C{―R˜6ε}Ώλ‘»:c]ž!ΫlΚys_βŒq―ΨΉ ›eζξή6¨«χμ^ΫFέ6Ψ{E± ρ΅›‚a—mΏvwΚ‰½ͺ_ϟτΗώιŸόε_Ύί~ίοΏυώ½ΕeσtΆ δ}ΏΞdβ¨Ϋw/N[ΫXΥyλ†m―Oφ±m¬pέ6"ήϊ΅]>˜΅=©OΫZ²–ςΎΟ©‹mlL΅ ΧΌ* Z[λ‹cΫ`΅JYbΕΆ5f“τžλˆIl ‘‘jΜΐlH²p{nγ½χδ>·/©ΦΠkyο΅M’X#{S+ή¦2ΌmP[f-Υε}•ΑΒ8x―ͺ³qΫCRκ=₯ό¨ΥΜTΨΆT4¦llKI‡Τ3€Όλ‰${κVΙπΐ’‚ ‚φΆ­΅¨lCL`m‹K i[Jƒ1…I²!ΓΧ[‡ΆΩ1γ­k*f™mΖζf·M's0 )lΡφ¨$Z£ž-v΅gŠdφΉ0¦K[ΩJ>B<[]c›|-Λm Τ쫏¦²ΝΛ³Ϋή² Φ6q1–„¨#·>l ιΪͺ³š–Ρή–΄Q½mή‘¨Ν¦O½Ν˜`ς©‚±†³²½-1“Μκςφ65²ΜXσf«ͺYοMdΥx#mUν™,3“ΤΆM₯Ζ*£·—B)€mL―Ž ι²-7‘aL―‚Š‚m6‚φyΜl D Λ—€m€Wΐ-φ¨Ν#›mK°yot7ͺlΚήlξΩg›€ιΝ¦RΆΦmWmoK€QWΟ2ΪΥ†T4ΔφΉ0°5‹­3¨›«xΫR]Ιcm– U¦GJŒ΅< 3Ψ`l‹6g/ ϊ1ή$ΨTHgΚ ZmƒΠ[w„ΗΆ¬’θ=cceš”*0“¬°½¬eVΕφ,₯MYFΦΆy+u³ΆT-Ma›šδaKΊmlJˊ•mHJ™Π6S/GXθb£‰ΖΣkHi³Y©»ΟΓ €OΕ 3‹Od₯«1eζQςζcƒm›·m}š’f¬fζΛ=΅±U{so&‰΄lU›mκ”Ψ*6Z‚ΑY l5°5 2LΕ_(fͺ*³΅GIRM_•4ΚήX2˜gJ΄ Md΄aIϊ7)Ά©Xg5m τ֝Ζ6VM©=Kxc…H)ι˜-¬°½q²WyV¨m'#«΅63{―€%3KeΥUl±Ψ5Ά%5c£Ϊ5±˜mR•­"lCŒ…λΕZ%Σ3IΥΫZ²δ€ b. 
ΆΕ喍b0(F<ώίυχφηώε_όυ_ο·Ώ{χ›Ώϊw}ڞ7M™mΥήτΦM%)T›yv§ ΅6 m›²ΨJ…±5C2l%?ŠmcΉ»2ΖZ ΆD©·s£lC²™έΫs­ΒΖΠ6G`°$D5ylΫ$ ΚVe‘/¬±œΎλ²06Ϋ»k’φŒͺο^cjΈˆ:fKΫ{Sy«YF•·7"€mΝlkSŠΧ,U2Œ7…₯Ν8[­f¦ΒΆ₯’1ec[R?Ϊ”Νΐ6¬Μ„;° mT6‘ Λ,!›iY%Ζ1’™‰΅)«K¦²ΧY`ͺ2Ψ6©Ϊ ˜…·Ε1ΡaO³ζ½*Μuλ=kη<Ψm%€ΉΪφ+ΓlήΘέ€:[ο}Ž©Κ ^ FΆλΌ]·ςζΝΩ=λ=K­½§[^Ο„Άρήje%d{€¦·²ήvΫΫ›uΡ«f[`–Ηχ<-’j³Χw+‚ι]—S”eΔ]Ο{4΅ΩΌ·ε»φΆίxο9Θ…Άf•n=ή²9ΞΩ+½νmqqU›ΪφΫ›}:§υx™ώψ€­ΫZ³Z`σžkΥTpX²™±PΆΦ<―6‡kΌ™mοdY7+ΝΆ]<―ΤΆ,Sγι2˜­=ν½qW-3τήξξ-cΣδνάκΆ·AcΞ[Άƒ‰ΙΝώW—όqΨcΨΜ6»ΒœvO³€=KiΌQΌΫ€K–ΝζΊΝc„ΝΉ›€8‘υήηDλΞμ‰ΝΨήάν˜›υ›Ϊχ3o1ΖΞν=έΒΣm›½i…^*Ϋ#s-™΅yoΆSχͺαmΓ0Kγf³Ό“jcΕ™h«§Λ©σˆΚ·gήφμtθάσ~Σ{?ΗΎZβ°5“ζαgŸ;ο·™/ͺk‹§χφσ>b™—Ÿώψ‹lkΝB+φήs°*ρ-Œ€Λ[‹wΈΝΨΦ\mφςΝJ³ ³έΟ$“-O 1³έ{Ύm›«Z­™φVGom»½}w«ϊ_»ψsϊ―ΨoϋΣπΑˆηΓΖ`ζ½gW˜p­g6ΫJ;οQΌΆβCec*Œ—†ω”šΤ}"Όw·»3{€goS!Τϊϊ§vσφ¦ρ;·=Β²fuΫ²7°ςͺ²=‚šΆΞ #lΫΫT―kΩ[ΜR#n<㝔Ν&‰Ϋ²…8RηΕΩφζFo›₯ͺrΩLφΦtω»°ΦHαy—gι›ψνZ₯ͺmέο½ηZεj~yΊΨΞ6X³ΪoB£©αχ8@…ΥφΪξ‚Zš½_E`lλ^ϋfτ=# ΧFλΫȞ»0 ΅eƒΪΦΊΎgΧͺI¬ν©fΪΎrωΥζ–{υ±ί·oEάΆΝ{ΔΥxλφ¨-Ϋυνl€•gwg{d̍ cS}[Χ^λuU›½ξήΓ-iٞdlZN™ΆΜΪ\3Ψ…Ky<Ψ,₯m*Lcΐkd½ά·= b[½ϋγ, b3]mΫΨήλ>¦ ͺ-S›-QdSfΜ0W%v%·υήο΄BιxΑDΆΩμ1Ž6Θ&•β½u²?.2²fYλ΄=a[BT°ΪΦVΎ’f{‘,do5Šaά™ŽcΤ™»Φ% ΫS›Ta†Ϊ²Ω&β£φΦΊξΎ3‰e[ž±}% 6΅΅ά d"φϋv«UΫΨuχή6΅šαŠ~sƒD&o}Ÿlΐ€•gwgΓΘ6FΨ0ν­»Ϋ2NwxΏΊ1 Ϊ]k™G)³lZ’ ΘΆΜρ™Ω.FIa6KMΩΊycτdλqΦ˝=δΦΪVλΣ{j›©²™άΩfƒn[5ξ»Ι€Z%#ΨΜ£ΉzΧw[ο½,ΉKέΌ‘mΆ φx΄،l"€ΌU› _)¬²[ ڞ0³qΌ:X ;λx-a›%BΆΥ„c{|g:ΒfVχ¦Ϋ«feo²‰.ΩƒlΗx4ζKmλUwΆωnάbΥyΫ•2V›ΪL&Υf"φKœšmΫΧeΆ‘Ψγ+φP%“-©ΗΘ€•!έ±GΆ1’7¬φ^w7BΨΉNφ²ΉmΜΛξZ ۞]Ν²i ΓΈlcΙΪ|Ν³/”ΐκk3cιΐΆj«i†¦Ρ›³^:—o­M½ϋl{j›)ͺmϋqξ`oCΩͺQ‰…μY- 6ežGSNƒΈθ»­ά₯ώ˜‡`βσφ»Œmγ6€Ρ!ε-°)Ηέη·4#²Φ%Ϊa{ωx‘eΫν}-°m+bxΕNΫ£$DτΫξΪΖθv5+6Tj©ΙfΣ\„mιϊήs­j›Ν]g›Mν“ςςϊŸΎ?ΩΏ;€?ώΨίύέώτχϋΟiΫ ΫPμΗ—46mΊd²EχνΌΙĊΜϊΞφ(3cDονn[*Qmν^Uν1l˜—UΠl{v5b@Nν₯Φώ?Apl°‹:¦at]ο·UpT *P‡πμvΨ“P6„3‰ Θ„2‰Ήg-΄ωΦv< ¬>m0²τt»²Υ@_ΌΑRKΆ―|[kΓzS<jێΆmVo[5ͺ²ejg5ͺ°)sŽ@ΕΛUσ6v΄χ’Cρ±¨ l># [)FΆΫΚΛλcƒ,ΖήZΆ Ϋε*¬Ά{Ϋ{‘,Νξ«ϋ6b„…WσDl6ΠήΉPao―6ΆΒ $‘ i›ššΩζ€χaLΩ‚d‘VΜ vXΝLztG«ΘΫΐlΒ²ΕPƒζk›υͺ¬%λb˜‰δ‘%ΕΆ-Ν6$ΰ7_σͺΦΆyέφA ³IΫj*$γvzΤΆ ΌUi 3gb―–ΝTΫ¨KLm‰μ‘Μ’`Sλ![&3›U•΄mαΦT/ OΓf·ž}%5XΆ΄—„m`“zΫπF‰ «jd»Ye\=°έDyΛΣω/―²ρU™†Γ’m4€m…6ΆqŠψZ6”fiσžIO³ΑŽ·ΖΘrz=!4SΘc2U±MMΖvs’‡Ρ˜²Τ"Φ4³Αl{m3)ΪlδqyΫΘl’ΝΔX J˜―m¦ž₯Ef5E‰PsΝ)²M^βζF½d;uΫ£`Ϊ–f±‡ΚμΉρΪΪΖc;‚Ί‹zΫΙΘΐ“L™i„a˜-oSΦlš †$…YΞK‚a±l Ϊ†LbL©Μ€l―6Ϋ8X γec#Sͺ±³F(Δΐf¦"‰ΉΥΔ°™Ϋ΄B£d™™bE†±i%Άέ>Ÿ‘Ψ€%±2›B„‘‘ŒΖfˆ ΄6›4›21,τb@ΨΨ€F eΣh€±[= °lΒ€ •™mΨ})‚₯ˆ΅`›ζΚ€©@ξ Δ@/&;‚ΰΆ\}Ά­%Α„Ρ4* ΕXfLˆM…b *0ΎbZE2£YΨb†€!4Ϊl+6y™6Œf‰lS†Jέ—YPα錢̀JΐΙ@l3g ŒΝς4Jjf‚‚Κ chv[σf`τ„6I챇MKc3–7@³š Š€3Ζk„f €29/Pˆ6,nKa°l„€aƒΫ²΄ΜͺbΆ•5ΕΨV0˜Ζ,# bUoΨP ·±dΦ(’0¬6³Α+[Χ2$ Δhͺ0KcTplkTa¦F†66’6Y³ΫΠ+6‘j3rFΝ’•m@²SΥ“!H1`ή{’ζΨΚ„₯mΝ`–‡Νς„Q^ iQ™aŒ ΞξφžZZsd&‘Ν0”‘Ω0–Ζ kΠl0"fAŒlΣμ%! 
(™—ΐ*lΫHŒ1C΄°iΔ l»[΅₯Xc¬²¦0³˜1#B6›`pΣ«7ΨΆ «νΨ#έ&0$ 5ν†°Α«Ώ½?ύ±?ϋυ―χύ»ί¬fz™ΐ8λPΆM²mH˜!0šŠΝτ€ ΌΪ„Y^6ΜX0κu‡™$ lz₯jΖ­ ,mkΖΆdlsI1J-˜Ω€ )°1,4Ϋ-š 3IΩ2P,”&`,aƒ ²]΄i`DΜ`Ά1Κ€mBK P&+4Ω„JƒFV ˜M©6ΨΨξ¦Τ(b&:>  [œ(^‹f›%° 1Œ1ΪJƒs‚HԈŒ±­A€dΐ‹D#«€ΘΖ$ι·3ΐ 4[Δ4-£”M°  ΐΨ™ ƒ¨Μ6[€±ΙΚCaΫ²†(Μv3Ρj-ksΔ ™R6’΄@$Γ&c%ρ,‚ΐbƒ4l0† 2ΆC†1¬GΫf& 36mL fLH‰6#°Αl3–L±6F")Ά₯μ»m š (±9F[‹f°QS²ΙζΕLCaŒ=6Φ)ΐ 6faΫLΡEŠnJͺΐ° @)†BcΡ1"[Α`@6Ϋ†±±z°ΩjeŒ,Ϋlΐ6j³­­― «ΩnΕ­4Νfΐ € ΪΨ6QΤ,‹MΖ¨d€-†h°Ω f©€#ΛΆ€ Τϊ¨m3“Κš±fc° 6ΒB‘l!llͺ MΑΖR&4©μUμl©°QUd@4·!‚›•‚‘΅)#$+*i«±Ωc z P²ΝfC) ”"Λn„κ †ΝF„PŠ΄΅h,¨Δ llΖhX³M+Σ`ΜΆ Ω&a6kKσ`LΝξƒI‰f ˜6 l›²ΘΘΒdͺZ"")BIΠ’›m€‰I0`Γ²-ΐ@τہdΖ&€Νΐ !D“ΑΪ°±‰1ˆ° „J€&Κ^ΚfcU‚!Rsۈ †F^S Ωˆ ±φˆAƒRlτ·Ώυ»ί½ίό¦?`Ώύνύυ―χλΏ_6†Τ«6cΨ ASͺ–m-ZΖx%˜›Ω0HΙfC‘ ΛΪ6(6€mk "Ξ »Ν &―E ›MΜ 4 (±m’J-² ±6œ)…H‘VŠ IlΆ5ƒŒa2 hJa3Q`0ΕƒΑ, *³a³mA¬lA ’ͺ°Ε‰β%šm–€oΜ–τ€ΝΫ{o6Μ\Œ—€ ,f6s€P΅M­e<Ϋ[’Ψc}j£Ήjk‹Ν¦G,€M”εΨQ³$f†v§jΆ-™5RlιΓε%c++κΨ°a3ΛKΟ΄Ϊ€—vΕ²›Y%D•yΧφ$™ΩΆj†y’α¬3€bb`›i^/–νŽ§β³ͺ9(iw₯F›Qyo· “ͺ‰ΗW̐ϊ΄„5*έyFClΕ’-#6τŠ &¬½Η  Ν6=IΖ¬›lUZJΥέ…­±ΫΪ¦μΨΠco[/»lδυ²ν6)L%kFC―Φ9k[ΤΣ§M―’aζ²Y=b› 4‹Η–§s,؝ͺ ή6.Οφ† ή¬W j¬½-ŒMy¬†l&°ά€ΫΠ6­Ψ6λ­9I0“κc©Υ†½¬p†ΡΆ½^[V›3Τμ½νœE©2νaƒ¨κΘvΫζ#΅q˜F*& ΫΆΝΫ^/Ω‘Β[՜YyΪ.jdΆ­Zžžm[^m(Ψh>―eI›z―; h Ωφ@ΫγP•`3…U³Ζp'[•§±zέ]ΨP(νΆ0 7τΠ¦νf©—mΫΖBFΙΨdαΥ:³a[=Υ¦€ŒΙ.›U³b&βεQJNΣφm­βν΅ZkL%3©²Z½a!τ -’ΝφTJφΊ­ΜΌΉO³° (ΙιΪΫ@Se[+ήζ–X³χ%UΜ‚±½ΙφΆ\M“ΣfΜ§ή[&ΜΪφͺ%ν½ΦΑSΚ0kξc%3[Ϊͺκ ZΝ” šΪρ ε3―€©X5Ϋ 2²±‡ wΗX]ΫΌi¦PΪ6“<Άqš΄·­ΥeΆ·±˜.CKΖΆ ]ΆG{λβ4B5£νΥΆΡe•Ψ0ΦΪN€E‹·υˆP΅MΝ7™¬QιΆΙ,Y±Χš`fO?fΈŒ~χwο?ώΗ~χ°ίώm?ύ΄ίϊ-eο™ σt„m[mZ€7³ak†)±¬lι¬=]k‘½™U’¨ςΪΜ΄FΗΪ&Ϋ:Άi*3<σ ’ Ϋ›(ΧΕςφž’ʌTm6Τho+B©½±‘Tƒ2̚;Σ(,M¦bSγVlb¦ΣR6 LͺO3¬4*«f›q!6« V…ma«12LۈǦ帡+½ν%c{o–fEl*™!“ΚΜ3ή«ΣI›ΫέΝ†ΙΓΈ¬πcΣΙf$κm³V!ΥΆχά)c6[β°+Ά7HΛR‹9\7ΣμŒ6€Ν£ς²AZ]y³mˆΆΉηlT-―έΨ4UYπΎυy­ι³ήvzk“eͺϋά—™ΉΝγγ9ωl―ͺiSνή›*€­ήΊ@³1ΦΆQe<»ΦβΟ9ގ‰šΝŽsσ]KVDμ­Ά žΥnˍΫb™© k™ήΫ}^6ΓΔχήu˜™΄«™mΒΈ;ηmH9y{gV&ΥΊl›QζYΊΩΦJΫ–έΪ¬ Ε±΅«^³΅6Ωͺɞ½υΙε-©y³ήΊΆ*[mDΫ6Ξ΅½JΩΫ^χcx‘&ys’6ΫpKtmή4kΆ—€Ε\m{ο9Ι‚‹9\g›3ΨaΙΕ`ΜRΜ‹εΪΊl›ςφrΐζΚΫ’ml‰UŽ-΅ζΝ`υΆΫΟ―O»΄^jή¬W΅₯ UΟpΌSΫΨέα½Ώξ~L–‘ZޜΘfnDg³ρ”x=Ϋ άέΆmC\Ό˜¦ρ֐™Ι’&ΙΪ²I*”ΝήήΉφ¦Ε–kSς{Kw^d/oξ΅[>ΗΪΤbσζ³9ΊΖ`ξυΞzΙΠPa»mEj\͚mI¦ly+΄ #Τl{έ‡ΜXόΌ~€f=˜Έήwl³V»ZmΛV3–›d™Ϋ²W`PTm]{ΦΪτΆm™³Ν\ανυΞλ2o²ayο]%5›RΣ±f›©ΆΗΒςήϋάy“Αl­½UbKΨΌή½3€‘­ξm·οŠενΊΧw­l«Š"^ͺfO°QΛφξ{?ΧιLλΩέ²‘mΟ>#\Ωήήs'^³‡έ΅m,₯[„«g„eΩ–…oZ‘}³ω•4{{©υ??κ_ϋΕ/°σoότΣώεΏ4Ζ^έ»΅\Ψ^μτζΝ©ϊΟ—Γd&&Λ&5 6Š·©PΆΤ»gΌ9υΆg·j€Ϊφž;ϊρΉΤޞ7εΪσž£ΤvΆξ|=‚±Y‘λžΗH•Ό/G ΫΪφΞ­%Zνmo‡3χ»ηsΪVΉ±L·I¦q7ί₯Ά±:V±·ακΣ±U6ή™ξά|η₯Ύvw±=ƒλΩ9csΪΫlΝΚΒΞΆK{ŸJΫΆT—χ΅8nfοΝ΅-ͺΰΤΫΆ5w‹Φ=ϋΚ1Υ–wͺγΓ«οV?ow]mΫ^ξkΰ’Œ7Ÿ‘΅Y=“QΣzΗν©dΜΤyΨf†Φa„ΆΫΊžΝCŽ\· Χ{{sΫeΫΫu¦š›-©f[³Ά«‹ΆlΫδΥςΓΐξυΨ)υ™χUΥd–½i‘ΕϋΎΆB³ωϊΆσV)xςι3nΑw³₯ή Fͺl―W±¦,KŸΆ3Ρv©₯Y³~žλέ]{Ψ³­ΪjTa™5sΧ–·$[ϊϊ4+Ÿ½ο·ε{Ωββ³7m&±Ύ{ J˜κʞψ¬ΩΫ8†€SΫΆ™n™Ε˜υ#^©M\δ½JkΚv7Ό§ •αmΣ‘cQΥ6ΛHwZί­žUUφήΛݘ+ο\5Ωή ΩfVDίή‡ΆOXkoΫυ1fβ6ZaHψ|Χ§1ε-;bR7ΖQΫΎ[|f]υφ]Tχςέ 3Ά}κbΦ¦½ρ—_1 <;RŸΩ3$Φjο+™PφΉήχ΅νθ’ΚšηΫΞ ‚'ŸnΊΝŒξξ»gKΑaSUΆ΅%Ωϋ°[”Φ4Ν¬0ΚμϋΤΊnμΡχ»»Ά%Αn±UΖ·QΆτάΤ°΅O§{ίgs‡Ν{·A{9ΦΫ–)uυφUΒ{|ψ¬±mh†΄Tm†­Ηyh5pΣφϊcEmΕEφαϋίΈ_ό΅Ÿ~ΪOnτŸ΄φ¦άβXT΅± ΊsληΡ+vwρή7Χ’m89π¦+ΩTΨ¬,υZ΄­ήΆΊcc/nB¬!αήͺ…­‘–"@Ί#jφΆΝm]š·¨L}–·Ϊ6½½O]εΩޞΥ7Kτυ>.žGͺ-΅=†β²}>½οφ–)Κšη…—ι‚'ηVMΫθξΆ½½Τ΄SΫͺΐXϊd;ΫΥLiMl§feφfΫE΅―ϊ~wEtΕ[‹ x¦2inΫ,Φ>ΪΫσ¦\{ήλ(έλΞΧγCοηοƒ ±mΌ9.―Lνηο_χώϋώoπgπ'ω'ρβϋ~&``ƒbf„#Œΐ@€ΐ£bŒ0€` €›` €‘MŒlBΐ¦ΐΐ0F`‚τΉΟ―ώ­_ύυΏσλμΧ~γτ+ύΧΙVaVΝLΑPm1©νmΞgΝ°ΆΩ«t5l6UΩΜΐΫ£Ο ±Mz 0l;VΝΨΰ“Υ{ξφ}ͺΟΩΆνιΨ뎢eλb{OqhΦLΧ6 &mΡ…m³}[4!ο»΄fΝξ»}.Ϋά ŽΥ€Φ,[uޞŠm3ŽۘԡχD6„zNΆ‘‰ρ6XEϐάn½-mμυΙ` ·§«Ρ&Ψ#±$Ο¬k˜‘‰±I}ΆWΑxά΄νf Ϋlξ`C1ΚΎλF4pΩ†Ω ,72%ϋ7—(Ψ[Χ¦fΆ‘»Yš΅0{v3“=Ά’fΥ Q6―n{ϋ™ςΓ2σm©6ΊcžE!ΐΫΓ©νUΝ 1ΓΆceβa>YΆjο™>Ω{{ «\ΫzΫ]l{F4k¦bٜΖ6\ΙΆY€—qs,ο»ͺf­gχμ–ρ™AΪζΦ CΤΩΆa…f‡LΫlά}ΪwšxiC¨ρήϋάyΫ5(xΫ°ŠΖσnηvΪΆή:6έΛ½©Β¬qφ%!S³·> 32™zOt¦ˆΗγ¦Ω @b›M©mQŒ²·LΈiv±³)K²©:ο«οs©PΫ‹)“½‘n-ΑF³g ΤfoΕfMg³2‘Όχ3rΚ λνη¦6κ4¬ΔΫV 
ιήήΥ†!Iσ°-΅MoϋDΆΎ―?d{ϋ*LΊΫ΄7UΫfΣ͚©Ψg“° ՝χ6‹4½Φςή¨*³Φz΄ι?ηΟoώ&ψ»w?ύτ~ϊώή―Υ6-Α&Q™mƒœžyD™fή¬ϋ΄a F‹ !Σ{οξz[Ι<Ά‘ιΖσR ³)c²…@φ·²¨˜5Ξ„Œ΄οΊlΨ΅ηzΣt`•ρ₯ΉΡΦ,-‡mFj“£xΆuΛ=ΖΑ€ys²IχτFΘ6»Ό7T ΕΆΣ³a ͺδ}wQLhfξlVhUο}g9Sλνk«S5σUΩ̎Uΐf&¨ξmeΓΠršb]Ϋxσ‘σdκύόνσΉλΗlP™±­°ΒόΌŸψ/ώψλψηψ§ηOκ—υΛο/ίΈξoώψ›τητ_μ~γΧώΩoώύρώφ?ΈλνIΦζ)†D†y[§±¬Jk3¨δf  Šy°m8T°š‘2ή0 diΖRΕΫb X¦5[bo0•δhΖ$³¨UnCjπ`F•˜0΄™.s‘XjC™rj[Œ'ωΜl€΅6φ’Y`΄ν}»O16)k™]΅σς^쨍qΪVI―ξ)Ψd4]οa‚„-lXwΓ@ŽX[φ@• ¬!³§£4΄δ™χR‹Κ΅ΜΛ A‘Φ6EΈ2 lb64^Sβ¬JlήͺPcVi“X¬vŠ(lQedžbΤX£™ΗM;tŒ ͺΫi+ SlΣl²•TΪj6ΰ­ΩKЬΡX¦aΌ9¦ˆ±IΝ’F{CŒ¨(›΅h3]ιf­ V6"[—[,Ve{zω “™ΦΪ0ΓΈyϋζ Ϊf]km΄c[}—Ι-Ο>n[Vφθ  syl«QK­«RΧj ₯΅Ν% ֜Μ)*{‘Ί–Y„H©ΐΪ¦WΐL³ΣΘ^§₯η(‰Ν4KΣFjƒΘ6E-ET`2`CX›΅ž«eΗ\ΑŒψ8mͺ3ΖΜ”TΩj6ͺπΦΆˆa,©lΦ€…F°ς昒Ψ^Zk9XL3TmO6Yͺp(†ΨΘbUπΟω~ηwϊO ~ύΧίOΦΏχϋΛ΄‘₯ cd›Œ°‡:fc.±—VuΫkπΨdY·G’”PΝ6g&Μ6›ͺTƒΊˆΙN] ƒ΄Άy³„L™νΤΪ ρςφ²dΡE3+8 l έ1Ζ %UˆE†xνqZ5lΝRΑτέ&$ƒQ@Q°­K›)ΦFƒΩlλБ°ΘΥ -1ζΩΜΖphΜJκΝL₯ *3Ά5Β 3k«?P1J3„ΐFΏ|Ώόυ'ΏχGΏχ_ώτΏόεϋΛοΎώ?Ap³‚ {žetΧ-¨ΰT‡‚n‡BgνfθΘΊ•!BGιLZ0«΅…|ώ$&ζ}ξΣ΅ΐΫϋν~ϋΫ?όφο~ϋwγ7σΏ~?ψŸύρ?ωGΤȈ-Hf¨bF9ΆMI(ΖbSPh‡m Τ@@‘c£"H’ΝΘ†‚ƒΚΆ)#«­P±°Ϊ–Ίπ6\ΆmΚeƒhMkf›‚P£Ω ;†Al› ˆΝHa‚)€Ν¬dŠ$Ψh=Ψ€†Œb tm³ΚLh Š˜ΚΒF" ΄„ΖΆΜ,ΆMΙ0‘ι¨²·₯ Š6 Fΐ6@š™½Ι@0 dlH’0ƒ‚Ρ0+§m ζX12ΔΐMUaF΄Ν&š@¨ΑF2€Β¨b`Sy*¬‘ bˆMΩhT¨m’° m["6¬ΪZc½Ž©„Ω& ¨M Δ– ˆ©Ά7• P`8ΩvzMPΆΩ[  6㌍ f¬‘„adf…@mΓT@1`[QZlC0ΐΜbΫH†A¬MaQΫZ1„mαš%°αY›Ω‘Άe Ú!H³%Ε6ͺARΆ‰ΉLhb`H Ϋ@€А†B#$deFΈΓHc4ΕΨ$€²§˜%Κf$Q‹-a²Ϊ6JΌaνΟώΜώOώνΏ{ζ_σκή„dDV !6ΥήΣL…y)ΐΜΙ&S±Ωΰb-*›362b*d³­q #hΩ P‘™­°‘šMY ‚™±­3„j[©˜„dΖ#lΜ3+i`›€)L€FfB‘΅aVΕΆA c…Œζca‰ 4P†‘†4¨₯B`H3]ΐΟt±q" cΜϋιχ?ύόW?σϊη?ήπγύψλύΧΏρϋπχώαŸόσ?ύϋŸŸΡlΛt%ΜXšΔ2Yή^.dX(&΄KKΐΜ`€fΛ@Lˆ2°q"Ν,£ ©0€˜ml*Ηl$ab&6°‘L"εΩAƒPήΜ›*°m^u3ρ06‘6@₯Ϋ¬`Φh²¦Σ΅ APaaΐΦ«ΫΉΆΧEΑΆ Ϋ› AΫ 0K¬°…€πH‰™ΑI72` ±ΔΆ­ΚΌ­Ψ¨Œ-ΨΒ‚@φ6–βΐ”½MΜ–qlmUΪ&‘±νŽ™ΩΖͺ*Μ[ΛΗTKΠB½})ΙdPmŒ“F(03Ιl™ŠΡL‘ff'@πLE“d ι˜ ΉΨ@43XΚSΘΔΓΒ$Rfc 1ΟLb³Ή Μμ©[› ›MbŠέΨT³FΨFS(ΚCΕΠσΊkŒ&ˆm‘Πlo²7¦€Ψ†a€Y@ TmΓΕ#0Άœt° m A@Άν]cV¬lΛ Ψ˜E“ˆΈ ƒ‘Ψΐ–Η‡1 EΪ&³M‰a΅1L%Δ6ι@(X‘gHf!S†A%0 ΐ°aiΑ³Ek€$’ša6D«`11 Ϋζ.ΆA4k°”M‚Yš…FkGΚ6-M3fM0"μ_ύΛύΕ/}―™›©ž·˜ …6›"³hΦ†μεFIˆΔf΅Φ° ¬ΔΠ…ΨήdoŒr"†FˆFιm)ˆyo€ŠF†­€„m)l΄ V56`aiƈ& bω`036y9eΙΆ4Β`S›f–TζΝ*2"±šΜ6ͺ‘aΕΨ@HΜΔ† cφ"mFP±q" c’©€Λd˜6cωΏίίύ—Ÿ~υ‹ΏϊΕχ}€Ÿ~χΣΟυσΏω?ύΗώf{σX™50I@kk¦² °V ΄0`ΫΆΝl{P{φ…„™$0Y¨McXf”B°Ν…Ν†:ΪVjΖ J#cΡΜD)C ΫXc©-Ψ6ΦUΑcL¬Θ@ΜΦl{c6˜Ε„ΩΨΆvАlM 0ζ!m#ŽΖ€`f³*[D² ¬mΖ(ŽΆ‘³±šΝ ¦ΨΆmˆLR΅ΝfŒM`lY5 Œ$›AΪ<³@Ί’UjؚΆˆΐ’5šͺ˜ΨZfm۞&h@Ζ€±fSFA Z Šmo›ΩΫdνΩ$̐ΐ΄Am@cšM©،Œ7¨Ψ†³°1Τ†a"•!Ζ° …Μfλκ cΠ&‚mk±mΜΆŒ±Y`mΠ‚ Nc&0™Νf6NΜ`)B3-@• $2°6›&t΄ 26Νfm@ΙΆm,KΆΧe`Ψf`HΔƒζΩ6΅Ne‘&[+Ϊ"H”ΔζE›&ͺιt…1†΄m{,adl0Φ ΐ ΪbhΐlΫ6›gƒυž=W h΅1€4Θ c€±Χ›ͺf³¨™-HΦ`¦ͺj3Ϋ`- !cΫΆu\Ά†€ΙŒkH΅mΫΆΝ€Ιl†Ψ jΫZ‘f‚†a{34³1’̘*™ΖLh+"œ6Ζ„"Y6­m³16oΫ¦1&Ω3@Œ­·XmlΖ4°ΝΠlfΑLͺVˆΆ΅’ ¨e%±y좚R&ΊJeΩ ci3Ά΅”1€€±fƒΨ š€Υ‚ ³Ν6ζmjΟήͺ†l¦Mcΐ3+ΑΟΆ·g3ΪKZΠίώζoωί~ω›χί}ύ»_ϋΏϊşό‹?ύΗτGΗ@3λ½EσImk«Ρ° Φη“l#Λ°]f†Ί™΅s’,QΎί·sΧΉ­½­on †Νγφ*-N‹―mϋds›­WηmΆ’ηέ>Y"¦°-–Rφeζ₯ΐ0x-™€ΛΥVCΛφzΉU}˜m+³V€m·'5mλ­Ξ™m3Λur‹a'ΓΆ& ΙΌ€`G}ΪfΪΖΪ±©½Y]d‹šΪf£jΩ¦Ni&eΫΆGΛlΫiYkΑ1²»6”Ν²νΚ[Ϋ9m R˜wδΖΜT΅‘Κ 'λzŽΫΫxoŸ»η›Ξ™κΫ»]Ω¬[UΦ³sΨΖΦΫhs tw7o{wL0Ϋ NέΫΪj˜ΘxvŸΛΩΛ°]f*ΦΞ•₯I΄΅χΖͺ»μήwλٍywΝ°w1ε΄τl›»n3MΌ6Ά•š+ͺ·)ΜΒHΩƒυšΚ`f(³»¬*ۚfηJlΥ†ηφžO³Y’Τ³Ύ«²Νή«:ΉeΓZU Ϋa”3ΛκžmGέΝLΟ졃 φwm!Γ6OwτΘΥi&e6dφΆc…m-ŸG€]TΩ”¦ΩΆ–«Ι|„]{Ο]roͺ@½eΥJYΙ{c[ZcηγιΣklν2shΩΆΝΫ£M ΊJΆ1ϋœ, {h[άκ³±Fd<»«2˜eΨB˜Η³ͺd&Έυ*πφζά}¬χΜk›4cv‡ΑΪ’^N]M³}ΞΜXΆ™―]}κ°Ξη>±mΝΫ{ά6½―ύψΡf-b˜Ή»²mYOS• ήβήνΗφ6ΔΪΆ­χzkJΧΆ‚!wοezΩ­[7;6ͺͺ\oΌωbΆmSΝ€³νϋήπ©LΊΘzƒ*ογρΆΗ‹uξͺn;+“\m¦ΛEVMoV»ΆΎί6©.τΎf7Wwέ™1ίN!»v…§+›Ν±s΅φΰLί··oΧ•5fŒFrœŠ{]wψΈ“ν}ή­·Άšš=’ μξ±’±φνϋ}žϊͺ»θΩΎ„lΫQY6Ϋrί΅χΆgΟ¦&¦­-RΉΦΆ}Ÿ7RέΥG½χX—kfVO«ΒΆwU'ΈhΆΩΩlμc[σΆQΧ}κfλvw' o7oοQE€Ρ{φγk3΄³aθΨφ@ΦΣΔΥ%Ύ‹’|{ί ΝΪΆ­­χš¬O3M”κ¬Έ{;ΣΣk>λf”ξΊ‡7aΆ§Β’ρΎί·ε£CͺνmDνΌ|·g/Φ)²R·W $ΫL—‹ŒMοmΪ΅υΎ½ot]\oΆΕ­λξš1―[²Οeή³χL)– Λsά}ίήΎͺ«_‡A©ΣΙ½NWsΆw―`ΣΤμEΞ­Ω5άcΩ5iίήΫήͺϊ@ξ’ΗΆ;dΫ’2fΐΆάsο±·½mj‚`‘δ’ρήΎΓΊξκ£ζmλκnž‘Ή• 
Ϋ^ͺS8άlγ8»σ<οc[σXSξgΧjWΧα•·›ν=–F{φ}½wxνc3н½A֘¨ξoMI^οϋ†f6c[ουdH@uV\[{υάknέΘ°Ρ]Χ§ΗΖ›aŸ xη@MΛ2ΊηΫ”S@ρ@D”Œ!Zi‘‰…ρθT ;56j 2@9:Γ °ητ½·kΩ6ž&ΪχΏΉΗΥ!ͺŽήΫPΘΛηΫ³ΧLJ™\·΅WÐλm¦r)ΛͺΟ·±L{½ΟΖuιΪlk>Φu1fY)dwaΠα}nJ %Φ{Ž»ρφΦΊ»²>=€δt:UΕΙ&σnmυ\³"βlΝ2ΰΧ$―χΉ½΅ξ> ]ήΆqGΩΆˆbl£ιχΆ·7šΐ΄e!Yf{ϋό4λΊ«Ϋ^uŒM«ΥΞ‘έ‡€αΓξ<ΟΓ›)"“χͺyπεwΏόγ?cΐη>Ώφ­―}ί£T‡ΫΫ*˜½QwHx‘Ψ«Δ^Mm»΄fq7Βή6ρt—ΆlښωβͺΆm£’2^υ–f3‰έ}~.«*ΪμΦ^keks•΄‚Ξ­·a“Π-³ ¨YΗπ²ΚfΆάΞyoRloΦΦ³„«MοΩ„ΉkkSΩέ[jVU3Ά°=FλΨ]ΡΖ…·ΩΊι’*yΫk±ΩΪ¬ξΜκν Φτdk^Λ%ΩΓπΪΌΦCι-ΣΆξΌσ&ΪZ2Ό ]­g§­™Tξ02yO*”Ω^Χfk$u“Φ¦7ν™ωΦέ+{Ÿ™m­y»Ω{Lζ.3Σ΄»ΓΜȘ‘«ή¦B…χΉT!ΤτΪe²ŠεUo+ φζ8 Ω>?O₯Θ›ήjσE ΩΖκΔ²2mΪlΉLol%Ί{ΟrζQμ>ηγ³"Ε„ΩΉ΅m srΛψC“»yφ²Šyͺf›NνM5ˎtΩ*2Ÿ[²ξΪ²ΦRa{Έ­e&χьͰρh»*†ςΡ3oE §χΡ½½W‡Ω2―ξšΥ›Τ;φ)[Μ²€™1ƒ=­΅=¨»kˆσ&5Άjτ6ζœΫϋ4£Νfη:·ΚΨ@fS)kοuαΝΔUG΅6Ά²ή››¬ZhΪvΩ{LΔΪ,ή*ςfOζF¨˜·Μ»/³ d +Κf[]οΝͺiݚ![­‹εA{s“ŠftμσϋާRδνޞSΪΆ:± j―zήrΠΛή.GΧΦΆθΦξsn]€ΟxΫ0·ΣλγξfΩx3¨Ζήs]{„l»΅•…ŸŸ³Z`[UΨή8Ό5Γ}4cΩ>i+Εdϊθٞš¨ΣΪ<―4›­ηΊΔVVΧ±θ±9› –€™Α{ͺ΅Q·»J£y€σΩμn‹ΆΩζ\ml½·*Ψ@†m£VΆ–mήUGΠksm½Ρ“UΛ m—mŒQΩk1¦mοΙ\ o™ΧΗ³aδͺΈlΫTυοΝRΊ™–6Ϊ•δf—ΦΫ ©Δ—}~j°ΊK{ؚ–ͺmΫ¦ˆLΊή«ζAbέηηλ.ω‚š"Βl>χύo~°νΛοΉ LKc›6R°MΔ$½ν][bΟ,MΦfΞ₯—m`&«dH†iP&kƒnfmυ1ΦKV&4+’gˆΉ"&dΒ,ΫΧb 1m«fˆL›ŒΨZCLΕ°Ν,DG{ƒ&ΩΖΜ ±-˜ΟΖ ΓHl“:Ν¦k€lΝ.Ό­χŒΤv΅ΩƜ=IΛΆ 1%‘TCŒ’`™@‘Μ&Ο€h›’Fͺ±³ΒE°˜Œm3ž₯2Β³W7Ϋ° ⰈIΌ-MX ‹­ia[ͺ {o–`a0Ιa‰G{Y›št%ήBΑͺΔ$Ψl,Vͺff«¦f6VΙͺΜ»He ‡ ² ΫUΉ] Αβd3r˜™7BzΜ> ˜yjkS²ΝΆ΅f f@11<©l$[Ϋ.*· †­H‚1X.oή|„A%fψ"iΨΐ–ήŒ €ΎψΛ?φ£Ώψλ?ω“Ψϋ|ίύξχβ[_ώΡοΩοΏο|ϋ;ψβ§ζOόΒ_ϋα|ίώ½ίύΣίω½o}ε—ώΑOύάO~όΕ|γ«Ώυ?όΦ@?ρ·~ζοώόόˆ/ϋwΏώ›ΏύνΠΟώŸϋΕΏϊΓ_|η[_ύ―υkί@_ωΚ_ωΏςχώΦ~χ/ΎυίλΧΏωε瀍-ΑŒ©+šˆt›€2 ˜!&Γ6ΐ˜Xˆ¦ o+!0ΜjΖlΥ6V Ά1©ΨΖ&O'6’}Ξ² +# &SfBiFYΩP(fcJ ! Δ”Ϋ,A Kΐ¦hm ΄Ϋ&,Ρl@6«ƒ³͐&%ƒΦ`S[Μ,  dΝ‚d΄mΣ…ˁΑ`hIΩ1`°"€bHR6Δ Ά[ Δ³ ›gSΐΨ4‘%6£²ši ΈΑ¦ΚΜf™m£b`#ΨΫjB``Δl,±YΪΖ ¦ΔΫ(ΚF0Œ˜1+³H@¦•’23B53Œ€J@!›%£‚Δ6ΠΖ­ΙmŒ±9°m 0J‘ΝΩ`[’`°(a&°cl0‹2AiŒ,‡‘Ν¨‚- €‰†€ €*Α$˜J3ΔVΕά¬‚˜ ž!m$ 3”ΝŠf@l«Β`#Άjcƒ4k˜±‰i$@²±X ͺa†-”ŒΝZS€4ƒm.…!ς0A-ΗF +{³$3‹‚F •ΦfS†€Ζ^>d‰ΐ°ΪΫ(€ ΜΪ”l¦ˆyΨΆ &ΦR02* 6l±C H#‚V€I€CΚΈΜP- SΚ624$€-Υ1F™%`3V³†˜c$1Ά΅΄ A†­4K²1` –4l`K›G›Ψ €/μ-“ ›αΘθϊ§~φŸ›_ωΥ°ο}~ον ²mίωΞwΏρίόυνόΡo|υΟώτ[ŸΐǏόκ?όΕφ~ςGΏχυόŸ~σ_ϋύ――»ύυϊΛψοπύ/ϋί}σOπ[χ³Ώς ςŸόά_σ‡ώ?ώΟσΫίώΘύΒ―ύνυζ/}γwνψς«_ϋόΠWώΖΟ}ε‡ξ»ςΗφ'ϊ܏Ψ/Ϊ/ύλργίόύίϊΪo|σΟΏό|€4Β6Ž‚YΜ\dΐΊ<σΈ#l6 Ž‘ΆK[0/dθΆΧn¨‡!{£"€­­ςV£Ylpg&DΔΕ6“hφμζςl+u€­Ά₯aΊm‚‘r£·2Ά™Ωφζƒ˜œ&Γ‚ljSN F˜ΐUυΆ3, ™•6h4PL@Ω€e€jc›)aΫ¦±1{“΄Ekyνγ@†­¨^KνΝΐΘΊΜ j{2ΐd;Ά1³ζΚafl¬"Ϋ,f»kecΨΐV °È)%­`FΝlLΜ\f–6*‚™ΩΤ'ώzποϊ._―Ούk)PΪ2Rώ Ζ[\4cLLτT<0&‹ΟΝα™1‹;[’Ζύq1 X4l2„A₯όJΫϋϋφΊp *fq0«ζ“ °θΆΧ2jgΣ4“m ΡφŒΚ˜¬Rρ³›¨jK )˜ΑΪφ¬9ΔXE3#cΨ&ΣΩΘVέ”—0΅mFFT˜Ι1 ‹κ6€šD5mZqVΫΖ°hC“–@ecΐ•f₯ΨtΩφhΙ²Ν³ VRΫΊΧ>na«ΔB¦bνΥ±)lC5+SΔ63seHΐ6KΚΆ 5$•΄aΕ2X·-6"^WZ`Γt‘m`.XΑΨ,₯·‡ ŠmΆi„py°πΆ 3Ω tkΡLΆg«Ά’,dXΈγ敁θΜΚ†BΩΖjŒΪφΘ6»Βd„m±)#Mέ”ΗΚΖ\ΆmΓb>j™i+ζΥŸt<₯šΆ1₯•—Ί³Ω d LΕ&ΕbB1`B Ά Ά™²…mνM(ΡR/YQμmWdΐ¨ZΖDΫƒbL Š£‰™Y$‹A±ΝTl1†*°i“1Ά’MlΕTΌ*a0σΦΗ%0ΠtΆIl^υΆ%‘ΨfΓ$±›‘€5%aV΄Mb6ΆL‚n†ΓŒ2ςΓΠ¦ά`Ϋ{_ώΙχ{ίωΑχχq_ύΪWΏρΝ―ύΜίψω_ώυŸύΫΏφ―ώ‡Ώ»Λ?ωσ/?ψΚOώκ/όΔ7ΏυΥ―ώΔΟβΧϊ‡οόθηαΏώ{Ώφ‹?φ'ΰοφτΏ}¦)‚j[ τŒI mjΙ#v™Ό^JΥ61ΝΊ·έZzDT+Ζl΅’‘‚ΝΆξΆŽΝژCΪlŽ ±LhmΫ$WΡσ^3―.'QΫ­"Υ6˜@—mΫ q5Ό†VΒlCΞ–-ΛΆΆA(RΖ„–τ,Κ4LΫΒΆ‹dI)ρΆŠo±ΞΦ¦­πκήlš"š± šmϋ΄ͺlžš2Μ­°­fm`#MΑ$ρ6SΨ%€·lL³eσΦM2ΘTۚΔl»₯Ω,Π•†=(Σ|T` ΩΌήej΅Ζl[†|΄mor­zΫΜΪΥΖΆ©vy˜d+ikρήεP7<Β4ΫVΔcΙφφ±QaΨͺL7ΚljΆ²Ω΄η e$:Ζ6KFμ%3τΆ1©—•˜Wε DζVΔ¦i™ŠmΓΒ†ΩHIfیk²eΩΖ’R6W“ΖΌ2­εfA΅³Ά]€6εΫΆΊφvΌΑωx^46χΆΥinΜ~οŸόζχώΝοẏ―ΜΟύνίύ/ƒoόςΏϋKΙΏύώŸύήwΩ ΐχμσοόΒΟ|όΤμw~σOυ|ρΧΎφΝ―ρ΅€”Ψ{Ψe“±7³C ΫΆU›Τml€jή{Κ]•„“§¦°Ω»·O[ƒ±έm»τφ–Γ›7K=V1KPΫζ\g-fέρή“c3VU›&†”6|n―ͺxv]Ϋς&κƒn^Όξ‹7ΜhΆ±>Ύ¨έkYΠ»ΛΆP₯M>=uy†mžϋ˜8ƐφΪ‘°΅tΨΓ;…GΌήVŽΗν™)CWχΆ₯»Δ63κ>²%3”œεj6T[λΑ،λ.mž5fφΦέ>ΉOΕeΦΩΫ»κΕ­•½œl°mUžΩΊ;½ΡŒN#ZΫ@6‘…ΊKΫljcΉz³Νm[+{―²ΥV±χ°b{Ο³C„mΫͺMͺ!Wiο ]iάcςDgΩ³wwg³™XΆΫвπΌψќyVΒΜ½:΄΅mΥkΝ[Η{d›UΥ ›6Μ>‘«ΝkηΪ–±η:ξf6•bo˜ φΌΟΎψj­΅™ CοΚlSͺΆ2Sgςtλъ8m-m&Άkh­γ=²/ςΙmΆ 
5]χΆYW]Ό½yTΗΆI%lln=l΅wωΨŒmΝμ­»=νs»šuή'—–έKΩKŠm#ΆW1οmW6Ν>4’5Ά%›Π ΝG„Ί΄aΡ5yΉlΖ6ZΫΦΚ6[d«Ψ{€Λ¬φžg‘ ρή«FKؐ*νm{ξΤυ‘ǍΥΤ\{¬»“½g Ω;­.}ϊΌκsšΆ©b¦IΩXΪΆ₯+Y―ΧφΦ₯Ζ&ٞ©Κ΄!pWcΫ<¦»ΪΜR±-{»β ΊγYl-¬©Ωϋμ#u›gm‡‡Φρ°ΑuΫX“„gςˆ½Τ§m kΗ[£³™ΡZΗf£Ά>ωJΜMυΙGΫu•fΟγκ²ΠbO­51oNKΆ‰±gw_πa"cΦΓή*Ϊ›>υaW³²<:ηέ;e ΆGl+ΔfΔΌ·]꬙f'¬mΙ&„eς1"E±±υq“E±m3k›5dΫͺΆUlvמΌΝ†…xοU0ΤνTΙή{OΉ»>˜Π˜|ΊζΪμ₯ξz{ήΓ²E΅’KΟ+=6χΆΥΝΊUn¦ΰ‹₯±±UٚΧU™€½ΟούُύόOϊψwΏώ ?χSΏψ7ΏφΟώθKώκwώαoώλ|ν}Λώ ύμ7~μΗμ‹Ύ`ζ-|Δ³!J<Φμ${΄β2{sM¬b†6l‹ ν½Vw΄MNσ™άޘkΩB†Υ{ΌUٚ•ŠγΝ,T³·ράΝRQ±ξ6°½W.ƒ˜MΜ¨Z cmκ£ΌIš΅ΐnΫ[w‘8υφ.Λ°J9Ζ¦ΨήfΛνΩΗӊ΅χΦ³D,ͺE³Ϊ0pz۝ΉMΰlΛ.ηναΝl6½>΄5 †-­XZcΪ|ήΛK\υΖ« €ΔŠ™Ν–ΦήΆG ΄Νφiw!’2{`bw»υ>ίHcTή§»'†Άg¦ΧΞξšκžmkŠ€X€΅6΅YΣXzxΪ•Mš1[kφΆ=ξΜΫ³ Q¨αΨΫ@+owkΩΆMζΡGήSΓbgoos Λ0wMΥτ6λZνΉm—ΨZko΅τ¨ρτΡήΨ$¦§ζΨ3Ίœ―mUŒMν>šΌ1’¬λ½Η™)²­ω¨·­IjcŸηxzΥ>ή{χAMV0zεsJΛf^εx²1ΪΥηητ”ΪVΩ]Y& ΆE(7†JsΫl› D5ΧΩ4•ρz½[›7”ΠέYΪΖ.Λ[M)Γ lo›ei »z―Y#d»‚c³z“mΙH™Ϋ ˜Εœm6Κ­νٛ֍lFSV œ[kkΆ{,UΓ‹UΈ‘Š™²™­ΧΞk¬^šΩή#«‹b³=jε­–lΫFλM³MΣΆ³ν+d˜»¦jl6§nΣ¦λq{«&b/t±ΩX±žΜa6Κ‡‹Όmβε=ΧξZφ LY53fRg[S±·Iw·7{M%Ζgχ¦ š"ΓΨΓͺlΝλͺšΩΖΛν _lP6¦Β{’ΰ}~~σόΏ?ψ;Ώv?ω΅―όΤ7ΎϊαΛOϋΑ—υƒ/τΧΎρΥ―~ν£/Θ”ΒΔ²a†tN’Mž•±ΫΚ”h&Hk†PW0fζεnτŠiM5of‡Šdc’l6ΉΨž†-Ί>Œy}Ϊy$²© (SΝ`46ΆmΐΘΖ‰f2‚ Ζ@¦TZ@ 6ž³­΅Ν:“fK‘ΝφxλSb&©^F"lΆιμΥNdΫΆΦΒ†ͺl–sre£Œ•*l‡„-ZΩXκs>0’a:PΤ’;|πšΑFι£@Œ=Š΅‘&“Ε­άŒ(Φ£ h¨Σ3£IηΙGυμ€mͺq2 ’y[j،½ΧΈ>Βl{f™» QΫR£€™A½A”Νj―…1ƒάΆΚ–FΦΪ&& œΝζJŒ΅ΚΆ΄Η¦ΥGz›*CšΉ₯ΆΧΆΩά½v$ΐή>χ>xE«lΖ&Q³U΅ %¬FΦ†Ρ±mK{srΆ4§Q£lo›N¦”mG‘+4LυΦz‰°Ω¦L-©lΫZΒfSWτ*pli 6O« Ϋ@H²VΫ²λήFςPEΆ`XQJ,–Νl£Σ d΄G™6ΆΔŠ6B,k,ιm*0Γkw¦­!5oCQlL…χ€Δfl«ξ οiJaΫ*bΆ€}ώπG{oŸo?ϊΡκ‹Ώω+ο?ϊζΧΏψ‹ϊόρoσοώ%ΎφKίώΟ½Ÿω•Ώώω/Υ―πOϊ₯WγΏσ­―θίώΦoαώΫ_>ΈŸϊωoύΖίϊι_ωşψζΧΏθ½ο}ο{Ώ/ώΝηO|>ΎυsίόΥ_ϋΦίψ₯ŸόΦ7Ύψκ‡|―Ύσ'ίύ—ηŸύίπ—ίύr@ϊΪΟ~λo­oύ_ψΙoώΤWΎς…Οόπ»ώ—π»ϊ/~χ/ώό{ŸŸ€―υ§³ζΏ}ίύGθ;ίύ«―όΒoότ/ϋΗβΗόπ{_ώώοόΡ?ύΏώβ»?ψΚ·ΏύΝ_ŸώφΏσ΅ŸψZΎƒ?ώΓ?ϋ­ίϊΞόΡχΏ ϊκ·ιΏϊOϊ'πω'ޟG?:??―Οχ䜜άο! δPA@ρ‚nul»u;;ύ‘3νLl‚ώYύ©+΅]W]ͺ2ΠE!@€`δδBNN8ίΟ»ΟσνoΌ}Η3Ÿxψ™'―ίχέώōwςƒΧΏχΓ·^}σΐqΧέO?ύΐ³ΟήχψΓΧξΌΦ./ίyύΰυ~αo]Nw]ΏφΜη?ϊΉ_ύΰ§|聋γΞΗΎτŸίρΜgnΫ~ωΛ7Ώς§/ώψ΅[·ΖΕ•ϋξΏη#θ#ΊηΎ»/{½[―½όζžγ…έΌ \\»φθγ~όΉŸzμΪέΧΊύήϋ―Ώςζ~qνϊE0i f6 Β6Pvβ¨ιhf0ΐΉe,Ζ,§Ua”Dlΰ3¨8g;C•6‚`ŠΕ@'±σΜ‘4Ψ-ΩΜ46% 1 kHdf#'JΆ–€,mΑ@Jkj['Gъ‘' J±#Ζi ™’m Λ8ΆΑl S1l€΄dΆ Π¨ΨT‚XΩF6Ζ±hHΫ*†Κ M₯ΫΡ6ZθΠ2Λ`h6ΠT+ 9§@#XΑ6R%ΤΆ°!”‚ΦΰΔ‚pZflƒ6αT#b£M΄3'h«ΕlΣ9ˆ…%# Άf©ͺ9Ν˜΅ιHpLL3§Š …l+Δ` iœ4‘Œ:Ψ ”0ν¬‹EΖΆ–62’JΘ”΄†P6'Rˆλ`ΠΓ‚TΤ° Ζ戜[ΕΒ²9la6K;Uΐ `F΅­*ΆBm3²lΫreE Η cJ!˜FΘΉEEΆ΄ ­1„„)(§p™m…4"ƒΜH(”- ƒͺΐLΜ,'¨ŒFf¦2lΠ™Φ1›h¦˜sΛΚ–*0«Μ™’ -Ρ@ηΉ¨2Άβ4[ES‡!Ϋ²q*lF‡jΔ°ΞVœΪ45ΛTlΫ*3‘„Ω”KŽjΆΛ9:€«(KLKΚζu΄- ˜ƒ€L3ƒ2GbΣ0VmŽ`ΠF64;ν(§$Ψ°qΤ@Ν’™Q,…Ω8Ά₯ΪV¨9[EȐi$mΓ*²%κ` am 1(ΜBF(±… % `ƒJηbŽΚNaΛ23DvVΘ0΄FΞsΝYŒ™Qd‘[pžšRΨ¦§Ω**œ#”Υ€$cS‘1Žο}τ;έqϋ7_ι½W>πΨοޏ\}ν­7ήωΑ?Όύ.ξxψΑΟ~ξι/<{λ«_}χ›ύϊ;p<τΜγΏύ»O>ςΛ—ή{ϋηίόΗ›‡‹ŸύΐλΙίψτƒO>zυŠΛ[ο]ήΊ}ω‰gξωwί{Wήΐqυ©=ώωίψΐ―>χΐγ^½γ8/oŸ—ηαύχ^όθ+_ύκ‹ιΫ7ήxΟΕq呏<ώ›ΏωΔ―?χΐΌzεΨ9‡ΈύϊOzϊ[?ϋΪ·^ιK·ήw^άϋΏχΜ§ο}ϋϊ=άΊyρΐ‡ξΊηϊ•»ξΊzΟ}½ω‘λύϋ—ήά]τcΏςα;―]ΉΈzη•ϋο»γ֍‡?ψȏκo_ώώnΎΊςΠΓΏσ»~¬{zθώwn^ψ§xτž‹λΧ―\½λΚnΎχ//άχθΧ~ϊυΏνΕ'=ςΠg?σΔg?σΘGžΊσϊοr;ΊΆσυO<πδ7^όϊ·~ώΣWoχ>ςΐG?rη#^»v5»φΘcχ_½gvήΊuϋ›WpνΞ~ψαΟ|ξ‰_{ξΑ'ΌΈ}ϋ<ηβ’_ήΌω±ΌςΏϋΩίϋ­wOΐχάσμΗΝίzςSΏΡϋόςΦν[ο]ήzχ‘Ό³{^;@Š!-—sδ€Γ™B1«5S˜U2œ§"δd:ΐ¨‚ΙΖIi°Ψ΅`SGN˜Q3Ϋ& mš(fΔ†5 €Kjb-a4Z› 01€$ ηj rΪȁΩΠ¦Θ@’˜™ΕXΘ@tž+G";WFG-œ;OcΫT€Ψ&46ŽŒ@Α‚‰šΩ’Μ™ΡPZΒvžƒ$Ψ%Tm›c0vZ:@H³UΪfFl"Γ, C(#֐*†M8"'ν4eΦr γ„.`Δ0LŽV₯™l mNθ@ΰ䘀™-SΛd³5 [P¦sŽB³bΆΕD‘‘Κ°„@Ω¬Β2ΠN †Φ€…U`c²sΡ‘!#³³%5˜Ά0šJ,ΆΕiΪ9)lG°­0G©98wΰ €™E›Ϊ ’–`@ΖH˜-ɜ5ΒΔvnFK“$؎Α΄“„ΐ€26Κ–l EΜ f ‰LX€BΨ&„v*Λ0„΅±Œ­‹0+fΨD1Λe‚ `˜T ν\ Še²Ω!jF’lΰ8L›*(Ϋb5‘ΞP°† )@l΄΄šΩΪQΐ,ΨΓ –q˜B‚  MTbΫ*³™p Θ&ΐΜ€Xlb°₯˜s6Η![ΫΤ€‹LΨf³’qE4Ϊ…U€ΑTΦ¦6b„Ά1JΓ0k¦bsI8ΗhiΨIQΞ5166Η*±44•ΆM‘΄M‡m‰  0Λ ΕPΒΜ 
¨ΐ#HXBΖ₯.`†˜μ(Xk3sT›MI‹ašŽ,l£Ϊ0$06F”Υ€€mΰ8L9d"\QbΫVΜfth:Ž«ΧξΈχώλO|ζɏ}πκή~γ»ί{ύG?~€:ξΌο‘ίϊΟ>φ―Ώx]»ωΣ^yα§ΏxλζyυϊυǞ~π£O^ΏηžΓ»€tο}ι~ε>sο•Λwς£—ςβ»οάψαO}𞻺|χζΟζν›@W―βσΟό›?~βρ{oΏόΒίωα[―έΈtυκ“O?ϊιO>ω‡χίΑωξW^{λύχόεkw>χGΏςΉήΣΟo|η[?ϋφn³ΛΛ›/ΎωΛΫ»xπ‰GΎψϋωƒί~θΪΝ·Ύχ—~τ{ΏΈ}άσΐ½Ο>ϋΠΗ~㙇:~ρϊ?γ₯Ϋ.ΉΈϊΔ³ψ=ϋ۟Ήλςηo~χ›oόδΥ[·Ο+χ=|Ο“ΊοƒΈσ0V@£c0t” *2£‰Σ2`jsΤΆ Ά1ΚЌ8–’`pš@)lˎŽ9m¦:4m ah0η¨QUbԘˆγhmΪΖ˜mlŽb̚3‚ΑŽ£ΝQ †ν€†bΆ™ͺ†‚jΗFk– k²aFΠT"‰aSې)L΅)[²4c–F0 BB­m@QsbLSmv.i‰©c;7KDνœ‹‚iΡΦšΡ$ΫjbΔ 1l– Y§e0ΚΞS ΆΩΒZΤ95”Ε¨a†Ž²aΠa6rΜl˜ΪVΩ‚`:Άm;+D6+Ι¦Q™’vž8f• f€°NXΫR›3ƒε8Žf#P³ƒΙfJΩfΫ•1k2Μ@&ΫqqnbΣ&­‰ΔΞν$E33$ŠcƒTΫV6 Μ2 Λb(€™bTcSΆ’ΐŽΪZ3±‰˜YP…:[Ϋ`€:d˜Ά›mV G1iζά‚¨SmHƒ,€YA3•f6+Ν6aͺΘΜĘQ8쉙 ­‘ΪΨ†œΤ •“# ΙfFΦQ6 :l[rΜ'œ–f•-(™²™ ™ΥΗΜ¦ ΩfCͺ΄£]žΪ!ΐVb΅΅M(ζΪ6ˆ ˜mΜ8*Ρ6%#ƒe’llΓ¨`f2,ŒL;wkF` K[θΘyΞFR3ΫZΫΗΡΆΙ0ˆ‘kΫh+š2ΥΡhΖ`FM!SΖˆ˜MF²Ψ‚’ΤlΓ4(›aΐ؎šœ³)[ŽbˆmΫΆ£ ΚΆqΘ΄±΄5p@†at€!€1iΆ’-Μ™)”a$f†Ε ™MΆZCG†4D§™‘uΔbcΊ³4'β4Κ ŽJˆaΣ²aXl« ™ΩLTZlΫͺƒ±M;΄ŽΝ,FΰŠMΜv˜5Έvί=O>υΛ‹«£+Wάχπέωψ~닏<ΠΝϊꏿϊw/Ώπ6\Ήγρ_{φ_၇ξzο?ύυΟλΗίώΡ­“Έσιχέ'ηΣwάθŽγΪs_ϊθΏϊ΅ξuγoώφω?ς‹ί{εtχγ|κ½z㝗ίιΪ}ןωΒΗώδKήsλΖ_ύυ?ω―_ϊρKΏ<ιΈΈχ‘ό7Γ―ώΑ―?φ{Ώσή+oή|υ―ήΈ9WΊρΪWώγσσwoΎuιΈrεΑυδτ‘g?σδqγΥϋ/Ύ§λΟ^Ί%.Ύωήέγ'?ι?ρΡŸβεŸ= ΐυkwό­oύύŸπ//Όq ΧξύβράΏύΓ'ž}ϊ‘_ύΒ[τ·Ώ ΗέO}θOώΥ“O?Άον…?ύ³}σϋο]χ?ϊoΫOρύόηή}υ‡7Ύςό/εŸςς?ίuεϊΤ‡ξφΦΫό?ύΛΏ{@ΧξΜgžόΒΉλύ_ϋπΏόο―άΈΝ8ξ|φןώ/λ~ώγϊ£ί{ιώ·οί<―άΠηγΙΟώϊ=獟ύίΓΏϋςkoήΪΖ]χ|κσΟόWζΩ‡>|°A™±9ΪΆ8aU#¦(›m9€ŒcΫ9ͺmΘ@֚£ν¨μ€Υhͺαœ€(l„m΄ :̐fm›ΠvβPbΫ™`ΫDΒ2R˜mΫ¨’mlsH 3$3K€ΆΩ₯­šfν`› [lp08Ά‡8 ŒHŒY™€ΖΆΉΈˆM6ΩΆ%Υ‘KL±MXŠ΅aνδXΓhbŽ0e†-l’f!ΪΨYΞΙg‡‹γ87ΐ¬ K†-*lΥqTΞΣ43iΩ†Y2[Β‹-Ϋ€©m(lΨ ΆΩPe 2‘ h[eΙN%saΩV΅4S8Œ«΄mΗVΩŒh›#6­‹Ά ;Ηε΄ΐT²ΐ”΅‘Φ° 6³:ΰa›9„e΄Ϊb33UΩ6Ϋ€‚±°vμBlCeιsοέœ?όΚ‹ϋφ½W.φ‹—_ύΪΛΐ•Gο½ο³ΏΨύWzνοψη_εΗ/ύς;/ί~εΕχη}τ±§噇?ρ±Ÿΰοίψξ;nύΰ;―ύπ{oΏu ηνσί}υωΧ?ψΔcWίύιηΏσ—nΑΈύςKτΣg>ρΜ½ίwύ‘Ηzώ&η;ίψϊ ω埾π.ΐ­·Ώϊ•Ÿ}μ©»ύΓϋ}ςΑ=sρύο^θΪ•;žύ‡>ωθΥ+/ύψoΎρς?<ή™€·^ύ‹oΌϊάSwςχ?ύΙϋώφωΧo€+O?φάΗόΰ½ο}χ[ςW_~εΝΫBμ½~΅oνηž{ό™/<υτ—ίώΑΝ=φ‰G>ρΡϋ»όΕ7ΏχΣ?ϋ?^½qΰέwΎϋύΧ>τ=ϊωί`ΞmΛd ‰£bΆΨDJ“ΓΖΕ€³”c›Γ9Q™ΜtΫ:§ChΫycΗΖ9«ŽγβςΌmΣh΄:-!ΐΤ‘K*ΆuŽvvX§ŠiŒDΆsG+Φ–Y—c]asIT`vΜFΥ΍μ¨P8ΪζδX„Π1 Ϋ9‘J,3 °²uΑf¦eηe© ΕΖ‰³£jP¨auN™ Ϋl΅„F*Φ.ηΠyΚjΫ²8©UtHNη):"ΨigΗΕΉΚ°΅΄2mXΖ2«#¬u lν\¦9ͺ —t*ZΎq±΅9GΛ9‡.δ€Y‡σT[…Β*M6;Υ–&[ΝXΒ•mηfŽγ8XGΪ›8Z‡ΞΞmΘl§Ζ1j’£bΆΨ””crΨ8JΗΞ9l€f:ŽmmΖ‘2ηεΩ‘œj]8ΧΡqηΉm£ŒͺYfκΠ‰ηi:ΪΕaK rξ(¦1‘ŽγάεŽΪZpfΞ.G] ΞR̚b6T;7jq₯;Ž6Ξ-3BΗ€αΛΡ‘,§l4Ά[UcλΨyY«CΐNsVM¨ͺa΅²ζάl%QΓΤaνX›£mKΝ dΗ’ΣN:h;ν²γΨ@Ξν8ΣΚ΄AΖ2«$cλ<ε,Ž₯vžœΣT­i9Σ±›σ΄Cs訐\ž‡Γ&ƒͺ̎ͺ³μRmij›g…©2»<·9:ެp2δΒ:4;³uΞαd:ΐJͺƒΩmŽΚ1ΙLi8::.ΆaQ΅™jeΪ”#³sΠΪ%Φη:ͺΖvnjs€FF8˜©˜F5ۜ:8‚9—²‘±ε8Žν\ΒVœ‡΅.g:8η’Ÿ Έιω=ΜΌz~χ©ξͺξΆέ;'Ζ /‰`KΔnFbΕΰ[²EβmΕ $vˆI41™ ‰νΨέν~«ͺͺηΎ8η!0Ψal(6΅G-”1·j‚±;–SbZ5,¦ ξΖ΄-·aΈΆ©¦ͺ†άΥ±ΫάΝ&”s°!ΪX{vΞΆE5FέuΞΒμ₯s¬νn·Zmjwχά4 W›c,6U#W›ν4ρ(΅;š3₯5iΣh;ΈΧŽάE§ŠΩ"ΩU[2;ΨΆμͺ­%ΓVs°U”»έ»Σ %iΛ½δp7ΫΪw2†M#e4[lNΙqD†Η€[`Η&ͺfSuΪ֝ŽΆmΩΩε'“œέσv;›“Π,α0SH—Šmτd“΅TνξωΩy¬F° ΐψξΎό»/ί½}8ηριO?ϋΣ?ύΙύίύ—ωΟϋΏύ―ϋΏψΓΐyzυιOΣπΕωθύΏΫ?~ω―ίΠΣωψ/φΟ~όψθω»ϋοΏέίθGŸΕ/Ο=oΝ_σέί_Œ7φ‹χ‹_όιŸφ‹Ÿ}ϊ‹?{όΥ_Ψ‡woήήϋao~xξν{<χϊωύ»ύθ£§?ωθαΝπαΓΫξο€?~σΏ{σΥ·?ϋό嫟β#υΐyy>ύηώΙγc›oΏώζ‡Η«―pΏzϋΝwοφ«>ω'?φΕ—p~ω|φ'?Ω·ί|ρ›o~Χ‹W―Π‡·ίΏωΓΫǟύμGΏό€χνΗΏϊΕg?ώΩGίύυίϊ‹ί|œΚΦ¬`a[Ϋͺ˜ —Gv·”# l­fΆΙ}Nw:ͺ‘“Η0μ>?[–:•ν>«u šΫ)Χdv,ZΫhξάλA@•l‡]Ϋ™ Xz8›u[Dšvι:1ΨVɝ,Ζ!Γκη4ΊΫΘΚ₯1Ν΅ χn ¦šYf–β΄²ηvΆi;χξ€³mI–Δ7³)ρX΅@mŽΞαΖfΚ½Ξ‘KCεŒsο©›¨UZf •š¦ΪDγΡ‹XXΞ陀h;©lwL‹eΧ¦clΆ{Ϊ<6RˆΐHΥ²;ΥΙnœξσUubLšέuNkŒ,ηZ«F­­mm«‚mkάgž²$ e3&vv[;ξσPΨWuTG‡³–έηηM%t2«c»j…Ϊ½N5Χ&X;6³ΐ΄yΎ€Κvmg zΘZνΥΩΕ€S‹ΩV©mΠΆ©‡±š©KTš[wŽ•KΣl ηlΫ¬‘©Ά쐙ieΧF$μά»“ΪΠθδŒΡΨUΚ4%Φ¦:1ΆΉΟN+w’pιΦq¦vgœSŽ]Q«εΈVb€y΄Σ’ˆιτΐΘΪ]œΊ9GΩN*³mΦM‡T6f‘]Ϋ=™ζh)χ^‚8%άη«hΒΠlS3.YΞΜbΚ΄³­m ΰξvŽ{u\S©A ]±φ|vvll’ηg%”ΦQ9·ew3Tμœc¦ŒΉWg5ŽΤμΊIF–`mkΆ:s€¨c»†™ΓΩΦΪ1ͺ¬mZ§³fΆd£mS#«™(Υ¦σ°r™±ΝΆ)SM!r·δ”š›­ΩΩ†¨mNF₯5ΈiΩUβΘΤέ㈡ι‹ϋ쀌\ZΡβή³νtΚaŒSSŽKμΘi€›υΫΊ‹£η νej¨ΆΝνF 
ŠΝpμΪΤΜ<#!<Ίχ’sΤ‰έ-UŒ9˜4Γκρh.Y0Κ΄c¦bξžλΨ%Š»‡¬ΝκΪΩmκΜv‡Σ.»‚ͺLzΚkm«ΓΞΙZͺvχ|³λ”η=§x’Y2ΫβœPΜ~o~ύ?όλ/χϋΥγιρκ—ΏψoΥ_ό‹_ύΩΏό—ΗσψΏ|υηιG―~ς²Ξ»ίωώΝ·ΐ9}ςω«ΟžΞγΝΫ―Ώχϊ=ΰ£Η‹Ο_ώθ#··_|υώύΫ Ψ―ΏόξΓοŸΌ|ρΙgO\ΐ`°mv7£ `Ϋάwίπώ‡Όxq^ύθ ΗΣ˟|~ΞΣωΕώŸόχΏϊΥ›ž>ϊ“Ÿ½|Υ·O/O€~ϊΩGΏ|zρ“ŸόWΝρηΩ‡€σκӏϊ‰wž^Ύβ£?τΕΛ—½ύφύχ_} 3pL6–`Ċݻ)§Τv«Σ1ΨfχhΛ %Ζ@šj`°Ά»s’»fΗ¦j»C±-ν•FRs‹έ›‘΅{JgΖ9Λu[uekΧɊ܌#m‰ΖšPξmvN93»8™dΓ*κŒd›`έ-…lΫa£ͺbΩ¦b ¬\§,cP°{'jVΝέ6@›{=Κ.k‰ˆ»έu(lά-K3JΜvηŒm°ΑΡ„Ά {ΞΤjbulC•4μr°)ά{Ο9΅±±Θ$ξΖZ‰ΦlD–©ηλΈUژU§€’tο=+1@gΫΉΓlF‰–»…ΊcΪΖ*Ϋ9ΜVΩlSΒξf6)œ³{«c°ϋ|4¨ ¬4IΝm»NΤro™ΥΙ.V̎v/š°N1Ν6[evoTΣ§‡kV›ΆΓu²’ΛΝ)Άd ΗΪv»gΝ‘μ^S3Τ¦ͺm]kQ±{‹!§™9Ε `Φ±₯02gwΨΑΒΡlΓv—†;ds[5%mθPΖ΅,Ρ¨˜9me£Ήw±Th­©ξ2μζ ηžl ³₯ Υ€A§ν’ΣvlΉŽλ`›E75c[£sά[[Ɂ»© :BgΟΟtΔ0œΗξ"flͺ\+„ΪlΪ†T0ΆXFΆ)aΧΜSΫͺ Ζμ>'€Ξ6XΔ*ƒfm[tZžο9fu`ιξΉNŒ%¦1φ\˜a§VeΤ£kM¦Ι½]‘ζ mΉ©2ΘΩ8fΚ¬mkΗQΫuW±i°Ϊtΐ§sw—ΐάΙ£Ψ–r:(3(²­ –ΩdΞξŒc6j­mΫd»wGΨ&„λ:'U6ξέ'—;Ϊmλdl5΅Ϊ&vΓΡ΄fbΫr5ΞN›ΫRQΩ#lTm—TΪΞΩ]nΗ4Ψξ–΅°p§­:&«VYξ* μδ8¨έ™ ΆMg›93ζŽ:fΤΨΥ6„bƒΡVΔX²]ΰήmΜ9’ε8ΖΜ r”4Τ™ dλy^HμΞΡ,™mqŠqΙ¬3Ϋ³υTΖ6€4’ ή½~ύϋόξ~sΰόώŸΌϊ'κ—ωδŸύ‹Ÿίϊ«Ώy €τtžΒσϋξύ0Δωψ<βω~Έ»œOη£pίΨ`χΓσvΣyvηNιι€NOzώγ?~ύλχέ_πpίΌώ›Ώύκ€ˆw―ίώφοώπΧχ»―ίχW_>ΈηιΡ9žŸο‡wΟf3ΰŽ BZK¬ˆiΧ‰ΆE,wm4Y@Ί#YKSJš6H# °Ι˜θT[c[ •Ζ́˜j Ζv+FZ% Έc€Q6΅­a3‚-Μ„Σ½cR6’4SvΜfŒtlŒ¨5’6F’-vššΩ‚!a»γԌ Φ\r`Ϋv-Μ`£ŽAl6A°l‘­&1etbΨ`XΣ1ΆAw;Q# ΆΥ£b6V$› ‰ΒUfZE1’m#fs†! -ŠŒv‰fΉΈ4Ε6Y@‰jΫ¨΅šΞbβ°Δf6ε¦ TuΪŒ!±ΙΩȚ$m«©ΩV3“0άMEs;ΗͺY"\+‰)lΫt°QLˆ˜ξfC3$›T&]‹Τ°e$°ΪhemH΅AΑbk,©%Ϋ›€Ψ&"pΫ́ΕζŒShΆI™μ€2±ˆ’mm ΕΜΤ΅¨0Ϊ.—Φ 6›”l›FJbš5M₯˜„„Γl `¨Φ2Ίc ΩΙ2 Sl[ƒ°±U¨ΆAkΞͺΙE…aƒΐˆ2€1¨ΞΩΒΨV(›v΄…²UCΨΦ*™…††E¬YŒ5Οͺj €bl2σL Μΐiw5pΐ#Hf&1H’.šΕ™4΄rΦ ’َΝ"Ψ”Xm7@0%Ψ5"Σr"6hHΫ"“™±€mΩ…Β»«IZΫJ!nLJ‚΄Y1L˜©kM%£!›]΅ ³FͺΑΨ$c΄‘Υ‰5ΦF X`¨€$bλ^'šΆX€60Œ0Υβ¬…₯)§blH#Κ` ΙSΖ›»ΙΑΖ`Έ―_ϊΧίόώ‹ŸΗΡǟμ“?ωΜίΌ…ΛpννσϋηqžžNΐχo>ΌΫ^|ττςΕγE >άη7~Έ:OŸΌ:G œ―>:§½ϋpίΏ{€στρ‹Η‹'ήέw―ί`χ>Ώ{ύz{ξΫίόαϊ?ϋ7π ΐžŸ_Ώyχ°οί<ΏwοyϋχϋΫιώϊ`ΟΟώψΝ½ž?|ΈχΩΣιΌxπ 1ƒ™δh„Q36&d6ΤfwΑ,tη΄Šcb FFb±»[†±œ]ΧΥ*‘€έ+… `Ϋ‘² KΜ†λPθ°¨cdccΫΙf¨”Ν=ˆΡΒe†1ΆMΑ Ζ*2U`a˜E€v/2ۊÈ"[ 0£«h  ΨFΝζ)2±ΣHLpGL ‚j@`˜‘c΄b@rbΖf‰B33XX€Νr° whΑ”Ω ] VΆŠ f+@‚`˜­(³![Ά `Σ&j Μ Ή†) °»Q6–Μέt g2»Σ­06©!ȘU³iΊθHŒ!)ΖFRξΨlδ(c‡M\Φ€M’ΩΜ †i‰ŒͺF΄Αf‘*ΜΨΆ-Ν±9%dceˆΔΖ΄9Φ3af#CΓH‡L¨Ε@pGL`Ϊ¨ؚeΧ™š"©ΨfkΖΖΔ`††€Ω6f³΅‘ΒV™aPh›ƒ…4X‘` [¦hJf4Ω2Γ„ξ0°lEa `†ΒE‘-ΐΆM9ΨΈikΫZ-g²™ΉfZUQ#h††£‘­f¦AR“V‘Ωek3—Zά'ΑeΔŒΩΐΰ'Χ †±Š LΜfΆ»›Τ@1[ &6ΖΦ±¦™Y1ΚΜ KƒEΓHιl‚ZlF$]l΅QYΆf132YGAŽpgS `lΠΜ`hH˜Œ΅YNc’ƒjw’f&(c°6βh΄F›&dΆ¬Œ»`–­¦ dΧL`„"c‚;Ϋδ`c°Ά­±:τ΄Y³bF0π8UΒ><ΨvΞyτˆ ΰ>ψξΝ_Ϋ«>Ι‹?9Ύy€S•ΐξήόφϋ/ίέ—?ϊδŸώτγΈ?|=ΌπζλοΝ~ω“—ΏόΕˏ?ύΆοŸ€§Ÿ~φOςβεΣϋ/Ύϋα›―žyτι§?ιΗ?zυόϊλΎϊέ;Ψ»―ΏίΌ»φκǟ>žίΏϋβ·oήφεoΎύφέωόρβΥγ‡/Ύϋβ=ή}ύέϋ·oφγ—/>ύ髇wΟ(=΄΄6@mvƒ‘ΐVhΝDl–hŒΫ,’@m€¦Y# Θ0,baΈ+L­ U3S¬£ΪξH9(w‹mΕ’XΧ †Mh&ƒ6 :h»€Ω¦H` [1ˆ%iV†U†``afP&0£„UΖ6$ l¬š`ΦR³sI›θΨΪ„ Laf#1»s¦Γ… `R³ Γ„Ρ Σ‘΄ω ‚“ηqπΔΛΟλσύuχtχtΟ={O&ΙL $!!,Š,βRjY^¬ςβΙ*Oώ1–ά=Y`yΠ…byPˆ&$2 YfΛτμ=½MoίΟΫηY˜UpW-L™MA΄ΉΫCνNf‘.  
Άm•ΐL)Ψ Κ†‚ θΪ b›ΘΖF„Π#bΕČΜh…`ΑΖΝΔΒ †-c²ŽΕ%™™΄qTΆYΚ™iΫb†…f`ƒΈ(4mΒ¦ΐf:ΤΆ•ά{λhm0…‘,blΔλn₯ ”eC`idfA£lŒͺ³ lM»λΨΙd kξ&Θav―‡cΨ–š•ΐ,ΫRΙb²3˜Κl€ΆZΫΰ³Η»'§Ρ΄΄₯ !e :Μ!Έλ!ΪFΫHe4@Ε6Y°»Κ’lŒHΛ0ˆ @#f€‘`‰ f€aΦ1lP§͐‘ΐ,8 Ψd7šΙ&¨MΔLΘΨ œΨ\ŽŠΉ$ΙέBL1Ϊvr YŒ A‘‘u7³©fi Mš23€QšMZK‹‰•­qΘ(„Yͺm”)fΐ ΣΨγΔΐΔ͚Ae”ΐΘΆT X "ΐ,›ŒQ¬²­’m㐢έ9‰1’₯-&£$ »Ζ9Μάu"Ν”Ά14±9₯™Y“έUfU٘%0+f֊a p,k6+η(6₯–σ‘?3_ϊΘ§^~ζΙ{ο½ρΪΫί}ΨοΎυώ}|ζι^xϊωηp>τΜKϊCΟχρw^£?~ρ½'Ÿ{εΓ/ς™§ι™ŸδǞzϊ™€=ήwΏω½ίωήϋομΩ/~ρ£?ωΕηž}*€ž|θ™—_~ξc}ϊiο½ρƏώπίΌ»Η§κOΎόΉΟ<χμΣΤSO?σω_όΔOΎόΜΣοΎωΝWίxυ;pž{ξιηž8=yκ“?ύO½ςό‹ηέο}ου?ϊϊ#άwίω½ύΓ½y?φ/}ι‹Τ‹OΠΓ3OτγΟ}όcΟ<ϋΐξέζœsNͺστSi?ψ揾ω­·ίzϊΩΟ~αε_ψΒ‡ž}B€ΞωΠszιΟμΓGxο›ίyϋ΅ΎΜ ϊΤη?φ‰ηΰ<υΤΗ?φμΗ_~:4AcΪμnWSUλL»6ƒ f«ΥΦ3€€‡ιάu‡Ι²ښƢΛ”‡ŒΝ΅Α¬φpœQ«­Ξρ 0mw.ηtrθ8§ͺΣ)άΩ2·έΩκrηΆΫFζθ€8–;6c[Σl3YΝξέvY§:LιXΫέ5 ˜ΝΝlurmcΓvΦ™(j‘9Υ!@<μΊ·ΛRŠ#Klh¨*₯C`6›m išŠVλ!‡T”κ€0­$‡ŽsΘΖR{°hΡΖ–™m.˜-+s7†±ml&QQͺ©R4Ζwοξ6«–kZ[)ƒΚ΄ΐfCΙ,Ξtk…n-K9ΣΐΊLd6«Rj5‰0έΩέ&ΥYιlν2š¨m«ΥΦn[CΚ9ͺi3H²C²ΆΖΆ1°NΑ˜1Ά΅γT˜•ZvV:ΧΆΛ•œN;§ͺκ„Ν]Ϋ½³εκΞνήΆ@s8ι‘wl6Ϋܝ΅Ωil»»3§ J‡ζnΆΐf™­J«Λ`ΨZg΄Lœt*„8»ν³ZqdM°%ͺN:λΣlmξfœ­­ͺ”GGE)‡’i%9œ££»©ξ!™ΙfΓΆΑ–6ΛfΫ`ΆD•*jB‰Ζwοξ6+Υ–‹Ζš$T0fΜ†“ΣΞt΅’&Y“3M˜±°‚ζ¬&bLwξΆ»MΤΓJMΫΆ1Ά²­V›­=FRλ¨8wmˆ*Ω±dmΝμJu8—±Ζ6[Η9ΫJgY«Ž£‡e\Ϋ.WtN[νδTΥQΆmmάνήmΛκΞm·-’yPνOώWΔΏϋKyιcωΨγ{o~ϋώg―ύάΗ>ωΉ?ρΩ_yσύΧ_κοΌϊώ½Ξyςς—^ω«ζΉΌϊέ/£?ώΦo|ωΎϊWΎτωOό‰Ÿψλώχ|ΰί{œσ䩏~ϊ3Ωίψτ'_zxνwΏχ―Ώςƒ―˜ΐSώ™Ÿ}ωkζ|ωυwW=ύς'Φ_ύμΟ~ρΉΎϋν?ψοόήήΌίόυ―}ωΟΎψ«κεΏψ«οΏχΞϋ蟾φ£χ6σπ‘ŸψδŸϋŏπψoώΟίύ/?ωΛΏτ«β§~ξ•WΏωΖήπάΗ?ςS―<ϋό“wώΕ―½υΥΞ7^}η›_ώΚψΚsΥίόψ/ώυ?ύΕ_ϊ©oΏϊΦkoωΠ‡ŸμOΏπρgΟΫίόΪώ}γ·σ­Gϊψ—>χŸ~ϊΞπΖ«ί~αΩg_ωΒ‹{ρ©‡·ψk_ώΖσm€έχίόΦϊχ>όόS?ύK?χΚς_ΌόWώΖ›―Ύϊξ[Ξ‹α•O}θωgΏρΫ_ϋώ|ϋwΎσΏώυ7ίόΡG?σӟώ[ιG~ε/½{^xώ…^ύώ»ίώςΌύ[Ώρυ?τ‡“Ÿψ/άϋ'ςλτΖo<φμ3/κω—^zΖ;o}₯oΏψ/~όϊ{―~ϋΧγ…OΎςα_ω™OGυ‹κ+ίζξ3Οψ Ÿvχρ―ΎφϊΟΎΰΪ¬ŠΪ\Ξ}ΌD›α‰8θl ρuξ™)is±΄,΅ΖΊvΞΙ`¨Ή= ΨR9θ^mLκΑp•f9ά]έ*mνLfS©š]s—£“5μΙa©Εζ %fξΚ΄΄υxo‡X΅λ±Σ–9•]WtχŠsLq'#κ°νζ4ebIΫc;Ž΅μpοTΪΘx\Ξ=Chαƒvxΐξ–ΆxΨ.«=ΪƒNfhԘ²q7œu:Ξ:ξ=Ξ•™ΙvΞ6m=ΜL5χv[ΗQ{œ¦&Τٚ΅ΪV3mΠμΆΓ‰Ξζ^³#gH{X»ιΊ,gΥΙέξŠ °S«λξ^χ¬=nG%Jά«#³hS³§§e{:l=rφx₯™8ΪH”eΫmΝ9gΧ°΄΄Μ=—“2ά©3Υ!llVǁ-΄k:O fΠΆn灻ι±:ΪμŽaΖTΥlΆ­•s.v£tσ€%ΆΩlκ¦­»1έY΅λV;¬Si―θ {Η§£m7uΓ9¦]`ΦX9νn*q6Ϊ–»΄6P«ΦΒ#ΪΉχ¦-ΪfΚ8νΡR]:ΊL;³ζ‘g1 η“ν΄ ΝsOgjΰΈns<ΘξŒΓyxάΞ9Φ΅°šέμ!ΞΩ΅k&GZk«ccγzΜY§ΓlΓQμdŽ”΄φΗ³va§:šr―R3»GκΪQš‘ΪrZΆΝh§‡lμΆέ"³qtΰ ‡ΰ0­{[sΞΩ5@ΪΩΪ1k—R̝:\I›ε8Č6»bcv²νΦ‘™ΗΪιΰ^a»·N5›mš:9ηBgRs4bΆλρκά΄u7ΆHέ™Xc+§κhξμ’Nw=^(f,m§£m «9Ǵˁ6[{Ψ™ξD9\ΠΉ ΅ŠΛc{Bξ½ œmΫNgξi–ZnλhL³f<Ϊ™S•`ς°™Ι&§ ΧμΤφΣρθ69Υ–;gžψύ_ϋΚόޏΚ_όΤΟ|αΕ?ύ^xηΝχ~τƒ7ώωρϋΟWŸώ•ΏπΚ_ψε'vίyσ{οοώφ›ωσφ?φ™OΏτς'?xσυwΎωΗίϊ;kΪgΛΏωΚΟ~€ϋξkΏφχ~ϋυo|φΟώΉOύΜžΙ/>χχΎχγwΎσϋίψςo|λ_όΦχΏω½kήyχ­ίϊ_ώΫ_ά―όΉOώμ?όɟ~ω³Η}όΰνούWίϊGτΫζή|ύƒπήόΧπƒ?φζε/ύΒ3Ο=έγίύ£ωυοΧΏωΏωƒo½±°ν΅ίκτwήόέ_ώ̟ωΕ—~βS/όΜ'?βƒϋγ7ίύξ½ϊυ·γ·Ύϋϋ_˜}η·ώπ|Μ»}ϊ‹ŸύΠ'>ϋπΪχίψΝρν―ύΰƒχέ7_ς―ύξΏυΪ/κgρη_ότOΌτω‡½Αϋo½φΦοώΏίψνω­φ?|σΗaου·ΎφΏ½χޝW~ωη_όŸώΜgί~Όρ_ώύ_Wo?ϋβ'ύ/Όπq¨a6œΣιάa³QΩ2˜5졁fλL'Ωf³έΕξjƒΒν6ά]‘ έ-%₯Ϋϊ`‰΄V·LΥξc΅Bv‡Ξ6S™†›ιΨΒ„s¬ΪΜ ­s6ΫΆ0HΊvdNΖΆ€G7M0’έ:3‘ΔfS‘΄Ί»Η ΫlΧNη mv—V΅, §ΜΆsΪlƒ4Η‘»a¦Μζt‡ΩΝ½Ž»ΣᚁH»›ξ]w©΅Γ›Nΐ†»P ηΆV5»i΄“-γΦ‘Ρξ6šlΰΦCΝ6TΪ¨μRNι΄HΒέtμ’³a7)Ψ†Δ½·ͺ ŽΨΆ“ΨΦv•€lΘl Ά™p&ΆY¬ΙΆ΅έeΨ€ΫژΩTΠξ–ˆ:ŽYοΟα₯šE@έ­NΫcΠ’έa9Υ@³Υ1ΩΨΦ”i`­P@Ψ’ζ³7U³)jt·#]CΓd$›€œš1Θqξn2μntέSU6Μ·ͺυ`3kNΗΆ­ΒΜΘΆTmؐ;›γ.*Ϊ²‹«βΒ2%m6,αdw]Y™ŒjΆι0v·3ŠΆv4ΕμvΟ\΅ΪŒ*£έΝ x°Ω΄¨6³]Um$ΝΖIΗ)C Ί»Κb7»χˆ`swΓέMΫͺΪΆRΗfΣξU‚`ΒΆˆΩdΞ–Ά‘ ’#Ϋ΄m #lb€Ω6ΨLμ:禦‰G±C³ΞέΝakSλdwη³f«°Εb6θ¬6Έ­P lw•Vl«Ω!έ0bη΄-1ξquFVm““ΕL9ΞΆh[v·-]Š€νΪγqΘhχ”Ω”²ΉχvlK΄˜αμ.p ΙΓδrξŒ[‘ΨΕΘtr„ΖΩΓ\ΤΆΓ¦M”ΫŽF ΅toχΜU+e·Me„;@η‰;¦K±{W‡aͺΩζ$’h₯΅έ©aθ<ΨΝvE±Η{Γv‘ΥέtΪ΅‘’wΞξD²›ΜΜΆp6bXGΆΙl“vWlbh6Ά«‚Μ'­N‹;wR`t©Yιρƒ†jάy05‹M~νkθΏυΏ θαΓΟ=χΚΟΏψbμ>ήw~όώ›―ψϋ?xχ­·/8ΟτΉ—_~φ£/>υτχƒϋξί}ν{o}χ­‡—?ρό'>~ήzνΝ―ώαΫοAωΜΗ>‰=}ίύφw^γοΌ?κ™ΟΜG_ϊΘΓ;?|γΥ?~σ‡oπδι—_~ξ₯?σό³OΞήορΝ7όέWί~σ<χ“Ÿ}ξΕg>ψξwήψΖ«ο 
ΰ'~v01ςŽžίϋΩ3ώ9Žƒ”&H­Δ!‚ήwΒΆνuT•Ί*lX°©z¨‹.*BΗφ؞xfΎχι9@Ÿ|ςΑgŸψι'οΏ|dΟ·οΎξ_ώώυ·―ή½y7€^~όαΟ?ΰ§?}ρΑΛσ8vο›ΧoΏύΓΏύύ?ώx/ΰΌx|ψς“O^ξλ_σoΏ{ύέΐρβώ³?ωιγΝ7―~ύ›οΎz5€>ύΕgΏϊβƒχŸ―ϋ»WΏώέ[η|τυ·σgΏψβ»όχύΣWo>ϋΰ'?ήΛσνσ»o_ωείΎzχφθ³_~ώ˟Ώ|Όύα7Ώ}υo_ΎΠϋοϊι‡Ÿ}φΑO>zΌx΄½}σξυoΎωΓλ?|ϋζo@O>ψOΏψ𓏧ύψϊΝ7_}›ρΝΫ —/_|ϊΕGŸμΕ‡/³ϋΌo~|χύ«?~ύυλ―Ύy€σΑ‡/φ'ύόg/>xqάϋζoΎώκ‡/Ώ~ΎχβΓπŸ>~ωξϋσ/―~ψρ^ΐίύυίύκ§Ώ (Ά-»P‰9vIΔm- wael˜Έ§l²M΅l"&i£Έ†jάy˜†ž«£i΄©ά«°Š4k³‘r6™MΦΘjSl°΅dq²2˜l©ξ(ΨέΦylKŒ&ž{& :‰έI‰ l–0Lgv-Ά f«“Mƒ›ε¨«CsYCJ»w©Μ5:Ν ΫEB!bv1έyΤuOU΅=+ ΰͺQΡ°;K†˜˜mΝ:〘£Κ½UIέϋLuXsηZ’bCmκ°Ήζt²+ΨfDΫ(Γ²9lI'»6ܝuOl—₯ΩΦ©lh‰0Šm›JΜa€λRδޝ3¬Œaχtˆ™!ΫTΛ&ΚF΅ΨHςœc!#Ά‚έ%Ε•­d“Ε`‰1›ΡΓΉ“Ωd¬6ΕΦ₯³¦0Ωθ΄»AΑΆέ:©0άέ¨ΝtκawC‘μͺ4›Q`γ¬]KΡvaΆœΪ˜ΈY­’]:Ξ6&΄νκΡΘ΄έΩqLQ€»χξ“S›ΡΠΞcwNaΐ΄9χ: r˜ΪNέ=G•ΜL‚¦YΖΥΜm»ieγΪbŒ$]lκΨΜX  lά<² cnδm`€bwJbΖάΦκh›ζFR³g6‹„{oE™`‰Ν“nΫvR64³ΛA(Ψ†± ;’έ˜ƒΪ6ͺv'A£cχ6+ ΨΠ`!Akfœ5Π&+…²1eŒ©$dζF£{' …l†Ή9u03-¬!aA Γ4£(342f€Ξl{Vέd©ΈwΛΠΆέηγρΨ ‚aΪtW[‘Γ4L¬m·ΞΡΨItΙeƒŠ»ν&’Ϊ¦kΐΥ@l—u6ΧXCC` cwξqbИeTΡl[βB%w“#enCκ0ιn0P5 ИEΒξU”A† ”0”³Ά­œlγ°mT° bd7hͺ £Œ­Π¬Ϊf«w·Ξ{ŒΪ&ΗΆ*,h„-Š-&-V0Β([˜lw›Ž!CΕ` BŒΐš™’Θ6b,%Ω4Ž U‡&[ΫΔΆF2l6₯€MFG\R H4l „ΐ†Ρ”UB±]l1’M²˜Ω\Πi‹‰4f™ J6TŒΒ6Έξv“blŽΑfΤ!bΆ(!ƒmΠA(rd›6¬ΣΖ†°l θdƒpΪ„Ζ™³ ΨΆ‘Ά*‡ …4baqν` Β3Ί1Α(‚Α„ΐ&DH#Πauο΄„Μ†L)²•bˆAl»WΗΐBeΡ–qЌΐ 5ecš1"(Κ΅[qL&›]’2l6%„ Kal:fΝ`›LΆΣVœQšm³HP[ΜΒZΔ\›’kΙb ΠL$ΨBΙ@Δ$Άνj΄m¬›Y–ΫT" !3 SEΫΆ-+›Β2k “‘fl™s¬Π@€ΫΝ1TΚ¬…΄@XΩ֍‰ΡX΅1bΔ`ky 2HH™£±mI9l–Ω2#Θ3˜ls―²f…ΤΖΖΤي²•ΝΚ@i†‰’ΑRd+› „²ΝF€"˜©έ’ θtmc³Dh16ŒΆΥDAbΫμ$°“c4w¦θnΙΠΊ„0 Q²”3:f»kiΫeyh3jmΖFQb€Jژ5‰tBΫ“)4΅)i5Nfl(LΐlH¬ΘH–4CXΩ†΄Ašctsζ`“››“€Λd$‚1c.θ8³& Ϊ2X Ν­˜ΝΠ€%‘Μ6¦‹@Ω„fδςˆKm FΒΪ΅t’­8)P#Y„Pƒ‘CͺˆB€Tš΄¬™­±mیAj³(4fC Ν΄ReƒVΘp ΣΖΖFͺΖtea ͺ`¬FS€-ΐΆ­Ψ6˜1€ΐΐlΫ6 "@l "’Ϊ@ JΓΖfS[J0†Ι €U4ΡI±*ΐNK±vl˜₯ΚΨ lj‘ lDΫ†”š]„4ΆA`£Τ`°BBcΐPα` ΒΆ»₯`A—M„ΤIΆ`6³ *lh[$e…&R©YΔ’Ωά6›;3†Um7Ζf£Π f(Μ‘›V0 h0 5 ΔHγ2аͺc5*KΡ@Ψ6Ψ ΖΝ€lp€Ν†ΩlΫ@ΠlM$FdUb6ΓΨlŒ΅hƒΦ Ζ¦:gV ±–΄RΈc+Ι"F΄mSŠ΄1‰1 °5 U“ %…PSj`ΤΑ ccΒΆ2Œ u’Νl6—ΖP±‘m0αHTj†ΐk6³ΝŒaκlΛΖ0jκ†Ν•  ±-€LRb΅’@ͺ–±Ί„£¨Ζ³°0`HΐŒlFΐ.0ΆΨΜ΄ ΄ۊ¬"³Ν&[²B `Γ A 06ιœYŠ€-Τ¬V»&ml m3 €ν$Ρ`P›»‘“ΤlEaΣ€¦1Δ±…(šΘ4κΔfΒΨ "fΖ@ UeΩ†l6ƒ΅Ω`ΓΩ8(ΐNΤb ΅C3›ΝfΫ\³U˜ΕΖ¬&ΠΖ8M(›¨…²h…ή›F‰š`Γfl φφΝού{{ύκϋη0c-ΠΘF*ΛΆ6ΩΞ(]f'͘5TŠ]Œι°f,5Χ!ΥΨΦl;΅mk”hρ΄°mc“]@NηΆ]=ŽΕf•χœuwΗξ¦ε€12.%›j•=7@€Qub!˜K¨$•™*.#k–2Χ‰ Ϋ±YUmDΆ¦*Ά2lΫΪΠ΄v¬%ΫΆ•vDiQmΆ«λΆs†ΉS†2ŽaΫ8ZΛΨΔIg£›Ξ »›TA³032¦¦Ω’9A¦³§“ #Ž„ξ}Z³v«Ν˜%³£²±Ωεc§Α°©\˜™k'³ΆLΥ±ΨZΆ\ΪH0CƒtɌ:μΨ΄i[S k₯ ¨j¦„M!Ϋ­cfhC w+³Κ6VMiss·ZηΠΜ†Q§f:‘™UZm›k¬θ™a زꞸ›S£8'ΓAf@IΤ±;U6»Κš₯¬Α£€mΗf•X£Ϊšι ΫT†mΓ„Ψ΄’ςΨv·bG–’UζnΊΝ€³ΝΘPŒ†νΞIK.‹9œ³Ε’²­3Οm7Qa:ξΦ5ΐΪΆΚΕͺ*ιμ a³8lΫ½u4;Α`6k§\KaΆ±ξ½ηΡR΄"w2K+œΣΩ5Μ$lΉͺŽΕ0Í-Q°‘YιΒF-£]Ω’©t­ f¨Œ–βZe Τ\Ÿ 8F’mΛͺ+˜cG8U l<ϊͺP !θD™’σοžΚ$¦)V½½™ Z‚‰›mο=8Υ°gvršΩΙYσ*ŸΫήf^•0,πWlœA {‹GN3ͺΊΨ!3w«™6f7K΅ΖϊŽjΏm:bmͺ­Ϋκ ³ Όu7«5ΫuuξιM±Hœ¬2³yθε»™G†[ΩیHΛbŽŽΆIέν½Ž7ΕΣg³ΝHΖ[α©ήb)1¦Άš’=Γ'ΑζχϋλE6V½wš2XΕή°φVsΑ锇ٔLίu{˜ΛΆΞSΚa3ΆΌ-‘`–Y1=ΪLΜΪhš’T3f† "ρ°0ƒb΅^YV½SΫΤ(‘)l،9ΥdΟψϊσΫΎΦRέUo―l’TφήύŸϊ?ϋΏΑ6`pvΌe‹$Υ=ΌΊT›αδχφ%©¨μ½§wwΙ]ΪοΥΆBDf½d‘ccΝΫ5+aωΊξν9{¨ή~ί΅±Ν6ηΦo-‡Δnfuo4Η΄§›F%ήλ† ―ΎύRZΆΡΧή2E›εmΗR mlǘ΄π80 °Ϋ^U0fbHpΪl;{;=₯Γw½m[‹²ΌΦxΣTΛΫ’FϋVSŒΫΥϋ0δžς*cπή»Ά1՝—Ώς§eVΓ^]†Jo΅­‘YΏ΄kcxίφ΅–κz{e“dm~Wλ»7όI-Άνy{reΫ4€m@om ΪF[»±=:˜Ν4₯ϊσρΌGλ4΅— ϋmγμΣw0μΖ6tΝTλήΉ™Υd³·ύυφ!wΩΆ΅>Ÿί+cCΕΦr6›5[λ«υ(AΜφs·X2/©[[k†fg\t6χήή―ŽfφΘΊMΥμ7ΖVήvŠ‘M¬yP›RΆ©νTΆ7Sξ1x—ε )aόφήvέ£ν6uwΜlΔZj©νε)₯Άύφ[˜-œάκ{―’Ν~›ŸwϋσΥ±Α*ο½v¨ΤρšΆ=$Χφή£©ƒ:†7CŽ–=]έκνEΣϊ"}½φΝle mj«Μ0j3Λ6―Tf‘φK€¬™]·ΥΜ&ŒPmoΣZ‘€|5ΪΦRL«–Θ`™N6©Νζ`h[TΌ½¦0λ™ΨF!ƒ₯6Zͺ5†Q…mΊYňΥlΛκ`C©a±ύφ»ϋΣ¨€1―ΐ€ΔήTιΆIi[Œ[/r―΅A‰m«ƒw+ν03ΒS›»6`EΩX¬6¬ͺ©MŒmΚ ΄₯ς¨gIc ΔήΊΉ`S˜΄DX,Zά`‹E€Ϊ›ιͺlλ½]SΚΨP7˞*Λ–ΐbš"Ι¬KS½ Άyre­-#eΝΆj« Οh–Ϋ¦oΥΫ[£&W–ΐŒΘˊQ'οΉaΐπΐΫkΆ²υ6ΨΛΑƒΒ6u/¬L- £ 3μfέ,] V1{+Ά"ZπΫλB1“Lα ις–64Ϋ 0Φn₯aƒR0`4žχ­PšΨžΪT1fEΩԘU…Ω™ Ζ6±D« 
˜©g‰16ފ€ά€‘Q2aΒdςR*Ϋ6Ή2Ϊσel£šβ]Ω²QdAo–Γ”7ˆmΣ»Ύx°Μ–ΔšQhΣσ6‚`oN5o­³'υUBmk[έZ2Ϊ’e’ ]ω=‘ΓfΔΥΆΝ*O͚aΨf–lŒ΅μ^Μ6³›Υ,ͺ S1ΫX*ΆE‘ oOΥ5*Š*˜)Γ›rlb1΄f›m†V[Lš™•VΫ»™”fŒ˜­ΧΎYlΔφ ¨μ5ό)«YΟIΆGόνΟίώρ―ψηϋ'\χοφοϊ˜EZeσ6uΨ–ζy9gΓσ0qΥΜ¬ ’%l ­ΜHc˜^ίΩ@*šΝVa«.³±2Μ¦¨&[Ήl5$jŒ°Νή ©ko›M³­Ν¬1•f³¬ςRŒΆΔjs3lAΚ@ΝΌ@ΜV@–%Ν@a³™PUh@fc-HΫρΦΘΆu΄ŒQKc#ΙfŠ` 6]C‹A5[Ωμι„šΩ‚©y4™Τ#½ (Ϋb3S•aΫL ΜXe΄f¬Α 3[cΧ‡XJkΫσrΪλm&u%˜‘™­¦P΅23’ΑzΧm#‚!ېdk{»‚±―°mΩbΐιQΡΜ–Ρe 6„RΣ l³ν­[ΥΨΆQΕΆ-T¬ «l•΄m(-ͺ`!6,1L†l ¨%ΙΖΒΘk–A…f“PΖN0…y’BεΙΦc’ΦΒΉΡ6ƒ [Χ„1ΩD1hDΙ H–lcFLφ”$l΄e+ΫΪ$Fυc”MΆ4£ΚΨf fΈR π* a†™‰¬!‘νNjΓ²Ν[%g­·Α”‚¨ΐΛ ##6³X³M™XOΘ&†@Ζ6„ΪΪ&yϊ0Φ&›; E3[†D`&”ΔdΆ··.ucΫ&*Ε\,LνΔ΄+βE™M†Ι€‘Ψ4‰l<Ϊ[Ωm •4€‡ΪΚxWd)ΆΩŒͺTΗ Γrψά`ΫPiyS 1g•ec-%3D2j“·ΚXΩƒJΨh ™±ImU«Aλ1Ϊ6cš%el£ 3\)›W ΒXlhJCJ{ήΝ–*3›•4lΫ΅ή6¬rsΫΫΌSΕ@F‘­!΅Ω”‰yN²½40ά6AηDŠΆ,‚,όύ_ώΐŸϋσŸλ?φύ 3Tf€@° "lΓL2hdΫήpJ’,² B6Α[Κl, Ζ€€€€ˆ Q-‘Δ&°Ά ₯&H@T"ΨΨά6ΐΖ€m¦YD‘±‘3•0mCd›Z"ƒ΅K0£ T˜°·Lτ ‚·• Τ² £γzΎY3[‰‘³6ΓΒ‚<·:‘N1=hEaH‘{7ΐ`³a³mΓƒaΫ$„m`" c@4az†D  ΚΒΌm;B³ΖΘ³Μ  ΖΖ$ 0ƒ4fΠh€Ιl,kS˜R’Ε’4 (03˜Œ ™ ’06Σ@J€ΐΪΒ6ΖH$DΘ@l€΅…Aj  T 6F ΪΖd3Ν$€@`Me½RLӘ1dA΄+Ε `X“… "0› Γlc2Π Ά-cΫΐPl36pΐxŒ,2QƘ0ΫJa 0F­m€ `ΜΖ@@ΓΆ°"ΜfCC HČΝLε& † `ΙlL`‚iΙ0€@[0 0RJl› @["8H‰ΘJ… ˆll’lΆ…LΆVlMΥξmƒl³Yd(d746Σ*  ›7¬0ˆΦ†aΫ6…06lFΥΰU2 XΜ°€1fT€l &³- Σ4ΥΦ³ŠŒΝ°€0¦Α²±56ΐHa6[Σa‰`ŒΤΆν)'(Ϊ²€mψF³H[F€~π½όψ‡?ώΕώβ7ο7@υϋΏσϋ?ύξ§ΏχΝχž–V‘-&Άai&μY+[¨ ΐŠ6PϊX&–YΐΪaž ΅VΆ!lsˆXšΆ-ΊfΌΚ¨νΩΚ’3H*f.`Ζ¦&Γ°ŒXφΆ‘0ΞA€-LΕ#ζ=έ“JΕdήΊ` Ω93«…ζ B-fS³Q%ӈe›" ½G’XΆ}2˜΄mΫ%-Ά²GΦi€6Z™4«`Ϋ,@ l,₯™P…-΄-‹ Xš²y{κˆ‹h6‹Κ˜ Jέ@šmlI)`e>Ωfo²j@ΩΫHRΆ½JΘθ΄g!€qΝ3„%+ΫFx›‹"H6k£(2o£m³e‹²Pˆ c;„›šFllQb Μ6ΖΉΐΪΨ@ 0/ΡSTbλ­2ΝRΖfs  ‰IR‹Μ6e¨ZΝf€σI¬m.@Θ$ΫΫJۊe–b] `ΒέήK€ u l‘m–‚^kUΚLmŽ% V0oΙEΦΫ>m‚f³L,ֈR·j†m•ΐΚ\Άm«6A6‚`[±MΣI/Ϋ3T `1ˆ%†so•jkm0Bͺ6Ϋ ΩšΩ²AΚ  0\x°©˜±Ωφ™‚Ω++ φF΄^’€΅Ν 2^₯δΨΪ  ›E&ήΦ ΐΔ@Υΐ†Ω&fHU5‹e6MƒlφV©ΦΆ]d“²mΒbd0ΜR,noΐ`³–H4@΅f"hΝRΕΪ4υΈA(«Ρl–«Ϊk ͺ1/K.[ ۊ ―ΜeΓμq«6ΐŠΫ’bΨ¬&iΛ¨Πb‚ΫoCSͺJΦζOΎχƒΏωΡί~χGί]|ϋωφ/ώψ/~ϊέί}ϋΝχΐ‹ΒlΫ8±E@…AUU°66JAλ΄eŒiͺ–‰!JU%cΜpSͺJΦΨ)›γΑ^ΫFΖμEQ-cΫ"‘J₯ Xcƒi(™­©Zm£ZQ"³Φ6M)ujmm{P­Y֞ήlΛ 6›mM4V Y ©εٌ™­­VδΖ‚!kΛ Ί2fΫ³mtR΄m†ˆyoo©c΅lEέΤbc³2Γ4Vo §(™eW'&]ΝΆΝ;ΚUU,³m^½,«°h«IΖ›M2³Bcsτ†Ω0–d€TJΆΆΤf3Φ΄‘DΚΪ<VU )C«+¦4ΖHŸ«l3Ζ²φ”½(Ϋ6ŒΩ²$dΨφ@Φ‘AU%˜ΝcSE)QΟ–i’jLΫHJ—’Mko¬RκͺeΩRΥ "γ­χ0£Ζf›(=Ρ€VΤΤ"Αl¦™-ΔP+r˜1Y΄΅”bc{ΫΆ”¬hΩΫ[J`¬–­ i f6q€i5¬ž6-‹Zfq:Aσ)l›‡UwUΖΌ½z΅ ³•‰Όy“Μ6”i[걍ِ„AFͺJA¦lm­°k»½Q)ͺΒXXͺ“•@Τ4ΝX@₯Xˆ&`€Οu566kk1Ϋ€±ˆ`+"ΆmΓI[P˜ͺŠ [›m*H­€zΆ6M¨–i›…RUecΌ M©*Y›ΝRΥ€Ο?ώΓ?ΐ—Ν¦ ‹άοήο~χΎΟρΟΏέoϋσ?ϊσΏ«Ώξtέu:…*η2dvrlCUfΤσΆta[ΩΊŠX•†Ρ +ΨΉ·W ³§κ:0―ϋ¦½ΩP•¬ς,-O$£… ‘ŠΆ9V–Mv¦ΪΆ°,SΤΤεŠΦΨΫ†•άFa₯Ζ:ήξLΥUWuν(ΪRΪέ±΄Ωμ©μΪJ{ΌvgβΚΜTŸΫžU †β‰©.<«Zkͺφž;rUuξά¬,ν½ξ΅6›(Ίk¬ΠάΗ}gΤ²Κ•B-¨gŸκ:ž§\5Bή(ρ†NΈ6Ϊδ*%eΫ6(³›!ŠΆ-VU­SκΊναΊ:Εκrkrf'±ΝTΥ<κΩfB{λl]²²©4Ϊk%^Ξ&‘fS]ͺf}uί4σU²Κ3R«ις° J΄­ ΞmΧ€Ωl–e ΥUZgΐΫ6–ε6 Σ ―c+UέΥ•Ίφ΅—ξ"˜m›Ρ:v­±T³·έ™«ˆb[υΉQ‘‘xbcθŠgͺ% ³ͺmβ.Tw.ΦbΥή«»Ί„7MQ…UεΉΣ]Τ2WŠJF¦z¦ακͺΖέa&J2ΥΎ–ΒΖΦης\ΡHmU΅={M[ecldIΫVV₯Φ)U·mV]§cU™œY*°½Τ5CΛl‹Ϊ[Q©Š•MΡ€M―WbΆb©˜mθtέz½Ί˜*¬2f΄…΅Μ•"hdͺ™ΰr]7»O˜Ν”“©Ό*συ|ξ<ͺΝΘU5σžMh΅UΆΖFcΥκ¬S*₯ν₯»ΣAUuΚL2²JΌ½S—ΐ2o;jΫ]ΕΚV!†M―WB`ΫΕRιtέΜ*.›M–ΰyΉ—§ϊόόη?€ ³5‚žίι›?ύƒώΩώθ?ώηίύ›_Ώ=ΐ·ί|ϋ“?ύΙΟώςg?ωα_>uΧΨ:³mͺiΥ«, $Ε³BΆνννΊφΊ¦‘Tž•4’oDUς\ΩΦ€§Π°Ό]”;ΙΊ†Ϋ”°mwŸήήΖͺvyoΌΦU—μΥ1TJm™»LO¦©Άͺ")Ο.IakλΛ“ ε"¨€(bZςjQΡNΊ7•Ή»ξτΆAΰ©G 34YM¦q₯t₯c›» U†˜’*«ΩΫ]i½½‡έ§ͺ6U' •-V…kλsHFx΅«HΥΆ½GΊ„‘L[ΑΉΌ‘*H£mž$a_~[§Z†EkYφVu₯=ΫβBRΗ΅υΆ^wξ¦·-ΡYΫ©iΥ*iZŠ!ΥΨήή»jœΥ&©<+iΕnΔVˆΠ¦J›lΌjšF,[(—ΜξNΔpιν­.m›ε8μ²Ν^\wWη½ HΥjΛTά£IWVUiάυΆrbZlm=Jŝ•HUΝ΄d™$zWcstmξκ’Όΐ“΅j˜‘Ɋl='΅ΫΦ]κR1%Reνν΄Ά}yΫ]χ)U ΄ns¦¨Y[ ™ V«RκnΫήΧDU ‘L„,‘²™«yΉΡΖxκ2{ΎDUΙΤk·,›Άλφl‹(ͺβΪzγuέ}ΤΫ–$ςl[ͺI™$μ4 1€šΆmK7Ie°ΒΝ>#³LΩMν©Z§·' Ω±Lv΅;Χ¬ "±ΩUΫVw2o/q΄ŠmΛnwWΗ‹ t΅ΪB*ζ^zRUYuέζκ΅Έ°΅ckkΫ‹αN0RR*¦₯Ζ΄”ήQGΖ›£ksUuρ΅G`²0¬k˜šΌςΦH•NΥb£ξΒ΅P€ΚΪ[TΩΌ½―έ§» ¨:ΝkέY…jf«*I&ˆ±r]Φ}6Ϋ³ιͺ@²ΥΚK€Ζήξξy Vήeή&W`6l5Ϊmͺ‡DF[mfOΓƒΞ,7ΐ<›>…l¨­L[Œ,WΆ 
°a}>·½fL˜gξn[`…€Ϋ(³­;{›R†έ½οέΕF²­yQ‡χΖ«KŒ4_kκd[!…6š/qΔΖΡυήc*7c4šΠΆU[΅™₯b{γάFc&^Δ+΄Β6Π &Υfͺ20o»Ά“mŒΫo53*³κg{›\w“YπqΟμ›{Μβjc$†…υ¬ϋdΨ6£VM6$ £1δΆ`˜»03TΜlΊ˜½ΑGLφΈΠf[W0Γ6Φ΅w·=Υ%£­ ΩFσ䄍§Oσ fφ]Ÿ+²qΆc΅ƒς^]Ω†Α†ul4³UΔb΅ά`›jήVa[)cλSΫw³λή›0k¦έͺϋξΩ«Kk·Φ|΅Υ©m¨ ڈ7γ86’`οΉrƒa˜CΆWmek†λΪ{³\Μ4O,‹iIƒmΝL*l¨bš·iD΅QΕlXΧΜ¨Μ6«{DΨΫΈ“ήΆ4fg|Ϋα™ω\ 0™υ¨ ΆΝγθŒΐ°FcH£š™Γ”jf¨fΝ¦ήφψ¬šμQf[U˜aΨλμ]ƒQ£Κ£-d κνьUΗ—ΎΊΦΓΎλͺ ν­>Ϋ2™ ƒ²—” oXWmΝƒf,)[F+Ly{uσŒΒΆ\°Uε½χζϊά½7 k·κήή69‡²΅3ζ΅We•a3J°yΗFμ=©›£Ν!ΫVmWmo–67O,‹±’‘m€΅Se3έecΆ‘Lέ<U°‡uMΧ£€1c£4ΜLΊ>όΗ?ϊ‡όΛ/ω―Ÿύ?ψί|―5{–x¨«ήΖZeΓ\h΄7 ’αU° I{{·ΟX ‚T6¨ΠσX’‘™ΑˆΐΤ`-¬I2Μl-5Σ¨…ΑR<Λκ6 ( ›ΕSΝ€«m ΑŠΩ«,ˆY3YF€m$ΑΆMκΜ( FaΓ 5³) Ϋ««l΅miU6&#,‘Ο¦kHLŸξ—τ—?ώΛΟ_ώφΛo·mZa 6£ΜLΘ[ ΐZUƒ©‰:ΦΆF)ΠΆ€VΆboŠfZΛLš°1¨ &“yv ΚΆ•JƒΔ‹UΓkV£M-@΅½NNΑ`Ϋ³ΪΝ$Α†TtΟΓTΩhf©Ν(ΪHfMŽΤ6³žŽMĘΑ(™±Μκ&ž…Υ©ΒΨͺΨF³˜jFΪΜ ΐ€Ξ Ϋ`€ 2lc*m³16 ŠΘ†A·νL B2φ. ³m3WΣ½ωd[Y-–Υ-ΜbσΆo>Œ*ε-±ΑhΨ6­β ΗΦLi&ΥΎC£:›«Ωf*ΕBLLYΗΒήL ©hΖ¨΅†xrΑ$m½½[›ΪφκS& Ε³ΆΥ”g£:Ύ’„,m»μν™vΛΌ|6š Γ’€eΖh&m3ΉΝ¬#(fΑ…lΩ{m!3ΐš1+a3Άι°ΑUef[IΆ' Ά€`}Lš’φφ,;)Šm@eΫΆ­M¬°-U53ƒεfY΅•šY[ΝfΖi™zsΦ Ρhκ ĚgT€Šl Ζ`I΅΅m{uςCΝ’1Πΐ<£₯P‚mΆ–mB,֝·Ν@–N3CKΩ4]mX3ΊΫͺŠ tυl–HΒC`€ι%έ h6…°ΝΛΆΛ›(²±›]k`2c­)ΜΞ•k–Zp ƒΫKΤ6dVΕΆMκΦX”QΒ³€οΫκ²1²Vζ‘%ΨΘV­Ψ(Ω¦°±%3b,Σφ­LΔpΌΝ6 t­yΪ¦ΆΩΉ…‡Dޘ‹ζ™0ΫΊφ6ŸωT JμY2Œrαl •mdŠB[덁³­ Πη3x3’˜ΞΆΑΘθμeΠR6V ΆfΣέ–fYΩ€#TC«MΡCΐ &±n6€Lν=2Ϊ0.λ΅Y£Ψ0ΐf–ι˜!±Ά!{$AœYJΠ*nΖPυ¬aVΑlcwΆ cRΜ’B„φΖγc”ΖΓΦΊˆ5›Ή&¦ZIΆLa[2›£+6Γ—™ΛΫ°£rΫΣ&m³DŒ%e³)™Ω’΅½j£­τεζͺQΆMΑFΉp€Y–²"[kf± LcΆ5IθSυ0Εl›‹!Αήh-QΓ-eQlkφζΣFˆbHΖΘ¨X(V#Κ6šΩΙX†‰ΡΝ&*Τή±²ΕX[kcΓΐlfYl$΄χ(fP¬a’ZeX2Σ6-―‹“[˜yΒ­ vf‘M0πήθ’χd§]ιYΝ›eiV-6“šQ"#υΆ¦B]3¨Ω8±Ν:0orXXco―έ΅Ξ°Yr„mc&'ΫΔ–bšTmŒ‘§6!Ψcl΄ΖδΊ{›1‚¬ŠΕΦ¬*.[ [Λx•e+‹²mΜΌy:m>κa΅-·ml€¬2H0ήθ1Ίzΐ°­ςΖ£*°νM€xέ<Ϊ%Qούσ.63XZσιΊed›6q¨μ©IŒ7Χxk)εή΄‰ΉNB¦UoܚΙ0<ς­ fΩWm?ΦΞ°•“_Mrkν=‡–τz— ”ΝοAΙΓSφ•šXΝΆ—₯΅jΝΨ¨QDθ·έT‘³έΝFSs`A›’½§uW6΄­—γ°m³³φd&³VΛ5šD±ΩΊzd lΛΨ\Νif£jΩΩΚj±˜ήvuq³¬Ϊ΄j+Ν"³m½EΣmՐ­Άε6š{χ°Ž½ΗQ*Γ`S6›4{CŠW3Ϋ–r4Ώ½΅υ΅š 0ll›yͺͺΩh°£μ¨lX¦·wšΊέyΖΆˆušΉn­rΫZυ@f O<κ†#ήΨWρl¦[œό:ά΄άZΫc΄š1½ήM6‘ρζ`³­dΌ·Λ„ƒv{ο—/ΝP†χ83βVύ6ΫqέΆ­Λ†©ςή>-¦΄ΩF­kXΫZ"žΧξ<ΔΦΞCQφLiBΫ’Ϋ<‚xοξfŒ1KΆl±ΦΫ.B,f½ D&Ϋ₯Ϊ6ΘyουΣ‰Ξ@κ­ΫŒ­6cλ¨ΦΥφΜΚ 2 `πΦ­T(›m{ξŠ1lΦΫNTΝΫ^―Ϊ}­&aΨcKƒηMGΧ`–-]*fV릲]ί 0Y6§™ΊLCέΆV6ƒ™9Čτ­uy?v€zgΒκΥΡΛ-mγ)ΪνΩυήΜφ€Zή[Λ‘tkη½_δΨ5σQΟrͺ·¦°λŒ…RφVΜ[9™μΝνΙΪ֐#μYœY2slvΥnhEΩ35΄ΥΆ¦›e γ²χΊuχ{3%Φz ©Έl1k,±jο2όισVΖ,σώι―ΪΪ¬]“U³a‚‘—;Cژχ^+·=›4—Ύ0£9αΗ½M€ή]=ƒfmλμ©·pφΜZΓ@gK.Λ0κϊ»ΕώYΛΪvΧZPσe7™Π0,΅ΪΖ¨bΨTmC™ηΎno Ω]IΌ ՚΅½i[tΑ πϊkfFrϋΫ™²ΏYν=[/15ΞΘ1(ΩvOm!#ΕKπ<ΧΆyIξ–Ό„ia!ουχmΫΖXum«2ΫP•υf­sΛo+倘‰jΫ$6+±υ­·ͺ;εMیafuΫrΟ2yτγ—άΆmΞΊό†ΙΤο½ΟfΫl[–:›)o»γύγκΆφ6»;Χ¦Ω&yιΔΖΜK­f{Z#]2Ɯπ˜5Σ[ ΛΦ³š΅­8ΆlΟ³V₯™1ξl¨Αb[]_ΣόlsΘΪViy­­―νζΪΪ–₯V6†ϊfIš]ζΉ+JφΦγ„Ά·νΊ5kf˜Ρm덐Χw-bfΟN>₯7³oΆycΙtΟ‚μΤ½­Κ˜nsFJ‹«žΗυ6³G-ΓPZ›ΌΧw“Ω›vumΓ‘·Ν]em½Φ-υΫJ‰ ΜΆ(cΔτΊ›z™IΊ”7mƒ1ή{w·I2“ρήbσ’}ΊΪ»m²BΫ£ί\ο½RΫΪ{«6UΆx[±ψͺχz~w°¨f›³,——Ϋ² ΄K{΄F€ Iρ€·ΉWΫΫ±6tΪ›Ώz›Υ’xοUX<Ϋ›Άwέ€ΛfZ0hUΛL›‘€s;λgq|ήΌU¦ΖΘΘΗΕlOg!#°s³)yΆίˆZeΩ4Θ-ΦΜτ}Ϋφ›¦ΞΚVχ6I˜mΦ»λήΆ„:ΉxΆ!₯mOwσ4„uλ½IU £mήΆάσ.c˜cόςmWd†Όήh£Ω8ϋχΦ6Œ`©Ε˜eήO_έf³v}LVρ—–™aKΗΕκ“ΩΌΊ b䲝CΖ\Mοmsνj1©Z‹ ΨBΓ2<–ƒ=΅66aΊΈY±νm±Š-a˜λΆmƒΕ–γma-8kΚlΦQo5FT$φΌ™υ:° dΛv΄fFœfθΩ!x#·HmνM#‚ iyΚΆmU­! 
lο=NΜ†Ή–άl’Vƒχή]£4Φ¨ζlΆšj½½*Ω¦‘L(Ψήζ|ΐš9gΫΞν aδjEΘ)d{άLΓ½ZΞ?;;ΤL;y;l¦lR±†΅QΫΆI4ΚΜV‰ΪΆχΖuΗή‚4­τ"‘šmΛΆ΅ΘΉσ”Υ6Τ6)vRΆΜχή{S.:“b‘f{z+Lƚε—6`·‡iŠnΖ²½-&€Ψ0¦Β`δm§Ψœ αqd1­Ψf΅ς΄Ικš7ΟΌ>°$™³ΦΜ•6z–…ΪόΚ†΅‘ΗςCΪͺρ”mΫ W Cn{›Eα™υ΅€1Pzw½·‘Β©ΩTƒy;sέz{ a“ν-Τ€ΔΌΝΉ4kW{’ ͺ-ήH;(ΉM6lΒΪ·zΙΟπQζΦRoΈL›Ά²‰΄Ϊ€=²±‘4ΚaaΆJaΝήΛΙψ!Τ€‡”°lΛ¬ΤύGΟ“Φ¦Ά-K)x•τήΆαNέ†ŽEΰM[€5ςΛ‘Md·gΖ›]φήBΤΜήØ™‘mX―kˆh«Vl›n΄lwoeΖσΆχϊ€ ²ΙD 3›ΝXqgϋ₯ ·Ό=‹ΦTc ΆηΝ.U™5Τ Ά™άβ5―kI1¦Š·‘—^©fΆΫsΪΝ Ι¦Ω&‰jΫΆΩ΅Y»WΩΒΜ’-ΖPM%uΆΐlΒz·»—ΌΌνΟξόvZ΄p—nΣh6œVoΘffi2š΄m•djο I±-‘&&‘‹wx†Iυ§žΗκφΨ\Ϋ$Ÿ‹3)Ζϋmφά6T"©=†k΄§3jgl*6+ή+нu£AΞ6#$ζυ5aΓn‹jC+κ­{ΣFiφl{―%°$&4ΐ²7©ΆYθΨ#ZPοεmΘΦZSm±½·5u*ΜP7 ›m\±l£³δfSi₯yo»nV΅M(“ν60δξΆ'lkI&Uf6Κa¬΅ &½χΌ &iU₯25Ω6°vΣ²vώΩίΦ±44Ά;m€lN« ™EΆ·Y 2”›š2ΔλΪή›¨b[Bm€†\,3Γ–Τγ >™Ν«ΫΘί6[WΝ²a”j1-ΜΪΌv1Slή,°`eΆΗΈ)Ά%«“ ƒβ6PMΜƒ$-HΆ¨θΆlς€ΥΆΪΜΦ’6ٌKRd3ŠΝž:jCΆ c6±†dΓ²a­ΑJBΰξ`0~ά–ΔLmο­X;X `"ΫR™6 ΆPdζl‘Kk†Κ6+*›lΫ†D™΅m΄Ε„ΊiΫ’2FΆΦ’ Ϊx#s΅lƒZϋΤ&-ΣY›ŠΠΆ­w>ZƁν±D#›ιΠΐ&Ρ<l©ΪΦl³©°ΤΖ°yb±uh&v΅™²Μ/#νν­ Y™³,CΆ%«” PΠ¨‘²6Μ€B`‘a μlZΩC(οƌUήV…π(6{iv0+S{0bΖRIh0έ[ͺγ%Τ…Α6ΰΡ„3eο­4dHΨ¦8ΫΪΥf£YΏF[R›m&tΥΨ6M—M6ΫXn՘ 2ΛT΄™ee¦`_ for more examples. Creating indexes ---------------- .. autosummary:: :toctree: ../generated/ cftime_range date_range date_range_like indexes.RangeIndex.arange indexes.RangeIndex.linspace Built-in Indexes ---------------- Default, pandas-backed indexes built-in to Xarray: .. autosummary:: :toctree: ../generated/ indexes.PandasIndex indexes.PandasMultiIndex More complex indexes built-in to Xarray: .. autosummary:: :toctree: ../generated/ CFTimeIndex indexes.RangeIndex indexes.NDPointIndex indexes.CoordinateTransformIndex Building custom indexes ----------------------- These classes are building blocks for more complex Indexes: .. autosummary:: :toctree: ../generated/ indexes.CoordinateTransform indexes.CoordinateTransformIndex indexes.NDPointIndex indexes.TreeAdapter The Index base class for building custom indexes: .. autosummary:: :toctree: ../generated/ Index Index.from_variables Index.concat Index.stack Index.unstack Index.create_variables Index.should_add_coord_to_array Index.to_pandas_index Index.isel Index.sel Index.join Index.reindex_like Index.equals Index.roll Index.rename Index.copy The following are useful when building custom Indexes .. autosummary:: :toctree: ../generated/ IndexSelResult xarray-2025.09.0/doc/api/io.rst000066400000000000000000000030001505620616400160140ustar00rootroot00000000000000.. currentmodule:: xarray IO / Conversion =============== Dataset methods --------------- .. autosummary:: :toctree: ../generated/ load_dataset open_dataset open_mfdataset open_zarr save_mfdataset Dataset.as_numpy Dataset.from_dataframe Dataset.from_dict Dataset.to_dataarray Dataset.to_dataframe Dataset.to_dask_dataframe Dataset.to_dict Dataset.to_netcdf Dataset.to_pandas Dataset.to_zarr Dataset.chunk Dataset.close Dataset.compute Dataset.filter_by_attrs Dataset.info Dataset.load Dataset.persist Dataset.unify_chunks DataArray methods ----------------- .. autosummary:: :toctree: ../generated/ load_dataarray open_dataarray DataArray.as_numpy DataArray.from_dict DataArray.from_iris DataArray.from_series DataArray.to_dask_dataframe DataArray.to_dataframe DataArray.to_dataset DataArray.to_dict DataArray.to_index DataArray.to_iris DataArray.to_masked_array DataArray.to_netcdf DataArray.to_numpy DataArray.to_pandas DataArray.to_series DataArray.to_zarr DataArray.chunk DataArray.close DataArray.compute DataArray.persist DataArray.load DataArray.unify_chunks DataTree methods ---------------- .. 
xarray-2025.09.0/doc/api/plotting.rst

.. currentmodule:: xarray

Plotting
========

Dataset
-------

.. autosummary::
   :toctree: ../generated/
   :template: autosummary/accessor_method.rst

   Dataset.plot.scatter
   Dataset.plot.quiver
   Dataset.plot.streamplot

DataArray
---------

.. autosummary::
   :toctree: ../generated/
   :template: autosummary/accessor_callable.rst

   DataArray.plot

.. autosummary::
   :toctree: ../generated/
   :template: autosummary/accessor_method.rst

   DataArray.plot.contourf
   DataArray.plot.contour
   DataArray.plot.hist
   DataArray.plot.imshow
   DataArray.plot.line
   DataArray.plot.pcolormesh
   DataArray.plot.step
   DataArray.plot.scatter
   DataArray.plot.surface

Faceting
--------

.. autosummary::
   :toctree: ../generated/

   plot.FacetGrid
   plot.FacetGrid.add_colorbar
   plot.FacetGrid.add_legend
   plot.FacetGrid.add_quiverkey
   plot.FacetGrid.map
   plot.FacetGrid.map_dataarray
   plot.FacetGrid.map_dataarray_line
   plot.FacetGrid.map_dataset
   plot.FacetGrid.map_plot1d
   plot.FacetGrid.set_axis_labels
   plot.FacetGrid.set_ticks
   plot.FacetGrid.set_titles
   plot.FacetGrid.set_xlabels
   plot.FacetGrid.set_ylabels

xarray-2025.09.0/doc/api/resample.rst

.. currentmodule:: xarray

Resample objects
================

.. currentmodule:: xarray.core.resample

Dataset
-------

.. autosummary::
   :toctree: ../generated/

   DatasetResample
   DatasetResample.asfreq
   DatasetResample.backfill
   DatasetResample.interpolate
   DatasetResample.nearest
   DatasetResample.pad
   DatasetResample.all
   DatasetResample.any
   DatasetResample.apply
   DatasetResample.assign
   DatasetResample.assign_coords
   DatasetResample.bfill
   DatasetResample.count
   DatasetResample.ffill
   DatasetResample.fillna
   DatasetResample.first
   DatasetResample.last
   DatasetResample.map
   DatasetResample.max
   DatasetResample.mean
   DatasetResample.median
   DatasetResample.min
   DatasetResample.prod
   DatasetResample.quantile
   DatasetResample.reduce
   DatasetResample.std
   DatasetResample.sum
   DatasetResample.var
   DatasetResample.where
   DatasetResample.dims
   DatasetResample.groups

DataArray
---------

.. autosummary::
   :toctree: ../generated/

   DataArrayResample
   DataArrayResample.asfreq
   DataArrayResample.backfill
   DataArrayResample.interpolate
   DataArrayResample.nearest
   DataArrayResample.pad
   DataArrayResample.all
   DataArrayResample.any
   DataArrayResample.apply
   DataArrayResample.assign_coords
   DataArrayResample.bfill
   DataArrayResample.count
   DataArrayResample.ffill
   DataArrayResample.fillna
   DataArrayResample.first
   DataArrayResample.last
   DataArrayResample.map
   DataArrayResample.max
   DataArrayResample.mean
   DataArrayResample.median
   DataArrayResample.min
   DataArrayResample.prod
   DataArrayResample.quantile
   DataArrayResample.reduce
   DataArrayResample.std
   DataArrayResample.sum
   DataArrayResample.var
   DataArrayResample.where
   DataArrayResample.dims
   DataArrayResample.groups
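A hedged sketch of the resample objects catalogued above: the hourly series, the frequency strings and the values below are invented for illustration and do not appear in the archived file.

.. code-block:: python

   import numpy as np
   import xarray as xr

   time = xr.date_range("2000-01-01", periods=48, freq="h")
   da = xr.DataArray(np.arange(48.0), coords={"time": time}, dims="time")

   # Downsample: hourly values to daily means (DataArrayResample.mean).
   daily_mean = da.resample(time="1D").mean()

   # Upsample: fill a finer grid by linear interpolation
   # (DataArrayResample.interpolate).
   half_hourly = da.resample(time="30min").interpolate("linear")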
autosummary:: :toctree: ../generated/ RollingExp RollingExp.mean RollingExp.sum xarray-2025.09.0/doc/api/rolling.rst000066400000000000000000000016611505620616400170660ustar00rootroot00000000000000.. currentmodule:: xarray Rolling objects =============== .. currentmodule:: xarray.computation.rolling Dataset ------- .. autosummary:: :toctree: ../generated/ DatasetRolling DatasetRolling.construct DatasetRolling.reduce DatasetRolling.argmax DatasetRolling.argmin DatasetRolling.count DatasetRolling.max DatasetRolling.mean DatasetRolling.median DatasetRolling.min DatasetRolling.prod DatasetRolling.std DatasetRolling.sum DatasetRolling.var DataArray --------- .. autosummary:: :toctree: ../generated/ DataArrayRolling DataArrayRolling.__iter__ DataArrayRolling.construct DataArrayRolling.reduce DataArrayRolling.argmax DataArrayRolling.argmin DataArrayRolling.count DataArrayRolling.max DataArrayRolling.mean DataArrayRolling.median DataArrayRolling.min DataArrayRolling.prod DataArrayRolling.std DataArrayRolling.sum DataArrayRolling.var xarray-2025.09.0/doc/api/testing.rst000066400000000000000000000017151505620616400170750ustar00rootroot00000000000000.. currentmodule:: xarray Testing ======= .. autosummary:: :toctree: ../generated/ testing.assert_equal testing.assert_identical testing.assert_allclose testing.assert_chunks_equal Test that two ``DataTree`` objects are similar. .. autosummary:: :toctree: ../generated/ testing.assert_isomorphic testing.assert_equal testing.assert_identical Hypothesis Testing Strategies ============================= .. currentmodule:: xarray See the :ref:`documentation page on testing ` for a guide on how to use these strategies. .. warning:: These strategies should be considered highly experimental, and liable to change at any time. .. autosummary:: :toctree: ../generated/ testing.strategies.supported_dtypes testing.strategies.names testing.strategies.dimension_names testing.strategies.dimension_sizes testing.strategies.attrs testing.strategies.variables testing.strategies.unique_subset_of xarray-2025.09.0/doc/api/top-level.rst000066400000000000000000000012501505620616400173210ustar00rootroot00000000000000.. currentmodule:: xarray Top-level functions =================== Computation ----------- .. autosummary:: :toctree: ../generated/ apply_ufunc cov corr cross dot map_blocks polyval unify_chunks where Combining Data -------------- .. autosummary:: :toctree: ../generated/ align broadcast concat merge combine_by_coords combine_nested Creation -------- .. autosummary:: :toctree: ../generated/ DataArray Dataset DataTree full_like zeros_like ones_like Miscellaneous ------------- .. autosummary:: :toctree: ../generated/ decode_cf infer_freq show_versions set_options get_options xarray-2025.09.0/doc/api/tutorial.rst000066400000000000000000000003011505620616400172510ustar00rootroot00000000000000.. currentmodule:: xarray Tutorial ======== .. autosummary:: :toctree: ../generated/ tutorial.open_dataset tutorial.load_dataset tutorial.open_datatree tutorial.load_datatree xarray-2025.09.0/doc/api/ufuncs.rst000066400000000000000000000042451505620616400167240ustar00rootroot00000000000000.. currentmodule:: xarray Universal functions =================== These functions are equivalent to their NumPy versions, but for xarray objects backed by non-NumPy array types (e.g. ``cupy``, ``sparse``, or ``jax``), they will ensure that the computation is dispatched to the appropriate backend. You can find them in the ``xarray.ufuncs`` module: .. 
autosummary:: :toctree: ../generated/ ufuncs.abs ufuncs.absolute ufuncs.acos ufuncs.acosh ufuncs.arccos ufuncs.arccosh ufuncs.arcsin ufuncs.arcsinh ufuncs.arctan ufuncs.arctanh ufuncs.asin ufuncs.asinh ufuncs.atan ufuncs.atanh ufuncs.bitwise_count ufuncs.bitwise_invert ufuncs.bitwise_not ufuncs.cbrt ufuncs.ceil ufuncs.conj ufuncs.conjugate ufuncs.cos ufuncs.cosh ufuncs.deg2rad ufuncs.degrees ufuncs.exp ufuncs.exp2 ufuncs.expm1 ufuncs.fabs ufuncs.floor ufuncs.invert ufuncs.isfinite ufuncs.isinf ufuncs.isnan ufuncs.isnat ufuncs.log ufuncs.log10 ufuncs.log1p ufuncs.log2 ufuncs.logical_not ufuncs.negative ufuncs.positive ufuncs.rad2deg ufuncs.radians ufuncs.reciprocal ufuncs.rint ufuncs.sign ufuncs.signbit ufuncs.sin ufuncs.sinh ufuncs.spacing ufuncs.sqrt ufuncs.square ufuncs.tan ufuncs.tanh ufuncs.trunc ufuncs.add ufuncs.arctan2 ufuncs.atan2 ufuncs.bitwise_and ufuncs.bitwise_left_shift ufuncs.bitwise_or ufuncs.bitwise_right_shift ufuncs.bitwise_xor ufuncs.copysign ufuncs.divide ufuncs.equal ufuncs.float_power ufuncs.floor_divide ufuncs.fmax ufuncs.fmin ufuncs.fmod ufuncs.gcd ufuncs.greater ufuncs.greater_equal ufuncs.heaviside ufuncs.hypot ufuncs.lcm ufuncs.ldexp ufuncs.left_shift ufuncs.less ufuncs.less_equal ufuncs.logaddexp ufuncs.logaddexp2 ufuncs.logical_and ufuncs.logical_or ufuncs.logical_xor ufuncs.maximum ufuncs.minimum ufuncs.mod ufuncs.multiply ufuncs.nextafter ufuncs.not_equal ufuncs.pow ufuncs.power ufuncs.remainder ufuncs.right_shift ufuncs.subtract ufuncs.true_divide ufuncs.angle ufuncs.isreal ufuncs.iscomplex xarray-2025.09.0/doc/api/weighted.rst000066400000000000000000000012371505620616400172170ustar00rootroot00000000000000.. currentmodule:: xarray Weighted objects ================ .. currentmodule:: xarray.computation.weighted Dataset ------- .. autosummary:: :toctree: ../generated/ DatasetWeighted DatasetWeighted.mean DatasetWeighted.quantile DatasetWeighted.sum DatasetWeighted.std DatasetWeighted.var DatasetWeighted.sum_of_weights DatasetWeighted.sum_of_squares DataArray --------- .. autosummary:: :toctree: ../generated/ DataArrayWeighted DataArrayWeighted.mean DataArrayWeighted.quantile DataArrayWeighted.sum DataArrayWeighted.std DataArrayWeighted.var DataArrayWeighted.sum_of_weights DataArrayWeighted.sum_of_squares xarray-2025.09.0/doc/combined.json000066400000000000000000000017041505620616400165660ustar00rootroot00000000000000{ "version": 1, "refs": { ".zgroup": "{\"zarr_format\":2}", "foo/.zarray": "{\"chunks\":[4,5],\"compressor\":null,\"dtype\":\">> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.{3,}: | {5,8}: " copybutton_prompt_is_regexp = True # NBSphinx configuration nbsphinx_timeout = 600 nbsphinx_execute = "always" nbsphinx_allow_errors = False nbsphinx_requirejs_path = "" # png2x/retina rendering of figues in docs would also need to modify custom.css: # https://github.com/spatialaudio/nbsphinx/issues/464#issuecomment-652729126 # .rst-content .image-reference img { # max-width: unset; # width: 100% !important; # height: auto !important; # } # nbsphinx_execute_arguments = [ # "--InlineBackend.figure_formats=['png2x']", # ] nbsphinx_prolog = """ {% set docname = env.doc2path(env.docname, base=None) %} You can run this notebook in a `live session `_ |Binder| or view it `on Github `_. .. 
|Binder| image:: https://mybinder.org/badge.svg :target: https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/{{ docname }} """ # AutoDoc configuration autosummary_generate = True autodoc_typehints = "none" # Napoleon configuration napoleon_google_docstring = False napoleon_numpy_docstring = True napoleon_use_param = False napoleon_use_rtype = False napoleon_preprocess_types = True napoleon_type_aliases = { # general terms "sequence": ":term:`sequence`", "iterable": ":term:`iterable`", "callable": ":py:func:`callable`", "dict_like": ":term:`dict-like `", "dict-like": ":term:`dict-like `", "path-like": ":term:`path-like `", "mapping": ":term:`mapping`", "file-like": ":term:`file-like `", # special terms # "same type as caller": "*same type as caller*", # does not work, yet # "same type as values": "*same type as values*", # does not work, yet # stdlib type aliases "MutableMapping": "~collections.abc.MutableMapping", "sys.stdout": ":obj:`sys.stdout`", "timedelta": "~datetime.timedelta", "string": ":class:`string `", # numpy terms "array_like": ":term:`array_like`", "array-like": ":term:`array-like `", "scalar": ":term:`scalar`", "array": ":term:`array`", "hashable": ":term:`hashable `", # matplotlib terms "color-like": ":py:func:`color-like `", "matplotlib colormap name": ":doc:`matplotlib colormap name `", "matplotlib axes object": ":py:class:`matplotlib axes object `", "colormap": ":py:class:`colormap `", # xarray terms "dim name": ":term:`dimension name `", "var name": ":term:`variable name `", # objects without namespace: xarray "DataArray": "~xarray.DataArray", "Dataset": "~xarray.Dataset", "Variable": "~xarray.Variable", "DataTree": "~xarray.DataTree", "DatasetGroupBy": "~xarray.core.groupby.DatasetGroupBy", "DataArrayGroupBy": "~xarray.core.groupby.DataArrayGroupBy", "Grouper": "~xarray.groupers.Grouper", "Resampler": "~xarray.groupers.Resampler", # objects without namespace: numpy "ndarray": "~numpy.ndarray", "MaskedArray": "~numpy.ma.MaskedArray", "dtype": "~numpy.dtype", "ComplexWarning": "~numpy.ComplexWarning", # objects without namespace: pandas "Index": "~pandas.Index", "MultiIndex": "~pandas.MultiIndex", "CategoricalIndex": "~pandas.CategoricalIndex", "TimedeltaIndex": "~pandas.TimedeltaIndex", "DatetimeIndex": "~pandas.DatetimeIndex", "IntervalIndex": "~pandas.IntervalIndex", "Series": "~pandas.Series", "DataFrame": "~pandas.DataFrame", "Categorical": "~pandas.Categorical", "Path": "~~pathlib.Path", # objects with abbreviated namespace (from pandas) "pd.Index": "~pandas.Index", "pd.NaT": "~pandas.NaT", } autodoc_type_aliases = napoleon_type_aliases # Keep both in sync # mermaid config mermaid_version = "11.6.0" # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates", sphinx_autosummary_accessors.templates_path] # The master toctree document. master_doc = "index" remove_from_toctrees = ["generated/*"] # The language for content autogenerated by Sphinx. language = "en" # General information about the project. project = "xarray" copyright = f"2014-{datetime.datetime.now().year}, xarray Developers" # The short Y.M.D version. v = packaging.version.parse(xarray.__version__) version = ".".join(str(p) for p in v.release) # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
today_fmt = "%Y-%m-%d" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build", "debug.ipynb", "**.ipynb_checkpoints"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "pydata_sphinx_theme" html_title = "" html_context = { "github_user": "pydata", "github_repo": "xarray", "github_version": "main", "doc_path": "doc", } # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/layout.html#references html_theme_options = { #"announcement":"🍾 Xarray is now 10 years old! πŸŽ‰", "logo": {"image_dark": "https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.svg"}, "github_url":"https://github.com/pydata/xarray", "show_version_warning_banner":True, "use_edit_page_button":True, "header_links_before_dropdown": 8, "navbar_align": "left", "footer_center":["last-updated"], # Instead of adding these to the header bar they are linked in 'getting help' and 'contributing' # "icon_links": [ # { # "name": "Discord", # "url": "https://discord.com/invite/wEKPCt4PDu", # "icon": "fa-brands fa-discord", # }, # { # "name": "X", # "url": "https://x.com/xarray_dev", # "icon": "fa-brands fa-x-twitter", # }, # { # "name": "Bluesky", # "url": "https://bsky.app/profile/xarray.bsky.social", # "icon": "fa-brands fa-bluesky", # }, # ] } # pydata_sphinx_theme use_edit_page_button with github link seems better html_show_sourcelink = False # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "_static/logos/Xarray_Logo_RGB_Final.svg" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "_static/logos/Xarray_Icon_Final.svg" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] html_css_files = ["style.css"] # configuration for sphinxext.opengraph ogp_site_url = "https://docs.xarray.dev/en/latest/" ogp_image = "https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_RGB_Final.png" ogp_custom_meta_tags = ( '', '', '', ) # Redirects for pages that were moved to new locations rediraffe_redirects = { "terminology.rst": "user-guide/terminology.rst", "data-structures.rst": "user-guide/data-structures.rst", "indexing.rst": "user-guide/indexing.rst", "interpolation.rst": "user-guide/interpolation.rst", "computation.rst": "user-guide/computation.rst", "groupby.rst": "user-guide/groupby.rst", "reshaping.rst": "user-guide/reshaping.rst", "combining.rst": "user-guide/combining.rst", "time-series.rst": "user-guide/time-series.rst", "weather-climate.rst": "user-guide/weather-climate.rst", "pandas.rst": "user-guide/pandas.rst", "io.rst": "user-guide/io.rst", "dask.rst": "user-guide/dask.rst", "plotting.rst": "user-guide/plotting.rst", "duckarrays.rst": "user-guide/duckarrays.rst", "related-projects.rst": "user-guide/ecosystem.rst", "faq.rst": "get-help/faq.rst", "why-xarray.rst": "getting-started-guide/why-xarray.rst", "installing.rst": "getting-started-guide/installing.rst", "quick-overview.rst": "getting-started-guide/quick-overview.rst", "contributing.rst": "contribute/contributing.rst", "developers-meeting.rst": "contribute/developers-meeting.rst", } # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = today_fmt # Output file base name for HTML help builder. htmlhelp_basename = "xarraydoc" # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "cftime": ("https://unidata.github.io/cftime", None), "cubed": ("https://cubed-dev.github.io/cubed/", None), "dask": ("https://docs.dask.org/en/latest", None), "flox": ("https://flox.readthedocs.io/en/latest/", None), "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", None), "iris": ("https://scitools-iris.readthedocs.io/en/latest", None), "matplotlib": ("https://matplotlib.org/stable/", None), "numba": ("https://numba.readthedocs.io/en/stable/", None), "numpy": ("https://numpy.org/doc/stable", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), "python": ("https://docs.python.org/3/", None), "scipy": ("https://docs.scipy.org/doc/scipy", None), "sparse": ("https://sparse.pydata.org/en/latest/", None), "xarray-tutorial": ("https://tutorial.xarray.dev/", None), "zarr": ("https://zarr.readthedocs.io/en/stable/", None), "xarray-lmfit": ("https://xarray-lmfit.readthedocs.io/stable", None), } # based on numpy doc/source/conf.py def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != "py": return None modname = info["module"] fullname = info["fullname"] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split("."): try: obj = getattr(obj, part) except AttributeError: return None try: fn = inspect.getsourcefile(inspect.unwrap(obj)) except TypeError: fn = None if not fn: return None try: source, lineno = inspect.getsourcelines(obj) except OSError: lineno = None if lineno: linespec = f"#L{lineno}-L{lineno + len(source) - 1}" else: linespec = "" fn = os.path.relpath(fn, start=os.path.dirname(xarray.__file__)) if "+" in xarray.__version__: return f"https://github.com/pydata/xarray/blob/main/xarray/{fn}{linespec}" else: return ( 
f"https://github.com/pydata/xarray/blob/" f"v{xarray.__version__}/xarray/{fn}{linespec}" ) def html_page_context(app, pagename, templatename, context, doctree): # Disable edit button for docstring generated pages if "generated" in pagename: context["theme_use_edit_page_button"] = False def update_gallery(app: Sphinx): """Update the gallery page.""" LOGGER.info("Updating gallery page...") gallery = yaml.safe_load(pathlib.Path(app.srcdir, "gallery.yml").read_bytes()) for key in gallery: items = [ f""" .. grid-item-card:: :text-align: center :link: {item['path']} .. image:: {item['thumbnail']} :alt: {item['title']} +++ {item['title']} """ for item in gallery[key] ] items_md = indent(dedent("\n".join(items)), prefix=" ") markdown = f""" .. grid:: 1 2 2 2 :gutter: 2 {items_md} """ pathlib.Path(app.srcdir, f"{key}-gallery.txt").write_text(markdown) LOGGER.info(f"{key} gallery page updated.") LOGGER.info("Gallery page updated.") def update_videos(app: Sphinx): """Update the videos page.""" LOGGER.info("Updating videos page...") videos = yaml.safe_load(pathlib.Path(app.srcdir, "videos.yml").read_bytes()) items = [] for video in videos: authors = " | ".join(video["authors"]) item = f""" .. grid-item-card:: {" ".join(video["title"].split())} :text-align: center .. raw:: html {video['src']} +++ {authors} """ items.append(item) items_md = indent(dedent("\n".join(items)), prefix=" ") markdown = f""" .. grid:: 1 2 2 2 :gutter: 2 {items_md} """ pathlib.Path(app.srcdir, "videos-gallery.txt").write_text(markdown) LOGGER.info("Videos page updated.") def setup(app: Sphinx): app.connect("html-page-context", html_page_context) app.connect("builder-inited", update_gallery) app.connect("builder-inited", update_videos) xarray-2025.09.0/doc/contribute/000077500000000000000000000000001505620616400162675ustar00rootroot00000000000000xarray-2025.09.0/doc/contribute/contributing.rst000066400000000000000000001231471505620616400215400ustar00rootroot00000000000000.. _contributing: ********************** Contributing to xarray ********************** .. note:: Large parts of this document came from the `Pandas Contributing Guide `_. Overview ======== We welcome your skills and enthusiasm at the xarray project!. There are numerous opportunities to contribute beyond just writing code. All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions, and other ideas are welcome. If you have any questions on the process or how to fix something feel free to ask us! The recommended place to ask a question is on `GitHub Discussions `_ , but we also have a `Discord `_ and a `mailing list `_. There is also a `"python-xarray" tag on Stack Overflow `_ which we monitor for questions. We also have a biweekly community call, details of which are announced on the `Developers meeting `_. You are very welcome to join! Though we would love to hear from you, there is no expectation to contribute during the meeting either - you are always welcome to just sit in and listen. This project is a community effort, and everyone is welcome to contribute. Everyone within the community is expected to abide by our `code of conduct `_. Where to start? =============== If you are brand new to *xarray* or open-source development, we recommend going through the `GitHub "issues" tab `_ to find issues that interest you. Some issues are particularly suited for new contributors by the label `Documentation `__ and `good first issue `_ where you could start out. 
These are well documented issues, that do not require a deep understanding of the internals of xarray. Once you've found an interesting issue, you can return here to get your development environment setup. The xarray project does not assign issues. Issues are "assigned" by opening a Pull Request(PR). .. _contributing.bug_reports: Bug reports and enhancement requests ==================================== Bug reports are an important part of making *xarray* more stable. Having a complete bug report will allow others to reproduce the bug and provide insight into fixing. Trying out the bug-producing code on the *main* branch is often a worthwhile exercise to confirm that the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed. Submitting a bug report ----------------------- If you find a bug in the code or documentation, do not hesitate to submit a ticket to the `Issue Tracker `_. You are also welcome to post feature requests or pull requests. If you are reporting a bug, please use the provided template which includes the following: #. Include a short, self-contained Python snippet reproducing the problem. You can format the code nicely by using `GitHub Flavored Markdown `_:: ```python import xarray as xr ds = xr.Dataset(...) ... ``` #. Include the full version string of *xarray* and its dependencies. You can use the built in function:: ```python import xarray as xr xr.show_versions() ... ``` #. Explain why the current behavior is wrong/not desired and what you expect instead. The issue will then show up to the *xarray* community and be open to comments/ideas from others. See this `stackoverflow article for tips on writing a good bug report `_ . .. _contributing.github: Now that you have an issue you want to fix, enhancement to add, or documentation to improve, you need to learn how to work with GitHub and the *xarray* code base. .. _contributing.version_control: Version control, Git, and GitHub ================================ The code is hosted on `GitHub `_. To contribute you will need to sign up for a `free GitHub account `_. We use `Git `_ for version control to allow many people to work together on the project. Some great resources for learning Git: * the `GitHub help pages `_. * the `NumPy's documentation `_. * Matthew Brett's `Pydagogue `_. Getting started with Git ------------------------ `GitHub has instructions for setting up Git `__ including installing git, setting up your SSH key, and configuring git. All these steps need to be completed before you can work seamlessly between your local repository and GitHub. .. note:: The following instructions assume you want to learn how to interact with github via the git command-line utility, but contributors who are new to git may find it easier to use other tools instead such as `Github Desktop `_. .. _contributing.dev_workflow: Development workflow ==================== To keep your work well organized, with readable history, and in turn make it easier for project maintainers to see what you've done, and why you did it, we recommend you to follow workflow: 1. `Create an account `_ on GitHub if you do not already have one. 2. You will need your own fork to work on the code. Go to the `xarray project page `_ and hit the ``Fork`` button near the top of the page. This creates a copy of the code under your account on the GitHub server. 3. 
Clone your fork to your machine:: git clone https://github.com/your-user-name/xarray.git cd xarray git remote add upstream https://github.com/pydata/xarray.git This creates the directory ``xarray`` and connects your repository to the upstream (main project) *xarray* repository. 4. Copy tags across from the xarray repository:: git fetch --tags upstream This will ensure that when you create a development environment a reasonable version number is created. .. _contributing.dev_env: Creating a development environment ---------------------------------- To test out code changes locally, you'll need to build *xarray* from source, which requires a Python environment. If you're making documentation changes, you can skip to :ref:`contributing.documentation` but you won't be able to build the documentation locally before pushing your changes. .. note:: For small changes, such as fixing a typo, you don't necessarily need to build and test xarray locally. If you make your changes then :ref:`commit and push them to a new branch `, xarray's automated :ref:`continuous integration tests ` will run and check your code in various ways. You can then try to fix these problems by committing and pushing more commits to the same branch. You can also avoid building the documentation locally by instead :ref:`viewing the updated documentation via the CI `. To speed up this feedback loop or for more complex development tasks you should build and test xarray locally. .. _contributing.dev_python: Creating a Python Environment ----------------------------- Before starting any development, you'll need to create an isolated xarray development environment: - Install either `Anaconda `_ or `miniconda `_ - Make sure your conda is up to date (``conda update conda``) - Make sure that you have :ref:`cloned the repository ` - ``cd`` to the *xarray* source directory We'll now kick off a two-step process: 1. Install the build dependencies 2. Build and install xarray .. code-block:: sh # Create and activate the build environment conda create -c conda-forge -n xarray-tests python=3.11 # This is for Linux and MacOS conda env update -f ci/requirements/environment.yml # On windows, use environment-windows.yml instead conda env update -f ci/requirements/environment-windows.yml conda activate xarray-tests # or with older versions of Anaconda: source activate xarray-tests # Build and install xarray pip install -e . At this point you should be able to import *xarray* from your locally built version: .. code-block:: sh $ python # start an interpreter >>> import xarray >>> xarray.__version__ '2025.7.2.dev14+g5ce69b2b.d20250725' This will create the new environment, and not touch any of your existing environments, nor any existing Python installation. To view your environments:: conda info -e To return to your root environment:: conda deactivate See the full `conda docs here `__. Install pre-commit hooks ------------------------ We highly recommend that you setup `pre-commit `_ hooks to automatically run all the above tools every time you make a git commit. To install the hooks:: python -m pip install pre-commit pre-commit install This can be done by running: :: pre-commit run from the root of the xarray repository. You can skip the pre-commit checks with ``git commit --no-verify``. Update the ``main`` branch -------------------------- First make sure you have :ref:`created a development environment `. Before starting a new set of changes, fetch all changes from ``upstream/main``, and start a new feature branch from that. 
From time to time you should fetch the upstream changes from GitHub: :: git fetch --tags upstream git merge upstream/main This will combine your commits with the latest *xarray* git ``main``. If this leads to merge conflicts, you must resolve these before submitting your pull request. If you have uncommitted changes, you will need to ``git stash`` them prior to updating. This will effectively store your changes, which can be reapplied after updating. If the *xarray* ``main`` branch version has updated since you last fetched changes, you may also wish to reinstall xarray so that the pip version reflects the *xarray* version:: pip install -e . Create a new feature branch --------------------------- Create a branch to save your changes, even before you start making changes. You want your ``main branch`` to contain only production-ready code:: git checkout -b shiny-new-feature This changes your working directory to the ``shiny-new-feature`` branch. Keep any changes in this branch specific to one bug or feature so it is clear what the branch brings to *xarray*. You can have many "shiny-new-features" and switch in between them using the ``git checkout`` command. Generally, you will want to keep your feature branches on your public GitHub fork of xarray. To do this, you ``git push`` this new branch up to your GitHub repo. Generally (if you followed the instructions in these pages, and by default), git will have a link to your fork of the GitHub repo, called ``origin``. You push up to your own fork with: :: git push origin shiny-new-feature In git >= 1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option: :: git push --set-upstream origin shiny-new-feature From now on git will know that ``shiny-new-feature`` is related to the ``shiny-new-feature branch`` in the GitHub repo. The editing workflow -------------------- 1. Make some changes 2. See which files have changed with ``git status``. You'll see a listing like this one: :: # On branch shiny-new-feature # Changed but not updated: # (use "git add ..." to update what will be committed) # (use "git checkout -- ..." to discard changes in working directory) # # modified: README 3. Check what the actual changes are with ``git diff``. 4. Build the `documentation `__ for the documentation changes. 5. `Run the test suite `_ for code changes. Commit and push your changes ---------------------------- 1. To commit all modified files into the local copy of your repo, do ``git commit -am 'A commit message'``. 2. To push the changes up to your forked repo on GitHub, do a ``git push``. Open a pull request ------------------- When you're ready or need feedback on your code, open a Pull Request (PR) so that the xarray developers can give feedback and eventually include your suggested code into the ``main`` branch. `Pull requests (PRs) on GitHub `_ are the mechanism for contributing to xarray's code and documentation. Enter a title for the set of changes with some explanation of what you've done. Follow the PR template, which looks like this. :: [ ]Closes #xxxx [ ]Tests added [ ]User visible changes (including notable bug fixes) are documented in whats-new.rst [ ]New functions/methods are listed in api.rst Mention anything you'd like particular attention for - such as a complicated change or some code you are not happy with. If you don't think your request is ready to be merged, just say so in your pull request message and use the "Draft PR" feature of GitHub. This is a good way of getting some preliminary code review. .. 
_contributing.documentation: Contributing to the documentation ================================= If you're not the developer type, contributing to the documentation is still of huge value. You don't even have to be an expert on *xarray* to do so! In fact, there are sections of the docs that are worse off after being written by experts. If something in the docs doesn't make sense to you, updating the relevant section after you figure it out is a great way to ensure it will help the next person. .. contents:: Documentation: :local: About the *xarray* documentation -------------------------------- The documentation is written in **reStructuredText**, which is almost like writing in plain English, and built using `Sphinx `__. The Sphinx Documentation has an excellent `introduction to reST `__. Review the Sphinx docs to perform more complex changes to the documentation as well. Some other important things to know about the docs: - The *xarray* documentation consists of two parts: the docstrings in the code itself and the docs in this folder ``xarray/doc/``. The docstrings are meant to provide a clear explanation of the usage of the individual functions, while the documentation in this folder consists of tutorial-like overviews per topic together with some other information (what's new, installation, etc). - The docstrings follow the **NumPy Docstring Standard**, which is used widely in the Scientific Python community. This standard specifies the format of the different sections of the docstring. Refer to the `documentation for the Numpy docstring format `_ for a detailed explanation, or look at some of the existing functions to extend it in a similar manner. - The documentation makes heavy use of the `jupyter-sphinx extension `_. The ``jupyter-execute`` directive lets you put code in the documentation which will be run during the doc build. For example: .. code:: rst .. jupyter-execute:: x = 2 x**3 will be rendered as: .. jupyter-execute:: x = 2 x**3 Almost all code examples in the docs are run (and the output saved) during the doc build. This approach means that code examples will always be up to date, but it does make building the docs a bit more complex. - Our API documentation in ``doc/api.rst`` houses the auto-generated documentation from the docstrings. For classes, there are a few subtleties around controlling which methods and attributes have pages auto-generated. Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx will emit a warning. How to build the *xarray* documentation --------------------------------------- Requirements ~~~~~~~~~~~~ Make sure to follow the instructions on :ref:`creating a development environment` above, but to build the docs you need to use the environment file ``ci/requirements/doc.yml``. You should also use this environment and these steps if you want to view changes you've made to the docstrings. .. code-block:: sh # Create and activate the docs environment conda env create -f ci/requirements/doc.yml conda activate xarray-docs # or with older versions of Anaconda: source activate xarray-docs # Build and install a local, editable version of xarray pip install -e . Building the documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~ To build the documentation run:: cd doc/ make html Then you can find the HTML output files in the folder ``xarray/doc/_build/html/``. To see what the documentation now looks like with your changes, you can view the HTML build locally by opening the files in your local browser. 
For example, if you normally use Google Chrome as your browser, you could enter:: google-chrome _build/html/quick-overview.html in the terminal, running from within the ``doc/`` folder. You should now see a new tab pop open in your local browser showing the ``quick-overview`` page of the documentation. The different pages of this local build of the documentation are linked together, so you can browse the whole documentation by following links the same way you would on the officially-hosted xarray docs site. The first time you build the docs, it will take quite a while because it has to run all the code examples and build all the generated docstring pages. In subsequent evocations, Sphinx will try to only build the pages that have been modified. If you want to do a full clean build, do:: make clean make html Writing ReST pages ------------------ Most documentation is either in the docstrings of individual classes and methods, in explicit ``.rst`` files, or in examples and tutorials. All of these use the `ReST `_ syntax and are processed by `Sphinx `_. This section contains additional information and conventions how ReST is used in the xarray documentation. Section formatting ~~~~~~~~~~~~~~~~~~ We aim to follow the recommendations from the `Python documentation `_ and the `Sphinx reStructuredText documentation `_ for section markup characters, - ``*`` with overline, for chapters - ``=``, for heading - ``-``, for sections - ``~``, for subsections - ``**`` text ``**``, for **bold** text Referring to other documents and sections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Sphinx `_ allows internal `references `_ between documents. Documents can be linked with the ``:doc:`` directive: :: See the :doc:`/getting-started-guide/installing` See the :doc:`/getting-started-guide/quick-overview` will render as: See the `Installation `_ See the `Quick Overview `_ Including figures and files ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Image files can be directly included in pages with the ``image::`` directive. .. _contributing.code: Contributing to the code base ============================= .. contents:: Code Base: :local: Code standards -------------- Writing good code is not just about what you write. It is also about *how* you write it. During :ref:`Continuous Integration ` testing, several tools will be run to check your code for stylistic errors. Generating any warnings will cause the test to fail. Thus, good style is a requirement for submitting code to *xarray*. In addition, because a lot of people use our library, it is important that we do not make sudden changes to the code that could have the potential to break a lot of user code as a result, that is, we need it to be as *backwards compatible* as possible to avoid mass breakages. Code Formatting ~~~~~~~~~~~~~~~ xarray uses several tools to ensure a consistent code format throughout the project: - `ruff `_ for formatting, code quality checks and standardized order in imports, and - `mypy `_ for static type checking on `type hints `_. We highly recommend that you setup `pre-commit hooks `_ to automatically run all the above tools every time you make a git commit. This can be done by running:: pre-commit install from the root of the xarray repository. You can skip the pre-commit checks with ``git commit --no-verify``. Backwards Compatibility ~~~~~~~~~~~~~~~~~~~~~~~ Please try to maintain backwards compatibility. *xarray* has a growing number of users with lots of existing code, so don't break it if at all possible. 
If you think breakage is required, clearly state why as part of the pull request. Be especially careful when changing function and method signatures, because any change may require a deprecation warning. For example, if your pull request means that the argument ``old_arg`` to ``func`` is no longer valid, instead of simply raising an error if a user passes ``old_arg``, we would instead catch it: .. code-block:: python def func(new_arg, old_arg=None): if old_arg is not None: from xarray.core.utils import emit_user_level_warning emit_user_level_warning( "`old_arg` has been deprecated, and in the future will raise an error." "Please use `new_arg` from now on.", DeprecationWarning, ) # Still do what the user intended here This temporary check would then be removed in a subsequent version of xarray. This process of first warning users before actually breaking their code is known as a "deprecation cycle", and makes changes significantly easier to handle both for users of xarray, and for developers of other libraries that depend on xarray. .. _contributing.ci: Testing With Continuous Integration ----------------------------------- The *xarray* test suite runs automatically via the `GitHub Actions `__, continuous integration service, once your pull request is submitted. A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing, then you will get a red 'X', where you can click through to see the individual failed tests. This is an example of a green build. .. image:: ../_static/ci.png .. note:: Each time you push to your PR branch, a new run of the tests will be triggered on the CI. If they haven't already finished, tests for any older commits on the same branch will be automatically cancelled. .. _contributing.tdd: Test-driven development/code writing ------------------------------------ *xarray* is serious about testing and strongly encourages contributors to embrace `test-driven development (TDD) `_. This development process "relies on the repetition of a very short development cycle: first the developer writes an (initially failing) automated test case that defines a desired improvement or new function, then produces the minimum amount of code to pass that test." So, before actually writing any code, you should write your tests. Often the test can be taken from the original GitHub issue. However, it is always worth considering additional use cases and writing corresponding tests. Adding tests is one of the most common requests after code is pushed to *xarray*. Therefore, it is worth getting in the habit of writing tests ahead of time so that this is never an issue. Like many packages, *xarray* uses `pytest `_ and the convenient extensions in `numpy.testing `_. Writing tests ~~~~~~~~~~~~~ All tests should go into the ``tests`` subdirectory of the specific package. This folder contains many current examples of tests, and we suggest looking to these for inspiration. The ``xarray.testing`` module has many special ``assert`` functions that make it easier to make statements about whether DataArray or Dataset objects are equivalent. 
The easiest way to verify that your code is correct is to explicitly construct the result you expect, then compare the actual result to the expected correct result:: def test_constructor_from_0d(): expected = Dataset({None: ([], 0)})[None] actual = DataArray(0) assert_identical(expected, actual) Transitioning to ``pytest`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ *xarray* existing test structure is *mostly* class-based, meaning that you will typically find tests wrapped in a class. .. code-block:: python class TestReallyCoolFeature: ... Going forward, we are moving to a more *functional* style using the `pytest `__ framework, which offers a richer testing framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this: .. code-block:: python def test_really_cool_feature(): ... Using ``pytest`` ~~~~~~~~~~~~~~~~ Here is an example of a self-contained set of tests that illustrate multiple features that we like to use. - functional style: tests are like ``test_*`` and *only* take arguments that are either fixtures or parameters - ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``. - using ``parametrize``: allow testing of multiple cases - to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used - ``fixture``, code for object construction, on a per-test basis - using bare ``assert`` for scalars and truth-testing - ``assert_equal`` and ``assert_identical`` from the ``xarray.testing`` module for xarray object comparisons. - the typical pattern of constructing an ``expected`` and comparing versus the ``result`` We would name this file ``test_cool_feature.py`` and put in an appropriate place in the ``xarray/tests/`` structure. .. code-block:: python import pytest import numpy as np import xarray as xr from xarray.testing import assert_equal @pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"]) def test_dtypes(dtype): assert str(np.dtype(dtype)) == dtype @pytest.mark.parametrize( "dtype", [ "float32", pytest.param("int16", marks=pytest.mark.skip), pytest.param( "int32", marks=pytest.mark.xfail(reason="to show how it works") ), ], ) def test_mark(dtype): assert str(np.dtype(dtype)) == "float32" @pytest.fixture def dataarray(): return xr.DataArray([1, 2, 3]) @pytest.fixture(params=["int8", "int16", "int32", "int64"]) def dtype(request): return request.param def test_series(dataarray, dtype): result = dataarray.astype(dtype) assert result.dtype == dtype expected = xr.DataArray(np.array([1, 2, 3], dtype=dtype)) assert_equal(result, expected) A test run of this yields .. 
code-block:: shell ((xarray) $ pytest test_cool_feature.py -v ================================= test session starts ================================== platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 -- cachedir: .pytest_cache plugins: hypothesis-6.56.3, cov-4.0.0 collected 11 items xarray/tests/test_cool_feature.py::test_dtypes[int8] PASSED [ 9%] xarray/tests/test_cool_feature.py::test_dtypes[int16] PASSED [ 18%] xarray/tests/test_cool_feature.py::test_dtypes[int32] PASSED [ 27%] xarray/tests/test_cool_feature.py::test_dtypes[int64] PASSED [ 36%] xarray/tests/test_cool_feature.py::test_mark[float32] PASSED [ 45%] xarray/tests/test_cool_feature.py::test_mark[int16] SKIPPED (unconditional skip) [ 54%] xarray/tests/test_cool_feature.py::test_mark[int32] XFAIL (to show how it works) [ 63%] xarray/tests/test_cool_feature.py::test_series[int8] PASSED [ 72%] xarray/tests/test_cool_feature.py::test_series[int16] PASSED [ 81%] xarray/tests/test_cool_feature.py::test_series[int32] PASSED [ 90%] xarray/tests/test_cool_feature.py::test_series[int64] PASSED [100%] ==================== 9 passed, 1 skipped, 1 xfailed in 1.83 seconds ==================== Tests that we have ``parametrized`` are now accessible via the test name, for example we could run these with ``-k int8`` to sub-select *only* those tests which match ``int8``. .. code-block:: shell ((xarray) bash-3.2$ pytest test_cool_feature.py -v -k int8 ================================== test session starts ================================== platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 -- cachedir: .pytest_cache plugins: hypothesis-6.56.3, cov-4.0.0 collected 11 items test_cool_feature.py::test_dtypes[int8] PASSED test_cool_feature.py::test_series[int8] PASSED Running the test suite ---------------------- The tests can then be run directly inside your Git clone (without having to install *xarray*) by typing:: pytest xarray The tests suite is exhaustive and takes a few minutes. Often it is worth running only a subset of tests first around your changes before running the entire suite. The easiest way to do this is with:: pytest xarray/path/to/test.py -k regex_matching_test_name Or with one of the following constructs:: pytest xarray/tests/[test-module].py pytest xarray/tests/[test-module].py::[TestClass] pytest xarray/tests/[test-module].py::[TestClass]::[test_method] Using `pytest-xdist `_, one can speed up local testing on multicore machines, by running pytest with the optional -n argument:: pytest xarray -n 4 This can significantly reduce the time it takes to locally run tests before submitting a pull request. For more, see the `pytest `_ documentation. Running the performance test suite ---------------------------------- Performance matters and it is worth considering whether your code has introduced performance regressions. *xarray* is starting to write a suite of benchmarking tests using `asv `__ to enable easy monitoring of the performance of critical *xarray* operations. These benchmarks are all found in the ``xarray/asv_bench`` directory. To use all features of asv, you will need either ``conda`` or ``virtualenv``. For more details please check the `asv installation webpage `_. To install asv:: python -m pip install asv If you need to run a benchmark, change your directory to ``asv_bench/`` and run:: asv continuous -f 1.1 upstream/main HEAD You can replace ``HEAD`` with the name of the branch you are working on, and report benchmarks that changed by more than 10%. 
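For example, to compare a feature branch against ``upstream/main`` (here ``shiny-new-feature`` is purely the illustrative branch name used earlier in this guide), one might run:: asv continuous -f 1.1 upstream/main shiny-new-feature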
The command uses ``conda`` by default for creating the benchmark environments. If you want to use virtualenv instead, write:: asv continuous -f 1.1 -E virtualenv upstream/main HEAD The ``-E virtualenv`` option should be added to all ``asv`` commands that run benchmarks. The default value is defined in ``asv.conf.json``. Running the full benchmark suite can take up to one hour and use up a few GBs of RAM. Usually it is sufficient to paste only a subset of the results into the pull request to show that the committed changes do not cause unexpected performance regressions. You can run specific benchmarks using the ``-b`` flag, which takes a regular expression. For example, this will only run tests from a ``xarray/asv_bench/benchmarks/groupby.py`` file:: asv continuous -f 1.1 upstream/main HEAD -b ^groupby If you want to only run a specific group of tests from a file, you can do it using ``.`` as a separator. For example:: asv continuous -f 1.1 upstream/main HEAD -b groupby.GroupByMethods will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``. You can also run the benchmark suite using the version of *xarray* already installed in your current Python environment. This can be useful if you do not have ``virtualenv`` or ``conda``, or are using the ``setup.py develop`` approach discussed above; for the in-place build you need to set ``PYTHONPATH``, e.g. ``PYTHONPATH="$PWD/.." asv [remaining arguments]``. You can run benchmarks using an existing Python environment by:: asv run -e -E existing or, to use a specific Python interpreter,:: asv run -e -E existing:python3.10 This will display stderr from the benchmarks, and use your local ``python`` that comes from your ``$PATH``. Learn `how to write a benchmark and how to use asv from the documentation `_ . .. TODO: uncomment once we have a working setup see https://github.com/pydata/xarray/pull/5066 The *xarray* benchmarking suite is run remotely and the results are available `here `_. Documenting your code --------------------- Changes should be reflected in the release notes located in ``doc/whats-new.rst``. This file contains an ongoing change log for each release. Add an entry to this file to document your fix, enhancement or (unavoidable) breaking change. Make sure to include the GitHub issue number when adding your entry (using ``:issue:`1234```, where ``1234`` is the issue/pull request number). If your code is an enhancement, it is most likely necessary to add usage examples to the existing documentation. This can be done by following the :ref:`guidelines for contributing to the documentation `. .. _contributing.changes: Contributing your changes to *xarray* ===================================== .. _contributing.committing: Committing your code -------------------- Keep style fixes to a separate commit to make your pull request more readable. Once you've made changes, you can see them by typing:: git status If you have created a new file, it is not being tracked by git. Add it by typing:: git add path/to/file-to-be-added.py Doing 'git status' again should give something like:: # On branch shiny-new-feature # # modified: /relative/path/to/file-you-added.py # The following defines how a commit message should ideally be structured: * A subject line with ``< 72`` chars. * One blank line. * Optionally, a commit message body. Please reference the relevant GitHub issues in your commit message using ``GH1234`` or ``#1234``. Either style is fine, but the former is generally preferred. 
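For illustration only (the issue number and the change described below are hypothetical), a commit message following this structure might look like:: Fix typo in groupby user-guide example (GH1234) The example referred to the wrong variable name; this commit corrects it.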
Now you can commit your changes in your local repository:: git commit -m .. _contributing.pushing: Pushing your changes -------------------- When you want your changes to appear publicly on your GitHub page, push your forked feature branch's commits:: git push origin shiny-new-feature Here ``origin`` is the default name given to your remote repository on GitHub. You can see the remote repositories:: git remote -v If you added the upstream repository as described above you will see something like:: origin git@github.com:yourname/xarray.git (fetch) origin git@github.com:yourname/xarray.git (push) upstream git://github.com/pydata/xarray.git (fetch) upstream git://github.com/pydata/xarray.git (push) Now your code is on GitHub, but it is not yet a part of the *xarray* project. For that to happen, a pull request needs to be submitted on GitHub. .. _contributing.review: Review your code ---------------- When you're ready to ask for a code review, file a pull request. Before you do, once again make sure that you have followed all the guidelines outlined in this document regarding code style, tests, performance tests, and documentation. You should also double check your branch changes against the branch it was based on: #. Navigate to your repository on GitHub -- https://github.com/your-user-name/xarray #. Click on ``Branches`` #. Click on the ``Compare`` button for your feature branch #. Select the ``base`` and ``compare`` branches, if necessary. This will be ``main`` and ``shiny-new-feature``, respectively. .. _contributing.pr: Finally, make the pull request ------------------------------ If everything looks good, you are ready to make a pull request. A pull request is how code from a local repository becomes available to the GitHub community and can be looked at and eventually merged into the ``main`` version. This pull request and its associated changes will eventually be committed to the ``main`` branch and available in the next release. To submit a pull request: #. Navigate to your repository on GitHub #. Click on the ``Pull Request`` button #. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks okay one last time #. Write a description of your changes in the ``Preview Discussion`` tab #. Click ``Send Pull Request``. This request then goes to the repository maintainers, and they will review the code. If you have made updates to the documentation, you can now see a preview of the updated docs by clicking on "Details" under the ``docs/readthedocs.org`` check near the bottom of the list of checks that run automatically when submitting a PR, then clicking on the "View Docs" button on the right (not the big green button, the small black one further down). .. image:: ../_static/view-docs.png If you need to make more changes, you can make them in your branch, add them to a new commit, push them to GitHub, and the pull request will automatically be updated. Pushing them to GitHub again is done by:: git push origin shiny-new-feature This will automatically update your pull request with the latest code and restart the :ref:`Continuous Integration ` tests. .. _contributing.delete: Delete your merged branch (optional) ------------------------------------ Once your feature branch is accepted into upstream, you'll probably want to get rid of the branch. 
First, update your ``main`` branch to check that the merge was successful:: git fetch upstream git checkout main git merge upstream/main Then you can do:: git branch -D shiny-new-feature You need to use a upper-case ``-D`` because the branch was squashed into a single commit before merging. Be careful with this because ``git`` won't warn you if you accidentally delete an unmerged branch. If you didn't delete your branch using GitHub's interface, then it will still exist on GitHub. To delete it there do:: git push origin --delete shiny-new-feature .. _contributing.checklist: PR checklist ------------ - **Properly comment and document your code.** See `"Documenting your code" `_. - **Test that the documentation builds correctly** by typing ``make html`` in the ``doc`` directory. This is not strictly necessary, but this may be easier than waiting for CI to catch a mistake. See `"Contributing to the documentation" `_. - **Test your code**. - Write new tests if needed. See `"Test-driven development/code writing" `_. - Test the code using `Pytest `_. Running all tests (type ``pytest`` in the root directory) takes a while, so feel free to only run the tests you think are needed based on your PR (example: ``pytest xarray/tests/test_dataarray.py``). CI will catch any failing tests. - By default, the upstream dev CI is disabled on pull request and push events. You can override this behavior per commit by adding a ``[test-upstream]`` tag to the first line of the commit message. For documentation-only commits, you can skip the CI per commit by adding a ``[skip-ci]`` tag to the first line of the commit message. - **Properly format your code** and verify that it passes the formatting guidelines set by `ruff `_. See `"Code formatting" `_. You can use `pre-commit `_ to run these automatically on each commit. - Run ``pre-commit run --all-files`` in the root directory. This may modify some files. Confirm and commit any formatting changes. - **Push your code** and `create a PR on GitHub `_. - **Use a helpful title for your pull request** by summarizing the main contributions rather than using the latest commit message. If the PR addresses an `issue `_, please `reference it `_. xarray-2025.09.0/doc/contribute/developers-meeting.rst000066400000000000000000000024041505620616400226170ustar00rootroot00000000000000.. _developers-meeting: Developers meeting ------------------ Xarray developers meet bi-weekly every other Wednesday. The meeting occurs on `Zoom `__. Find the `notes for the meeting here `__. There is a :issue:`GitHub issue for changes to the meeting<4001>`. You can subscribe to this calendar to be notified of changes: * `Google Calendar `__ * `iCal `__ .. raw:: html xarray-2025.09.0/doc/contribute/index.rst000066400000000000000000000014271505620616400201340ustar00rootroot00000000000000######################## Xarray Developer's Guide ######################## We welcome your skills and enthusiasm at the Xarray project! There are numerous opportunities to contribute beyond just writing code. All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions, and other ideas are welcome. Please review our Contributor's guide for more guidance. In this section you will also find documentation on the internal organization of Xarray's source code, the roadmap for current development priorities, as well as how to engage with core maintainers of the Xarray codebase. .. 
toctree:: :maxdepth: 2 :hidden: contributing ../internals/index ../roadmap ../whats-new developers-meeting Team xarray-2025.09.0/doc/examples/000077500000000000000000000000001505620616400157275ustar00rootroot00000000000000xarray-2025.09.0/doc/examples/ERA5-GRIB-example.ipynb000066400000000000000000000054431505620616400216460ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# GRIB Data Example " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "GRIB format is commonly used to disseminate atmospheric model data. With xarray and the cfgrib engine, GRIB data can easily be analyzed and visualized." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import xarray as xr\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "To read GRIB data, you can use `xarray.load_dataset`. The only extra code you need is to specify the engine as `cfgrib`." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds = xr.tutorial.load_dataset(\"era5-2mt-2019-03-uk.grib\", engine=\"cfgrib\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's create a simple plot of 2-m air temperature in degrees Celsius:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds = ds - 273.15\n", "ds.t2m[0].plot(cmap=plt.cm.coolwarm)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "With CartoPy, we can create a more detailed plot, using built-in shapefiles to help provide geographic context:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import cartopy.crs as ccrs\n", "import cartopy\n", "\n", "fig = plt.figure(figsize=(10, 10))\n", "ax = plt.axes(projection=ccrs.Robinson())\n", "ax.coastlines(resolution=\"10m\")\n", "plot = ds.t2m[0].plot(\n", " cmap=plt.cm.coolwarm, transform=ccrs.PlateCarree(), cbar_kwargs={\"shrink\": 0.6}\n", ")\n", "plt.title(\"ERA5 - 2m temperature British Isles March 2019\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally, we can also pull out a time series for a given location easily:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds.t2m.sel(longitude=0, latitude=51.5).plot()\n", "plt.title(\"ERA5 - London 2m temperature March 2019\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.09.0/doc/examples/ROMS_ocean_model.ipynb000066400000000000000000000161551505620616400221070ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ROMS Ocean Model Example" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The Regional Ocean Modeling System ([ROMS](https://www.myroms.org/)) is an open source hydrodynamic model that is used for simulating currents and water properties in coastal and estuarine regions. 
ROMS is one of a few standard ocean models, and it has an active user community.\n", "\n", "ROMS uses a regular C-Grid in the horizontal, similar to other structured grid ocean and atmospheric models, and a stretched vertical coordinate (see [the ROMS documentation](https://www.myroms.org/wiki/Vertical_S-coordinate) for more details). Both of these require special treatment when using `xarray` to analyze ROMS ocean model output. This example notebook shows how to create a lazily evaluated vertical coordinate, and make some basic plots. The `xgcm` package is required to do analysis that is aware of the horizontal C-Grid." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import cartopy.crs as ccrs\n", "import cartopy.feature as cfeature\n", "import matplotlib.pyplot as plt\n", "\n", "%matplotlib inline\n", "\n", "import xarray as xr" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Load a sample ROMS file. This is a subset of a full model available at \n", "\n", " http://barataria.tamu.edu/thredds/catalog.html?dataset=txla_hindcast_agg\n", " \n", "The subsetting was done using the following commands on one of the output files:\n", "\n", " # open dataset\n", " ds = xr.open_dataset('/d2/shared/TXLA_ROMS/output_20yr_obc/2001/ocean_his_0015.nc')\n", " \n", " # Turn on chunking to activate dask and parallelize read/write.\n", " ds = ds.chunk({'ocean_time': 1})\n", " \n", " # Pick out some of the variables that will be included as coordinates\n", " ds = ds.set_coords(['Cs_r', 'Cs_w', 'hc', 'h', 'Vtransform'])\n", " \n", " # Select a subset of variables. Salt will be visualized, zeta is used to \n", " # calculate the vertical coordinate\n", " variables = ['salt', 'zeta']\n", " ds[variables].isel(ocean_time=slice(47, None, 7*24), \n", " xi_rho=slice(300, None)).to_netcdf('ROMS_example.nc', mode='w')\n", "\n", "So, the `ROMS_example.nc` file contains a subset of the grid, one 3D variable, and two time steps." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Load in ROMS dataset as an xarray object" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# load in the file\n", "ds = xr.tutorial.open_dataset(\"ROMS_example.nc\", chunks={\"ocean_time\": 1})\n", "\n", "# This is a way to turn on chunking and lazy evaluation. Opening with mfdataset, or\n", "# setting the chunking in the open_dataset would also achieve this.\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Add lazily calculated vertical coordinates\n", "\n", "Write equations to calculate the vertical coordinate. These will only be evaluated when data is requested. Information about the ROMS vertical coordinate can be found [here](https://www.myroms.org/wiki/Vertical_S-coordinate).\n", "\n", "In short, for `Vtransform==2` as used in this example, \n", "\n", "$Z_0 = (h_c \\, S + h \\,C) / (h_c + h)$\n", "\n", "$z = Z_0 (\\zeta + h) + \\zeta$\n", "\n", "where the variables are defined as in the link above."
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if ds.Vtransform == 1:\n", " Zo_rho = ds.hc * (ds.s_rho - ds.Cs_r) + ds.Cs_r * ds.h\n", " z_rho = Zo_rho + ds.zeta * (1 + Zo_rho / ds.h)\n", "elif ds.Vtransform == 2:\n", " Zo_rho = (ds.hc * ds.s_rho + ds.Cs_r * ds.h) / (ds.hc + ds.h)\n", " z_rho = ds.zeta + (ds.zeta + ds.h) * Zo_rho\n", "\n", "ds.coords[\"z_rho\"] = z_rho.transpose() # needing transpose seems to be an xarray bug\n", "ds.salt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### A naive vertical slice\n", "\n", "Creating a slice using the s-coordinate as the vertical dimension is typically not very informative." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "ds.salt.isel(xi_rho=50, ocean_time=0).plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can feed coordinate information to the plot method to give a more informative cross-section that uses the depths. Note that we did not need to slice the depth or longitude information separately, this was done automatically as the variable was sliced." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "section = ds.salt.isel(xi_rho=50, eta_rho=slice(0, 167), ocean_time=0)\n", "section.plot(x=\"lon_rho\", y=\"z_rho\", figsize=(15, 6), clim=(25, 35))\n", "plt.ylim([-100, 1]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### A plan view\n", "\n", "Now make a naive plan view, without any projection information, just using lon/lat as x/y. This looks OK, but will appear compressed because lon and lat do not have an aspect constrained by the projection." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds.salt.isel(s_rho=-1, ocean_time=0).plot(x=\"lon_rho\", y=\"lat_rho\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And let's use a projection to make it nicer, and add a coast." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "proj = ccrs.LambertConformal(central_longitude=-92, central_latitude=29)\n", "fig = plt.figure(figsize=(15, 5))\n", "ax = plt.axes(projection=proj)\n", "ds.salt.isel(s_rho=-1, ocean_time=0).plot(\n", " x=\"lon_rho\", y=\"lat_rho\", transform=ccrs.PlateCarree()\n", ")\n", "\n", "coast_10m = cfeature.NaturalEarthFeature(\n", " \"physical\", \"land\", \"10m\", edgecolor=\"k\", facecolor=\"0.8\"\n", ")\n", "ax.add_feature(coast_10m)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" } }, "nbformat": 4, "nbformat_minor": 2 } xarray-2025.09.0/doc/examples/_code/000077500000000000000000000000001505620616400170005ustar00rootroot00000000000000xarray-2025.09.0/doc/examples/_code/accessor_example.py000066400000000000000000000012771505620616400226760ustar00rootroot00000000000000import xarray as xr @xr.register_dataset_accessor("geo") class GeoAccessor: def __init__(self, xarray_obj): self._obj = xarray_obj self._center = None @property def center(self): """Return the geographic center point of this dataset.""" if self._center is None: # we can use a cache on our accessor objects, because accessors # themselves are cached on instances that access them. lon = self._obj.latitude lat = self._obj.longitude self._center = (float(lon.mean()), float(lat.mean())) return self._center def plot(self): """Plot data on a map.""" return "plotting!" xarray-2025.09.0/doc/examples/apply_ufunc_vectorize_1d.ipynb000066400000000000000000000671051505620616400240060ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## Applying unvectorized functions with `apply_ufunc`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This example will illustrate how to conveniently apply an unvectorized function `func` to xarray objects using `apply_ufunc`. `func` expects 1D numpy arrays and returns a 1D numpy array. 
Our goal is to conveniently apply this function along a dimension of xarray objects that may or may not wrap dask arrays with a signature.\n", "\n", "We will illustrate this using `np.interp`: \n", "\n", " Signature: np.interp(x, xp, fp, left=None, right=None, period=None)\n", " Docstring:\n", " One-dimensional linear interpolation.\n", "\n", " Returns the one-dimensional piecewise linear interpolant to a function\n", " with given discrete data points (`xp`, `fp`), evaluated at `x`.\n", "\n", "and write an `xr_interp` function with signature\n", "\n", " xr_interp(xarray_object, dimension_name, new_coordinate_to_interpolate_to)\n", "\n", "### Load data\n", "\n", "First lets load an example dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:51.659160Z", "start_time": "2020-01-15T14:45:50.528742Z" } }, "outputs": [], "source": [ "import xarray as xr\n", "import numpy as np\n", "\n", "xr.set_options(display_style=\"html\") # fancy HTML repr\n", "\n", "air = (\n", " xr.tutorial.load_dataset(\"air_temperature\")\n", " .air.sortby(\"lat\") # np.interp needs coordinate in ascending order\n", " .isel(time=slice(4), lon=slice(3))\n", ") # choose a small subset for convenience\n", "air" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The function we will apply is `np.interp` which expects 1D numpy arrays. This functionality is already implemented in xarray so we use that capability to make sure we are not making mistakes." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:55.431708Z", "start_time": "2020-01-15T14:45:55.104701Z" } }, "outputs": [], "source": [ "newlat = np.linspace(15, 75, 100)\n", "air.interp(lat=newlat)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's define a function that works with one vector of data along `lat` at a time." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:57.889496Z", "start_time": "2020-01-15T14:45:57.792269Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = interp1d_np(air.isel(time=0, lon=0), air.lat, newlat)\n", "expected = air.interp(lat=newlat)\n", "\n", "# no errors are raised if values are equal to within floating point precision\n", "np.testing.assert_allclose(expected.isel(time=0, lon=0).values, interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### No errors are raised so our interpolation is working.\n", "\n", "This function consumes and returns numpy arrays, which means we need to do a lot of work to convert the result back to an xarray object with meaningful metadata. This is where `apply_ufunc` is very useful.\n", "\n", "### `apply_ufunc`\n", "\n", " Apply a vectorized function for unlabeled arrays on xarray objects.\n", "\n", " The function will be mapped over the data variable(s) of the input arguments using \n", " xarray’s standard rules for labeled computation, including alignment, broadcasting, \n", " looping over GroupBy/Dataset variables, and merging of coordinates.\n", " \n", "`apply_ufunc` has many capabilities but for simplicity this example will focus on the common task of vectorizing 1D functions over nD xarray objects. We will iteratively build up the right set of arguments to `apply_ufunc` and read through many error messages in doing so." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:45:59.768626Z", "start_time": "2020-01-15T14:45:59.543808Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`apply_ufunc` needs to know a lot of information about what our function does so that it can reconstruct the outputs. In this case, the size of dimension lat has changed and we need to explicitly specify that this will happen. xarray helpfully tells us that we need to specify the kwarg `exclude_dims`." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### `exclude_dims`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "\n", "```\n", "exclude_dims : set, optional\n", " Core dimensions on the inputs to exclude from alignment and\n", " broadcasting entirely. Any input coordinates along these dimensions\n", " will be dropped. Each excluded dimension must also appear in\n", " ``input_core_dims`` for at least one argument. Only dimensions listed\n", " here are allowed to change size between input and output objects.\n", "```" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:02.187012Z", "start_time": "2020-01-15T14:46:02.105563Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Core dimensions\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Core dimensions are central to using `apply_ufunc`. In our case, our function expects to receive a 1D vector along `lat` — this is the dimension that is \"core\" to the function's functionality. Multiple core dimensions are possible. `apply_ufunc` needs to know which dimensions of each variable are core dimensions.\n", "\n", " input_core_dims : Sequence[Sequence], optional\n", " List of the same length as ``args`` giving the list of core dimensions\n", " on each input argument that should not be broadcast. By default, we\n", " assume there are no core dimensions on any input arguments.\n", "\n", " For example, ``input_core_dims=[[], ['time']]`` indicates that all\n", " dimensions on the first argument and all dimensions other than 'time'\n", " on the second argument should be broadcast.\n", "\n", " Core dimensions are automatically moved to the last axes of input\n", " variables before applying ``func``, which facilitates using NumPy style\n", " generalized ufuncs [2]_.\n", " \n", " output_core_dims : List[tuple], optional\n", " List of the same length as the number of output arguments from\n", " ``func``, giving the list of core dimensions on each output that were\n", " not broadcast on the inputs. 
By default, we assume that ``func``\n", " outputs exactly one array, with axes corresponding to each broadcast\n", " dimension.\n", "\n", " Core dimensions are assumed to appear as the last dimensions of each\n", " output in the provided order.\n", " \n", "Next we specify `\"lat\"` as `input_core_dims` on both `air` and `air.lat`" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:05.031672Z", "start_time": "2020-01-15T14:46:04.947588Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "xarray is telling us that it expected to receive back a numpy array with 0 dimensions but instead received an array with 1 dimension corresponding to `newlat`. We can fix this by specifying `output_core_dims`" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:09.325218Z", "start_time": "2020-01-15T14:46:09.303020Z" } }, "outputs": [], "source": [ "xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Finally we get some output! Let's check that this is right\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:11.295440Z", "start_time": "2020-01-15T14:46:11.226553Z" } }, "outputs": [], "source": [ "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(time=0, lon=0), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.isel(time=0, lon=0), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "No errors are raised so it is right!" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Vectorization with `np.vectorize`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now our function currently only works on one vector of data which is not so useful given our 3D dataset.\n", "Let's try passing the whole dataset. We add a `print` statement so we can see what our function receives." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:13.808646Z", "start_time": "2020-01-15T14:46:13.680098Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.isel(\n", " lon=slice(3), time=slice(4)\n", " ), # now arguments in the order expected by 'interp1_np'\n", " air.lat,\n", " newlat,\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]],\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", ")\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.isel(time=0, lon=0), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That's a hard-to-interpret error but our `print` call helpfully printed the shapes of the input data: \n", "\n", " data: (10, 53, 25) | x: (25,) | xi: (100,)\n", "\n", "We see that `air` has been passed as a 3D numpy array which is not what `np.interp` expects. Instead we want loop over all combinations of `lon` and `time`; and apply our function to each corresponding vector of data along `lat`.\n", "`apply_ufunc` makes this easy by specifying `vectorize=True`:\n", "\n", " vectorize : bool, optional\n", " If True, then assume ``func`` only takes arrays defined over core\n", " dimensions as input and vectorize it automatically with\n", " :py:func:`numpy.vectorize`. This option exists for convenience, but is\n", " almost always slower than supplying a pre-vectorized function.\n", " Using this option requires NumPy version 1.12 or newer.\n", " \n", "Also see the documentation for `np.vectorize`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.vectorize.html. Most importantly\n", "\n", " The vectorize function is provided primarily for convenience, not for performance. \n", " The implementation is essentially a for loop." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:26.633233Z", "start_time": "2020-01-15T14:46:26.515209Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air, # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], []], # list with one entry per arg\n", " output_core_dims=[[\"lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be set!\n", " vectorize=True, # loop over non-core dims\n", ")\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected, interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This unfortunately is another cryptic error from numpy. \n", "\n", "Notice that `newlat` is not an xarray object. Let's add a dimension name `new_lat` and modify the call. Note this cannot be `lat` because xarray expects dimensions to be the same size (or broadcastable) among all inputs. `output_core_dims` needs to be modified appropriately. We'll manually rename `new_lat` back to `lat` for easy checking." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:46:30.026663Z", "start_time": "2020-01-15T14:46:29.893267Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air, # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n", " output_core_dims=[[\"new_lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be a set!\n", " vectorize=True, # loop over non-core dims\n", ")\n", "interped = interped.rename({\"new_lat\": \"lat\"})\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(\n", " expected.transpose(*interped.dims), interped # order of dims is different\n", ")\n", "interped" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Notice that the printed input shapes are all 1D and correspond to one vector along the `lat` dimension.\n", "\n", "The result is now an xarray object with coordinate values copied over from `data`. This is why `apply_ufunc` is so convenient; it takes care of a lot of boilerplate necessary to apply functions that consume and produce numpy arrays to xarray objects.\n", "\n", "One final point: `lat` is now the *last* dimension in `interped`. This is a \"property\" of core dimensions: they are moved to the end before being sent to `interp1d_np` as was noted in the docstring for `input_core_dims`\n", "\n", " Core dimensions are automatically moved to the last axes of input\n", " variables before applying ``func``, which facilitates using NumPy style\n", " generalized ufuncs [2]_." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Parallelization with dask\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So far our function can only handle numpy arrays. A real benefit of `apply_ufunc` is the ability to easily parallelize over dask chunks _when needed_. \n", "\n", "We want to apply this function in a vectorized fashion over each chunk of the dask array. This is possible using dask's `blockwise`, `map_blocks`, or `apply_gufunc`. Xarray's `apply_ufunc` wraps dask's `apply_gufunc` and asking it to map the function over chunks using `apply_gufunc` is as simple as specifying `dask=\"parallelized\"`. With this level of flexibility we need to provide dask with some extra information: \n", " 1. `output_dtypes`: dtypes of all returned objects, and \n", " 2. `output_sizes`: lengths of any new dimensions. \n", " \n", "Here we need to specify `output_dtypes` since `apply_ufunc` can infer the size of the new dimension `new_lat` from the argument corresponding to the third element in `input_core_dims`. Here I choose the chunk sizes to illustrate that `np.vectorize` is still applied so that our function receives 1D vectors even though the blocks are 3D." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:48:42.469341Z", "start_time": "2020-01-15T14:48:42.344209Z" } }, "outputs": [], "source": [ "def interp1d_np(data, x, xi):\n", " print(f\"data: {data.shape} | x: {x.shape} | xi: {xi.shape}\")\n", " return np.interp(xi, x, data)\n", "\n", "\n", "interped = xr.apply_ufunc(\n", " interp1d_np, # first the function\n", " air.chunk(\n", " {\"time\": 2, \"lon\": 2}\n", " ), # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n", " output_core_dims=[[\"new_lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be a set!\n", " vectorize=True, # loop over non-core dims\n", " dask=\"parallelized\",\n", " output_dtypes=[air.dtype], # one per output\n", ").rename({\"new_lat\": \"lat\"})\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.transpose(*interped.dims), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Yay! our function is receiving 1D vectors, so we've successfully parallelized applying a 1D function over a block. If you have a distributed dashboard up, you should see computes happening as equality is checked.\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### High performance vectorization: gufuncs, numba & guvectorize\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`np.vectorize` is a very convenient function but is unfortunately slow. It is only marginally faster than writing a for loop in Python and looping. A common way to get around this is to write a base interpolation function that can handle nD arrays in a compiled language like Fortran and then pass that to `apply_ufunc`.\n", "\n", "Another option is to use the numba package which provides a very convenient `guvectorize` decorator: https://numba.pydata.org/numba-doc/latest/user/vectorize.html#the-guvectorize-decorator\n", "\n", "Any decorated function gets compiled and will loop over any non-core dimension in parallel when necessary. We need to specify some extra information:\n", "\n", " 1. Our function cannot return a variable any more. Instead it must receive a variable (the last argument) whose contents the function will modify. So we change from `def interp1d_np(data, x, xi)` to `def interp1d_np_gufunc(data, x, xi, out)`. Our computed results must be assigned to `out`. All values of `out` must be assigned explicitly.\n", " \n", " 2. `guvectorize` needs to know the dtypes of the input and output. This is specified in string form as the first argument. Each element of the tuple corresponds to each argument of the function. In this case, we specify `float64` for all inputs and outputs: `\"(float64[:], float64[:], float64[:], float64[:])\"` corresponding to `data, x, xi, out`\n", " \n", " 3. Now we need to tell numba the size of the dimensions the function takes as inputs and returns as output i.e. core dimensions. This is done in symbolic form i.e. `data` and `x` are vectors of the same length, say `n`; `xi` and the output `out` have a different length, say `m`. 
So the second argument is (again as a string)\n", " `\"(n), (n), (m) -> (m).\"` corresponding again to `data, x, xi, out`\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:48:45.267633Z", "start_time": "2020-01-15T14:48:44.943939Z" } }, "outputs": [], "source": [ "from numba import float64, guvectorize\n", "\n", "\n", "@guvectorize(\"(float64[:], float64[:], float64[:], float64[:])\", \"(n), (n), (m) -> (m)\")\n", "def interp1d_np_gufunc(data, x, xi, out):\n", " # numba doesn't really like this.\n", " # seem to support fstrings so do it the old way\n", " print(\n", " \"data: \" + str(data.shape) + \" | x:\" + str(x.shape) + \" | xi: \" + str(xi.shape)\n", " )\n", " out[:] = np.interp(xi, x, data)\n", " # gufuncs don't return data\n", " # instead you assign to a the last arg\n", " # return np.interp(xi, x, data)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The warnings are about object-mode compilation relating to the `print` statement. This means we don't get much speed up: https://numba.pydata.org/numba-doc/latest/user/performance-tips.html#no-python-mode-vs-object-mode. We'll keep the `print` statement temporarily to make sure that `guvectorize` acts like we want it to." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:48:54.755405Z", "start_time": "2020-01-15T14:48:54.634724Z" } }, "outputs": [], "source": [ "interped = xr.apply_ufunc(\n", " interp1d_np_gufunc, # first the function\n", " air.chunk(\n", " {\"time\": 2, \"lon\": 2}\n", " ), # now arguments in the order expected by 'interp1_np'\n", " air.lat, # as above\n", " newlat, # as above\n", " input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]], # list with one entry per arg\n", " output_core_dims=[[\"new_lat\"]], # returned data has one dimension\n", " exclude_dims=set((\"lat\",)), # dimensions allowed to change size. Must be a set!\n", " # vectorize=True, # not needed since numba takes care of vectorizing\n", " dask=\"parallelized\",\n", " output_dtypes=[air.dtype], # one per output\n", ").rename({\"new_lat\": \"lat\"})\n", "interped[\"lat\"] = newlat # need to add this manually\n", "xr.testing.assert_allclose(expected.transpose(*interped.dims), interped)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Yay! Our function is receiving 1D vectors and is working automatically with dask arrays. Finally let's comment out the print line and wrap everything up in a nice reusable function" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-15T14:49:28.667528Z", "start_time": "2020-01-15T14:49:28.103914Z" } }, "outputs": [], "source": [ "from numba import float64, guvectorize\n", "\n", "\n", "@guvectorize(\n", " \"(float64[:], float64[:], float64[:], float64[:])\",\n", " \"(n), (n), (m) -> (m)\",\n", " nopython=True,\n", ")\n", "def interp1d_np_gufunc(data, x, xi, out):\n", " out[:] = np.interp(xi, x, data)\n", "\n", "\n", "def xr_interp(data, dim, newdim):\n", " interped = xr.apply_ufunc(\n", " interp1d_np_gufunc, # first the function\n", " data, # now arguments in the order expected by 'interp1_np'\n", " data[dim], # as above\n", " newdim, # as above\n", " input_core_dims=[[dim], [dim], [\"__newdim__\"]], # list with one entry per arg\n", " output_core_dims=[[\"__newdim__\"]], # returned data has one dimension\n", " exclude_dims=set((dim,)), # dimensions allowed to change size. 
Must be a set!\n", " # vectorize=True, # not needed since numba takes care of vectorizing\n", " dask=\"parallelized\",\n", " output_dtypes=[\n", " data.dtype\n", " ], # one per output; could also be float or np.dtype(\"float64\")\n", " ).rename({\"__newdim__\": dim})\n", " interped[dim] = newdim # need to add this manually\n", "\n", " return interped\n", "\n", "\n", "xr.testing.assert_allclose(\n", " expected.transpose(*interped.dims),\n", " xr_interp(air.chunk({\"time\": 2, \"lon\": 2}), \"lat\", newlat),\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This technique is generalizable to any 1D function." ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" }, "nbsphinx": { "allow_errors": true }, "org": null, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": false, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.09.0/doc/examples/area_weighted_temperature.ipynb000066400000000000000000000140531505620616400242020ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": { "toc": true }, "source": [ "
Table of Contents
\n", "" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Compare weighted and unweighted mean temperature\n", "\n", "\n", "Author: [Mathias Hauser](https://github.com/mathause/)\n", "\n", "\n", "We use the `air_temperature` example dataset to calculate the area-weighted temperature over its domain. This dataset has a regular latitude/ longitude grid, thus the grid cell area decreases towards the pole. For this grid we can use the cosine of the latitude as proxy for the grid cell area.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:43:57.222351Z", "start_time": "2020-03-17T14:43:56.147541Z" } }, "outputs": [], "source": [ "%matplotlib inline\n", "\n", "import cartopy.crs as ccrs\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", "import xarray as xr" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Data\n", "\n", "Load the data, convert to celsius, and resample to daily values" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:43:57.831734Z", "start_time": "2020-03-17T14:43:57.651845Z" } }, "outputs": [], "source": [ "ds = xr.tutorial.load_dataset(\"air_temperature\")\n", "\n", "# to celsius\n", "air = ds.air - 273.15\n", "\n", "# resample from 6-hourly to daily values\n", "air = air.resample(time=\"D\").mean()\n", "\n", "air" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Plot the first timestep:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:43:59.887120Z", "start_time": "2020-03-17T14:43:59.582894Z" } }, "outputs": [], "source": [ "projection = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)\n", "\n", "f, ax = plt.subplots(subplot_kw=dict(projection=projection))\n", "\n", "air.isel(time=0).plot(transform=ccrs.PlateCarree(), cbar_kwargs=dict(shrink=0.7))\n", "ax.coastlines()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Creating weights\n", "\n", "For a rectangular grid the cosine of the latitude is proportional to the grid cell area." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:44:18.777092Z", "start_time": "2020-03-17T14:44:18.736587Z" } }, "outputs": [], "source": [ "weights = np.cos(np.deg2rad(air.lat))\n", "weights.name = \"weights\"\n", "weights" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Weighted mean" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:44:52.607120Z", "start_time": "2020-03-17T14:44:52.564674Z" } }, "outputs": [], "source": [ "air_weighted = air.weighted(weights)\n", "air_weighted" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:44:54.334279Z", "start_time": "2020-03-17T14:44:54.280022Z" } }, "outputs": [], "source": [ "weighted_mean = air_weighted.mean((\"lon\", \"lat\"))\n", "weighted_mean" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Plot: comparison with unweighted mean\n", "\n", "Note how the weighted mean temperature is higher than the unweighted." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-03-17T14:45:08.877307Z", "start_time": "2020-03-17T14:45:08.673383Z" } }, "outputs": [], "source": [ "weighted_mean.plot(label=\"weighted\")\n", "air.mean((\"lon\", \"lat\")).plot(label=\"unweighted\")\n", "\n", "plt.legend()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.6" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.09.0/doc/examples/blank_template.ipynb000066400000000000000000000021231505620616400217520ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "id": "d8f54f6a", "metadata": {}, "source": [ "# Blank template\n", "\n", "Use this notebook from Binder to test an issue or reproduce a bug report" ] }, { "cell_type": "code", "execution_count": null, "id": "41b90ede", "metadata": {}, "outputs": [], "source": [ "import xarray as xr\n", "import numpy as np\n", "import pandas as pd\n", "\n", "ds = xr.tutorial.load_dataset(\"air_temperature\")\n", "da = ds[\"air\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "effd9aeb", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 5 } xarray-2025.09.0/doc/examples/monthly-means.ipynb000066400000000000000000000165261505620616400215770ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Calculating Seasonal Averages from Time Series of Monthly Means \n", "=====\n", "\n", "Author: [Joe Hamman](https://github.com/jhamman/)\n", "\n", "The data used for this example can be found in the [xarray-data](https://github.com/pydata/xarray-data) repository. You may need to change the path to `rasm.nc` below.\n", "\n", "Suppose we have a netCDF or `xarray.Dataset` of monthly mean data and we want to calculate the seasonal average. To do this properly, we need to calculate the weighted average considering that each month has a different number of days." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:35.958210Z", "start_time": "2018-11-28T20:51:35.936966Z" } }, "outputs": [], "source": [ "%matplotlib inline\n", "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "import matplotlib.pyplot as plt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Open the `Dataset`" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.072316Z", "start_time": "2018-11-28T20:51:36.016594Z" } }, "outputs": [], "source": [ "ds = xr.tutorial.open_dataset(\"rasm\").load()\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Now for the heavy lifting:\n", "We first have to come up with the weights,\n", "- calculate the month length for each monthly data record\n", "- calculate weights using `groupby('time.season')`\n", "\n", "Finally, we just need to multiply our weights by the `Dataset` and sum along the time dimension. Creating a `DataArray` for the month length is as easy as using the `days_in_month` accessor on the time coordinate. The calendar type, in this case `'noleap'`, is automatically considered in this operation." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "month_length = ds.time.dt.days_in_month\n", "month_length" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.132413Z", "start_time": "2018-11-28T20:51:36.073708Z" } }, "outputs": [], "source": [ "# Calculate the weights by grouping by 'time.season'.\n", "weights = (\n", " month_length.groupby(\"time.season\") / month_length.groupby(\"time.season\").sum()\n", ")\n", "\n", "# Test that the sum of the weights for each season is 1.0\n", "np.testing.assert_allclose(weights.groupby(\"time.season\").sum().values, np.ones(4))\n", "\n", "# Calculate the weighted average\n", "ds_weighted = (ds * weights).groupby(\"time.season\").sum(dim=\"time\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.152913Z", "start_time": "2018-11-28T20:51:36.133997Z" } }, "outputs": [], "source": [ "ds_weighted" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:36.190765Z", "start_time": "2018-11-28T20:51:36.154416Z" } }, "outputs": [], "source": [ "# only used for comparisons\n", "ds_unweighted = ds.groupby(\"time.season\").mean(\"time\")\n", "ds_diff = ds_weighted - ds_unweighted" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:40.264871Z", "start_time": "2018-11-28T20:51:36.192467Z" } }, "outputs": [], "source": [ "# Quick plot to show the results\n", "notnull = pd.notnull(ds_unweighted[\"Tair\"][0])\n", "\n", "fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(14, 12))\n", "for i, season in enumerate((\"DJF\", \"MAM\", \"JJA\", \"SON\")):\n", " ds_weighted[\"Tair\"].sel(season=season).where(notnull).plot.pcolormesh(\n", " ax=axes[i, 0],\n", " vmin=-30,\n", " vmax=30,\n", " cmap=\"Spectral_r\",\n", " add_colorbar=True,\n", " extend=\"both\",\n", " )\n", "\n", " ds_unweighted[\"Tair\"].sel(season=season).where(notnull).plot.pcolormesh(\n", " ax=axes[i, 1],\n", " vmin=-30,\n", " vmax=30,\n", " cmap=\"Spectral_r\",\n", " add_colorbar=True,\n", " extend=\"both\",\n", " )\n", "\n", " 
ds_diff[\"Tair\"].sel(season=season).where(notnull).plot.pcolormesh(\n", " ax=axes[i, 2],\n", " vmin=-0.1,\n", " vmax=0.1,\n", " cmap=\"RdBu_r\",\n", " add_colorbar=True,\n", " extend=\"both\",\n", " )\n", "\n", " axes[i, 0].set_ylabel(season)\n", " axes[i, 1].set_ylabel(\"\")\n", " axes[i, 2].set_ylabel(\"\")\n", "\n", "for ax in axes.flat:\n", " ax.axes.get_xaxis().set_ticklabels([])\n", " ax.axes.get_yaxis().set_ticklabels([])\n", " ax.axes.axis(\"tight\")\n", " ax.set_xlabel(\"\")\n", "\n", "axes[0, 0].set_title(\"Weighted by DPM\")\n", "axes[0, 1].set_title(\"Equal Weighting\")\n", "axes[0, 2].set_title(\"Difference\")\n", "\n", "plt.tight_layout()\n", "\n", "fig.suptitle(\"Seasonal Surface Air Temperature\", fontsize=16, y=1.02)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:51:40.284898Z", "start_time": "2018-11-28T20:51:40.266406Z" } }, "outputs": [], "source": [ "# Wrap it into a simple function\n", "def season_mean(ds, calendar=\"standard\"):\n", " # Make a DataArray with the number of days in each month, size = len(time)\n", " month_length = ds.time.dt.days_in_month\n", "\n", " # Calculate the weights by grouping by 'time.season'\n", " weights = (\n", " month_length.groupby(\"time.season\") / month_length.groupby(\"time.season\").sum()\n", " )\n", "\n", " # Test that the sum of the weights for each season is 1.0\n", " np.testing.assert_allclose(weights.groupby(\"time.season\").sum().values, np.ones(4))\n", "\n", " # Calculate the weighted average\n", " return (ds * weights).groupby(\"time.season\").sum(dim=\"time\")" ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.09.0/doc/examples/multidimensional-coords.ipynb000066400000000000000000000147421505620616400236460ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Working with Multidimensional Coordinates\n", "\n", "Author: [Ryan Abernathey](https://github.com/rabernat)\n", "\n", "Many datasets have _physical coordinates_ which differ from their _logical coordinates_. Xarray provides several ways to plot and analyze such datasets." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:49:56.068395Z", "start_time": "2018-11-28T20:49:56.035349Z" } }, "outputs": [], "source": [ "%matplotlib inline\n", "import numpy as np\n", "import pandas as pd\n", "import xarray as xr\n", "import cartopy.crs as ccrs\n", "from matplotlib import pyplot as plt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "As an example, consider this dataset from the [xarray-data](https://github.com/pydata/xarray-data) repository." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:13.629720Z", "start_time": "2018-11-28T20:50:13.484542Z" } }, "outputs": [], "source": [ "ds = xr.tutorial.open_dataset(\"rasm\").load()\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In this example, the _logical coordinates_ are `x` and `y`, while the _physical coordinates_ are `xc` and `yc`, which represent the longitudes and latitudes of the data." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:15.836061Z", "start_time": "2018-11-28T20:50:15.768376Z" } }, "outputs": [], "source": [ "print(ds.xc.attrs)\n", "print(ds.yc.attrs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Plotting ##\n", "\n", "Let's examine these coordinate variables by plotting them." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:17.928556Z", "start_time": "2018-11-28T20:50:17.031211Z" } }, "outputs": [], "source": [ "fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(14, 4))\n", "ds.xc.plot(ax=ax1)\n", "ds.yc.plot(ax=ax2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Note that the variables `xc` (longitude) and `yc` (latitude) are two-dimensional scalar fields.\n", "\n", "If we try to plot the data variable `Tair`, by default we get the logical coordinates." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:20.567749Z", "start_time": "2018-11-28T20:50:19.999393Z" } }, "outputs": [], "source": [ "ds.Tair[0].plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In order to visualize the data on a conventional latitude-longitude grid, we can take advantage of xarray's ability to apply [cartopy](https://cartopy.readthedocs.io/stable/) map projections." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:31.131708Z", "start_time": "2018-11-28T20:50:30.444697Z" } }, "outputs": [], "source": [ "plt.figure(figsize=(14, 6))\n", "ax = plt.axes(projection=ccrs.PlateCarree())\n", "ax.set_global()\n", "ds.Tair[0].plot.pcolormesh(\n", " ax=ax, transform=ccrs.PlateCarree(), x=\"xc\", y=\"yc\", add_colorbar=False\n", ")\n", "ax.coastlines()\n", "ax.set_ylim([0, 90]);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multidimensional Groupby ##\n", "\n", "The above example allowed us to visualize the data on a regular latitude-longitude grid. But what if we want to do a calculation that involves grouping over one of these physical coordinates (rather than the logical coordinates), for example, calculating the mean temperature at each latitude. This can be achieved using xarray's `groupby` function, which accepts multidimensional variables. By default, `groupby` will use every unique value in the variable, which is probably not what we want. Instead, we can use the `groupby_bins` function to specify the output coordinates of the group. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-11-28T20:50:43.670463Z", "start_time": "2018-11-28T20:50:43.245501Z" } }, "outputs": [], "source": [ "# define two-degree wide latitude bins\n", "lat_bins = np.arange(0, 91, 2)\n", "# define a label for each bin corresponding to the central latitude\n", "lat_center = np.arange(1, 90, 2)\n", "# group according to those bins and take the mean\n", "Tair_lat_mean = ds.Tair.groupby_bins(\"yc\", lat_bins, labels=lat_center).mean(\n", " dim=xr.ALL_DIMS\n", ")\n", "# plot the result\n", "Tair_lat_mean.plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The resulting coordinate for the `groupby_bins` operation got the `_bins` suffix appended: `yc_bins`. This help us distinguish it from the original multidimensional variable `yc`.\n", "\n", "**Note**: This group-by-latitude approach does not take into account the finite-size geometry of grid cells. It simply bins each value according to the coordinates at the cell center. Xarray has no understanding of grid cells and their geometry. More precise geographic regridding for xarray data is available via the [xesmf](https://xesmf.readthedocs.io) package." ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.8" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 2 } xarray-2025.09.0/doc/examples/visualization_gallery.ipynb000066400000000000000000000137441505620616400234230ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Visualization Gallery\n", "\n", "This notebook shows common visualization issues encountered in xarray." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import cartopy.crs as ccrs\n", "import matplotlib.pyplot as plt\n", "import xarray as xr\n", "\n", "%matplotlib inline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Load example dataset:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ds = xr.tutorial.load_dataset(\"air_temperature\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multiple plots and map projections\n", "\n", "Control the map projection parameters on multiple axes\n", "\n", "This example illustrates how to plot multiple maps and control their extent\n", "and aspect ratio.\n", "\n", "For more details see [this discussion](https://github.com/pydata/xarray/issues/1397#issuecomment-299190567) on github." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air = ds.air.isel(time=[0, 724]) - 273.15\n", "\n", "# This is the map projection we want to plot *onto*\n", "map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)\n", "\n", "p = air.plot(\n", " transform=ccrs.PlateCarree(), # the data's projection\n", " col=\"time\",\n", " col_wrap=1, # multiplot settings\n", " aspect=ds.dims[\"lon\"] / ds.dims[\"lat\"], # for a sensible figsize\n", " subplot_kws={\"projection\": map_proj},\n", ") # the plot's projection\n", "\n", "# We have to set the map's options on all axes\n", "for ax in p.axes.flat:\n", " ax.coastlines()\n", " ax.set_extent([-160, -30, 5, 75])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Centered colormaps\n", "\n", "Xarray's automatic colormaps choice" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air = ds.air.isel(time=0)\n", "\n", "f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))\n", "\n", "# The first plot (in kelvins) chooses \"viridis\" and uses the data's min/max\n", "air.plot(ax=ax1, cbar_kwargs={\"label\": \"K\"})\n", "ax1.set_title(\"Kelvins: default\")\n", "ax2.set_xlabel(\"\")\n", "\n", "# The second plot (in celsius) now chooses \"BuRd\" and centers min/max around 0\n", "airc = air - 273.15\n", "airc.plot(ax=ax2, cbar_kwargs={\"label\": \"Β°C\"})\n", "ax2.set_title(\"Celsius: default\")\n", "ax2.set_xlabel(\"\")\n", "ax2.set_ylabel(\"\")\n", "\n", "# The center doesn't have to be 0\n", "air.plot(ax=ax3, center=273.15, cbar_kwargs={\"label\": \"K\"})\n", "ax3.set_title(\"Kelvins: center=273.15\")\n", "\n", "# Or it can be ignored\n", "airc.plot(ax=ax4, center=False, cbar_kwargs={\"label\": \"Β°C\"})\n", "ax4.set_title(\"Celsius: center=False\")\n", "ax4.set_ylabel(\"\")\n", "\n", "# Make it nice\n", "plt.tight_layout()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Control the plot's colorbar\n", "\n", "Use ``cbar_kwargs`` keyword to specify the number of ticks.\n", "The ``spacing`` kwarg can be used to draw proportional ticks." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air2d = ds.air.isel(time=500)\n", "\n", "# Prepare the figure\n", "f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))\n", "\n", "# Irregular levels to illustrate the use of a proportional colorbar\n", "levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]\n", "\n", "# Plot data\n", "air2d.plot(ax=ax1, levels=levels)\n", "air2d.plot(ax=ax2, levels=levels, cbar_kwargs={\"ticks\": levels})\n", "air2d.plot(\n", " ax=ax3, levels=levels, cbar_kwargs={\"ticks\": levels, \"spacing\": \"proportional\"}\n", ")\n", "\n", "# Show plots\n", "plt.tight_layout()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Multiple lines from a 2d DataArray\n", "\n", "Use ``xarray.plot.line`` on a 2d DataArray to plot selections as\n", "multiple lines.\n", "\n", "See ``plotting.multiplelines`` for more details." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "air = ds.air - 273.15 # to celsius\n", "\n", "# Prepare the figure\n", "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\n", "\n", "# Selected latitude indices\n", "isel_lats = [10, 15, 20]\n", "\n", "# Temperature vs longitude plot - illustrates the \"hue\" kwarg\n", "air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue=\"lat\")\n", "ax1.set_ylabel(\"Β°C\")\n", "\n", "# Temperature vs time plot - illustrates the \"x\" and \"add_legend\" kwargs\n", "air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x=\"time\", add_legend=False)\n", "ax2.set_ylabel(\"\")\n", "\n", "# Show\n", "plt.tight_layout()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": {}, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 4 } xarray-2025.09.0/doc/examples/weather-data.ipynb000066400000000000000000000212431505620616400213420ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Toy weather data\n", "\n", "Here is an example of how to easily manipulate a toy weather dataset using\n", "xarray and other recommended Python libraries:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import seaborn as sns\n", "\n", "import xarray as xr\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:43:36.127628Z", "start_time": "2020-01-27T15:43:36.081733Z" } }, "outputs": [], "source": [ "np.random.seed(123)\n", "\n", "xr.set_options(display_style=\"html\")\n", "\n", "times = pd.date_range(\"2000-01-01\", \"2001-12-31\", name=\"time\")\n", "annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))\n", "\n", "base = 10 + 15 * annual_cycle.reshape(-1, 1)\n", "tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)\n", "tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)\n", "\n", "ds = xr.Dataset(\n", " {\n", " \"tmin\": ((\"time\", \"location\"), tmin_values),\n", " \"tmax\": ((\"time\", \"location\"), tmax_values),\n", " },\n", " {\"time\": times, \"location\": [\"IA\", \"IN\", \"IL\"]},\n", ")\n", "\n", "ds" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Examine a dataset with pandas and seaborn" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Convert to a pandas DataFrame" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:47:14.160297Z", "start_time": "2020-01-27T15:47:14.126738Z" } }, "outputs": [], "source": [ "df = ds.to_dataframe()\n", "df.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:47:32.682065Z", "start_time": "2020-01-27T15:47:32.652629Z" } }, "outputs": [], "source": [ "df.describe()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Visualize using pandas" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": 
"2020-01-27T15:47:34.617042Z", "start_time": "2020-01-27T15:47:34.282605Z" } }, "outputs": [], "source": [ "ds.mean(dim=\"location\").to_dataframe().plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Visualize using seaborn" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:47:37.643175Z", "start_time": "2020-01-27T15:47:37.202479Z" } }, "outputs": [], "source": [ "sns.pairplot(df.reset_index(), vars=ds.data_vars)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Probability of freeze by calendar month" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:48:11.241224Z", "start_time": "2020-01-27T15:48:11.211156Z" } }, "outputs": [], "source": [ "freeze = (ds[\"tmin\"] <= 0).groupby(\"time.month\").mean(\"time\")\n", "freeze" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:48:13.131247Z", "start_time": "2020-01-27T15:48:12.924985Z" } }, "outputs": [], "source": [ "freeze.to_pandas().plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Monthly averaging" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:48:08.498259Z", "start_time": "2020-01-27T15:48:08.210890Z" } }, "outputs": [], "source": [ "monthly_avg = ds.resample(time=\"1MS\").mean()\n", "monthly_avg.sel(location=\"IA\").to_dataframe().plot(style=\"s-\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Note that ``MS`` here refers to Month-Start; ``M`` labels Month-End (the last day of the month)." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate monthly anomalies" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In climatology, \"anomalies\" refer to the difference between observations and\n", "typical weather for a particular season. Unlike observations, anomalies should\n", "not show any seasonal cycle." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:49:34.855086Z", "start_time": "2020-01-27T15:49:34.406439Z" } }, "outputs": [], "source": [ "climatology = ds.groupby(\"time.month\").mean(\"time\")\n", "anomalies = ds.groupby(\"time.month\") - climatology\n", "anomalies.mean(\"location\").to_dataframe()[[\"tmin\", \"tmax\"]].plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate standardized monthly anomalies" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can create standardized anomalies where the difference between the\n", "observations and the climatological monthly mean is\n", "divided by the climatological standard deviation." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:50:09.144586Z", "start_time": "2020-01-27T15:50:08.734682Z" } }, "outputs": [], "source": [ "climatology_mean = ds.groupby(\"time.month\").mean(\"time\")\n", "climatology_std = ds.groupby(\"time.month\").std(\"time\")\n", "stand_anomalies = xr.apply_ufunc(\n", " lambda x, m, s: (x - m) / s,\n", " ds.groupby(\"time.month\"),\n", " climatology_mean,\n", " climatology_std,\n", ")\n", "\n", "stand_anomalies.mean(\"location\").to_dataframe()[[\"tmin\", \"tmax\"]].plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Fill missing values with climatology" ] }, { "cell_type": "markdown", "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:50:46.192491Z", "start_time": "2020-01-27T15:50:46.174554Z" } }, "source": [ "The ``fillna`` method on grouped objects lets you easily fill missing values by group:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:51:40.279299Z", "start_time": "2020-01-27T15:51:40.220342Z" } }, "outputs": [], "source": [ "# throw away the first half of every month\n", "some_missing = ds.tmin.sel(time=ds[\"time.day\"] > 15).reindex_like(ds)\n", "filled = some_missing.groupby(\"time.month\").fillna(climatology.tmin)\n", "both = xr.Dataset({\"some_missing\": some_missing, \"filled\": filled})\n", "both" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:52:11.815769Z", "start_time": "2020-01-27T15:52:11.770825Z" } }, "outputs": [], "source": [ "df = both.sel(time=\"2000\").mean(\"location\").reset_coords(drop=True).to_dataframe()\n", "df.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2020-01-27T15:52:14.867866Z", "start_time": "2020-01-27T15:52:14.449684Z" } }, "outputs": [], "source": [ "df[[\"filled\", \"some_missing\"]].plot()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": true, "toc_position": {}, "toc_section_display": true, "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 2 } xarray-2025.09.0/doc/gallery.rst000066400000000000000000000013531505620616400163040ustar00rootroot00000000000000Gallery ======= Here's a list of examples on how to use xarray. We will be adding more examples soon. Contributions are highly welcomed and appreciated. So, if you are interested in contributing, please consult the :ref:`contributing` guide. Notebook Examples ----------------- .. include:: notebooks-examples-gallery.txt .. toctree:: :maxdepth: 1 :hidden: examples/weather-data examples/monthly-means examples/area_weighted_temperature examples/multidimensional-coords examples/visualization_gallery examples/ROMS_ocean_model examples/ERA5-GRIB-example examples/apply_ufunc_vectorize_1d examples/blank_template External Examples ----------------- .. 
include:: external-examples-gallery.txt xarray-2025.09.0/doc/gallery.yml000066400000000000000000000034641505620616400163020ustar00rootroot00000000000000notebooks-examples: - title: Toy weather data path: examples/weather-data.html thumbnail: _static/thumbnails/toy-weather-data.png - title: Calculating Seasonal Averages from Timeseries of Monthly Means path: examples/monthly-means.html thumbnail: _static/thumbnails/monthly-means.png - title: Compare weighted and unweighted mean temperature path: examples/area_weighted_temperature.html thumbnail: _static/thumbnails/area_weighted_temperature.png - title: Working with Multidimensional Coordinates path: examples/multidimensional-coords.html thumbnail: _static/thumbnails/multidimensional-coords.png - title: Visualization Gallery path: examples/visualization_gallery.html thumbnail: _static/thumbnails/visualization_gallery.png - title: GRIB Data Example path: examples/ERA5-GRIB-example.html thumbnail: _static/thumbnails/ERA5-GRIB-example.png - title: Applying unvectorized functions with apply_ufunc path: examples/apply_ufunc_vectorize_1d.html thumbnail: _static/logos/Xarray_Logo_RGB_Final.svg external-examples: - title: Managing raster data with rioxarray path: https://corteva.github.io/rioxarray/stable/examples/examples.html thumbnail: _static/logos/Xarray_Logo_RGB_Final.svg - title: Xarray and dask on the cloud with Pangeo path: https://gallery.pangeo.io/ thumbnail: https://avatars.githubusercontent.com/u/60833341?s=200&v=4 - title: Xarray with Dask Arrays path: https://examples.dask.org/xarray.html_ thumbnail: _static/logos/Xarray_Logo_RGB_Final.svg - title: Project Pythia Foundations Book path: https://foundations.projectpythia.org/core/xarray.html thumbnail: https://raw.githubusercontent.com/ProjectPythia/projectpythia.github.io/main/portal/_static/images/logos/pythia_logo-blue-btext-twocolor.svg xarray-2025.09.0/doc/gallery/000077500000000000000000000000001505620616400155505ustar00rootroot00000000000000xarray-2025.09.0/doc/gallery/README.txt000066400000000000000000000000361505620616400172450ustar00rootroot00000000000000.. _recipes: Gallery ======= xarray-2025.09.0/doc/gallery/plot_cartopy_facetgrid.py000066400000000000000000000024061505620616400226530ustar00rootroot00000000000000""" ================================== Multiple plots and map projections ================================== Control the map projection parameters on multiple axes This example illustrates how to plot multiple maps and control their extent and aspect ratio. For more details see `this discussion`_ on github. .. 
_this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567 """ import cartopy.crs as ccrs import matplotlib.pyplot as plt import xarray as xr # Load the data ds = xr.tutorial.load_dataset("air_temperature") air = ds.air.isel(time=[0, 724]) - 273.15 # This is the map projection we want to plot *onto* map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45) p = air.plot( transform=ccrs.PlateCarree(), # the data's projection col="time", col_wrap=1, # multiplot settings aspect=ds.sizes["lon"] / ds.sizes["lat"], # for a sensible figsize subplot_kws={"projection": map_proj}, # the plot's projection ) # We have to set the map's options on all four axes for ax in p.axes.flat: ax.coastlines() ax.set_extent([-160, -30, 5, 75]) # Without this aspect attributes the maps will look chaotic and the # "extent" attribute above will be ignored ax.set_aspect("equal") plt.show() xarray-2025.09.0/doc/gallery/plot_colorbar_center.py000066400000000000000000000020131505620616400223170ustar00rootroot00000000000000""" ================== Centered colormaps ================== xarray's automatic colormaps choice """ import matplotlib.pyplot as plt import xarray as xr # Load the data ds = xr.tutorial.load_dataset("air_temperature") air = ds.air.isel(time=0) f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6)) # The first plot (in kelvins) chooses "viridis" and uses the data's min/max air.plot(ax=ax1, cbar_kwargs={"label": "K"}) ax1.set_title("Kelvins: default") ax2.set_xlabel("") # The second plot (in celsius) now chooses "BuRd" and centers min/max around 0 airc = air - 273.15 airc.plot(ax=ax2, cbar_kwargs={"label": "Β°C"}) ax2.set_title("Celsius: default") ax2.set_xlabel("") ax2.set_ylabel("") # The center doesn't have to be 0 air.plot(ax=ax3, center=273.15, cbar_kwargs={"label": "K"}) ax3.set_title("Kelvins: center=273.15") # Or it can be ignored airc.plot(ax=ax4, center=False, cbar_kwargs={"label": "Β°C"}) ax4.set_title("Celsius: center=False") ax4.set_ylabel("") # Make it nice plt.tight_layout() plt.show() xarray-2025.09.0/doc/gallery/plot_control_colorbar.py000066400000000000000000000015211505620616400225220ustar00rootroot00000000000000""" =========================== Control the plot's colorbar =========================== Use ``cbar_kwargs`` keyword to specify the number of ticks. The ``spacing`` kwarg can be used to draw proportional ticks. """ import matplotlib.pyplot as plt import xarray as xr # Load the data air_temp = xr.tutorial.load_dataset("air_temperature") air2d = air_temp.air.isel(time=500) # Prepare the figure f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4)) # Irregular levels to illustrate the use of a proportional colorbar levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340] # Plot data air2d.plot(ax=ax1, levels=levels) air2d.plot(ax=ax2, levels=levels, cbar_kwargs={"ticks": levels}) air2d.plot( ax=ax3, levels=levels, cbar_kwargs={"ticks": levels, "spacing": "proportional"} ) # Show plots plt.tight_layout() plt.show() xarray-2025.09.0/doc/gallery/plot_lines_from_2d.py000066400000000000000000000016141505620616400217040ustar00rootroot00000000000000""" ================================== Multiple lines from a 2d DataArray ================================== Use :py:func:`xarray.plot.line` on a 2d DataArray to plot selections as multiple lines. See :ref:`plotting.multiplelines` for more details. 
""" import matplotlib.pyplot as plt import xarray as xr # Load the data ds = xr.tutorial.load_dataset("air_temperature") air = ds.air - 273.15 # to celsius # Prepare the figure f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True) # Selected latitude indices isel_lats = [10, 15, 20] # Temperature vs longitude plot - illustrates the "hue" kwarg air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue="lat") ax1.set_ylabel("Β°C") # Temperature vs time plot - illustrates the "x" and "add_legend" kwargs air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x="time", add_legend=False) ax2.set_ylabel("") # Show plt.tight_layout() plt.show() xarray-2025.09.0/doc/get-help/000077500000000000000000000000001505620616400156165ustar00rootroot00000000000000xarray-2025.09.0/doc/get-help/faq.rst000066400000000000000000000517751505620616400171360ustar00rootroot00000000000000.. _faq: Frequently Asked Questions ========================== .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) Your documentation keeps mentioning pandas. What is pandas? ----------------------------------------------------------- pandas_ is a very popular data analysis package in Python with wide usage in many fields. Our API is heavily inspired by pandas β€” this is why there are so many references to pandas. .. _pandas: https://pandas.pydata.org Do I need to know pandas to use xarray? --------------------------------------- No! Our API is heavily inspired by pandas so while knowing pandas will let you become productive more quickly, knowledge of pandas is not necessary to use xarray. Should I use xarray instead of pandas? -------------------------------------- It's not an either/or choice! xarray provides robust support for converting back and forth between the tabular data-structures of pandas and its own multi-dimensional data-structures. That said, you should only bother with xarray if some aspect of data is fundamentally multi-dimensional. If your data is unstructured or one-dimensional, pandas is usually the right choice: it has better performance for common operations such as ``groupby`` and you'll find far more usage examples online. Why is pandas not enough? ------------------------- pandas is a fantastic library for analysis of low-dimensional labelled data - if it can be sensibly described as "rows and columns", pandas is probably the right choice. However, sometimes we want to use higher dimensional arrays (`ndim > 2`), or arrays for which the order of dimensions (e.g., columns vs rows) shouldn't really matter. For example, the images of a movie can be natively represented as an array with four dimensions: time, row, column and color. pandas has historically supported N-dimensional panels, but deprecated them in version 0.20 in favor of xarray data structures. There are now built-in methods on both sides to convert between pandas and xarray, allowing for more focused development effort. Xarray objects have a much richer model of dimensionality - if you were using Panels: - You need to create a new factory type for each dimensionality. - You can't do math between NDPanels with different dimensionality. - Each dimension in a NDPanel has a name (e.g., 'labels', 'items', 'major_axis', etc.) but the dimension names refer to order, not their meaning. You can't specify an operation as to be applied along the "time" axis. - You often have to manually convert collections of pandas arrays (Series, DataFrames, etc) to have the same number of dimensions. 
In contrast, this sort of data structure fits very naturally in an xarray ``Dataset``. You can :ref:`read about switching from Panels to xarray here `. pandas gets a lot of things right, but many science, engineering and complex analytics use cases need fully multi-dimensional data structures. How do xarray data structures differ from those found in pandas? ---------------------------------------------------------------- The main distinguishing feature of xarray's ``DataArray`` over labeled arrays in pandas is that dimensions can have names (e.g., "time", "latitude", "longitude"). Names are much easier to keep track of than axis numbers, and xarray uses dimension names for indexing, aggregation and broadcasting. Not only can you write ``x.sel(time='2000-01-01')`` and ``x.mean(dim='time')``, but operations like ``x - x.mean(dim='time')`` always work, no matter the order of the "time" dimension. You never need to reshape arrays (e.g., with ``np.newaxis``) to align them for arithmetic operations in xarray. Why don't aggregations return Python scalars? --------------------------------------------- Xarray tries hard to be self-consistent: operations on a ``DataArray`` (resp. ``Dataset``) return another ``DataArray`` (resp. ``Dataset``) object. In particular, operations returning scalar values (e.g. indexing or aggregations like ``mean`` or ``sum`` applied to all axes) will also return xarray objects. Unfortunately, this means we sometimes have to explicitly cast our results from xarray when using them in other libraries. As an illustration, the following code fragment .. jupyter-execute:: arr = xr.DataArray([1, 2, 3]) pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()}) does not yield the pandas DataFrame we expected. We need to specify the type conversion ourselves: .. jupyter-execute:: pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()}, dtype=float) Alternatively, we could use the ``item`` method or the ``float`` constructor to convert values one at a time .. jupyter-execute:: pd.Series({"x": arr[0].item(), "mean": float(arr.mean())}) .. _approach to metadata: What is your approach to metadata? ---------------------------------- We are firm believers in the power of labeled data! In addition to dimensions and coordinates, xarray supports arbitrary metadata in the form of global (Dataset) and variable specific (DataArray) attributes (``attrs``). Automatic interpretation of labels is powerful but also reduces flexibility. With xarray, we draw a firm line between labels that the library understands (``dims`` and ``coords``) and labels for users and user code (``attrs``). For example, we do not automatically interpret and enforce units or `CF conventions`_. (An exception is serialization to and from netCDF files.) .. _CF conventions: https://cfconventions.org/latest.html An implication of this choice is that we do not propagate ``attrs`` through most operations unless explicitly flagged (some methods have a ``keep_attrs`` option, and there is a global flag, accessible with :py:func:`xarray.set_options`, for setting this to be always True or False). Similarly, xarray does not check for conflicts between ``attrs`` when combining arrays and datasets, unless explicitly requested with the option ``compat='identical'``. The guiding principle is that metadata should not be allowed to get in the way. In general xarray uses the capabilities of the backends for reading and writing attributes. That has some implications on roundtripping. 
One example for such inconsistency is that size-1 lists will roundtrip as a single element (for netcdf4 backends). What other netCDF related Python libraries should I know about? --------------------------------------------------------------- `netCDF4-python`__ provides a lower level interface for working with netCDF and OpenDAP datasets in Python. We use netCDF4-python internally in xarray, and have contributed a number of improvements and fixes upstream. Xarray does not yet support all of netCDF4-python's features, such as modifying files on-disk. __ https://unidata.github.io/netcdf4-python/ Iris_ (supported by the UK Met Office) provides similar tools for in-memory manipulation of labeled arrays, aimed specifically at weather and climate data needs. Indeed, the Iris :py:class:`~iris.cube.Cube` was direct inspiration for xarray's :py:class:`~xarray.DataArray`. Xarray and Iris take very different approaches to handling metadata: Iris strictly interprets `CF conventions`_. Iris particularly shines at mapping, thanks to its integration with Cartopy_. .. _Iris: https://scitools-iris.readthedocs.io/en/stable/ .. _Cartopy: https://cartopy.readthedocs.io/stable/ We think the design decisions we have made for xarray (namely, basing it on pandas) make it a faster and more flexible data analysis tool. That said, Iris has some great domain specific functionality, and there are dedicated methods for converting back and forth between xarray and Iris. See :ref:`Reading and Writing Iris data ` for more details. What other projects leverage xarray? ------------------------------------ See section :ref:`ecosystem`. How do I open format X file as an xarray dataset? ------------------------------------------------- To open a format X file in xarray, you need to know the `format of the data `_ you want to read. If the format is supported, you can use the appropriate function provided by xarray. The following table provides functions used for different file formats in xarray, as well as links to other packages that can be used: .. csv-table:: :header: "File Format", "Open via", "Related Packages" :widths: 15, 45, 15 "NetCDF (.nc, .nc4, .cdf)","``open_dataset()`` OR ``open_mfdataset()``", "`netCDF4 `_, `cdms2 `_" "HDF5 (.h5, .hdf5)","``open_dataset()`` OR ``open_mfdataset()``", "`h5py `_, `pytables `_ " "GRIB (.grb, .grib)", "``open_dataset()``", "`cfgrib `_, `pygrib `_" "CSV (.csv)","``open_dataset()``", "`pandas`_ , `dask `_" "Zarr (.zarr)","``open_dataset()`` OR ``open_mfdataset()``", "`zarr `_ , `dask `_ " .. _pandas: https://pandas.pydata.org If you are unable to open a file in xarray: - You should check that you have all the necessary dependencies installed, including any optional dependencies (like scipy, h5netcdf, cfgrib, etc., as mentioned below) that may be required for the specific use case. - If all necessary dependencies are installed but the file still cannot be opened, you should check whether there are any specialized backends available for the specific file format you are working with. You can consult the xarray documentation or the documentation for the file format to determine if a specialized backend is required, and if so, how to install and use it with xarray. - If the file format is not supported by xarray or any of its available backends, you may need to use a different library or tool to work with the file. You can consult the documentation for the file format to determine which tools are recommended for working with it.
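To see which backends are actually registered in your environment, one quick check is :py:func:`xarray.backends.list_engines`; the short sketch below uses a hypothetical local netCDF file path:

.. code:: python

    import xarray as xr

    # dictionary of the backend engines currently registered with xarray
    print(xr.backends.list_engines())

    # pick one of them explicitly when opening a file (the path is only a placeholder)
    ds = xr.open_dataset("/path/to/my/file.nc", engine="netcdf4")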
Xarray provides a default engine to read files, which is usually determined by the file extension or type. If you don't specify the engine, xarray will try to guess it based on the file extension or type, and may fall back to a different engine if it cannot determine the correct one. Therefore, it's good practice to always specify the engine explicitly, to ensure that the correct backend is used, especially when working with complex data formats or non-standard file extensions. :py:func:`xarray.backends.list_engines` is a function in xarray that returns a dictionary of available engines and their BackendEntrypoint objects. You can use the ``engine`` argument to specify the backend when calling ``open_dataset()`` or other reading functions in xarray, as shown below: NetCDF ~~~~~~ If you are reading a netCDF file with a ".nc" extension, the default engine is ``netcdf4``. However, if you have files with non-standard extensions or if the file format is ambiguous, specify the engine explicitly to ensure that the correct backend is used. Use :py:func:`~xarray.open_dataset` to open a NetCDF file and return an xarray Dataset object. .. code:: python import xarray as xr # use xarray to open the file and return an xarray.Dataset object using netcdf4 engine ds = xr.open_dataset("/path/to/my/file.nc", engine="netcdf4") # Print Dataset object print(ds) # use xarray to open the file and return an xarray.Dataset object using scipy engine ds = xr.open_dataset("/path/to/my/file.nc", engine="scipy") We recommend installing ``scipy`` via conda using the below given code: :: conda install scipy HDF5 ~~~~ Use :py:func:`~xarray.open_dataset` to open an HDF5 file and return an xarray Dataset object. You should specify the ``engine`` keyword argument when reading HDF5 files with xarray, as there are multiple backends that can be used to read HDF5 files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format. To read HDF5 files with xarray, you can use the :py:func:`~xarray.open_dataset` function with the ``h5netcdf`` backend, as follows: .. code:: python import xarray as xr # Open HDF5 file as an xarray Dataset ds = xr.open_dataset("path/to/hdf5/file.hdf5", engine="h5netcdf") # Print Dataset object print(ds) We recommend installing the ``h5netcdf`` library using the below given code: :: conda install -c conda-forge h5netcdf If you want to use the ``netCDF4`` backend to read a file with a ".h5" extension (which is typically associated with HDF5 file format), you can specify the engine argument as follows: .. code:: python ds = xr.open_dataset("path/to/file.h5", engine="netcdf4") GRIB ~~~~ You should specify the ``engine`` keyword argument when reading GRIB files with xarray, as there are multiple backends that can be used to read GRIB files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format. Use the :py:func:`~xarray.open_dataset` function with the ``cfgrib`` engine to open a GRIB file as an xarray Dataset. ..
code:: python import xarray as xr # define the path to your GRIB file and the engine you want to use to open the file # use ``open_dataset()`` to open the file with the specified engine and return an xarray.Dataset object ds = xr.open_dataset("path/to/your/file.grib", engine="cfgrib") # Print Dataset object print(ds) We recommend installing ``cfgrib`` via conda using the below given code: :: conda install -c conda-forge cfgrib CSV ~~~ By default, xarray uses the built-in ``pandas`` library to read CSV files. In general, you don't need to specify the engine keyword argument when reading CSV files with xarray, as the default ``pandas`` engine is usually sufficient for most use cases. If you are working with very large CSV files or if you need to perform certain types of data processing that are not supported by the default ``pandas`` engine, you may want to use a different backend. In such cases, you can specify the engine argument when reading the CSV file with xarray. To read CSV files with xarray, use the :py:func:`~xarray.open_dataset` function and specify the path to the CSV file as follows: .. code:: python import xarray as xr import pandas as pd # Load CSV file into pandas DataFrame using the "c" engine df = pd.read_csv("your_file.csv", engine="c") # Convert `:py:func:pandas` DataFrame to xarray.Dataset ds = xr.Dataset.from_dataframe(df) # Prints the resulting xarray dataset print(ds) Zarr ~~~~ When opening a Zarr dataset with xarray, the ``engine`` is automatically detected based on the file extension or the type of input provided. If the dataset is stored in a directory with a ".zarr" extension, xarray will automatically use the "zarr" engine. To read zarr files with xarray, use the :py:func:`~xarray.open_dataset` function and specify the path to the zarr file as follows: .. code:: python import xarray as xr # use xarray to open the file and return an xarray.Dataset object using zarr engine ds = xr.open_dataset("path/to/your/file.zarr", engine="zarr") # Print Dataset object print(ds) We recommend installing ``zarr`` via conda using the below given code: :: conda install -c conda-forge zarr There may be situations where you need to specify the engine manually using the ``engine`` keyword argument. For example, if you have a Zarr dataset stored in a file with a different extension (e.g., ".npy"), you will need to specify the engine as "zarr" explicitly when opening the dataset. Some packages may have additional functionality beyond what is shown here. You can refer to the documentation for each package for more information. How does xarray handle missing values? -------------------------------------- **xarray can handle missing values using ``np.nan``** - ``np.nan`` is used to represent missing values in labeled arrays and datasets. It is a commonly used standard for representing missing or undefined numerical data in scientific computing. ``np.nan`` is a constant value in NumPy that represents "Not a Number" or missing values. - Most of xarray's computation methods are designed to automatically handle missing values appropriately. For example, when performing operations like addition or multiplication on arrays that contain missing values, xarray will automatically ignore the missing values and only perform the operation on the valid data. This makes it easy to work with data that may contain missing or undefined values without having to worry about handling them explicitly. 
- Many of xarray's `aggregation methods `_, such as ``sum()``, ``mean()``, ``min()``, ``max()``, and others, have a skipna argument that controls whether missing values (represented by NaN) should be skipped (True) or treated as NaN (False) when performing the calculation. By default, ``skipna`` is set to ``True``, so missing values are ignored when computing the result. However, you can set ``skipna`` to ``False`` if you want missing values to be treated as NaN and included in the calculation. - On `plotting `_ an xarray dataset or array that contains missing values, xarray will simply leave the missing values as blank spaces in the plot. - We have a set of `methods `_ for manipulating missing and filling values. How should I cite xarray? ------------------------- If you are using xarray and would like to cite it in academic publication, we would certainly appreciate it. We recommend two citations. 1. At a minimum, we recommend citing the xarray overview journal article, published in the Journal of Open Research Software. - Hoyer, S. & Hamman, J., (2017). xarray: N-D labeled Arrays and Datasets in Python. Journal of Open Research Software. 5(1), p.10. DOI: https://doi.org/10.5334/jors.148 Here’s an example of a BibTeX entry:: @article{hoyer2017xarray, title = {xarray: {N-D} labeled arrays and datasets in {Python}}, author = {Hoyer, S. and J. Hamman}, journal = {Journal of Open Research Software}, volume = {5}, number = {1}, year = {2017}, publisher = {Ubiquity Press}, doi = {10.5334/jors.148}, url = {https://doi.org/10.5334/jors.148} } 2. You may also want to cite a specific version of the xarray package. We provide a `Zenodo citation and DOI `_ for this purpose: .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.598201.svg :target: https://doi.org/10.5281/zenodo.598201 An example BibTeX entry:: @misc{xarray_v0_8_0, author = {Stephan Hoyer and Clark Fitzgerald and Joe Hamman and others}, title = {xarray: v0.8.0}, month = aug, year = 2016, doi = {10.5281/zenodo.59499}, url = {https://doi.org/10.5281/zenodo.59499} } .. _api-stability: How stable is Xarray's API? --------------------------- Xarray tries very hard to maintain backwards compatibility in our :ref:`api` between released versions. Whilst we do occasionally make breaking changes in order to improve the library, we `signpost changes `_ with ``DeprecationWarnings`` for many releases in advance. (An exception is bugs - whose behaviour we try to fix as soon as we notice them.) Our `test-driven development practices `_ helps to ensure any accidental regressions are caught. This philosophy applies to everything in the `public API `_. .. _public-api: What parts of xarray are considered public API? ----------------------------------------------- As a rule, only functions/methods documented in our :ref:`api` are considered part of xarray's public API. Everything else (in particular, everything in ``xarray.core`` that is not also exposed in the top level ``xarray`` namespace) is considered a private implementation detail that may change at any time. Objects that exist to facilitate xarray's fluent interface on ``DataArray`` and ``Dataset`` objects are a special case. For convenience, we document them in the API docs, but only their methods and the ``DataArray``/``Dataset`` methods/properties to construct them (e.g., ``.plot()``, ``.groupby()``, ``.str``) are considered public API. 
Constructors and other details of the internal classes used to implemented them (i.e., ``xarray.plot.plotting._PlotMethods``, ``xarray.core.groupby.DataArrayGroupBy``, ``xarray.core.accessor_str.StringAccessor``) are not. xarray-2025.09.0/doc/get-help/help-diagram.rst000066400000000000000000000107251505620616400207070ustar00rootroot00000000000000Getting Help ============ Navigating the wealth of resources available for Xarray can be overwhelming. We've created this flow chart to help guide you towards the best way to get help, depending on what you're working towards. Also be sure to check out our :ref:`faq`. and :ref:`howdoi` pages for solutions to common questions. A major strength of Xarray is in the user community. Sometimes you might not yet have a concrete question but would simply like to connect with other Xarray users. We have a few accounts on different social platforms for that! :ref:`socials`. We look forward to hearing from you! Help Flowchart -------------- .. _comment: mermaid Flowcharg "link" text gets secondary color background, SVG icon fill gets primary color .. raw:: html .. mermaid:: :config: {"theme":"base","themeVariables":{"fontSize":"20px","primaryColor":"#fff","primaryTextColor":"#fff","primaryBorderColor":"#59c7d6","lineColor":"#e28126","secondaryColor":"#767985"}} :alt: Flowchart illustrating the different ways to access help using or contributing to Xarray. flowchart TD intro[Welcome to Xarray! How can we help?]:::quesNodefmt usage([fa:fa-chalkboard-user Xarray Tutorial fab:fa-readme Xarray Docs fab:fa-stack-overflow Stack Exchange fab:fa-google Ask Google fa:fa-robot Ask AI ChatBot]):::ansNodefmt extensions([Extension docs: fab:fa-readme Dask fab:fa-readme Rioxarray]):::ansNodefmt help([fab:fa-github Xarray Discussions fab:fa-discord Xarray Discord fa:fa-globe Pangeo Discourse]):::ansNodefmt bug([Let us know: fab:fa-github Xarray Issues]):::ansNodefmt contrib([fa:fa-book-open Xarray Contributor's Guide]):::ansNodefmt pr([fab:fa-github Pull Request]):::ansNodefmt dev([fab:fa-github Add PR Comment fa:fa-users Attend Developer's Meeting ]):::ansNodefmt report[Thanks for letting us know!]:::quesNodefmt merged[fa:fa-hands-clapping Thanks for contributing to Xarray!]:::quesNodefmt intro -->|How do I use Xarray?| usage usage -->|"With extensions (like Dask, Rioxarray, etc.)"| extensions usage -->|I still have questions or could use some guidance | help intro -->|I think I found a bug| bug bug contrib bug -->|I just wanted to tell you| report bug<-->|I'd like to fix the bug!| contrib pr -->|my PR was approved| merged intro -->|I wish Xarray could...| bug pr <-->|my PR is quiet| dev contrib -->pr classDef quesNodefmt font-size:20pt,fill:#0e4666,stroke:#59c7d6,stroke-width:3 classDef ansNodefmt font-size:18pt,fill:#4a4a4a,stroke:#17afb4,stroke-width:3 linkStyle default font-size:16pt,stroke-width:4 Flowchart links --------------- - `Xarray Tutorials `__ - `Xarray Docs `__ - `Stack Exchange `__ - `Xarray Discussions `__ - `Xarray Discord `__ - `Xarray Office Hours `__ - `Pangeo Discourse `__ - `Xarray Issues `__ - :ref:`contributing` - :ref:`developers-meeting` .. toctree:: :maxdepth: 1 :hidden: faq howdoi socials xarray-2025.09.0/doc/get-help/howdoi.rst000066400000000000000000000110341505620616400176400ustar00rootroot00000000000000.. currentmodule:: xarray .. _howdoi: How do I ... ============ .. list-table:: :header-rows: 1 :widths: 40 60 * - How do I... 
- Solution * - add a DataArray to my dataset as a new variable - ``my_dataset[varname] = my_dataArray`` or :py:meth:`Dataset.assign` (see also :ref:`dictionary_like_methods`) * - add variables from other datasets to my dataset - :py:meth:`Dataset.merge` * - add a new dimension and/or coordinate - :py:meth:`DataArray.expand_dims`, :py:meth:`Dataset.expand_dims` * - add a new coordinate variable - :py:meth:`DataArray.assign_coords` * - change a data variable to a coordinate variable - :py:meth:`Dataset.set_coords` * - change the order of dimensions - :py:meth:`DataArray.transpose`, :py:meth:`Dataset.transpose` * - reshape dimensions - :py:meth:`DataArray.stack`, :py:meth:`Dataset.stack`, :py:meth:`Dataset.coarsen.construct`, :py:meth:`DataArray.coarsen.construct` * - remove a variable from my object - :py:meth:`Dataset.drop_vars`, :py:meth:`DataArray.drop_vars` * - remove dimensions of length 1 or 0 - :py:meth:`DataArray.squeeze`, :py:meth:`Dataset.squeeze` * - remove all variables with a particular dimension - :py:meth:`Dataset.drop_dims` * - convert non-dimension coordinates to data variables or remove them - :py:meth:`DataArray.reset_coords`, :py:meth:`Dataset.reset_coords` * - rename a variable, dimension or coordinate - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename`, :py:meth:`Dataset.rename_vars`, :py:meth:`Dataset.rename_dims`, * - convert a DataArray to Dataset or vice versa - :py:meth:`DataArray.to_dataset`, :py:meth:`Dataset.to_dataarray`, :py:meth:`Dataset.to_stacked_array`, :py:meth:`DataArray.to_unstacked_dataset` * - extract variables that have certain attributes - :py:meth:`Dataset.filter_by_attrs` * - extract the underlying array (e.g. NumPy or Dask arrays) - :py:attr:`DataArray.data` * - convert to and extract the underlying NumPy array - :py:attr:`DataArray.to_numpy` * - convert to a pandas DataFrame - :py:attr:`Dataset.to_dataframe` * - sort values - :py:attr:`Dataset.sortby` * - find out if my xarray object is wrapping a Dask Array - :py:func:`dask.is_dask_collection` * - know how much memory my object requires - :py:attr:`DataArray.nbytes`, :py:attr:`Dataset.nbytes` * - Get axis number for a dimension - :py:meth:`DataArray.get_axis_num` * - convert a possibly irregularly sampled timeseries to a regularly sampled timeseries - :py:meth:`DataArray.resample`, :py:meth:`Dataset.resample` (see :ref:`resampling` for more) * - apply a function on all data variables in a Dataset - :py:meth:`Dataset.map` * - write xarray objects with complex values to a netCDF file - :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="h5netcdf"`` or :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="netCDF4", auto_complex=True`` * - make xarray objects look like other xarray objects - :py:func:`~xarray.ones_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.full_like`, :py:meth:`Dataset.reindex_like`, :py:meth:`Dataset.interp_like`, :py:meth:`Dataset.broadcast_like`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interp_like`, :py:meth:`DataArray.broadcast_like` * - Make sure my datasets have values at the same coordinate locations - ``xr.align(dataset_1, dataset_2, join="exact")`` * - replace NaNs with other values - :py:meth:`Dataset.fillna`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, :py:meth:`Dataset.interpolate_na`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.interpolate_na` * - extract the year, month, day or similar from a DataArray of time 
values - ``obj.dt.month`` for example where ``obj`` is a :py:class:`~xarray.DataArray` containing ``datetime64`` or ``cftime`` values. See :ref:`dt_accessor` for more. * - round off time values to a specified frequency - ``obj.dt.ceil``, ``obj.dt.floor``, ``obj.dt.round``. See :ref:`dt_accessor` for more. * - make a mask that is ``True`` where an object contains any of the values in an array - :py:meth:`Dataset.isin`, :py:meth:`DataArray.isin` * - Index using a boolean mask - :py:meth:`Dataset.query`, :py:meth:`DataArray.query`, :py:meth:`Dataset.where`, :py:meth:`DataArray.where` * - preserve ``attrs`` during (most) xarray operations - ``xr.set_options(keep_attrs=True)`` xarray-2025.09.0/doc/get-help/socials.rst000066400000000000000000000005111505620616400200020ustar00rootroot00000000000000.. _socials: Social Media ============ Xarray is active on several social media platforms. We use these platforms to share updates and connect with the user community. - `Discord `__ - `Bluesky `__ - `Twitter(X) `__ xarray-2025.09.0/doc/getting-started-guide/000077500000000000000000000000001505620616400203115ustar00rootroot00000000000000xarray-2025.09.0/doc/getting-started-guide/index.rst000066400000000000000000000005371505620616400221570ustar00rootroot00000000000000################ Getting Started ################ The getting started guide aims to get you using Xarray productively as quickly as possible. It is designed as an entry point for new users, and it provided an introduction to Xarray's main concepts. .. toctree:: :maxdepth: 2 why-xarray installing quick-overview tutorials-and-videos xarray-2025.09.0/doc/getting-started-guide/installing.rst000066400000000000000000000144141505620616400232130ustar00rootroot00000000000000.. _installing: Installation ============ Required dependencies --------------------- - Python (3.11 or later) - `numpy `__ (1.26 or later) - `packaging `__ (24.1 or later) - `pandas `__ (2.2 or later) .. _optional-dependencies: Optional dependencies --------------------- .. note:: If you are using pip to install xarray, optional dependencies can be installed by specifying *extras*. :ref:`installation-instructions` for both pip and conda are given below. For netCDF and IO ~~~~~~~~~~~~~~~~~ - `netCDF4 `__: recommended if you want to use xarray for reading or writing netCDF files - `scipy `__: used as a fallback for reading/writing netCDF3 - `pydap `__: used as a fallback for accessing OPeNDAP - `h5netcdf `__: an alternative library for reading and writing netCDF4 files that does not use the netCDF-C libraries - `zarr `__: for chunked, compressed, N-dimensional arrays. - `cftime `__: recommended if you want to encode/decode datetimes for non-standard calendars or dates before year 1678 or after year 2262. - `iris `__: for conversion to and from iris' Cube objects For accelerating xarray ~~~~~~~~~~~~~~~~~~~~~~~ - `scipy `__: necessary to enable the interpolation features for xarray objects - `bottleneck `__: speeds up NaN-skipping and rolling window aggregations by a large factor - `numbagg `_: for exponential rolling window operations For parallel computing ~~~~~~~~~~~~~~~~~~~~~~ - `dask.array `__: required for :ref:`dask`. 
For plotting ~~~~~~~~~~~~ - `matplotlib `__: required for :ref:`plotting` - `cartopy `__: recommended for :ref:`plot-maps` - `seaborn `__: for better color palettes - `nc-time-axis `__: for plotting cftime.datetime objects Alternative data containers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - `sparse `_: for sparse arrays - `pint `_: for units of measure - Any numpy-like objects that support `NEP-18 `_. Note that while such libraries theoretically should work, they are untested. Integration tests are in the process of being written for individual libraries. .. _mindeps_policy: Minimum dependency versions --------------------------- Xarray adopts a rolling policy regarding the minimum supported version of its dependencies: - **Python:** 30 months (`NEP-29 `_) - **numpy:** 18 months (`NEP-29 `_) - **all other libraries:** 12 months This means the latest minor (X.Y) version from N months prior. Patch versions (x.y.Z) are not pinned, and only the latest available at the moment of publishing the xarray release is guaranteed to work. You can see the actual minimum tested versions: ``_ .. _installation-instructions: Instructions ------------ Xarray itself is a pure Python package, but its dependencies are not. The easiest way to get everything installed is to use conda_. To install xarray with its recommended dependencies using the conda command line tool:: $ conda install -c conda-forge xarray dask netCDF4 bottleneck .. _conda: https://docs.conda.io If you require other :ref:`optional-dependencies` add them to the line above. We recommend using the community maintained `conda-forge `__ channel, as some of the dependencies are difficult to build. New releases may also appear in conda-forge before being updated in the default channel. If you don't use conda, be sure you have the required dependencies (numpy and pandas) installed first. Then, install xarray with pip:: $ python -m pip install xarray We also maintain other dependency sets for different subsets of functionality:: $ python -m pip install "xarray[io]" # Install optional dependencies for handling I/O $ python -m pip install "xarray[accel]" # Install optional dependencies for accelerating xarray $ python -m pip install "xarray[parallel]" # Install optional dependencies for dask arrays $ python -m pip install "xarray[viz]" # Install optional dependencies for visualization $ python -m pip install "xarray[complete]" # Install all the above The above commands should install most of the `optional dependencies`_. However, some packages which are either not listed on PyPI or require extra installation steps are excluded. To know which dependencies would be installed, take a look at the ``[project.optional-dependencies]`` section in ``pyproject.toml``: .. literalinclude:: ../../pyproject.toml :language: toml :start-at: [project.optional-dependencies] :end-before: [build-system] Development versions -------------------- To install the most recent development version, install from github:: $ python -m pip install git+https://github.com/pydata/xarray.git or from TestPyPI:: $ python -m pip install --index-url https://test.pypi.org/simple --extra-index-url https://pypi.org/simple --pre xarray Testing ------- To run the test suite after installing xarray, install (via pypi or conda) `py.test `__ and run ``pytest`` in the root directory of the xarray repository. Performance Monitoring ~~~~~~~~~~~~~~~~~~~~~~ .. 
TODO: uncomment once we have a working setup see https://github.com/pydata/xarray/pull/5066 A fixed-point performance monitoring of (a part of) our code can be seen on `this page `__. To run these benchmark tests in a local machine, first install - `airspeed-velocity `__: a tool for benchmarking Python packages over their lifetime. and run ``asv run # this will install some conda environments in ./.asv/envs`` xarray-2025.09.0/doc/getting-started-guide/quick-overview.rst000066400000000000000000000302711505620616400240260ustar00rootroot00000000000000############## Quick overview ############## Here are some quick examples of what you can do with :py:class:`xarray.DataArray` objects. Everything is explained in much more detail in the rest of the documentation. To begin, import numpy, pandas and xarray using their customary abbreviations: .. jupyter-execute:: import numpy as np import pandas as pd import xarray as xr Create a DataArray ------------------ You can make a DataArray from scratch by supplying data in the form of a numpy array or list, with optional *dimensions* and *coordinates*: .. jupyter-execute:: data = xr.DataArray(np.random.randn(2, 3), dims=("x", "y"), coords={"x": [10, 20]}) data In this case, we have generated a 2D array, assigned the names *x* and *y* to the two dimensions respectively and associated two *coordinate labels* '10' and '20' with the two locations along the x dimension. If you supply a pandas :py:class:`~pandas.Series` or :py:class:`~pandas.DataFrame`, metadata is copied directly: .. jupyter-execute:: xr.DataArray(pd.Series(range(3), index=list("abc"), name="foo")) Here are the key properties for a ``DataArray``: .. jupyter-execute:: # like in pandas, values is a numpy array that you can modify in-place data.values data.dims data.coords # you can use this dictionary to store arbitrary metadata data.attrs Indexing -------- Xarray supports four kinds of indexing. Since we have assigned coordinate labels to the x dimension we can use label-based indexing along that dimension just like pandas. The four examples below all yield the same result (the value at ``x=10``) but at varying levels of convenience and intuitiveness. .. jupyter-execute:: # positional and by integer label, like numpy data[0, :] # loc or "location": positional and coordinate label, like pandas data.loc[10] # isel or "integer select": by dimension name and integer label data.isel(x=0) # sel or "select": by dimension name and coordinate label data.sel(x=10) Unlike positional indexing, label-based indexing frees us from having to know how our array is organized. All we need to know are the dimension name and the label we wish to index i.e. ``data.sel(x=10)`` works regardless of whether ``x`` is the first or second dimension of the array and regardless of whether ``10`` is the first or second element of ``x``. We have already told xarray that x is the first dimension when we created ``data``: xarray keeps track of this so we don't have to. For more, see :ref:`indexing`. Attributes ---------- While you're setting up your DataArray, it's often a good idea to set metadata attributes. A useful choice is to set ``data.attrs['long_name']`` and ``data.attrs['units']`` since xarray will use these, if present, to automatically label your plots. These special names were chosen following the `NetCDF Climate and Forecast (CF) Metadata Conventions `_. ``attrs`` is just a Python dictionary, so you can assign anything you wish. .. 
jupyter-execute:: data.attrs["long_name"] = "random velocity" data.attrs["units"] = "metres/sec" data.attrs["description"] = "A random variable created as an example." data.attrs["random_attribute"] = 123 data.attrs # you can add metadata to coordinates too data.x.attrs["units"] = "x units" Computation ----------- Data arrays work very similarly to numpy ndarrays: .. jupyter-execute:: data + 10 np.sin(data) # transpose data.T data.sum() However, aggregation operations can use dimension names instead of axis numbers: .. jupyter-execute:: data.mean(dim="x") Arithmetic operations broadcast based on dimension name. This means you don't need to insert dummy dimensions for alignment: .. jupyter-execute:: a = xr.DataArray(np.random.randn(3), [data.coords["y"]]) b = xr.DataArray(np.random.randn(4), dims="z") a b a + b It also means that in most cases you do not need to worry about the order of dimensions: .. jupyter-execute:: data - data.T Operations also align based on index labels: .. jupyter-execute:: data[:-1] - data[:1] For more, see :ref:`compute`. GroupBy ------- Xarray supports grouped operations using a very similar API to pandas (see :ref:`groupby`): .. jupyter-execute:: labels = xr.DataArray(["E", "F", "E"], [data.coords["y"]], name="labels") labels data.groupby(labels).mean("y") data.groupby(labels).map(lambda x: x - x.min()) Plotting -------- Visualizing your datasets is quick and convenient: .. jupyter-execute:: data.plot() Note the automatic labeling with names and units. Our effort in adding metadata attributes has paid off! Many aspects of these figures are customizable: see :ref:`plotting`. pandas ------ Xarray objects can be easily converted to and from pandas objects using the :py:meth:`~xarray.DataArray.to_series`, :py:meth:`~xarray.DataArray.to_dataframe` and :py:meth:`~pandas.DataFrame.to_xarray` methods: .. jupyter-execute:: series = data.to_series() series # convert back series.to_xarray() Datasets -------- :py:class:`xarray.Dataset` is a dict-like container of aligned ``DataArray`` objects. You can think of it as a multi-dimensional generalization of the :py:class:`pandas.DataFrame`: .. jupyter-execute:: ds = xr.Dataset(dict(foo=data, bar=("x", [1, 2]), baz=np.pi)) ds This creates a dataset with three DataArrays named ``foo``, ``bar`` and ``baz``. Use dictionary or dot indexing to pull out ``Dataset`` variables as ``DataArray`` objects but note that assignment only works with dictionary indexing: .. jupyter-execute:: ds["foo"] ds.foo When creating ``ds``, we specified that ``foo`` is identical to ``data`` created earlier, ``bar`` is one-dimensional with single dimension ``x`` and associated values '1' and '2', and ``baz`` is a scalar not associated with any dimension in ``ds``. Variables in datasets can have different ``dtype`` and even different dimensions, but all dimensions are assumed to refer to points in the same shared coordinate system i.e. if two variables have dimension ``x``, that dimension must be identical in both variables. For example, when creating ``ds`` xarray automatically *aligns* ``bar`` with ``DataArray`` ``foo``, i.e., they share the same coordinate system so that ``ds.bar['x'] == ds.foo['x'] == ds['x']``. Consequently, the following works without explicitly specifying the coordinate ``x`` when creating ``ds['bar']``: .. jupyter-execute:: ds.bar.sel(x=10) You can do almost everything you can do with ``DataArray`` objects with ``Dataset`` objects (including indexing and arithmetic) if you prefer to work with multiple variables at once. 
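For example, the same label-aware operations shown above for ``DataArray`` objects apply to every variable in the dataset at once; a small illustration using the ``ds`` defined just above:

.. jupyter-execute::

    # dimension-name based reduction, applied to each variable that has "x"
    ds.mean(dim="x")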
Read & write netCDF files ------------------------- NetCDF is the recommended file format for xarray objects. Users from the geosciences will recognize that the :py:class:`~xarray.Dataset` data model looks very similar to a netCDF file (which, in fact, inspired it). You can directly read and write xarray objects to disk using :py:meth:`~xarray.Dataset.to_netcdf`, :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_dataarray`: .. jupyter-execute:: ds.to_netcdf("example.nc") reopened = xr.open_dataset("example.nc") reopened .. jupyter-execute:: :hide-code: import os reopened.close() os.remove("example.nc") It is common for datasets to be distributed across multiple files (commonly one file per timestep). Xarray supports this use-case by providing the :py:meth:`~xarray.open_mfdataset` and the :py:meth:`~xarray.save_mfdataset` methods. For more, see :ref:`io`. .. _quick-overview-datatrees: DataTrees --------- :py:class:`xarray.DataTree` is a tree-like container of :py:class:`~xarray.DataArray` objects, organised into multiple mutually alignable groups. You can think of it like a (recursive) ``dict`` of :py:class:`~xarray.Dataset` objects, where coordinate variables and their indexes are inherited down to children. Let's first make some example xarray datasets: .. jupyter-execute:: import numpy as np import xarray as xr data = xr.DataArray(np.random.randn(2, 3), dims=("x", "y"), coords={"x": [10, 20]}) ds = xr.Dataset({"foo": data, "bar": ("x", [1, 2]), "baz": np.pi}) ds ds2 = ds.interp(coords={"x": [10, 12, 14, 16, 18, 20]}) ds2 ds3 = xr.Dataset( {"people": ["alice", "bob"], "heights": ("people", [1.57, 1.82])}, coords={"species": "human"}, ) ds3 Now we'll put these datasets into a hierarchical DataTree: .. jupyter-execute:: dt = xr.DataTree.from_dict( {"simulation/coarse": ds, "simulation/fine": ds2, "/": ds3} ) dt This created a DataTree with nested groups. We have one root group, containing information about individual people. This root group can be named, but here it is unnamed, and is referenced with ``"/"``. This structure is similar to a unix-like filesystem. The root group then has one subgroup ``simulation``, which contains no data itself but does contain another two subgroups, named ``fine`` and ``coarse``. The (sub)subgroups ``fine`` and ``coarse`` contain two very similar datasets. They both have an ``"x"`` dimension, but the dimension is of different lengths in each group, which makes the data in each group unalignable. In the root group we placed some completely unrelated information, in order to show how a tree can store heterogeneous data. Remember to keep unalignable dimensions in sibling groups because a DataTree inherits coordinates down through its child nodes. You can see this inheritance in the above representation of the DataTree. The coordinates ``people`` and ``species`` defined in the root ``/`` node are shown in the child nodes both ``/simulation/coarse`` and ``/simulation/fine``. All coordinates in parent-descendent lineage must be alignable to form a DataTree. If your input data is not aligned, you can still get a nested ``dict`` of :py:class:`~xarray.Dataset` objects with :py:func:`~xarray.open_groups` and then apply any required changes to ensure alignment before converting to a :py:class:`~xarray.DataTree`. The constraints on each group are the same as the constraint on DataArrays within a single dataset with the addition of requiring parent-descendent coordinate agreement. 
We created the subgroups using a filesystem-like syntax, and accessing groups works the same way. We can access individual DataArrays in a similar fashion. .. jupyter-execute:: dt["simulation/coarse/foo"] We can also view the data in a particular group as a read-only :py:class:`~xarray.Datatree.DatasetView` using :py:attr:`xarray.Datatree.dataset`: .. jupyter-execute:: dt["simulation/coarse"].dataset We can get a copy of the :py:class:`~xarray.Dataset` including the inherited coordinates by calling the :py:class:`~xarray.datatree.to_dataset` method: .. jupyter-execute:: ds_inherited = dt["simulation/coarse"].to_dataset() ds_inherited And you can get a copy of just the node local values of :py:class:`~xarray.Dataset` by setting the ``inherit`` keyword to ``False``: .. jupyter-execute:: ds_node_local = dt["simulation/coarse"].to_dataset(inherit=False) ds_node_local .. note:: We intend to eventually implement most :py:class:`~xarray.Dataset` methods (indexing, aggregation, arithmetic, etc) on :py:class:`~xarray.DataTree` objects, but many methods have not been implemented yet. .. Operations map over subtrees, so we can take a mean over the ``x`` dimension of both the ``fine`` and ``coarse`` groups just by: .. .. jupyter-execute:: .. avg = dt["simulation"].mean(dim="x") .. avg .. Here the ``"x"`` dimension used is always the one local to that subgroup. .. You can do almost everything you can do with :py:class:`~xarray.Dataset` objects with :py:class:`~xarray.DataTree` objects .. (including indexing and arithmetic), as operations will be mapped over every subgroup in the tree. .. This allows you to work with multiple groups of non-alignable variables at once. .. tip:: If all of your variables are mutually alignable (i.e., they live on the same grid, such that every common dimension name maps to the same length), then you probably don't need :py:class:`xarray.DataTree`, and should consider just sticking with :py:class:`xarray.Dataset`. xarray-2025.09.0/doc/getting-started-guide/tutorials-and-videos.rst000066400000000000000000000025201505620616400251170ustar00rootroot00000000000000 Tutorials and Videos ==================== There are an abundance of tutorials and videos available for learning how to use *xarray*. Often, these tutorials are taught to workshop attendees at conferences or other events. We highlight a number of these resources below, but this is by no means an exhaustive list! Tutorials ---------- - `Xarray's Tutorials`_ repository - The `UW eScience Institute's Geohackweek`_ tutorial on xarray for geospatial data scientists. - `Nicolas Fauchereau's 2015 tutorial`_ on xarray for netCDF users. Videos ------- .. include:: ../videos-gallery.txt Books, Chapters and Articles ----------------------------- - Stephan Hoyer and Joe Hamman's `Journal of Open Research Software paper`_ describing the xarray project. .. _Xarray's Tutorials: https://xarray-contrib.github.io/xarray-tutorial/ .. _Journal of Open Research Software paper: https://doi.org/10.5334/jors.148 .. _UW eScience Institute's Geohackweek : https://geohackweek.github.io/nDarrays/ .. _tutorial: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial.ipynb .. _with answers: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial-with-answers.ipynb .. 
_Nicolas Fauchereau's 2015 tutorial: https://nbviewer.iPython.org/github/nicolasfauchereau/metocean/blob/master/notebooks/xray.ipynb xarray-2025.09.0/doc/getting-started-guide/why-xarray.rst000066400000000000000000000133371505620616400231650ustar00rootroot00000000000000Overview: Why xarray? ===================== Xarray introduces labels in the form of dimensions, coordinates and attributes on top of raw NumPy-like multidimensional arrays, which allows for a more intuitive, more concise, and less error-prone developer experience. What labels enable ------------------ Multi-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called "tensors") are an essential part of computational science. They are encountered in a wide range of fields, including physics, astronomy, geoscience, bioinformatics, engineering, finance, and deep learning. In Python, NumPy_ provides the fundamental data structure and API for working with raw ND arrays. However, real-world datasets are usually more than just raw numbers; they have labels which encode information about how the array values map to locations in space, time, etc. Xarray doesn't just keep track of labels on arrays -- it uses them to provide a powerful and concise interface. For example: - Apply operations over dimensions by name: ``x.sum('time')``. - Select values by label (or logical location) instead of integer location: ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``. - Mathematical operations (e.g., ``x - y``) vectorize across multiple dimensions (array broadcasting) based on dimension names, not shape. - Easily use the `split-apply-combine `_ paradigm with ``groupby``: ``x.groupby('time.dayofyear').mean()``. - Database-like alignment based on coordinate labels that smoothly handles missing values: ``x, y = xr.align(x, y, join='outer')``. - Keep track of arbitrary metadata in the form of a Python dictionary: ``x.attrs``. The N-dimensional nature of xarray's data structures makes it suitable for dealing with multi-dimensional scientific data, and its use of dimension names instead of axis labels (``dim='time'`` instead of ``axis=0``) makes such arrays much more manageable than the raw numpy ndarray: with xarray, you don't need to keep track of the order of an array's dimensions or insert dummy dimensions of size 1 to align arrays (e.g., using ``np.newaxis``). The immediate payoff of using xarray is that you'll write less code. The long-term payoff is that you'll understand what you were thinking when you come back to look at it weeks or months later. Core data structures -------------------- Xarray has two core data structures, which build upon and extend the core strengths of NumPy_ and pandas_. Both data structures are fundamentally N-dimensional: - :py:class:`~xarray.DataArray` is our implementation of a labeled, N-dimensional array. It is an N-D generalization of a :py:class:`pandas.Series`. The name ``DataArray`` itself is borrowed from Fernando Perez's datarray_ project, which prototyped a similar data structure. - :py:class:`~xarray.Dataset` is a multi-dimensional, in-memory array database. It is a dict-like container of ``DataArray`` objects aligned along any number of shared dimensions, and serves a similar purpose in xarray to the :py:class:`pandas.DataFrame`. The value of attaching labels to numpy's :py:class:`numpy.ndarray` may be fairly obvious, but the dataset may need more motivation. 
The power of the dataset over a plain dictionary is that, in addition to pulling out arrays by name, it is possible to select or combine data along a dimension across all arrays simultaneously. Like a :py:class:`~pandas.DataFrame`, datasets facilitate array operations with heterogeneous data -- the difference is that the arrays in a dataset can have not only different data types, but also different numbers of dimensions. This data model is borrowed from the netCDF_ file format, which also provides xarray with a natural and portable serialization format. NetCDF is very popular in the geosciences, and there are existing libraries for reading and writing netCDF in many programming languages, including Python. Xarray distinguishes itself from many tools for working with netCDF data in-so-far as it provides data structures for in-memory analytics that both utilize and preserve labels. You only need to do the tedious work of adding metadata once, not every time you save a file. Goals and aspirations --------------------- Xarray contributes domain-agnostic data-structures and tools for labeled multi-dimensional arrays to Python's SciPy_ ecosystem for numerical computing. In particular, xarray builds upon and integrates with NumPy_ and pandas_: - Our user-facing interfaces aim to be more explicit versions of those found in NumPy/pandas. - Compatibility with the broader ecosystem is a major goal: it should be easy to get your data in and out. - We try to keep a tight focus on functionality and interfaces related to labeled data, and leverage other Python libraries for everything else, e.g., NumPy/pandas for fast arrays/indexing (xarray itself contains no compiled code), Dask_ for parallel computing, matplotlib_ for plotting, etc. Xarray is a collaborative and community driven project, run entirely on volunteer effort (see :ref:`contributing`). Our target audience is anyone who needs N-dimensional labeled arrays in Python. Originally, development was driven by the data analysis needs of physical scientists (especially geoscientists who already know and love netCDF_), but it has become a much more broadly useful tool, and is still under active development. See our technical :ref:`roadmap` for more details, and feel free to reach out with questions about whether xarray is the right tool for your needs. .. _datarray: https://github.com/BIDS/datarray .. _Dask: https://www.dask.org .. _matplotlib: https://matplotlib.org .. _netCDF: https://www.unidata.ucar.edu/software/netcdf .. _NumPy: https://numpy.org .. _pandas: https://pandas.pydata.org .. _SciPy: https://www.scipy.org xarray-2025.09.0/doc/index.rst000066400000000000000000000044351505620616400157600ustar00rootroot00000000000000:html_theme.sidebar_secondary.remove: true .. module:: xarray Xarray documentation ==================== Xarray makes working with labelled multi-dimensional arrays in Python simple, efficient, and fun! **Version**: |version| - :ref:`whats-new` **Useful links**: `Home `__ | `Code Repository `__ | `Issues `__ | `Discussions `__ | `Releases `__ | `Tutorial `__ | `Stack Overflow `__ | `Blog `__ | .. grid:: 1 1 2 2 :gutter: 2 .. grid-item-card:: Get started! :img-top: _static/index_getting_started.svg :class-card: intro-card :link: getting-started-guide/index :link-type: doc *New to Xarray?* Start here with our installation instructions and a brief overview of Xarray. .. 
grid-item-card:: User guide :img-top: _static/index_user_guide.svg :class-card: intro-card :link: user-guide/index :link-type: doc *Ready to deepen your understanding of Xarray?* Visit the user guide for detailed explanations of the data model, common computational patterns, and more. .. grid-item-card:: API reference :img-top: _static/index_api.svg :class-card: intro-card :link: api :link-type: doc *Need to learn more about a specific Xarray function?* Go here to review the documentation of all public functions and classes in Xarray. .. grid-item-card:: Contribute :img-top: _static/index_contribute.svg :class-card: intro-card :link: contribute/contributing :link-type: doc *Saw a typo in the documentation? Want to improve existing functionalities?* Please review our guide on improving Xarray. .. toctree:: :maxdepth: 2 :hidden: :caption: For users Get Started User Guide Tutorial Gallery API Reference Get Help Development Release Notes xarray-2025.09.0/doc/internals/000077500000000000000000000000001505620616400161105ustar00rootroot00000000000000xarray-2025.09.0/doc/internals/chunked-arrays.rst000066400000000000000000000137551505620616400215750ustar00rootroot00000000000000.. currentmodule:: xarray .. _internals.chunkedarrays: Alternative chunked array types =============================== .. warning:: This is a *highly* experimental feature. Please report any bugs or other difficulties on `xarray's issue tracker `_. In particular see discussion on `xarray issue #6807 `_ Xarray can wrap chunked dask arrays (see :ref:`dask`), but can also wrap any other chunked array type that exposes the correct interface. This allows us to support using other frameworks for distributed and out-of-core processing, with user code still written as xarray commands. In particular xarray also supports wrapping :py:class:`cubed.Array` objects (see `Cubed's documentation `_ and the `cubed-xarray package `_). The basic idea is that by wrapping an array that has an explicit notion of ``.chunks``, xarray can expose control over the choice of chunking scheme to users via methods like :py:meth:`DataArray.chunk` whilst the wrapped array actually implements the handling of processing all of the chunks. Chunked array methods and "core operations" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A chunked array needs to meet all the :ref:`requirements for normal duck arrays `, but must also implement additional features. Chunked arrays have additional attributes and methods, such as ``.chunks`` and ``.rechunk``. Furthermore, Xarray dispatches chunk-aware computations across one or more chunked arrays using special functions known as "core operations". Examples include ``map_blocks``, ``blockwise``, and ``apply_gufunc``. The core operations are generalizations of functions first implemented in :py:mod:`dask.array`. The implementation of these functions is specific to the type of arrays passed to them. For example, when applying the ``map_blocks`` core operation, :py:class:`dask.array.Array` objects must be processed by :py:func:`dask.array.map_blocks`, whereas :py:class:`cubed.Array` objects must be processed by :py:func:`cubed.map_blocks`. In order to use the correct implementation of a core operation for the array type encountered, xarray dispatches to the corresponding subclass of :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint`, also known as a "Chunk Manager". 
Therefore **a full list of the operations that need to be defined is set by the API of the** :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` **abstract base class**. Note that chunked array methods are also currently dispatched using this class. Chunked array creation is also handled by this class. As chunked array objects have a one-to-one correspondence with in-memory numpy arrays, it should be possible to create a chunked array from a numpy array by passing the desired chunking pattern to an implementation of :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint.from_array``. .. note:: The :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` abstract base class is mostly just acting as a namespace for containing the chunked-aware function primitives. Ideally in the future we would have an API standard for chunked array types which codified this structure, making the entrypoint system unnecessary. .. currentmodule:: xarray.namedarray.parallelcompat .. autoclass:: xarray.namedarray.parallelcompat.ChunkManagerEntrypoint :members: Registering a new ChunkManagerEntrypoint subclass ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Rather than hard-coding various chunk managers to deal with specific chunked array implementations, xarray uses an entrypoint system to allow developers of new chunked array implementations to register their corresponding subclass of :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint`. To register a new entrypoint you need to add an entry to the ``setup.cfg`` like this:: [options.entry_points] xarray.chunkmanagers = dask = xarray.namedarray.daskmanager:DaskManager See also `cubed-xarray `_ for another example. To check that the entrypoint has worked correctly, you may find it useful to display the available chunkmanagers using the internal function :py:func:`~xarray.namedarray.parallelcompat.list_chunkmanagers`. .. autofunction:: list_chunkmanagers User interface ~~~~~~~~~~~~~~ Once the chunkmanager subclass has been registered, xarray objects wrapping the desired array type can be created in 3 ways: #. By manually passing the array type to the :py:class:`~xarray.DataArray` constructor, see the examples for :ref:`numpy-like arrays `, #. Calling :py:meth:`~xarray.DataArray.chunk`, passing the keyword arguments ``chunked_array_type`` and ``from_array_kwargs``, #. Calling :py:func:`~xarray.open_dataset`, passing the keyword arguments ``chunked_array_type`` and ``from_array_kwargs``. The latter two methods ultimately call the chunkmanager's implementation of ``.from_array``, to which they pass the ``from_array_kwargs`` dict. The ``chunked_array_type`` kwarg selects which registered chunkmanager subclass to dispatch to. It defaults to ``'dask'`` if Dask is installed, otherwise it defaults to whichever chunkmanager is registered if only one is registered. If multiple chunkmanagers are registered, the ``chunk_manager`` configuration option (which can be set using :py:func:`set_options`) will be used to determine which chunkmanager to use, defaulting to ``'dask'``. Parallel processing without chunks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use a parallel array type that does not expose a concept of chunks explicitly, none of the information on this page is theoretically required. Such an array type (e.g. `Ramba `_ or `Arkouda `_) could be wrapped using xarray's existing support for :ref:`numpy-like "duck" arrays `. 
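Putting the user interface described above together, here is a hedged sketch of selecting a non-dask chunkmanager when opening and rechunking data (this assumes the ``cubed-xarray`` entrypoint is installed; the file name is illustrative and any ``from_array_kwargs`` entries are specific to the chosen chunkmanager):

.. code-block:: python

    import xarray as xr

    # Dispatch chunked-array creation to the "cubed" chunkmanager instead of dask.
    ds = xr.open_dataset(
        "example.nc",
        chunks={},
        chunked_array_type="cubed",
        from_array_kwargs={},  # passed through to the chunkmanager's from_array
    )

    # The same keywords are accepted when (re)chunking an existing object.
    rechunked = ds.chunk({"time": 100}, chunked_array_type="cubed")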
xarray-2025.09.0/doc/internals/duck-arrays-integration.rst000066400000000000000000000065641505620616400234230ustar00rootroot00000000000000 .. _internals.duckarrays: Integrating with duck arrays ============================= .. warning:: This is an experimental feature. Please report any bugs or other difficulties on `xarray's issue tracker `_. Xarray can wrap custom numpy-like arrays (":term:`duck array`\s") - see the :ref:`user guide documentation `. This page is intended for developers who are interested in wrapping a new custom array type with xarray. .. _internals.duckarrays.requirements: Duck array requirements ~~~~~~~~~~~~~~~~~~~~~~~ Xarray does not explicitly check that required methods are defined by the underlying duck array object before attempting to wrap the given array. However, a wrapped array type should at a minimum define these attributes: * ``shape`` property, * ``dtype`` property, * ``ndim`` property, * ``__array__`` method, * ``__array_ufunc__`` method, * ``__array_function__`` method. These need to be defined consistently with :py:class:`numpy.ndarray`, for example the array ``shape`` property needs to obey `numpy's broadcasting rules `_ (see also the `Python Array API standard's explanation `_ of these same rules). .. _internals.duckarrays.array_api_standard: Python Array API standard support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As an integration library xarray benefits greatly from the standardization of duck-array libraries' APIs, and so is a big supporter of the `Python Array API Standard `_. We aim to support any array libraries that follow the Array API standard out-of-the-box. However, xarray does occasionally call some numpy functions which are not (yet) part of the standard (e.g. :py:meth:`xarray.DataArray.pad` calls :py:func:`numpy.pad`). See `xarray issue #7848 `_ for a list of such functions. We can still support dispatching on these functions through the array protocols above, it just means that if you exclusively implement the methods in the Python Array API standard then some features in xarray will not work. Custom inline reprs ~~~~~~~~~~~~~~~~~~~ In certain situations (e.g. when printing the collapsed preview of variables of a ``Dataset``), xarray will display the repr of a :term:`duck array` in a single line, truncating it to a certain number of characters. If that would drop too much information, the :term:`duck array` may define a ``_repr_inline_`` method that takes ``max_width`` (number of characters) as an argument .. code:: python class MyDuckArray: ... def _repr_inline_(self, max_width): """format to a single line with at most max_width characters""" ... ... To avoid duplicated information, this method must omit information about the shape and :term:`dtype`. For example, the string representation of a ``dask`` array or a ``sparse`` matrix would be: .. jupyter-execute:: import dask.array as da import xarray as xr import numpy as np import sparse .. jupyter-execute:: a = da.linspace(0, 1, 20, chunks=2) a .. jupyter-execute:: b = np.eye(10) b[[5, 7, 3, 0], [6, 8, 2, 9]] = 2 b = sparse.COO.from_numpy(b) b .. jupyter-execute:: xr.Dataset(dict(a=("x", a), b=(("y", "z"), b))) xarray-2025.09.0/doc/internals/extending-xarray.rst000066400000000000000000000106361505620616400221410ustar00rootroot00000000000000 .. _internals.accessors: Extending xarray using accessors ================================ .. 
jupyter-execute:: :hide-code: import xarray as xr import numpy as np Xarray is designed as a general purpose library and hence tries to avoid including overly domain specific functionality. But inevitably, the need for more domain specific logic arises. .. _internals.accessors.composition: Composition over Inheritance ---------------------------- One potential solution to this problem is to subclass Dataset and/or DataArray to add domain specific functionality. However, inheritance is not very robust. It's easy to inadvertently use internal APIs when subclassing, which means that your code may break when xarray upgrades. Furthermore, many builtin methods will only return native xarray objects. The standard advice is to use :issue:`composition over inheritance <706>`, but reimplementing an API as large as xarray's on your own objects can be an onerous task, even if most methods are only forwarding to xarray implementations. (For an example of a project which took this approach of subclassing see `UXarray `_). If you simply want the ability to call a function with the syntax of a method call, then the builtin :py:meth:`~xarray.DataArray.pipe` method (copied from pandas) may suffice. .. _internals.accessors.writing accessors: Writing Custom Accessors ------------------------ To resolve this issue for more complex cases, xarray has the :py:func:`~xarray.register_dataset_accessor`, :py:func:`~xarray.register_dataarray_accessor` and :py:func:`~xarray.register_datatree_accessor` decorators for adding custom "accessors" on xarray objects, thereby "extending" the functionality of your xarray object. Here's how you might use these decorators to write a custom "geo" accessor implementing a geography specific extension to xarray: .. literalinclude:: ../examples/_code/accessor_example.py In general, the only restriction on the accessor class is that the ``__init__`` method must have a single parameter: the ``Dataset`` or ``DataArray`` object it is supposed to work on. This achieves the same result as if the ``Dataset`` class had a cached property defined that returns an instance of your class: .. code-block:: python class Dataset: ... @property def geo(self): return GeoAccessor(self) However, using the register accessor decorators is preferable to simply adding your own ad-hoc property (i.e., ``Dataset.geo = property(...)``), for several reasons: 1. It ensures that the name of your property does not accidentally conflict with any other attributes or methods (including other accessors). 2. Instances of accessor object will be cached on the xarray object that creates them. This means you can save state on them (e.g., to cache computed properties). 3. Using an accessor provides an implicit namespace for your custom functionality that clearly identifies it as separate from built-in xarray methods. .. note:: Accessors are created once per DataArray and Dataset instance. New instances, like those created from arithmetic operations or when accessing a DataArray from a Dataset (ex. ``ds[var_name]``), will have new accessors created. Back in an interactive IPython session, we can use these properties: .. jupyter-execute:: :hide-code: exec(open("examples/_code/accessor_example.py").read()) .. jupyter-execute:: ds = xr.Dataset({"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)}) ds.geo.center .. 
jupyter-execute:: ds.geo.plot() The intent here is that libraries that extend xarray could add such an accessor to implement subclass specific functionality rather than using actual subclasses or patching in a large number of domain specific methods. For further reading on ways to write new accessors and the philosophy behind the approach, see https://github.com/pydata/xarray/issues/1080. To help users keep things straight, please `let us know `_ if you plan to write a new accessor for an open source library. Existing open source accessors and the libraries that implement them are available in the list on the :ref:`ecosystem` page. To make documenting accessors with ``sphinx`` and ``sphinx.ext.autosummary`` easier, you can use `sphinx-autosummary-accessors`_. .. _sphinx-autosummary-accessors: https://sphinx-autosummary-accessors.readthedocs.io/ xarray-2025.09.0/doc/internals/how-to-add-new-backend.rst000066400000000000000000000451171505620616400227710ustar00rootroot00000000000000.. _add_a_backend: How to add a new backend ------------------------ Adding a new backend for read support to Xarray does not require one to integrate any code in Xarray; all you need to do is: - Create a class that inherits from Xarray :py:class:`~xarray.backends.BackendEntrypoint` and implements the method ``open_dataset`` see :ref:`RST backend_entrypoint` - Declare this class as an external plugin in your project configuration, see :ref:`RST backend_registration` If you also want to support lazy loading and dask see :ref:`RST lazy_loading`. Note that the new interface for backends is available from Xarray version >= 0.18 onwards. You can see what backends are currently available in your working environment with :py:class:`~xarray.backends.list_engines()`. .. _RST backend_entrypoint: BackendEntrypoint subclassing +++++++++++++++++++++++++++++ Your ``BackendEntrypoint`` sub-class is the primary interface with Xarray, and it should implement the following attributes and methods: - the ``open_dataset`` method (mandatory) - the ``open_dataset_parameters`` attribute (optional) - the ``guess_can_open`` method (optional) - the ``description`` attribute (optional) - the ``url`` attribute (optional). This is what a ``BackendEntrypoint`` subclass should look like: .. code-block:: python from xarray.backends import BackendEntrypoint class MyBackendEntrypoint(BackendEntrypoint): def open_dataset( self, filename_or_obj, *, drop_variables=None, # other backend specific keyword arguments # `chunks` and `cache` DO NOT go here, they are handled by xarray ): return my_open_dataset(filename_or_obj, drop_variables=drop_variables) open_dataset_parameters = ["filename_or_obj", "drop_variables"] def guess_can_open(self, filename_or_obj): try: _, ext = os.path.splitext(filename_or_obj) except TypeError: return False return ext in {".my_format", ".my_fmt"} description = "Use .my_format files in Xarray" url = "https://link_to/your_backend/documentation" ``BackendEntrypoint`` subclass methods and attributes are detailed in the following. .. _RST open_dataset: open_dataset ^^^^^^^^^^^^ The backend ``open_dataset`` shall implement reading from file, the variables decoding and it shall instantiate the output Xarray class :py:class:`~xarray.Dataset`. The following is an example of the high level processing steps: .. 
code-block:: python

    def open_dataset(
        self,
        filename_or_obj,
        *,
        drop_variables=None,
        decode_times=True,
        decode_timedelta=True,
        decode_coords=True,
        my_backend_option=None,
    ):
        vars, attrs, coords = my_reader(
            filename_or_obj,
            drop_variables=drop_variables,
            my_backend_option=my_backend_option,
        )
        vars, attrs, coords = my_decode_variables(
            vars, attrs, decode_times, decode_timedelta, decode_coords
        )  # see also conventions.decode_cf_variables
        ds = xr.Dataset(vars, attrs=attrs, coords=coords)
        ds.set_close(my_close_method)
        return ds

The output :py:class:`~xarray.Dataset` shall implement the additional custom method
``close``, used by Xarray to ensure the related files are eventually closed. This
method shall be set by using :py:meth:`~xarray.Dataset.set_close`.

The ``open_dataset`` method takes one argument (``filename_or_obj``) and one keyword
argument (``drop_variables``):

- ``filename_or_obj``: can be any object but usually it is a string containing a path
  or an instance of :py:class:`pathlib.Path`.
- ``drop_variables``: can be ``None`` or an iterable containing the variable names to
  be dropped when reading the data.

If it makes sense for your backend, your ``open_dataset`` method should implement in
its interface the following boolean keyword arguments, called **decoders**, which
default to ``None``:

- ``mask_and_scale``
- ``decode_times``
- ``decode_timedelta``
- ``use_cftime``
- ``concat_characters``
- ``decode_coords``

Note: all the supported decoders shall be declared explicitly in the backend
``open_dataset`` signature, and adding ``**kwargs`` is not allowed.

These keyword arguments are explicitly defined in the Xarray
:py:func:`~xarray.open_dataset` signature. Xarray will pass them to the backend only
if the User explicitly sets a value different from ``None``.
For more details on decoders see :ref:`RST decoders`.

Your backend can also take as input a set of backend-specific keyword arguments.
All these keyword arguments can be passed to :py:func:`~xarray.open_dataset` grouped
either via the ``backend_kwargs`` parameter or explicitly using the syntax
``**kwargs``.

If you don't want to support lazy loading, then the :py:class:`~xarray.Dataset` shall
contain values as a :py:class:`numpy.ndarray` and your work is almost done.

.. _RST open_dataset_parameters:

open_dataset_parameters
^^^^^^^^^^^^^^^^^^^^^^^

``open_dataset_parameters`` is the list of backend ``open_dataset`` parameters. It is
not a mandatory parameter, and if the backend does not provide it explicitly, Xarray
creates a list of them automatically by inspecting the backend signature.

If ``open_dataset_parameters`` is not defined, but ``**kwargs`` and ``*args`` are in
the backend ``open_dataset`` signature, Xarray raises an error.
On the other hand, if the backend provides the ``open_dataset_parameters``, then
``**kwargs`` and ``*args`` can be used in the signature.
However, this practice is discouraged unless there is a good reason for using
``**kwargs`` or ``*args``.

.. _RST guess_can_open:

guess_can_open
^^^^^^^^^^^^^^

``guess_can_open`` is used to identify the proper engine to open your data file
automatically in case the engine is not specified explicitly. If you are not
interested in supporting this feature, you can skip this step since
:py:class:`~xarray.backends.BackendEntrypoint` already provides a default
:py:meth:`~xarray.backends.BackendEntrypoint.guess_can_open`
that always returns ``False``.
Backend ``guess_can_open`` takes as input the ``filename_or_obj`` parameter of Xarray :py:meth:`~xarray.open_dataset`, and returns a boolean. .. _RST properties: description and url ^^^^^^^^^^^^^^^^^^^^ ``description`` is used to provide a short text description of the backend. ``url`` is used to include a link to the backend's documentation or code. These attributes are surfaced when a user prints :py:class:`~xarray.backends.BackendEntrypoint`. If ``description`` or ``url`` are not defined, an empty string is returned. .. _RST decoders: Decoders ^^^^^^^^ The decoders implement specific operations to transform data from on-disk representation to Xarray representation. A classic example is the β€œtime” variable decoding operation. In NetCDF, the elements of the β€œtime” variable are stored as integers, and the unit contains an origin (for example: "seconds since 1970-1-1"). In this case, Xarray transforms the pair integer-unit in a :py:class:`numpy.datetime64`. The standard coders implemented in Xarray are: - :py:class:`xarray.coding.strings.CharacterArrayCoder()` - :py:class:`xarray.coding.strings.EncodedStringCoder()` - :py:class:`xarray.coding.variables.UnsignedIntegerCoder()` - :py:class:`xarray.coding.variables.CFMaskCoder()` - :py:class:`xarray.coding.variables.CFScaleOffsetCoder()` - :py:class:`xarray.coding.times.CFTimedeltaCoder()` - :py:class:`xarray.coding.times.CFDatetimeCoder()` Xarray coders all have the same interface. They have two methods: ``decode`` and ``encode``. The method ``decode`` takes a ``Variable`` in on-disk format and returns a ``Variable`` in Xarray format. Variable attributes no more applicable after the decoding, are dropped and stored in the ``Variable.encoding`` to make them available to the ``encode`` method, which performs the inverse transformation. In the following an example on how to use the coders ``decode`` method: .. jupyter-execute:: :hide-code: import xarray as xr import numpy as np .. jupyter-execute:: var = xr.Variable( dims=("x",), data=np.arange(10.0), attrs={"scale_factor": 10, "add_offset": 2} ) var .. jupyter-execute:: coder = xr.coding.variables.CFScaleOffsetCoder() decoded_var = coder.decode(var) decoded_var .. jupyter-execute:: decoded_var.encoding Some of the transformations can be common to more backends, so before implementing a new decoder, be sure Xarray does not already implement that one. The backends can reuse Xarray’s decoders, either instantiating the coders and using the method ``decode`` directly or using the higher-level function :py:func:`~xarray.conventions.decode_cf_variables` that groups Xarray decoders. In some cases, the transformation to apply strongly depends on the on-disk data format. Therefore, you may need to implement your own decoder. An example of such a case is when you have to deal with the time format of a grib file. grib format is very different from the NetCDF one: in grib, the time is stored in two attributes dataDate and dataTime as strings. Therefore, it is not possible to reuse the Xarray time decoder, and implementing a new one is mandatory. Decoders can be activated or deactivated using the boolean keywords of Xarray :py:meth:`~xarray.open_dataset` signature: ``mask_and_scale``, ``decode_times``, ``decode_timedelta``, ``use_cftime``, ``concat_characters``, ``decode_coords``. Such keywords are passed to the backend only if the User sets a value different from ``None``. 
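For illustration, one way a backend might wire those keywords to Xarray's coders inside its ``open_dataset`` is sketched below. The helper name and structure are illustrative (not part of any real backend), and the sketch assumes the boolean keywords have already been resolved to ``True``/``False`` using the backend's own defaults:

.. code-block:: python

    import xarray as xr


    def _apply_supported_decoders(variables, mask_and_scale, decode_times):
        # Apply only the decoders this (hypothetical) backend supports,
        # honouring the keyword values forwarded by xarray.open_dataset().
        coders = []
        if mask_and_scale:
            coders.append(xr.coding.variables.CFMaskCoder())
            coders.append(xr.coding.variables.CFScaleOffsetCoder())
        if decode_times:
            coders.append(xr.coding.times.CFDatetimeCoder())
        for name, var in variables.items():
            for coder in coders:
                var = coder.decode(var, name=name)
            variables[name] = var
        return variables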
Note that the backend does not necessarily have to implement all the decoders, but it shall declare in its ``open_dataset`` interface only the boolean keywords related to the supported decoders. .. _RST backend_registration: How to register a backend +++++++++++++++++++++++++ Define a new entrypoint in your ``pyproject.toml`` (or ``setup.cfg/setup.py`` for older configurations), with: - group: ``xarray.backends`` - name: the name to be passed to :py:meth:`~xarray.open_dataset` as ``engine`` - object reference: the reference of the class that you have implemented. You can declare the entrypoint in your project configuration like so: .. tab:: pyproject.toml .. code:: toml [project.entry-points."xarray.backends"] my_engine = "my_package.my_module:MyBackendEntrypoint" .. tab:: pyproject.toml [Poetry] .. code-block:: toml [tool.poetry.plugins."xarray.backends"] my_engine = "my_package.my_module:MyBackendEntrypoint" .. tab:: setup.cfg .. code-block:: cfg [options.entry_points] xarray.backends = my_engine = my_package.my_module:MyBackendEntrypoint .. tab:: setup.py .. code-block:: setuptools.setup( entry_points={ "xarray.backends": [ "my_engine=my_package.my_module:MyBackendEntrypoint" ], }, ) See the `Python Packaging User Guide `_ for more information on entrypoints and details of the syntax. If you're using Poetry, note that table name in ``pyproject.toml`` is slightly different. See `the Poetry docs `_ for more information on plugins. .. _RST lazy_loading: How to support lazy loading +++++++++++++++++++++++++++ If you want to make your backend effective with big datasets, then you should take advantage of xarray's support for lazy loading and indexing. Basically, when your backend constructs the ``Variable`` objects, you need to replace the :py:class:`numpy.ndarray` inside the variables with a custom :py:class:`~xarray.backends.BackendArray` subclass that supports lazy loading and indexing. See the example below: .. code-block:: python backend_array = MyBackendArray() data = indexing.LazilyIndexedArray(backend_array) var = xr.Variable(dims, data, attrs=attrs, encoding=encoding) Where: - :py:class:`~xarray.core.indexing.LazilyIndexedArray` is a wrapper class provided by Xarray that manages the lazy loading and indexing. - ``MyBackendArray`` should be implemented by the backend and must inherit from :py:class:`~xarray.backends.BackendArray`. BackendArray subclassing ^^^^^^^^^^^^^^^^^^^^^^^^ The BackendArray subclass must implement the following method and attributes: - the ``__getitem__`` method that takes an index as an input and returns a `NumPy `__ array, - the ``shape`` attribute, - the ``dtype`` attribute. It may also optionally implement an additional ``async_getitem`` method. Xarray supports different types of :doc:`/user-guide/indexing`, that can be grouped in three types of indexes: :py:class:`~xarray.core.indexing.BasicIndexer`, :py:class:`~xarray.core.indexing.OuterIndexer`, and :py:class:`~xarray.core.indexing.VectorizedIndexer`. This implies that the implementation of the method ``__getitem__`` can be tricky. In order to simplify this task, Xarray provides a helper function, :py:func:`~xarray.core.indexing.explicit_indexing_adapter`, that transforms all the input indexer types (basic, outer, vectorized) in a tuple which is interpreted correctly by your backend. This is an example ``BackendArray`` subclass implementation: .. 
code-block:: python from xarray.backends import BackendArray class MyBackendArray(BackendArray): def __init__( self, shape, dtype, lock, # other backend specific keyword arguments ): self.shape = shape self.dtype = dtype self.lock = lock def __getitem__( self, key: xarray.core.indexing.ExplicitIndexer ) -> np.typing.ArrayLike: return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.BASIC, self._raw_indexing_method, ) def _raw_indexing_method(self, key: tuple) -> np.typing.ArrayLike: # thread safe method that access to data on disk with self.lock: ... return item Note that ``BackendArray.__getitem__`` must be thread safe to support multi-thread processing. The :py:func:`~xarray.core.indexing.explicit_indexing_adapter` method takes in input the ``key``, the array ``shape`` and the following parameters: - ``indexing_support``: the type of index supported by ``raw_indexing_method`` - ``raw_indexing_method``: a method that shall take in input a key in the form of a tuple and return an indexed :py:class:`numpy.ndarray`. For more details see :py:class:`~xarray.core.indexing.IndexingSupport` and :ref:`RST indexing`. Async support ^^^^^^^^^^^^^ Backends can also optionally support loading data asynchronously via xarray's asynchronous loading methods (e.g. ``~xarray.Dataset.load_async``). To support async loading the ``BackendArray`` subclass must additionally implement the ``BackendArray.async_getitem`` method. Note that implementing this method is only necessary if you want to be able to load data from different xarray objects concurrently. Even without this method your ``BackendArray`` implementation is still free to concurrently load chunks of data for a single ``Variable`` itself, so long as it does so behind the synchronous ``__getitem__`` interface. Dask support ^^^^^^^^^^^^ In order to support `Dask Distributed `__ and :py:mod:`multiprocessing`, the ``BackendArray`` subclass should be serializable either with :ref:`io.pickle` or `cloudpickle `__. That implies that all the reference to open files should be dropped. For opening files, we therefore suggest to use the helper class provided by Xarray :py:class:`~xarray.backends.CachingFileManager`. .. _RST indexing: Indexing examples ^^^^^^^^^^^^^^^^^ **BASIC** In the ``BASIC`` indexing support, numbers and slices are supported. Example: .. jupyter-input:: # () shall return the full array backend_array._raw_indexing_method(()) .. jupyter-output:: array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) .. jupyter-input:: # shall support integers backend_array._raw_indexing_method(1, 1) .. jupyter-output:: 5 .. jupyter-input:: # shall support slices backend_array._raw_indexing_method(slice(0, 3), slice(2, 4)) .. jupyter-output:: array([[2, 3], [6, 7], [10, 11]]) **OUTER** The ``OUTER`` indexing shall support number, slices and in addition it shall support also lists of integers. The outer indexing is equivalent to combining multiple input list with ``itertools.product()``: .. jupyter-input:: backend_array._raw_indexing_method([0, 1], [0, 1, 2]) .. jupyter-output:: array([[0, 1, 2], [4, 5, 6]]) .. jupyter-input:: # shall support integers backend_array._raw_indexing_method(1, 1) .. jupyter-output:: 5 **OUTER_1VECTOR** The ``OUTER_1VECTOR`` indexing shall supports number, slices and at most one list. The behaviour with the list shall be the same as ``OUTER`` indexing. 
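For example, continuing with the same 3x4 example array used above, a request mixing a single list with a slice might look like this (illustrative, following the pattern of the examples above):

.. jupyter-input::

    # one list plus a slice
    backend_array._raw_indexing_method([0, 2], slice(0, 2))

.. jupyter-output::

    array([[0, 1],
           [8, 9]])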
If you support more complex indexing as explicit indexing or numpy indexing, you can have a look to the implementation of Zarr backend and Scipy backend, currently available in :py:mod:`~xarray.backends` module. .. _RST preferred_chunks: Preferred chunk sizes ^^^^^^^^^^^^^^^^^^^^^ To potentially improve performance with lazy loading, the backend may define for each variable the chunk sizes that it prefers---that is, sizes that align with how the variable is stored. (Note that the backend is not directly involved in `Dask `__ chunking, because Xarray internally manages chunking.) To define the preferred chunk sizes, store a mapping within the variable's encoding under the key ``"preferred_chunks"`` (that is, ``var.encoding["preferred_chunks"]``). The mapping's keys shall be the names of dimensions with preferred chunk sizes, and each value shall be the corresponding dimension's preferred chunk sizes expressed as either an integer (such as ``{"dim1": 1000, "dim2": 2000}``) or a tuple of integers (such as ``{"dim1": (1000, 100), "dim2": (2000, 2000, 2000)}``). Xarray uses the preferred chunk sizes in some special cases of the ``chunks`` argument of the :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_mfdataset` functions. If ``chunks`` is a ``dict``, then for any dimensions missing from the keys or whose value is ``None``, Xarray sets the chunk sizes to the preferred sizes. If ``chunks`` equals ``"auto"``, then Xarray seeks ideal chunk sizes informed by the preferred chunk sizes. Specifically, it determines the chunk sizes using :py:func:`dask.array.core.normalize_chunks` with the ``previous_chunks`` argument set according to the preferred chunk sizes. xarray-2025.09.0/doc/internals/how-to-create-custom-index.rst000066400000000000000000000226161505620616400237440ustar00rootroot00000000000000.. currentmodule:: xarray .. _internals.custom indexes: How to create a custom index ============================ .. warning:: This feature is highly experimental. Support for custom indexes has been introduced in v2022.06.0 and is still incomplete. API is subject to change without deprecation notice. However we encourage you to experiment and report issues that arise. Xarray's built-in support for label-based indexing (e.g. ``ds.sel(latitude=40, method="nearest")``) and alignment operations relies on :py:class:`pandas.Index` objects. Pandas Indexes are powerful and suitable for many applications but also have some limitations: - it only works with 1-dimensional coordinates where explicit labels are fully loaded in memory - it is hard to reuse it with irregular data for which there exist more efficient, tree-based structures to perform data selection - it doesn't support extra metadata that may be required for indexing and alignment (e.g., a coordinate reference system) Fortunately, Xarray now allows extending this functionality with custom indexes, which can be implemented in 3rd-party libraries. The Index base class -------------------- Every Xarray index must inherit from the :py:class:`Index` base class. It is for example the case of Xarray built-in ``PandasIndex`` and ``PandasMultiIndex`` subclasses, which wrap :py:class:`pandas.Index` and :py:class:`pandas.MultiIndex` respectively. 
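As a point of orientation before diving into the ``Index`` API, you can inspect which index objects are attached to an existing Dataset or DataArray via the public ``xindexes`` property; a quick sketch:

.. code-block:: python

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(coords={"x": np.arange(4), "y": ["a", "b"]})

    # Mapping from coordinate name to the Index object backing it
    # (by default, one PandasIndex per dimension coordinate).
    ds.xindexes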
The ``Index`` API closely follows the :py:class:`Dataset` and :py:class:`DataArray` API, e.g., for an index to support :py:meth:`DataArray.sel` it needs to implement :py:meth:`Index.sel`, to support :py:meth:`DataArray.stack` and :py:meth:`DataArray.unstack` it needs to implement :py:meth:`Index.stack` and :py:meth:`Index.unstack`, etc. Some guidelines and examples are given below. More details can be found in the documented :py:class:`Index` API. Minimal requirements -------------------- Every index must at least implement the :py:meth:`Index.from_variables` class method, which is used by Xarray to build a new index instance from one or more existing coordinates in a Dataset or DataArray. Since any collection of coordinates can be passed to that method (i.e., the number, order and dimensions of the coordinates are all arbitrary), it is the responsibility of the index to check the consistency and validity of those input coordinates. For example, :py:class:`~xarray.indexes.PandasIndex` accepts only one coordinate and :py:class:`~xarray.indexes.PandasMultiIndex` accepts one or more 1-dimensional coordinates that must all share the same dimension. Other, custom indexes need not have the same constraints, e.g., - a georeferenced raster index which only accepts two 1-d coordinates with distinct dimensions - a staggered grid index which takes coordinates with different dimension name suffixes (e.g., "_c" and "_l" for center and left) Optional requirements --------------------- Pretty much everything else is optional. Depending on the method, in the absence of a (re)implementation, an index will either raise a ``NotImplementedError`` or won't do anything specific (just drop, pass or copy itself from/to the resulting Dataset or DataArray). For example, you can just skip re-implementing :py:meth:`Index.rename` if there is no internal attribute or object to rename according to the new desired coordinate or dimension names. In the case of ``PandasIndex``, we rename the underlying ``pandas.Index`` object and/or update the ``PandasIndex.dim`` attribute since the associated dimension name has been changed. Wrap index data as coordinate data ---------------------------------- In some cases it is possible to reuse the index's underlying object or structure as coordinate data and hence avoid data duplication. For ``PandasIndex`` and ``PandasMultiIndex``, we leverage the fact that ``pandas.Index`` objects expose some array-like API. In Xarray we use some wrappers around those underlying objects as a thin compatibility layer to preserve dtypes, handle explicit and n-dimensional indexing, etc. Other structures like tree-based indexes (e.g., kd-tree) may differ too much from arrays to reuse it as coordinate data. If the index data can be reused as coordinate data, the ``Index`` subclass should implement :py:meth:`Index.create_variables`. This method accepts a dictionary of variable names as keys and :py:class:`Variable` objects as values (used for propagating variable metadata) and should return a dictionary of new :py:class:`Variable` or :py:class:`IndexVariable` objects. Data selection -------------- For an index to support label-based selection, it needs to at least implement :py:meth:`Index.sel`. This method accepts a dictionary of labels where the keys are coordinate names (already filtered for the current index) and the values can be pretty much anything (e.g., a slice, a tuple, a list, a numpy array, a :py:class:`Variable` or a :py:class:`DataArray`). 
It is the responsibility of the index to properly handle those input labels. :py:meth:`Index.sel` must return an instance of :py:class:`IndexSelResult`. The latter is a small data class that holds positional indexers (indices) and that may also hold new variables, new indexes, names of variables or indexes to drop, names of dimensions to rename, etc. For example, this is useful in the case of ``PandasMultiIndex`` as it allows Xarray to convert it into a single ``PandasIndex`` when only one level remains after the selection. The :py:class:`IndexSelResult` class is also used to merge results from label-based selection performed by different indexes. Note that it is now possible to have two distinct indexes for two 1-d coordinates sharing the same dimension, but it is not currently possible to use those two indexes in the same call to :py:meth:`Dataset.sel`. Optionally, the index may also implement :py:meth:`Index.isel`. In the case of ``PandasIndex`` we use it to create a new index object by just indexing the underlying ``pandas.Index`` object. In other cases this may not be possible, e.g., a kd-tree object may not be easily indexed. If ``Index.isel()`` is not implemented, the index in just dropped in the DataArray or Dataset resulting from the selection. Alignment --------- For an index to support alignment, it needs to implement: - :py:meth:`Index.equals`, which compares the index with another index and returns either ``True`` or ``False`` - :py:meth:`Index.join`, which combines the index with another index and returns a new Index object - :py:meth:`Index.reindex_like`, which queries the index with another index and returns positional indexers that are used to re-index Dataset or DataArray variables along one or more dimensions Xarray ensures that those three methods are called with an index of the same type as argument. Meta-indexes ------------ Nothing prevents writing a custom Xarray index that itself encapsulates other Xarray index(es). We call such index a "meta-index". Here is a small example of a meta-index for geospatial, raster datasets (i.e., regularly spaced 2-dimensional data) that internally relies on two ``PandasIndex`` instances for the x and y dimensions respectively: .. code-block:: python from xarray import Index from xarray.core.indexes import PandasIndex from xarray.core.indexing import merge_sel_results class RasterIndex(Index): def __init__(self, xy_indexes): assert len(xy_indexes) == 2 # must have two distinct dimensions dim = [idx.dim for idx in xy_indexes.values()] assert dim[0] != dim[1] self._xy_indexes = xy_indexes @classmethod def from_variables(cls, variables): assert len(variables) == 2 xy_indexes = { k: PandasIndex.from_variables({k: v}) for k, v in variables.items() } return cls(xy_indexes) def create_variables(self, variables): idx_variables = {} for index in self._xy_indexes.values(): idx_variables.update(index.create_variables(variables)) return idx_variables def sel(self, labels): results = [] for k, index in self._xy_indexes.items(): if k in labels: results.append(index.sel({k: labels[k]})) return merge_sel_results(results) This basic index only supports label-based selection. Providing a full-featured index by implementing the other ``Index`` methods should be pretty straightforward for this example, though. This example is also not very useful unless we add some extra functionality on top of the two encapsulated ``PandasIndex`` objects, such as a coordinate reference system. 
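For instance, a minimal, untested sketch of how alignment support could begin for ``RasterIndex`` is to delegate ``equals`` to the two wrapped ``PandasIndex`` objects (``join`` and ``reindex_like`` would still be needed for full alignment support):

.. code-block:: python

    # additional method on the RasterIndex class sketched above
    def equals(self, other):
        if not isinstance(other, RasterIndex):
            return False
        return all(
            index.equals(other._xy_indexes[k])
            for k, index in self._xy_indexes.items()
        )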
How to use a custom index ------------------------- You can use :py:meth:`Dataset.set_xindex` or :py:meth:`DataArray.set_xindex` to assign a custom index to a Dataset or DataArray, e.g., using the ``RasterIndex`` above: .. code-block:: python import numpy as np import xarray as xr da = xr.DataArray( np.random.uniform(size=(100, 50)), coords={"x": ("x", np.arange(50)), "y": ("y", np.arange(100))}, dims=("y", "x"), ) # Xarray create default indexes for the 'x' and 'y' coordinates # we first need to explicitly drop it da = da.drop_indexes(["x", "y"]) # Build a RasterIndex from the 'x' and 'y' coordinates da_raster = da.set_xindex(["x", "y"], RasterIndex) # RasterIndex now takes care of label-based selection selected = da_raster.sel(x=10, y=slice(20, 50)) xarray-2025.09.0/doc/internals/index.rst000066400000000000000000000017661505620616400177630ustar00rootroot00000000000000.. _internals: Xarray Internals ================ Xarray builds upon two of the foundational libraries of the scientific Python stack, NumPy and pandas. It is written in pure Python (no C or Cython extensions), which makes it easy to develop and extend. Instead, we push compiled code to :ref:`optional dependencies`. The pages in this section are intended for: * Contributors to xarray who wish to better understand some of the internals, * Developers from other fields who wish to extend xarray with domain-specific logic, perhaps to support a new scientific community of users, * Developers of other packages who wish to interface xarray with their existing tools, e.g. by creating a backend for reading a new file format, or wrapping a custom array type. .. toctree:: :maxdepth: 2 :hidden: internal-design interoperability duck-arrays-integration chunked-arrays extending-xarray how-to-add-new-backend how-to-create-custom-index zarr-encoding-spec time-coding xarray-2025.09.0/doc/internals/internal-design.rst000066400000000000000000000236261505620616400217360ustar00rootroot00000000000000.. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) np.set_printoptions(threshold=10, edgeitems=2) .. _internal design: Internal Design =============== This page gives an overview of the internal design of xarray. In totality, the Xarray project defines 4 key data structures. In order of increasing complexity, they are: - :py:class:`xarray.Variable`, - :py:class:`xarray.DataArray`, - :py:class:`xarray.Dataset`, - :py:class:`xarray.DataTree`. The user guide lists only :py:class:`xarray.DataArray` and :py:class:`xarray.Dataset`, but :py:class:`~xarray.Variable` is the fundamental object internally, and :py:class:`~xarray.DataTree` is a natural generalisation of :py:class:`xarray.Dataset`. .. note:: Our :ref:`roadmap` includes plans to document :py:class:`~xarray.Variable` as fully public API. Internally private :ref:`lazy indexing classes ` are used to avoid loading more data than necessary, and flexible indexes classes (derived from :py:class:`~xarray.indexes.Index`) provide performant label-based lookups. .. _internal design.data structures: Data Structures --------------- The :ref:`data structures` page in the user guide explains the basics and concentrates on user-facing behavior, whereas this section explains how xarray's data structure classes actually work internally. .. 
_internal design.data structures.variable: Variable Objects ~~~~~~~~~~~~~~~~ The core internal data structure in xarray is the :py:class:`~xarray.Variable`, which is used as the basic building block behind xarray's :py:class:`~xarray.Dataset`, :py:class:`~xarray.DataArray` types. A :py:class:`~xarray.Variable` consists of: - ``dims``: A tuple of dimension names. - ``data``: The N-dimensional array (typically a NumPy or Dask array) storing the Variable's data. It must have the same number of dimensions as the length of ``dims``. - ``attrs``: A dictionary of metadata associated with this array. By convention, xarray's built-in operations never use this metadata. - ``encoding``: Another dictionary used to store information about how these variable's data is represented on disk. See :ref:`io.encoding` for more details. :py:class:`~xarray.Variable` has an interface similar to NumPy arrays, but extended to make use of named dimensions. For example, it uses ``dim`` in preference to an ``axis`` argument for methods like ``mean``, and supports :ref:`compute.broadcasting`. However, unlike ``Dataset`` and ``DataArray``, the basic ``Variable`` does not include coordinate labels along each axis. :py:class:`~xarray.Variable` is public API, but because of its incomplete support for labeled data, it is mostly intended for advanced uses, such as in xarray itself, for writing new backends, or when creating custom indexes. You can access the variable objects that correspond to xarray objects via the (readonly) :py:attr:`Dataset.variables ` and :py:attr:`DataArray.variable ` attributes. .. _internal design.dataarray: DataArray Objects ~~~~~~~~~~~~~~~~~ The simplest data structure used by most users is :py:class:`~xarray.DataArray`. A :py:class:`~xarray.DataArray` is a composite object consisting of multiple :py:class:`~xarray.Variable` objects which store related data. A single :py:class:`~xarray.Variable` is referred to as the "data variable", and stored under the :py:attr:`~xarray.DataArray.variable`` attribute. A :py:class:`~xarray.DataArray` inherits all of the properties of this data variable, i.e. ``dims``, ``data``, ``attrs`` and ``encoding``, all of which are implemented by forwarding on to the underlying ``Variable`` object. In addition, a :py:class:`~xarray.DataArray` stores additional ``Variable`` objects stored in a dict under the private ``_coords`` attribute, each of which is referred to as a "Coordinate Variable". These coordinate variable objects are only allowed to have ``dims`` that are a subset of the data variable's ``dims``, and each dim has a specific length. This means that the full :py:attr:`~xarray.DataArray.size` of the dataarray can be represented by a dictionary mapping dimension names to integer sizes. The underlying data variable has this exact same size, and the attached coordinate variables have sizes which are some subset of the size of the data variable. Another way of saying this is that all coordinate variables must be "alignable" with the data variable. When a coordinate is accessed by the user (e.g. via the dict-like :py:class:`~xarray.DataArray.__getitem__` syntax), then a new ``DataArray`` is constructed by finding all coordinate variables that have compatible dimensions and re-attaching them before the result is returned. This is why most users never see the ``Variable`` class underlying each coordinate variable - it is always promoted to a ``DataArray`` before returning. 
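To make this layering concrete, here is a small sketch (using only public API) that builds a :py:class:`~xarray.Variable` directly and then wraps it in a :py:class:`~xarray.DataArray`:

.. code-block:: python

    import numpy as np
    import xarray as xr

    var = xr.Variable(dims=("x",), data=np.arange(3), attrs={"units": "m"})

    # The DataArray wraps this unnamed data variable and attaches coordinate
    # variables (each itself backed by a Variable).
    da = xr.DataArray(var, coords={"x": [10, 20, 30]}, name="distance")

    da.variable              # the underlying data Variable
    da.coords["x"].variable  # the Variable backing the "x" coordinate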
Lookups are performed by special :py:class:`~xarray.indexes.Index` objects, which are stored in a dict under the private ``_indexes`` attribute. Indexes must be associated with one or more coordinates, and essentially act by translating a query given in physical coordinate space (typically via the :py:meth:`~xarray.DataArray.sel` method) into a set of integer indices in array index space that can be used to index the underlying n-dimensional array-like ``data``. Indexing in array index space (typically performed via the :py:meth:`~xarray.DataArray.isel` method) does not require consulting an ``Index`` object. Finally a :py:class:`~xarray.DataArray` defines a :py:attr:`~xarray.DataArray.name` attribute, which refers to its data variable but is stored on the wrapping ``DataArray`` class. The ``name`` attribute is primarily used when one or more :py:class:`~xarray.DataArray` objects are promoted into a :py:class:`~xarray.Dataset` (e.g. via :py:meth:`~xarray.DataArray.to_dataset`). Note that the underlying :py:class:`~xarray.Variable` objects are all unnamed, so they can always be referred to uniquely via a dict-like mapping. .. _internal design.dataset: Dataset Objects ~~~~~~~~~~~~~~~ The :py:class:`~xarray.Dataset` class is a generalization of the :py:class:`~xarray.DataArray` class that can hold multiple data variables. Internally all data variables and coordinate variables are stored under a single ``variables`` dict, and coordinates are specified by storing their names in a private ``_coord_names`` dict. The dataset's ``dims`` are the set of all dims present across any variable, but (similar to in dataarrays) coordinate variables cannot have a dimension that is not present on any data variable. When a data variable or coordinate variable is accessed, a new ``DataArray`` is again constructed from all compatible coordinates before returning. .. _internal design.subclassing: .. note:: The way that selecting a variable from a ``DataArray`` or ``Dataset`` actually involves internally wrapping the ``Variable`` object back up into a ``DataArray``/``Dataset`` is the primary reason :ref:`we recommend against subclassing ` Xarray objects. The main problem it creates is that we currently cannot easily guarantee that for example selecting a coordinate variable from your ``SubclassedDataArray`` would return an instance of ``SubclassedDataArray`` instead of just an :py:class:`xarray.DataArray`. See `GH issue `_ for more details. .. _internal design.lazy indexing: Lazy Indexing Classes --------------------- Lazy Loading ~~~~~~~~~~~~ If we open a ``Variable`` object from disk using :py:func:`~xarray.open_dataset` we can see that the actual values of the array wrapped by the data variable are not displayed. .. jupyter-execute:: da = xr.tutorial.open_dataset("air_temperature")["air"] var = da.variable var We can see the size, and the dtype of the underlying array, but not the actual values. This is because the values have not yet been loaded. If we look at the private attribute :py:meth:`~xarray.Variable._data` containing the underlying array object, we see something interesting: .. jupyter-execute:: var._data You're looking at one of xarray's internal Lazy Indexing Classes. These powerful classes are hidden from the user, but provide important functionality. Calling the public :py:attr:`~xarray.Variable.data` property loads the underlying array into memory. .. jupyter-execute:: var.data This array is now cached, which we can see by accessing the private attribute again: .. 
jupyter-execute:: var._data Lazy Indexing ~~~~~~~~~~~~~ The purpose of these lazy indexing classes is to prevent more data being loaded into memory than is necessary for the subsequent analysis, by deferring loading data until after indexing is performed. Let's open the data from disk again. .. jupyter-execute:: da = xr.tutorial.open_dataset("air_temperature")["air"] var = da.variable Now, notice how even after subsetting the data has does not get loaded: .. jupyter-execute:: var.isel(time=0) The shape has changed, but the values are still not shown. Looking at the private attribute again shows how this indexing information was propagated via the hidden lazy indexing classes: .. jupyter-execute:: var.isel(time=0)._data .. note:: Currently only certain indexing operations are lazy, not all array operations. For discussion of making all array operations lazy see `GH issue #5081 `_. Lazy Dask Arrays ~~~~~~~~~~~~~~~~ Note that xarray's implementation of Lazy Indexing classes is completely separate from how :py:class:`dask.array.Array` objects evaluate lazily. Dask-backed xarray objects delay almost all operations until :py:meth:`~xarray.DataArray.compute` is called (either explicitly or implicitly via :py:meth:`~xarray.DataArray.plot` for example). The exceptions to this laziness are operations whose output shape is data-dependent, such as when calling :py:meth:`~xarray.DataArray.where`. xarray-2025.09.0/doc/internals/interoperability.rst000066400000000000000000000063451505620616400222370ustar00rootroot00000000000000.. _interoperability: Interoperability of Xarray ========================== Xarray is designed to be extremely interoperable, in many orthogonal ways. Making xarray as flexible as possible is the common theme of most of the goals on our :ref:`roadmap`. This interoperability comes via a set of flexible abstractions into which the user can plug in. The current full list is: - :ref:`Custom file backends ` via the :py:class:`~xarray.backends.BackendEntrypoint` system, - Numpy-like :ref:`"duck" array wrapping `, which supports the `Python Array API Standard `_, - :ref:`Chunked distributed array computation ` via the :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` system, - Custom :py:class:`~xarray.Index` objects for :ref:`flexible label-based lookups `, - Extending xarray objects with domain-specific methods via :ref:`custom accessors `. .. warning:: One obvious way in which xarray could be more flexible is that whilst subclassing xarray objects is possible, we currently don't support it in most transformations, instead recommending composition over inheritance. See the :ref:`internal design page ` for the rationale and look at the corresponding `GH issue `_ if you're interested in improving support for subclassing! .. note:: If you think there is another way in which xarray could become more generically flexible then please tell us your ideas by `raising an issue to request the feature `_! Whilst xarray was originally designed specifically to open ``netCDF4`` files as :py:class:`numpy.ndarray` objects labelled by :py:class:`pandas.Index` objects, it is entirely possible today to: - lazily open an xarray object directly from a custom binary file format (e.g. using ``xarray.open_dataset(path, engine='my_custom_format')``, - handle the data as any API-compliant numpy-like array type (e.g. sparse or GPU-backed), - distribute out-of-core computation across that array type in parallel (e.g. 
via :ref:`dask`), - track the physical units of the data through computations (e.g via `pint-xarray `_), - query the data via custom index logic optimized for specific applications (e.g. an :py:class:`~xarray.Index` object backed by a KDTree structure), - attach domain-specific logic via accessor methods (e.g. to understand geographic Coordinate Reference System metadata), - organize hierarchical groups of xarray data in a :py:class:`xarray.DataTree` (e.g. to treat heterogeneous simulation and observational data together during analysis). All of these features can be provided simultaneously, using libraries compatible with the rest of the scientific python ecosystem. In this situation xarray would be essentially a thin wrapper acting as pure-python framework, providing a common interface and separation of concerns via various domain-agnostic abstractions. Most of the remaining pages in the documentation of xarray's internals describe these various types of interoperability in more detail. xarray-2025.09.0/doc/internals/time-coding.rst000066400000000000000000000556571505620616400210630ustar00rootroot00000000000000.. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) np.set_printoptions(threshold=20) int64_max = np.iinfo("int64").max int64_min = np.iinfo("int64").min + 1 uint64_max = np.iinfo("uint64").max .. _internals.timecoding: Time Coding =========== This page gives an overview how xarray encodes and decodes times and which conventions and functions are used. Pandas functionality -------------------- to_datetime ~~~~~~~~~~~ The function :py:func:`pandas.to_datetime` is used within xarray for inferring units and for testing purposes. In normal operation :py:func:`pandas.to_datetime` returns a :py:class:`pandas.Timestamp` (for scalar input) or :py:class:`pandas.DatetimeIndex` (for array-like input) which are related to ``np.datetime64`` values with a resolution inherited from the input (can be one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``). If no resolution can be inherited ``'ns'`` is assumed. That has the implication that the maximum usable time range for those cases is approximately +/- 292 years centered around the Unix epoch (1970-01-01). To accommodate that, we carefully check the units/resolution in the encoding and decoding step. When the arguments are numeric (not strings or ``np.datetime64`` values) ``"unit"`` can be anything from ``'Y'``, ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``. .. jupyter-execute:: print(f"Minimum datetime: {pd.to_datetime(int64_min, unit="ns")}") print(f"Maximum datetime: {pd.to_datetime(int64_max, unit="ns")}") For input values which can't be represented in nanosecond resolution an :py:class:`pandas.OutOfBoundsDatetime` exception is raised: .. jupyter-execute:: try: dtime = pd.to_datetime(int64_max, unit="us") except Exception as err: print(err) .. jupyter-execute:: try: dtime = pd.to_datetime(uint64_max, unit="ns") print("Wrong:", dtime) dtime = pd.to_datetime([uint64_max], unit="ns") except Exception as err: print(err) ``np.datetime64`` values can be extracted with :py:meth:`pandas.Timestamp.to_numpy` and :py:meth:`pandas.DatetimeIndex.to_numpy`. The returned resolution depends on the internal representation. This representation can be changed using :py:meth:`pandas.Timestamp.as_unit` and :py:meth:`pandas.DatetimeIndex.as_unit` respectively. 
``as_unit`` takes one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` as an argument. That means we are able to represent datetimes with second, millisecond, microsecond or nanosecond resolution. .. jupyter-execute:: time = pd.to_datetime(np.datetime64(0, "D")) print("Datetime:", time, np.asarray([time.to_numpy()]).dtype) print("Datetime as_unit('ms'):", time.as_unit("ms")) print("Datetime to_numpy():", time.as_unit("ms").to_numpy()) .. jupyter-execute:: time = pd.to_datetime(np.array([-1000, 1, 2], dtype="datetime64[Y]")) print("DatetimeIndex:", time) print("DatetimeIndex as_unit('us'):", time.as_unit("us")) print("DatetimeIndex to_numpy():", time.as_unit("us").to_numpy()) .. warning:: Input data with resolution higher than ``'ns'`` (eg. ``'ps'``, ``'fs'``, ``'as'``) is truncated (not rounded) at the ``'ns'``-level. This is `currently broken `_ for the ``'ps'`` input, where it is interpreted as ``'ns'``. .. jupyter-execute:: print("Good:", pd.to_datetime([np.datetime64(1901901901901, "as")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "fs")])) print(" Bad:", pd.to_datetime([np.datetime64(1901901901901, "ps")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "ns")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "us")])) print("Good:", pd.to_datetime([np.datetime64(1901901901901, "ms")])) .. warning:: Care has to be taken, as some configurations of input data will raise. The following shows, that we are safe to use :py:func:`pandas.to_datetime` when providing :py:class:`numpy.datetime64` as scalar or numpy array as input. .. jupyter-execute:: print( "Works:", np.datetime64(1901901901901, "s"), pd.to_datetime(np.datetime64(1901901901901, "s")), ) print( "Works:", np.array([np.datetime64(1901901901901, "s")]), pd.to_datetime(np.array([np.datetime64(1901901901901, "s")])), ) try: pd.to_datetime([np.datetime64(1901901901901, "s")]) except Exception as err: print("Raises:", err) try: pd.to_datetime(1901901901901, unit="s") except Exception as err: print("Raises:", err) try: pd.to_datetime([1901901901901], unit="s") except Exception as err: print("Raises:", err) try: pd.to_datetime(np.array([1901901901901]), unit="s") except Exception as err: print("Raises:", err) to_timedelta ~~~~~~~~~~~~ The function :py:func:`pandas.to_timedelta` is used within xarray for inferring units and for testing purposes. In normal operation :py:func:`pandas.to_timedelta` returns a :py:class:`pandas.Timedelta` (for scalar input) or :py:class:`pandas.TimedeltaIndex` (for array-like input) which are ``np.timedelta64`` values with ``ns`` resolution internally. That has the implication, that the usable timedelta covers only roughly 585 years. To accommodate for that, we are working around that limitation in the encoding and decoding step. .. jupyter-execute:: f"Maximum timedelta range: ({pd.to_timedelta(int64_min, unit="ns")}, {pd.to_timedelta(int64_max, unit="ns")})" For input values which can't be represented in nanosecond resolution an :py:class:`pandas.OutOfBoundsTimedelta` exception is raised: .. jupyter-execute:: try: delta = pd.to_timedelta(int64_max, unit="us") except Exception as err: print("First:", err) .. jupyter-execute:: try: delta = pd.to_timedelta(uint64_max, unit="ns") except Exception as err: print("Second:", err) When arguments are numeric (not strings or ``np.timedelta64`` values) "unit" can be anything from ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``. 
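As a quick, illustrative check of that behaviour (the numeric values here are arbitrary), numeric input comes back with nanosecond resolution regardless of the requested unit:

.. jupyter-execute::

    # Numeric input with a coarse unit still yields a nanosecond-resolution Timedelta
    delta = pd.to_timedelta(1, unit="D")
    print(delta, delta.to_numpy().dtype)

    # The same holds for array-like numeric input
    print(pd.to_timedelta([1, 2, 3], unit="h").dtype)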
``np.timedelta64`` values can be extracted with :py:meth:`pandas.Timedelta.to_numpy` and :py:meth:`pandas.TimedeltaIndex.to_numpy`. The returned resolution depends on the internal representation. This representation can be changed using :py:meth:`pandas.Timedelta.as_unit` and :py:meth:`pandas.TimedeltaIndex.as_unit` respectively. ``as_unit`` takes one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` as an argument. That means we are able to represent timedeltas with second, millisecond, microsecond or nanosecond resolution. .. jupyter-execute:: delta = pd.to_timedelta(np.timedelta64(1, "D")) print("Timedelta:", delta, np.asarray([delta.to_numpy()]).dtype) print("Timedelta as_unit('ms'):", delta.as_unit("ms")) print("Timedelta to_numpy():", delta.as_unit("ms").to_numpy()) .. jupyter-execute:: delta = pd.to_timedelta([0, 1, 2], unit="D") print("TimedeltaIndex:", delta) print("TimedeltaIndex as_unit('ms'):", delta.as_unit("ms")) print("TimedeltaIndex to_numpy():", delta.as_unit("ms").to_numpy()) .. warning:: Care has to be taken, as some configurations of input data will raise. The following shows, that we are safe to use :py:func:`pandas.to_timedelta` when providing :py:class:`numpy.timedelta64` as scalar or numpy array as input. .. jupyter-execute:: print( "Works:", np.timedelta64(1901901901901, "s"), pd.to_timedelta(np.timedelta64(1901901901901, "s")), ) print( "Works:", np.array([np.timedelta64(1901901901901, "s")]), pd.to_timedelta(np.array([np.timedelta64(1901901901901, "s")])), ) try: pd.to_timedelta([np.timedelta64(1901901901901, "s")]) except Exception as err: print("Raises:", err) try: pd.to_timedelta(1901901901901, unit="s") except Exception as err: print("Raises:", err) try: pd.to_timedelta([1901901901901], unit="s") except Exception as err: print("Raises:", err) try: pd.to_timedelta(np.array([1901901901901]), unit="s") except Exception as err: print("Raises:", err) Timestamp ~~~~~~~~~ :py:class:`pandas.Timestamp` is used within xarray to wrap strings of CF encoding reference times and datetime.datetime. When arguments are numeric (not strings) "unit" can be anything from ``'Y'``, ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``. In normal operation :py:class:`pandas.Timestamp` holds the timestamp in the provided resolution, but only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is truncated to ``'ns'``. The same conversion rules apply here as for :py:func:`pandas.to_timedelta` (see `to_timedelta`_). Depending on the internal resolution Timestamps can be represented in the range: .. jupyter-execute:: for unit in ["s", "ms", "us", "ns"]: print( f"unit: {unit!r} time range ({pd.Timestamp(int64_min, unit=unit)}, {pd.Timestamp(int64_max, unit=unit)})" ) Since relaxing the resolution, this enhances the range to several hundreds of thousands of centuries with microsecond representation. ``NaT`` will be at ``np.iinfo("int64").min`` for all of the different representations. .. warning:: When initialized with a datetime string this is only defined from ``-9999-01-01`` to ``9999-12-31``. .. jupyter-execute:: try: print("Works:", pd.Timestamp("-9999-01-01 00:00:00")) print("Works, too:", pd.Timestamp("9999-12-31 23:59:59")) print(pd.Timestamp("10000-01-01 00:00:00")) except Exception as err: print("Errors:", err) .. note:: :py:class:`pandas.Timestamp` is the only current possibility to correctly import time reference strings. 
It handles non-ISO formatted strings, keeps the resolution of the strings (``'s'``, ``'ms'`` etc.) and imports time zones. When initialized with :py:class:`numpy.datetime64` instead of a string it even overcomes the above limitation of the possible time range. .. jupyter-execute:: try: print("Handles non-ISO:", pd.Timestamp("92-1-8 151542")) print( "Keeps resolution 1:", pd.Timestamp("1992-10-08 15:15:42"), pd.Timestamp("1992-10-08 15:15:42").unit, ) print( "Keeps resolution 2:", pd.Timestamp("1992-10-08 15:15:42.5"), pd.Timestamp("1992-10-08 15:15:42.5").unit, ) print( "Keeps timezone:", pd.Timestamp("1992-10-08 15:15:42.5 -6:00"), pd.Timestamp("1992-10-08 15:15:42.5 -6:00").unit, ) print( "Extends timerange :", pd.Timestamp(np.datetime64("-10000-10-08 15:15:42.5001")), pd.Timestamp(np.datetime64("-10000-10-08 15:15:42.5001")).unit, ) except Exception as err: print("Errors:", err) DatetimeIndex ~~~~~~~~~~~~~ :py:class:`pandas.DatetimeIndex` is used to wrap ``np.datetime64`` values or other datetime-likes when encoding. The resolution of the DatetimeIndex depends on the input, but can be only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is cut to ``'ns'``. :py:class:`pandas.DatetimeIndex` will raise :py:class:`pandas.OutOfBoundsDatetime` if the input can't be represented in the given resolution. .. jupyter-execute:: try: print( "Works:", pd.DatetimeIndex( np.array(["1992-01-08", "1992-01-09"], dtype="datetime64[D]") ), ) print( "Works:", pd.DatetimeIndex( np.array( ["1992-01-08 15:15:42", "1992-01-09 15:15:42"], dtype="datetime64[s]", ) ), ) print( "Works:", pd.DatetimeIndex( np.array( ["1992-01-08 15:15:42.5", "1992-01-09 15:15:42.0"], dtype="datetime64[ms]", ) ), ) print( "Works:", pd.DatetimeIndex( np.array( ["1970-01-01 00:00:00.401501601701801901", "1970-01-01 00:00:00"], dtype="datetime64[as]", ) ), ) print( "Works:", pd.DatetimeIndex( np.array( ["-10000-01-01 00:00:00.401501", "1970-01-01 00:00:00"], dtype="datetime64[us]", ) ), ) except Exception as err: print("Errors:", err) CF Conventions Time Handling ---------------------------- Xarray tries to adhere to the latest version of the `CF Conventions`_. Relevant is the section on `Time Coordinate`_ and the `Calendar`_ subsection. .. _CF Conventions: https://cfconventions.org .. _Time Coordinate: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.11/cf-conventions.html#time-coordinate .. _Calendar: https://cfconventions.org/Data/cf-conventions/cf-conventions-1.11/cf-conventions.html#calendar CF time decoding ~~~~~~~~~~~~~~~~ Decoding of ``values`` with a time unit specification like ``"seconds since 1992-10-8 15:15:42.5 -6:00"`` into datetimes using the CF conventions is a multistage process. 1. If we have a non-standard calendar (e.g. ``"noleap"``) decoding is done with the ``cftime`` package, which is not covered in this section. For the ``"standard"``/``"gregorian"`` calendar as well as the ``"proleptic_gregorian"`` calendar the above outlined pandas functionality is used. 2. The ``"standard"``/``"gregorian"`` calendar and the ``"proleptic_gregorian"`` are equivalent for any dates and reference times >= ``"1582-10-15"``. First the reference time is checked and any timezone information stripped off. In a second step, the minimum and maximum ``values`` are checked if they can be represented in the current reference time resolution. At the same time integer overflow would be caught. 
For the ``"standard"``/``"gregorian"`` calendar the dates are checked to be >= ``"1582-10-15"``. If anything fails, the decoding is attempted with ``cftime``. 3. As the unit (here ``"seconds"``) and the resolution of the reference time ``"1992-10-8 15:15:42.5 -6:00"`` (here ``"milliseconds"``) might be different, the decoding resolution is aligned to the higher resolution of the two. Users may also specify their wanted target resolution by setting the ``time_unit`` keyword argument to one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` (default ``'ns'``). This will be included in the alignment process. This is done by multiplying the ``values`` by the ratio of nanoseconds per time unit and nanoseconds per reference time unit. To retain consistency for ``NaT`` values a mask is kept and re-introduced after the multiplication. 4. Times encoded as floating point values are checked for fractional parts and the resolution is enhanced in an iterative process until a fitting resolution (or ``'ns'``) is found. A ``SerializationWarning`` is issued to make the user aware of the possibly problematic encoding. 5. Finally, the ``values`` (at this point converted to ``int64`` values) are cast to ``datetime64[unit]`` (using the above retrieved unit) and added to the reference time :py:class:`pandas.Timestamp`. .. jupyter-execute:: calendar = "proleptic_gregorian" values = np.array([-1000 * 365, 0, 1000 * 365], dtype="int64") units = "days since 2000-01-01 00:00:00.000001" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[us]" dt .. jupyter-execute:: units = "microseconds since 2000-01-01 00:00:00" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[us]" dt .. jupyter-execute:: values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64") units = "days since 2000-01-01 00:00:00.001" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[ms]" dt .. jupyter-execute:: values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64") units = "hours since 2000-01-01" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[s]" dt .. jupyter-execute:: values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64") units = "hours since 2000-01-01 00:00:00 03:30" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[s]" dt .. jupyter-execute:: values = np.array([-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64") units = "days since 0001-01-01 00:00:00" dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s") assert dt.dtype == "datetime64[s]" dt CF time encoding ~~~~~~~~~~~~~~~~ For encoding the process is more or less a reversal of the above, but we have to make some decisions on default values. 1. Infer ``data_units`` from the given ``dates``. 2. Infer ``units`` (either cleanup given ``units`` or use ``data_units`` 3. Infer the calendar name from the given ``dates``. 4. If dates are :py:class:`cftime.datetime` objects then encode with ``cftime.date2num`` 5. Retrieve ``time_units`` and ``ref_date`` from ``units`` 6. Check ``ref_date`` >= ``1582-10-15``, otherwise -> ``cftime`` 7. Wrap ``dates`` with pd.DatetimeIndex 8. Subtracting ``ref_date`` (:py:class:`pandas.Timestamp`) from above :py:class:`pandas.DatetimeIndex` will return :py:class:`pandas.TimedeltaIndex` 9. 
Align resolution of :py:class:`pandas.TimedeltaIndex` with resolution of ``time_units`` 10. Retrieve needed ``units`` and ``delta`` to faithfully encode into int64 11. Divide ``time_deltas`` by ``delta``, use floor division (integer) or normal division (float) 12. Return result .. jupyter-execute:: calendar = "proleptic_gregorian" dates = np.array( [ "-2000-01-01T00:00:00", "0000-01-01T00:00:00", "0002-01-01T00:00:00", "2000-01-01T00:00:00", ], dtype="datetime64[s]", ) orig_values = np.array( [-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64" ) units = "days since 0001-01-01 00:00:00" values, _, _ = xr.coding.times.encode_cf_datetime( dates, units, calendar, dtype=np.dtype("int64") ) print(values, units) np.testing.assert_array_equal(values, orig_values) .. jupyter-execute:: :stderr: dates = np.array( [ "-2000-01-01T01:00:00", "0000-01-01T00:00:00", "0002-01-01T00:00:00", "2000-01-01T00:00:00", ], dtype="datetime64[s]", ) orig_values = np.array( [-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64" ) orig_values *= 24 # Convert to hours orig_values[0] += 1 # Adjust for the hour offset in dates above units = "days since 0001-01-01 00:00:00" values, units, _ = xr.coding.times.encode_cf_datetime( dates, units, calendar, dtype=np.dtype("int64") ) print(values, units) np.testing.assert_array_equal(values, orig_values) .. _internals.default_timeunit: Default Time Unit ~~~~~~~~~~~~~~~~~ The current default time unit of xarray is ``'ns'``. When setting keyword argument ``time_unit`` unit to ``'s'`` (the lowest resolution pandas allows) datetimes will be converted to at least ``'s'``-resolution, if possible. The same holds true for ``'ms'`` and ``'us'``. .. jupyter-execute:: attrs = {"units": "hours since 2000-01-01"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-datetimes1.nc") .. jupyter-execute:: xr.open_dataset("test-datetimes1.nc") .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-datetimes1.nc", decode_times=coder) If a coarser unit is requested the datetimes are decoded into their native on-disk resolution, if possible. .. jupyter-execute:: attrs = {"units": "milliseconds since 2000-01-01"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-datetimes2.nc") .. jupyter-execute:: xr.open_dataset("test-datetimes2.nc") .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-datetimes2.nc", decode_times=coder) Similar logic applies for decoding timedelta values. The default resolution is ``"ns"``: .. jupyter-execute:: attrs = {"units": "hours"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-timedeltas1.nc") .. jupyter-execute:: :stderr: xr.open_dataset("test-timedeltas1.nc") By default, timedeltas will be decoded to the same resolution as datetimes: .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-timedeltas1.nc", decode_times=coder, decode_timedelta=True) but if one would like to decode timedeltas to a different resolution, one can provide a coder specifically for timedeltas to ``decode_timedelta``: .. jupyter-execute:: timedelta_coder = xr.coders.CFTimedeltaCoder(time_unit="ms") xr.open_dataset( "test-timedeltas1.nc", decode_times=coder, decode_timedelta=timedelta_coder ) As with datetimes, if a coarser unit is requested the timedeltas are decoded into their native on-disk resolution, if possible: .. 
jupyter-execute:: attrs = {"units": "milliseconds"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) ds.to_netcdf("test-timedeltas2.nc") .. jupyter-execute:: xr.open_dataset("test-timedeltas2.nc", decode_timedelta=True) .. jupyter-execute:: coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.open_dataset("test-timedeltas2.nc", decode_times=coder, decode_timedelta=True) To opt-out of timedelta decoding (see issue `Undesired decoding to timedelta64 `_) pass ``False`` to ``decode_timedelta``: .. jupyter-execute:: xr.open_dataset("test-timedeltas2.nc", decode_timedelta=False) .. note:: Note that in the future the default value of ``decode_timedelta`` will be ``False`` rather than ``None``. .. jupyter-execute:: :hide-code: # Cleanup import os for f in [ "test-datetimes1.nc", "test-datetimes2.nc", "test-timedeltas1.nc", "test-timedeltas2.nc", ]: if os.path.exists(f): os.remove(f) xarray-2025.09.0/doc/internals/zarr-encoding-spec.rst000066400000000000000000000113571505620616400223430ustar00rootroot00000000000000.. currentmodule:: xarray .. _zarr_encoding: Zarr Encoding Specification ============================ In implementing support for the `Zarr `_ storage format, Xarray developers made some *ad hoc* choices about how to store NetCDF data in Zarr. Future versions of the Zarr spec will likely include a more formal convention for the storage of the NetCDF data model in Zarr; see `Zarr spec repo `_ for ongoing discussion. First, Xarray can only read and write Zarr groups. There is currently no support for reading / writing individual Zarr arrays. Zarr groups are mapped to Xarray ``Dataset`` objects. Second, from Xarray's point of view, the key difference between NetCDF and Zarr is that all NetCDF arrays have *dimension names* while Zarr arrays do not. Therefore, in order to store NetCDF data in Zarr, Xarray must somehow encode and decode the name of each array's dimensions. To accomplish this, Xarray developers decided to define a special Zarr array attribute: ``_ARRAY_DIMENSIONS``. The value of this attribute is a list of dimension names (strings), for example ``["time", "lon", "lat"]``. When writing data to Zarr, Xarray sets this attribute on all variables based on the variable dimensions. When reading a Zarr group, Xarray looks for this attribute on all arrays, raising an error if it can't be found. The attribute is used to define the variable dimension names and then removed from the attributes dictionary returned to the user. Because of these choices, Xarray cannot read arbitrary array data, but only Zarr data with valid ``_ARRAY_DIMENSIONS`` or `NCZarr `_ attributes on each array (NCZarr dimension names are defined in the ``.zarray`` file). After decoding the ``_ARRAY_DIMENSIONS`` or NCZarr attribute and assigning the variable dimensions, Xarray proceeds to [optionally] decode each variable using its standard CF decoding machinery used for NetCDF data (see :py:func:`decode_cf`). Finally, it's worth noting that Xarray writes (and attempts to read) "consolidated metadata" by default (the ``.zmetadata`` file), which is another non-standard Zarr extension, albeit one implemented upstream in Zarr-Python. You do not need to write consolidated metadata to make Zarr stores readable in Xarray, but because Xarray can open these stores much faster, users will see a warning about poor performance when reading non-consolidated stores unless they explicitly set ``consolidated=False``. See :ref:`io.zarr.consolidated_metadata` for more details. 
As a concrete example, here we write a tutorial dataset to Zarr and then re-open it directly with Zarr: .. jupyter-execute:: import os import xarray as xr import zarr ds = xr.tutorial.load_dataset("rasm") ds.to_zarr("rasm.zarr", mode="w", consolidated=False) os.listdir("rasm.zarr") .. jupyter-execute:: zgroup = zarr.open("rasm.zarr") zgroup.tree() .. jupyter-execute:: dict(zgroup["Tair"].attrs) .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("rasm.zarr") Chunk Key Encoding ------------------ When writing data to Zarr stores, Xarray supports customizing how chunk keys are encoded through the ``chunk_key_encoding`` parameter in the variable's encoding dictionary. This is particularly useful when working with Zarr V2 arrays and you need to control the dimension separator in chunk keys. For example, to specify a custom separator for chunk keys: .. jupyter-execute:: import xarray as xr import numpy as np from zarr.core.chunk_key_encodings import V2ChunkKeyEncoding # Create a custom chunk key encoding with "/" as separator enc = V2ChunkKeyEncoding(separator="/").to_dict() # Create and write a dataset with custom chunk key encoding arr = np.ones((42, 100)) ds = xr.DataArray(arr, name="var1").to_dataset() ds.to_zarr( "example.zarr", zarr_format=2, mode="w", encoding={"var1": {"chunks": (42, 50), "chunk_key_encoding": enc}}, ) The ``chunk_key_encoding`` option accepts a dictionary that specifies the encoding configuration. For Zarr V2 arrays, you can use the ``V2ChunkKeyEncoding`` class from ``zarr.core.chunk_key_encodings`` to generate this configuration. This is particularly useful when you need to ensure compatibility with specific Zarr V2 storage layouts or when working with tools that expect a particular chunk key format. .. note:: The ``chunk_key_encoding`` option is only relevant when writing to Zarr stores. When reading Zarr arrays, Xarray automatically detects and uses the appropriate chunk key encoding based on the store's format and configuration. .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("example.zarr") xarray-2025.09.0/doc/roadmap.rst000066400000000000000000000275711505620616400163020ustar00rootroot00000000000000.. _roadmap: Development roadmap =================== Authors: Xarray developers Date: September 7, 2021 Xarray is an open source Python library for labeled multidimensional arrays and datasets. Our philosophy -------------- Why has xarray been successful? In our opinion: - Xarray does a great job of solving **specific use-cases** for multidimensional data analysis: - The dominant use-case for xarray is for analysis of gridded dataset in the geosciences, e.g., as part of the `Pangeo `__ project. - Xarray is also used more broadly in the physical sciences, where we've found the needs for analyzing multidimensional datasets are remarkably consistent (e.g., see `SunPy `__ and `PlasmaPy `__). - Finally, xarray is used in a variety of other domains, including finance, `probabilistic programming `__ and genomics. - Xarray is also a **domain agnostic** solution: - We focus on providing a flexible set of functionality related labeled multidimensional arrays, rather than solving particular problems. - This facilitates collaboration between users with different needs, and helps us attract a broad community of contributors. - Importantly, this retains flexibility, for use cases that don't fit particularly well into existing frameworks. - Xarray **integrates well** with other libraries in the scientific Python stack. 
- We leverage first-class external libraries for core features of xarray (e.g., NumPy for ndarrays, pandas for indexing, dask for parallel computing) - We expose our internal abstractions to users (e.g., ``apply_ufunc()``), which facilitates extending xarray in various ways. Together, these features have made xarray a first-class choice for labeled multidimensional arrays in Python. We want to double-down on xarray's strengths by making it an even more flexible and powerful tool for multidimensional data analysis. We want to continue to engage xarray's core geoscience users, and to also reach out to new domains to learn from other successful data models like those of `yt `__ or the `OLAP cube `__. Specific needs -------------- The user community has voiced a number specific needs related to how xarray interfaces with domain specific problems. Xarray may not solve all of these issues directly, but these areas provide opportunities for xarray to provide better, more extensible, interfaces. Some examples of these common needs are: - Non-regular grids (e.g., staggered and unstructured meshes). - Physical units. - Lazily computed arrays (e.g., for coordinate systems). - New file-formats. Technical vision ---------------- We think the right approach to extending xarray's user community and the usefulness of the project is to focus on improving key interfaces that can be used externally to meet domain-specific needs. We can generalize the community's needs into three main categories: - More flexible grids/indexing. - More flexible arrays/computing. - More flexible storage backends. - More flexible data structures. Each of these are detailed further in the subsections below. Flexible indexes ~~~~~~~~~~~~~~~~ .. note:: Work on flexible grids and indexes is currently underway. See `GH Project #1 `__ for more detail. Xarray currently keeps track of indexes associated with coordinates by storing them in the form of a ``pandas.Index`` in special ``xarray.IndexVariable`` objects. The limitations of this model became clear with the addition of ``pandas.MultiIndex`` support in xarray 0.9, where a single index corresponds to multiple xarray variables. MultiIndex support is highly useful, but xarray now has numerous special cases to check for MultiIndex levels. A cleaner model would be to elevate ``indexes`` to an explicit part of xarray's data model, e.g., as attributes on the ``Dataset`` and ``DataArray`` classes. Indexes would need to be propagated along with coordinates in xarray operations, but will no longer would need to have a one-to-one correspondence with coordinate variables. Instead, an index should be able to refer to multiple (possibly multidimensional) coordinates that define it. See :issue:`1603` for full details. Specific tasks: - Add an ``indexes`` attribute to ``xarray.Dataset`` and ``xarray.Dataset``, as dictionaries that map from coordinate names to xarray index objects. - Use the new index interface to write wrappers for ``pandas.Index``, ``pandas.MultiIndex`` and ``scipy.spatial.KDTree``. - Expose the interface externally to allow third-party libraries to implement custom indexing routines, e.g., for geospatial look-ups on the surface of the Earth. In addition to the new features it directly enables, this clean up will allow xarray to more easily implement some long-awaited features that build upon indexing, such as groupby operations with multiple variables. Flexible arrays ~~~~~~~~~~~~~~~ .. note:: Work on flexible arrays is currently underway. 
See `GH Project #2 `__ for more detail. Xarray currently supports wrapping multidimensional arrays defined by NumPy, dask and to a limited-extent pandas. It would be nice to have interfaces that allow xarray to wrap alternative N-D array implementations, e.g.: - Arrays holding physical units. - Lazily computed arrays. - Other ndarray objects, e.g., sparse, xnd, xtensor. Our strategy has been to pursue upstream improvements in NumPy (see `NEP-22 `__) for supporting a complete duck-typing interface using with NumPy's higher level array API. Improvements in NumPy's support for custom data types would also be highly useful for xarray users. By pursuing these improvements in NumPy we hope to extend the benefits to the full scientific Python community, and avoid tight coupling between xarray and specific third-party libraries (e.g., for implementing units). This will allow xarray to maintain its domain agnostic strengths. We expect that we may eventually add some minimal interfaces in xarray for features that we delegate to external array libraries (e.g., for getting units and changing units). If we do add these features, we expect them to be thin wrappers, with core functionality implemented by third-party libraries. Flexible storage ~~~~~~~~~~~~~~~~ .. note:: Work on flexible storage backends is currently underway. See `GH Project #3 `__ for more detail. The xarray backends module has grown in size and complexity. Much of this growth has been "organic" and mostly to support incremental additions to the supported backends. This has left us with a fragile internal API that is difficult for even experienced xarray developers to use. Moreover, the lack of a public facing API for building xarray backends means that users can not easily build backend interface for xarray in third-party libraries. The idea of refactoring the backends API and exposing it to users was originally proposed in :issue:`1970`. The idea would be to develop a well tested and generic backend base class and associated utilities for external use. Specific tasks for this development would include: - Exposing an abstract backend for writing new storage systems. - Exposing utilities for features like automatic closing of files, LRU-caching and explicit/lazy indexing. - Possibly moving some infrequently used backends to third-party packages. Flexible data structures ~~~~~~~~~~~~~~~~~~~~~~~~ Xarray provides two primary data structures, the ``xarray.DataArray`` and the ``xarray.Dataset``. This section describes two possible data model extensions. Tree-like data structure ++++++++++++++++++++++++ .. note:: After some time, the community DataTree project has now been updated and merged into xarray exposing :py:class:`xarray.DataTree`. This is just released and a bit experimental, but please try it out and let us know what you think. Take a look at our :ref:`quick-overview-datatrees` quickstart. Xarray’s highest-level object was previously an ``xarray.Dataset``, whose data model echoes that of a single netCDF group. However real-world datasets are often better represented by a collection of related Datasets. Particular common examples include: - Multi-resolution datasets, - Collections of time series datasets with differing lengths, - Heterogeneous datasets comprising multiple different types of related observational or simulation data, - Bayesian workflows involving various statistical distributions over multiple variables, - Whole netCDF files containing multiple groups. 
- Comparison of output from many similar models (such as in the IPCC's Coupled Model Intercomparison Projects) A new tree-like data structure, ``xarray.DataTree``, which is essentially a structured hierarchical collection of Datasets, represents these cases and instead maps to multiple netCDF groups (see :issue:`4118`). Currently there are several libraries which have wrapped xarray in order to build domain-specific data structures (e.g. `xarray-multiscale `__.), but the general ``xarray.DataTree`` object obviates the need for these and consolidates effort in a single domain-agnostic tool, much as xarray has already achieved. Labeled array without coordinates +++++++++++++++++++++++++++++++++ There is a need for a lightweight array structure with named dimensions for convenient indexing and broadcasting. Xarray includes such a structure internally (``xarray.Variable``). We want to factor out xarray's β€œVariable” object into a standalone package with minimal dependencies for integration with libraries that don't want to inherit xarray's dependency on pandas (e.g. scikit-learn). The new β€œVariable” class will follow established array protocols and the new data-apis standard. It will be capable of wrapping multiple array-like objects (e.g. NumPy, Dask, Sparse, Pint, CuPy, Pytorch). While β€œDataArray” fits some of these requirements, it offers a more complex data model than is desired for many applications and depends on pandas. Engaging more users ------------------- .. note:: Work on improving xarray’s documentation and user engagement is currently underway. See `GH Project #4 `__ for more detail. Like many open-source projects, the documentation of xarray has grown together with the library's features. While we think that the xarray documentation is comprehensive already, we acknowledge that the adoption of xarray might be slowed down because of the substantial time investment required to learn its working principles. In particular, non-computer scientists or users less familiar with the pydata ecosystem might find it difficult to learn xarray and realize how xarray can help them in their daily work. In order to lower this adoption barrier, we propose to: - Develop entry-level tutorials for users with different backgrounds. For example, we would like to develop tutorials for users with or without previous knowledge of pandas, NumPy, netCDF, etc. These tutorials may be built as part of xarray's documentation or included in a separate repository to enable interactive use (e.g. mybinder.org). - Document typical user workflows in a dedicated website, following the example of `dask-stories `__. - Write a basic glossary that defines terms that might not be familiar to all (e.g. "lazy", "labeled", "serialization", "indexing", "backend"). Administrative -------------- NumFOCUS ~~~~~~~~ On July 16, 2018, Joe and Stephan submitted xarray's fiscal sponsorship application to NumFOCUS. xarray-2025.09.0/doc/user-guide/000077500000000000000000000000001505620616400161625ustar00rootroot00000000000000xarray-2025.09.0/doc/user-guide/combining.rst000066400000000000000000000274211505620616400206670ustar00rootroot00000000000000.. _combining data: Combining data -------------- .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) %xmode minimal * For combining datasets or data arrays along a single dimension, see concatenate_. * For combining datasets with different variables, see merge_. 
* For combining datasets or data arrays with different indexes or missing values, see combine_. * For combining datasets or data arrays along multiple dimensions see combining.multi_. .. _concatenate: Concatenate ~~~~~~~~~~~ To combine :py:class:`~xarray.Dataset` / :py:class:`~xarray.DataArray` objects along an existing or new dimension into a larger object, you can use :py:func:`~xarray.concat`. ``concat`` takes an iterable of ``DataArray`` or ``Dataset`` objects, as well as a dimension name, and concatenates along that dimension: .. jupyter-execute:: da = xr.DataArray( np.arange(6).reshape(2, 3), [("x", ["a", "b"]), ("y", [10, 20, 30])] ) da.isel(y=slice(0, 1)) # same as da[:, :1] .. jupyter-execute:: # This resembles how you would use np.concatenate: xr.concat([da[:, :1], da[:, 1:]], dim="y") .. jupyter-execute:: # For more friendly pandas-like indexing you can use: xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y") In addition to combining along an existing dimension, ``concat`` can create a new dimension by stacking lower dimensional arrays together: .. jupyter-execute:: da.sel(x="a") .. jupyter-execute:: xr.concat([da.isel(x=0), da.isel(x=1)], "x") If the second argument to ``concat`` is a new dimension name, the arrays will be concatenated along that new dimension, which is always inserted as the first dimension: .. jupyter-execute:: da0 = da.isel(x=0, drop=True) da1 = da.isel(x=1, drop=True) xr.concat([da0, da1], "new_dim") The second argument to ``concat`` can also be an :py:class:`~pandas.Index` or :py:class:`~xarray.DataArray` object as well as a string, in which case it is used to label the values along the new dimension: .. jupyter-execute:: xr.concat([da0, da1], pd.Index([-90, -100], name="new_dim")) Of course, ``concat`` also works on ``Dataset`` objects: .. jupyter-execute:: ds = da.to_dataset(name="foo") xr.concat([ds.sel(x="a"), ds.sel(x="b")], "x") :py:func:`~xarray.concat` has a number of options which provide deeper control over which variables are concatenated and how it handles conflicting variables between datasets. With the default parameters, xarray will load some coordinate variables into memory to compare them between datasets. This may be prohibitively expensive if you are manipulating your dataset lazily using :ref:`dask`. .. note:: In a future version of xarray the default values for many of these options will change. You can opt into the new default values early using ``xr.set_options(use_new_combine_kwarg_defaults=True)``. .. _merge: Merge ~~~~~ To combine variables and coordinates between multiple ``DataArray`` and/or ``Dataset`` objects, use :py:func:`~xarray.merge`. It can merge a list of ``Dataset``, ``DataArray`` or dictionaries of objects convertible to ``DataArray`` objects: .. jupyter-execute:: xr.merge([ds, ds.rename({"foo": "bar"})]) .. jupyter-execute:: xr.merge([xr.DataArray(n, name="var%d" % n) for n in range(5)]) If you merge another dataset (or a dictionary including data array objects), by default the resulting dataset will be aligned on the **union** of all index coordinates: .. note:: In a future version of xarray the default value for ``join`` and ``compat`` will change. This change will mean that xarray will no longer attempt to align the indices of the merged dataset. You can opt into the new default values early using ``xr.set_options(use_new_combine_kwarg_defaults=True)``. Or explicitly set ``join='outer'`` to preserve old behavior. .. 
jupyter-execute:: other = xr.Dataset({"bar": ("x", [1, 2, 3, 4]), "x": list("abcd")}) xr.merge([ds, other], join="outer") This ensures that ``merge`` is non-destructive. ``xarray.MergeError`` is raised if you attempt to merge two variables with the same name but different values: .. jupyter-execute:: :raises: xr.merge([ds, ds + 1]) .. note:: In a future version of xarray the default value for ``compat`` will change from ``compat='no_conflicts'`` to ``compat='override'``. In this scenario the values in the first object override all the values in other objects. .. jupyter-execute:: xr.merge([ds, ds + 1], compat="override") The same non-destructive merging between ``DataArray`` index coordinates is used in the :py:class:`~xarray.Dataset` constructor: .. jupyter-execute:: xr.Dataset({"a": da.isel(x=slice(0, 1)), "b": da.isel(x=slice(1, 2))}) .. _combine: Combine ~~~~~~~ The instance method :py:meth:`~xarray.DataArray.combine_first` combines two datasets/data arrays and defaults to non-null values in the calling object, using values from the called object to fill holes. The resulting coordinates are the union of coordinate labels. Vacant cells as a result of the outer-join are filled with ``NaN``. For example: .. jupyter-execute:: ar0 = xr.DataArray([[0, 0], [0, 0]], [("x", ["a", "b"]), ("y", [-1, 0])]) ar1 = xr.DataArray([[1, 1], [1, 1]], [("x", ["b", "c"]), ("y", [0, 1])]) ar0.combine_first(ar1) .. jupyter-execute:: ar1.combine_first(ar0) For datasets, ``ds0.combine_first(ds1)`` works similarly to ``xr.merge([ds0, ds1])``, except that ``xr.merge`` raises ``MergeError`` when there are conflicting values in variables to be merged, whereas ``.combine_first`` defaults to the calling object's values. .. note:: In a future version of xarray the default options for ``xr.merge`` will change such that the behavior matches ``combine_first``. .. _update: Update ~~~~~~ In contrast to ``merge``, :py:meth:`~xarray.Dataset.update` modifies a dataset in-place without checking for conflicts, and will overwrite any existing variables with new values: .. jupyter-execute:: ds.update({"space": ("space", [10.2, 9.4, 3.9])}) However, dimensions are still required to be consistent between different Dataset variables, so you cannot change the size of a dimension unless you replace all dataset variables that use it. ``update`` also performs automatic alignment if necessary. Unlike ``merge``, it maintains the alignment of the original array instead of merging indexes: .. jupyter-execute:: ds.update(other) The exact same alignment logic when setting a variable with ``__setitem__`` syntax: .. jupyter-execute:: ds["baz"] = xr.DataArray([9, 9, 9, 9, 9], coords=[("x", list("abcde"))]) ds.baz Equals and identical ~~~~~~~~~~~~~~~~~~~~ Xarray objects can be compared by using the :py:meth:`~xarray.Dataset.equals`, :py:meth:`~xarray.Dataset.identical` and :py:meth:`~xarray.Dataset.broadcast_equals` methods. These methods are used by the optional ``compat`` argument on ``concat`` and ``merge``. :py:attr:`~xarray.Dataset.equals` checks dimension names, indexes and array values: .. jupyter-execute:: da.equals(da.copy()) :py:attr:`~xarray.Dataset.identical` also checks attributes, and the name of each object: .. jupyter-execute:: da.identical(da.rename("bar")) :py:attr:`~xarray.Dataset.broadcast_equals` does a more relaxed form of equality check that allows variables to have different dimensions, as long as values are constant along those new dimensions: .. 
jupyter-execute:: left = xr.Dataset(coords={"x": 0}) right = xr.Dataset({"x": [0, 0, 0]}) left.broadcast_equals(right) Like pandas objects, two xarray objects are still equal or identical if they have missing values marked by ``NaN`` in the same locations. In contrast, the ``==`` operation performs element-wise comparison (like numpy): .. jupyter-execute:: da == da.copy() Note that ``NaN`` does not compare equal to ``NaN`` in element-wise comparison; you may need to deal with missing values explicitly. .. _combining.no_conflicts: Merging with 'no_conflicts' ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``compat`` argument ``'no_conflicts'`` is only available when combining xarray objects with ``merge``. In addition to the above comparison methods it allows the merging of xarray objects with locations where *either* have ``NaN`` values. This can be used to combine data with overlapping coordinates as long as any non-missing values agree or are disjoint: .. jupyter-execute:: ds1 = xr.Dataset({"a": ("x", [10, 20, 30, np.nan])}, {"x": [1, 2, 3, 4]}) ds2 = xr.Dataset({"a": ("x", [np.nan, 30, 40, 50])}, {"x": [2, 3, 4, 5]}) xr.merge([ds1, ds2], join="outer", compat="no_conflicts") Note that due to the underlying representation of missing values as floating point numbers (``NaN``), variable data type is not always preserved when merging in this manner. .. _combining.multi: Combining along multiple dimensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For combining many objects along multiple dimensions xarray provides :py:func:`~xarray.combine_nested` and :py:func:`~xarray.combine_by_coords`. These functions use a combination of ``concat`` and ``merge`` across different variables to combine many objects into one. :py:func:`~xarray.combine_nested` requires specifying the order in which the objects should be combined, while :py:func:`~xarray.combine_by_coords` attempts to infer this ordering automatically from the coordinates in the data. :py:func:`~xarray.combine_nested` is useful when you know the spatial relationship between each object in advance. The datasets must be provided in the form of a nested list, which specifies their relative position and ordering. A common task is collecting data from a parallelized simulation where each processor wrote out data to a separate file. A domain which was decomposed into 4 parts, 2 each along both the x and y axes, requires organising the datasets into a doubly-nested list, e.g: .. jupyter-execute:: arr = xr.DataArray( name="temperature", data=np.random.randint(5, size=(2, 2)), dims=["x", "y"] ) arr .. jupyter-execute:: ds_grid = [[arr, arr], [arr, arr]] xr.combine_nested(ds_grid, concat_dim=["x", "y"]) :py:func:`~xarray.combine_nested` can also be used to explicitly merge datasets with different variables. For example if we have 4 datasets, which are divided along two times, and contain two different variables, we can pass ``None`` to ``'concat_dim'`` to specify the dimension of the nested list over which we wish to use ``merge`` instead of ``concat``: .. jupyter-execute:: temp = xr.DataArray(name="temperature", data=np.random.randn(2), dims=["t"]) precip = xr.DataArray(name="precipitation", data=np.random.randn(2), dims=["t"]) ds_grid = [[temp, precip], [temp, precip]] xr.combine_nested(ds_grid, concat_dim=["t", None]) :py:func:`~xarray.combine_by_coords` is for combining objects which have dimension coordinates which specify their relationship to and order relative to one another, for example a linearly-increasing 'time' dimension coordinate. 
Here we combine two datasets using their common dimension coordinates. Notice they are concatenated in order based on the values in their dimension coordinates, not on their position in the list passed to ``combine_by_coords``. .. jupyter-execute:: x1 = xr.DataArray(name="foo", data=np.random.randn(3), coords=[("x", [0, 1, 2])]) x2 = xr.DataArray(name="foo", data=np.random.randn(3), coords=[("x", [3, 4, 5])]) xr.combine_by_coords([x2, x1]) These functions are used by :py:func:`~xarray.open_mfdataset` to open many files as one dataset. The particular function used is specified by setting the argument ``'combine'`` to ``'by_coords'`` or ``'nested'``. This is useful for situations where your data is split across many files in multiple locations, which have some known relationship between one another. xarray-2025.09.0/doc/user-guide/complex-numbers.rst000066400000000000000000000072141505620616400220400ustar00rootroot00000000000000.. currentmodule:: xarray .. _complex: Complex Numbers =============== .. jupyter-execute:: :hide-code: import numpy as np import xarray as xr Xarray leverages NumPy to seamlessly handle complex numbers in :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects. In the examples below, we are using a DataArray named ``da`` with complex elements (of :math:`\mathbb{C}`): .. jupyter-execute:: data = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]]) da = xr.DataArray( data, dims=["x", "y"], coords={"x": ["a", "b"], "y": [1, 2]}, name="complex_nums", ) Operations on Complex Data -------------------------- You can access real and imaginary components using the ``.real`` and ``.imag`` attributes. Most NumPy universal functions (ufuncs) like :py:doc:`numpy.abs ` or :py:doc:`numpy.angle ` work directly. .. jupyter-execute:: da.real .. jupyter-execute:: np.abs(da) .. note:: Like NumPy, ``.real`` and ``.imag`` typically return *views*, not copies, of the original data. Reading and Writing Complex Data -------------------------------- Writing complex data to NetCDF files (see :ref:`io.netcdf`) is supported via :py:meth:`~xarray.DataArray.to_netcdf` using specific backend engines that handle complex types: .. tab:: h5netcdf This requires the `h5netcdf `_ library to be installed. .. jupyter-execute:: # write the data to disk da.to_netcdf("complex_nums_h5.nc", engine="h5netcdf") # read the file back into memory ds_h5 = xr.open_dataset("complex_nums_h5.nc", engine="h5netcdf") # check the dtype ds_h5[da.name].dtype .. tab:: netcdf4 Requires the `netcdf4-python (>= 1.7.1) `_ library and you have to enable ``auto_complex=True``. .. jupyter-execute:: # write the data to disk da.to_netcdf("complex_nums_nc4.nc", engine="netcdf4", auto_complex=True) # read the file back into memory ds_nc4 = xr.open_dataset( "complex_nums_nc4.nc", engine="netcdf4", auto_complex=True ) # check the dtype ds_nc4[da.name].dtype .. warning:: The ``scipy`` engine only supports NetCDF V3 and does *not* support complex arrays; writing with ``engine="scipy"`` raises a ``TypeError``. Alternative: Manual Handling ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If direct writing is not supported (e.g., targeting NetCDF3), you can manually split the complex array into separate real and imaginary variables before saving: .. 
jupyter-execute:: # Write data to file ds_manual = xr.Dataset( { f"{da.name}_real": da.real, f"{da.name}_imag": da.imag, } ) ds_manual.to_netcdf("complex_manual.nc", engine="scipy") # Example # Read data from file ds = xr.open_dataset("complex_manual.nc", engine="scipy") reconstructed = ds[f"{da.name}_real"] + 1j * ds[f"{da.name}_imag"] Recommendations ^^^^^^^^^^^^^^^ - Use ``engine="netcdf4"`` with ``auto_complex=True`` for full compliance and ease. - Use ``h5netcdf`` for HDF5-based storage when interoperability with HDF5 is desired. - For maximum legacy support (NetCDF3), manually handle real/imaginary components. .. jupyter-execute:: :hide-code: # Cleanup import os for f in ["complex_nums_nc4.nc", "complex_nums_h5.nc", "complex_manual.nc"]: if os.path.exists(f): os.remove(f) See also -------- - :ref:`io.netcdf` β€” full NetCDF I/O guide - `NumPy complex numbers `__ xarray-2025.09.0/doc/user-guide/computation.rst000066400000000000000000000715231505620616400212660ustar00rootroot00000000000000.. currentmodule:: xarray .. _compute: ########### Computation ########### The labels associated with :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects enables some powerful shortcuts for computation, notably including aggregation and broadcasting by dimension names. Basic array math ================ Arithmetic operations with a single DataArray automatically vectorize (like numpy) over all array values: .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) %xmode minimal .. jupyter-execute:: arr = xr.DataArray( np.random.default_rng(0).random((2, 3)), [("x", ["a", "b"]), ("y", [10, 20, 30])], ) arr - 3 .. jupyter-execute:: abs(arr) You can also use any of numpy's or scipy's many `ufunc`__ functions directly on a DataArray: __ https://numpy.org/doc/stable/reference/ufuncs.html .. jupyter-execute:: np.sin(arr) Use :py:func:`~xarray.where` to conditionally switch between values: .. jupyter-execute:: xr.where(arr > 0, "positive", "negative") Use ``@`` to compute the :py:func:`~xarray.dot` product: .. jupyter-execute:: arr @ arr Data arrays also implement many :py:class:`numpy.ndarray` methods: .. jupyter-execute:: arr.round(2) .. jupyter-execute:: arr.T .. jupyter-execute:: intarr = xr.DataArray([0, 1, 2, 3, 4, 5]) intarr << 2 # only supported for int types .. jupyter-execute:: intarr >> 1 .. _missing_values: Missing values ============== Xarray represents missing values using the "NaN" (Not a Number) value from NumPy, which is a special floating-point value that indicates a value that is undefined or unrepresentable. There are several methods for handling missing values in xarray: Xarray objects borrow the :py:meth:`~xarray.DataArray.isnull`, :py:meth:`~xarray.DataArray.notnull`, :py:meth:`~xarray.DataArray.count`, :py:meth:`~xarray.DataArray.dropna`, :py:meth:`~xarray.DataArray.fillna`, :py:meth:`~xarray.DataArray.ffill`, and :py:meth:`~xarray.DataArray.bfill` methods for working with missing data from pandas: :py:meth:`~xarray.DataArray.isnull` is a method in xarray that can be used to check for missing or null values in an xarray object. It returns a new xarray object with the same dimensions as the original object, but with boolean values indicating where **missing values** are present. .. 
jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.isnull() In this example, the third and fourth elements of 'x' are NaN, so the resulting :py:class:`~xarray.DataArray` object has 'True' values in the third and fourth positions and 'False' values in the other positions. :py:meth:`~xarray.DataArray.notnull` is a method in xarray that can be used to check for non-missing or non-null values in an xarray object. It returns a new xarray object with the same dimensions as the original object, but with boolean values indicating where **non-missing values** are present. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.notnull() In this example, the first two and the last elements of x are not NaN, so the resulting :py:class:`~xarray.DataArray` object has 'True' values in these positions, and 'False' values in the third and fourth positions where NaN is located. :py:meth:`~xarray.DataArray.count` is a method in xarray that can be used to count the number of non-missing values along one or more dimensions of an xarray object. It returns a new xarray object with the same dimensions as the original object, but with each element replaced by the count of non-missing values along the specified dimensions. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.count() In this example, 'x' has five elements, but two of them are NaN, so the resulting :py:class:`~xarray.DataArray` object having a single element containing the value '3', which represents the number of non-null elements in x. :py:meth:`~xarray.DataArray.dropna` is a method in xarray that can be used to remove missing or null values from an xarray object. It returns a new xarray object with the same dimensions as the original object, but with missing values removed. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.dropna(dim="x") In this example, on calling x.dropna(dim="x") removes any missing values and returns a new :py:class:`~xarray.DataArray` object with only the non-null elements [0, 1, 2] of 'x', in the original order. :py:meth:`~xarray.DataArray.fillna` is a method in xarray that can be used to fill missing or null values in an xarray object with a specified value or method. It returns a new xarray object with the same dimensions as the original object, but with missing values filled. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.fillna(-1) In this example, there are two NaN values in 'x', so calling x.fillna(-1) replaces these values with -1 and returns a new :py:class:`~xarray.DataArray` object with five elements, containing the values [0, 1, -1, -1, 2] in the original order. :py:meth:`~xarray.DataArray.ffill` is a method in xarray that can be used to forward fill (or fill forward) missing values in an xarray object along one or more dimensions. It returns a new xarray object with the same dimensions as the original object, but with missing values replaced by the last non-missing value along the specified dimensions. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.ffill("x") In this example, there are two NaN values in 'x', so calling x.ffill("x") fills these values with the last non-null value in the same dimension, which are 0 and 1, respectively. The resulting :py:class:`~xarray.DataArray` object has five elements, containing the values [0, 1, 1, 1, 2] in the original order. 
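``ffill`` also accepts an optional ``limit`` argument that caps how many consecutive missing values are filled within a single gap. A small sketch, reusing the same array ``x`` as above (the same keyword is available on ``bfill``, described next):

.. code-block:: python

    # With limit=1, at most one consecutive NaN per gap is forward filled,
    # so the second NaN in the gap remains missing.
    x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
    x.ffill("x", limit=1)  # values become [0, 1, 1, nan, 2]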
:py:meth:`~xarray.DataArray.bfill` is a method in xarray that can be used to backward fill (or fill backward) missing values in an xarray object along one or more dimensions. It returns a new xarray object with the same dimensions as the original object, but with missing values replaced by the next non-missing value along the specified dimensions. .. jupyter-execute:: x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"]) x.bfill("x") In this example, there are two NaN values in 'x', so calling x.bfill("x") fills these values with the next non-null value in the same dimension, which are 2 and 2, respectively. The resulting :py:class:`~xarray.DataArray` object has five elements, containing the values [0, 1, 2, 2, 2] in the original order. Like pandas, xarray uses the float value ``np.nan`` (not-a-number) to represent missing values. Xarray objects also have an :py:meth:`~xarray.DataArray.interpolate_na` method for filling missing values via 1D interpolation. It returns a new xarray object with the same dimensions as the original object, but with missing values interpolated. .. jupyter-execute:: x = xr.DataArray( [0, 1, np.nan, np.nan, 2], dims=["x"], coords={"xx": xr.Variable("x", [0, 1, 1.1, 1.9, 3])}, ) x.interpolate_na(dim="x", method="linear", use_coordinate="xx") In this example, there are two NaN values in 'x', so calling x.interpolate_na(dim="x", method="linear", use_coordinate="xx") fills these values with interpolated values along the "x" dimension using linear interpolation based on the values of the xx coordinate. The resulting :py:class:`~xarray.DataArray` object has five elements, containing the values [0., 1., 1.05, 1.45, 2.] in the original order. Note that the interpolated values are calculated based on the values of the 'xx' coordinate, which has non-integer values, resulting in non-integer interpolated values. Note that xarray slightly diverges from the pandas ``interpolate`` syntax by providing the ``use_coordinate`` keyword which facilitates a clear specification of which values to use as the index in the interpolation. Xarray also provides the ``max_gap`` keyword argument to limit the interpolation to data gaps of length ``max_gap`` or smaller. See :py:meth:`~xarray.DataArray.interpolate_na` for more. .. _agg: Aggregation =========== Aggregation methods have been updated to take a ``dim`` argument instead of ``axis``. This allows for very intuitive syntax for aggregation methods that are applied along particular dimension(s): .. jupyter-execute:: arr.sum(dim="x") .. jupyter-execute:: arr.std(["x", "y"]) .. jupyter-execute:: arr.min() If you need to figure out the axis number for a dimension yourself (say, for wrapping code designed to work with numpy arrays), you can use the :py:meth:`~xarray.DataArray.get_axis_num` method: .. jupyter-execute:: arr.get_axis_num("y") These operations automatically skip missing values, like in pandas: .. jupyter-execute:: xr.DataArray([1, 2, np.nan, 3]).mean() If desired, you can disable this behavior by invoking the aggregation method with ``skipna=False``. .. _compute.rolling: Rolling window operations ========================= ``DataArray`` objects include a :py:meth:`~xarray.DataArray.rolling` method. This method supports rolling window aggregation: .. jupyter-execute:: arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y")) arr :py:meth:`~xarray.DataArray.rolling` is applied along one dimension using the name of the dimension as a key (e.g. ``y``) and the window size as the value (e.g. ``3``). 
We get back a ``Rolling`` object: .. jupyter-execute:: arr.rolling(y=3) Aggregation and summary methods can be applied directly to the ``Rolling`` object: .. jupyter-execute:: r = arr.rolling(y=3) r.reduce(np.std) .. jupyter-execute:: r.mean() Aggregation results are assigned the coordinate at the end of each window by default, but can be centered by passing ``center=True`` when constructing the ``Rolling`` object: .. jupyter-execute:: r = arr.rolling(y=3, center=True) r.mean() As can be seen above, aggregations of windows which overlap the border of the array produce ``nan``\s. Setting ``min_periods`` in the call to ``rolling`` changes the minimum number of observations within the window required to have a value when aggregating: .. jupyter-execute:: r = arr.rolling(y=3, min_periods=2) r.mean() .. jupyter-execute:: r = arr.rolling(y=3, center=True, min_periods=2) r.mean() From version 0.17, xarray supports multidimensional rolling, .. jupyter-execute:: r = arr.rolling(x=2, y=3, min_periods=2) r.mean() .. tip:: Note that rolling window aggregations are faster and use less memory when bottleneck_ is installed. This only applies to numpy-backed xarray objects with 1d-rolling. .. _bottleneck: https://github.com/pydata/bottleneck We can also manually iterate through ``Rolling`` objects: .. code:: python for label, arr_window in r: # arr_window is a view of x ... .. _compute.rolling_exp: While ``rolling`` provides a simple moving average, ``DataArray`` also supports an exponential moving average with :py:meth:`~xarray.DataArray.rolling_exp`. This is similar to pandas' ``ewm`` method. numbagg_ is required. .. _numbagg: https://github.com/numbagg/numbagg .. code:: python arr.rolling_exp(y=3).mean() The ``rolling_exp`` method takes a ``window_type`` kwarg, which can be ``'alpha'``, ``'com'`` (for ``center-of-mass``), ``'span'``, and ``'halflife'``. The default is ``span``. Finally, the rolling object has a ``construct`` method which returns a view of the original ``DataArray`` with the windowed dimension in the last position. You can use this for more advanced rolling operations such as strided rolling, windowed rolling, convolution, short-time FFT etc. .. jupyter-execute:: # rolling with 2-point stride rolling_da = r.construct(x="x_win", y="y_win", stride=2) rolling_da .. jupyter-execute:: rolling_da.mean(["x_win", "y_win"], skipna=False) Because the ``DataArray`` given by ``r.construct('window_dim')`` is a view of the original array, it is memory efficient. You can also use ``construct`` to compute a weighted rolling sum: .. jupyter-execute:: weight = xr.DataArray([0.25, 0.5, 0.25], dims=["window"]) arr.rolling(y=3).construct(y="window").dot(weight) .. note:: numpy's Nan-aggregation functions such as ``nansum`` copy the original array. In xarray, we internally use these functions in our aggregation methods (such as ``.sum()``) if ``skipna`` argument is not specified or set to True. This means ``rolling_da.mean('window_dim')`` is memory inefficient. To avoid this, use ``skipna=False`` as the above example. .. _compute.weighted: Weighted array reductions ========================= :py:class:`DataArray` and :py:class:`Dataset` objects include :py:meth:`DataArray.weighted` and :py:meth:`Dataset.weighted` array reduction methods. They currently support weighted ``sum``, ``mean``, ``std``, ``var`` and ``quantile``. .. 
jupyter-execute:: coords = dict(month=("month", [1, 2, 3])) prec = xr.DataArray([1.1, 1.0, 0.9], dims=("month",), coords=coords) weights = xr.DataArray([31, 28, 31], dims=("month",), coords=coords) Create a weighted object: .. jupyter-execute:: weighted_prec = prec.weighted(weights) weighted_prec Calculate the weighted sum: .. jupyter-execute:: weighted_prec.sum() Calculate the weighted mean: .. jupyter-execute:: weighted_prec.mean(dim="month") Calculate the weighted quantile: .. jupyter-execute:: weighted_prec.quantile(q=0.5, dim="month") The weighted sum corresponds to: .. jupyter-execute:: weighted_sum = (prec * weights).sum() weighted_sum the weighted mean to: .. jupyter-execute:: weighted_mean = weighted_sum / weights.sum() weighted_mean the weighted variance to: .. jupyter-execute:: weighted_var = weighted_prec.sum_of_squares() / weights.sum() weighted_var and the weighted standard deviation to: .. jupyter-execute:: weighted_std = np.sqrt(weighted_var) weighted_std However, the functions also take missing values in the data into account: .. jupyter-execute:: data = xr.DataArray([np.nan, 2, 4]) weights = xr.DataArray([8, 1, 1]) data.weighted(weights).mean() Using ``(data * weights).sum() / weights.sum()`` would (incorrectly) result in 0.6. If the weights add up to to 0, ``sum`` returns 0: .. jupyter-execute:: data = xr.DataArray([1.0, 1.0]) weights = xr.DataArray([-1.0, 1.0]) data.weighted(weights).sum() and ``mean``, ``std`` and ``var`` return ``nan``: .. jupyter-execute:: data.weighted(weights).mean() .. note:: ``weights`` must be a :py:class:`DataArray` and cannot contain missing values. Missing values can be replaced manually by ``weights.fillna(0)``. .. _compute.coarsen: Coarsen large arrays ==================== :py:class:`DataArray` and :py:class:`Dataset` objects include a :py:meth:`~xarray.DataArray.coarsen` and :py:meth:`~xarray.Dataset.coarsen` methods. This supports block aggregation along multiple dimensions, .. jupyter-execute:: x = np.linspace(0, 10, 300) t = pd.date_range("1999-12-15", periods=364) da = xr.DataArray( np.sin(x) * np.cos(np.linspace(0, 1, 364)[:, np.newaxis]), dims=["time", "x"], coords={"time": t, "x": x}, ) da In order to take a block mean for every 7 days along ``time`` dimension and every 2 points along ``x`` dimension, .. jupyter-execute:: da.coarsen(time=7, x=2).mean() :py:meth:`~xarray.DataArray.coarsen` raises a ``ValueError`` if the data length is not a multiple of the corresponding window size. You can choose ``boundary='trim'`` or ``boundary='pad'`` options for trimming the excess entries or padding ``nan`` to insufficient entries, .. jupyter-execute:: da.coarsen(time=30, x=2, boundary="trim").mean() If you want to apply a specific function to coordinate, you can pass the function or method name to ``coord_func`` option, .. jupyter-execute:: da.coarsen(time=7, x=2, coord_func={"time": "min"}).mean() You can also :ref:`use coarsen to reshape` without applying a computation. .. _compute.using_coordinates: Computation using Coordinates ============================= Xarray objects have some handy methods for the computation with their coordinates. :py:meth:`~xarray.DataArray.differentiate` computes derivatives by central finite differences using their coordinates, .. jupyter-execute:: a = xr.DataArray([0, 1, 2, 3], dims=["x"], coords=[[0.1, 0.11, 0.2, 0.3]]) a.differentiate("x") This method can be used also for multidimensional arrays, .. 
jupyter-execute:: a = xr.DataArray( np.arange(8).reshape(4, 2), dims=["x", "y"], coords={"x": [0.1, 0.11, 0.2, 0.3]} ) a.differentiate("x") :py:meth:`~xarray.DataArray.integrate` computes integration based on trapezoidal rule using their coordinates, .. jupyter-execute:: a.integrate("x") .. note:: These methods are limited to simple cartesian geometry. Differentiation and integration along multidimensional coordinate are not supported. .. _compute.polyfit: Fitting polynomials =================== Xarray objects provide an interface for performing linear or polynomial regressions using the least-squares method. :py:meth:`~xarray.DataArray.polyfit` computes the best fitting coefficients along a given dimension and for a given order, .. jupyter-execute:: x = xr.DataArray(np.arange(10), dims=["x"], name="x") a = xr.DataArray(3 + 4 * x, dims=["x"], coords={"x": x}) out = a.polyfit(dim="x", deg=1, full=True) out The method outputs a dataset containing the coefficients (and more if ``full=True``). The inverse operation is done with :py:meth:`~xarray.polyval`, .. jupyter-execute:: xr.polyval(coord=x, coeffs=out.polyfit_coefficients) .. note:: These methods replicate the behaviour of :py:func:`numpy.polyfit` and :py:func:`numpy.polyval`. .. _compute.curvefit: Fitting arbitrary functions =========================== Xarray objects also provide an interface for fitting more complex functions using :py:func:`scipy.optimize.curve_fit`. :py:meth:`~xarray.DataArray.curvefit` accepts user-defined functions and can fit along multiple coordinates. For example, we can fit a relationship between two ``DataArray`` objects, maintaining a unique fit at each spatial coordinate but aggregating over the time dimension: .. jupyter-execute:: def exponential(x, a, xc): return np.exp((x - xc) / a) x = np.arange(-5, 5, 0.1) t = np.arange(-5, 5, 0.1) X, T = np.meshgrid(x, t) Z1 = np.random.uniform(low=-5, high=5, size=X.shape) Z2 = exponential(Z1, 3, X) Z3 = exponential(Z1, 1, -X) ds = xr.Dataset( data_vars=dict( var1=(["t", "x"], Z1), var2=(["t", "x"], Z2), var3=(["t", "x"], Z3) ), coords={"t": t, "x": x}, ) ds[["var2", "var3"]].curvefit( coords=ds.var1, func=exponential, reduce_dims="t", bounds={"a": (0.5, 5), "xc": (-5, 5)}, ) We can also fit multi-dimensional functions, and even use a wrapper function to simultaneously fit a summation of several functions, such as this field containing two gaussian peaks: .. jupyter-execute:: def gaussian_2d(coords, a, xc, yc, xalpha, yalpha): x, y = coords z = a * np.exp( -np.square(x - xc) / 2 / np.square(xalpha) - np.square(y - yc) / 2 / np.square(yalpha) ) return z def multi_peak(coords, *args): z = np.zeros(coords[0].shape) for i in range(len(args) // 5): z += gaussian_2d(coords, *args[i * 5 : i * 5 + 5]) return z x = np.arange(-5, 5, 0.1) y = np.arange(-5, 5, 0.1) X, Y = np.meshgrid(x, y) n_peaks = 2 names = ["a", "xc", "yc", "xalpha", "yalpha"] names = [f"{name}{i}" for i in range(n_peaks) for name in names] Z = gaussian_2d((X, Y), 3, 1, 1, 2, 1) + gaussian_2d((X, Y), 2, -1, -2, 1, 1) Z += np.random.normal(scale=0.1, size=Z.shape) da = xr.DataArray(Z, dims=["y", "x"], coords={"y": y, "x": x}) da.curvefit( coords=["x", "y"], func=multi_peak, param_names=names, kwargs={"maxfev": 10000}, ) .. note:: This method replicates the behavior of :py:func:`scipy.optimize.curve_fit`. .. 
_compute.broadcasting: Broadcasting by dimension name ============================== ``DataArray`` objects automatically align themselves ("broadcasting" in the numpy parlance) by dimension name instead of axis order. With xarray, you do not need to transpose arrays or insert dimensions of length 1 to get array operations to work, as commonly done in numpy with :py:func:`numpy.reshape` or :py:data:`numpy.newaxis`. This is best illustrated by a few examples. Consider two one-dimensional arrays with different sizes aligned along different dimensions: .. jupyter-execute:: a = xr.DataArray([1, 2], [("x", ["a", "b"])]) a .. jupyter-execute:: b = xr.DataArray([-1, -2, -3], [("y", [10, 20, 30])]) b With xarray, we can apply binary mathematical operations to these arrays, and their dimensions are expanded automatically: .. jupyter-execute:: a * b Moreover, dimensions are always reordered to the order in which they first appeared: .. jupyter-execute:: c = xr.DataArray(np.arange(6).reshape(3, 2), [b["y"], a["x"]]) c .. jupyter-execute:: a + c This means, for example, that you always subtract an array from its transpose: .. jupyter-execute:: c - c.T You can explicitly broadcast xarray data structures by using the :py:func:`~xarray.broadcast` function: .. jupyter-execute:: a2, b2 = xr.broadcast(a, b) a2 .. jupyter-execute:: b2 .. _math automatic alignment: Automatic alignment =================== Xarray enforces alignment between *index* :ref:`coordinates` (that is, coordinates with the same name as a dimension, marked by ``*``) on objects used in binary operations. Similarly to pandas, this alignment is automatic for arithmetic on binary operations. The default result of a binary operation is by the *intersection* (not the union) of coordinate labels: .. jupyter-execute:: arr = xr.DataArray(np.arange(3), [("x", range(3))]) arr + arr[:-1] If coordinate values for a dimension are missing on either argument, all matching dimensions must have the same size: .. jupyter-execute:: :raises: arr + xr.DataArray([1, 2], dims="x") However, one can explicitly change this default automatic alignment type ("inner") via :py:func:`~xarray.set_options()` in context manager: .. jupyter-execute:: with xr.set_options(arithmetic_join="outer"): arr + arr[:1] arr + arr[:1] Before loops or performance critical code, it's a good idea to align arrays explicitly (e.g., by putting them in the same Dataset or using :py:func:`~xarray.align`) to avoid the overhead of repeated alignment with each operation. See :ref:`align and reindex` for more details. .. note:: There is no automatic alignment between arguments when performing in-place arithmetic operations such as ``+=``. You will need to use :ref:`manual alignment`. This ensures in-place arithmetic never needs to modify data types. .. _coordinates math: Coordinates =========== Although index coordinates are aligned, other coordinates are not, and if their values conflict, they will be dropped. This is necessary, for example, because indexing turns 1D coordinates into scalar coordinates: .. jupyter-execute:: arr[0] .. jupyter-execute:: arr[1] .. jupyter-execute:: # notice that the scalar coordinate 'x' is silently dropped arr[1] - arr[0] Still, xarray will persist other coordinates in arithmetic, as long as there are no conflicting values: .. jupyter-execute:: # only one argument has the 'x' coordinate arr[0] + 1 .. 
jupyter-execute:: # both arguments have the same 'x' coordinate arr[0] - arr[0] Math with datasets ================== Datasets support arithmetic operations by automatically looping over all data variables: .. jupyter-execute:: ds = xr.Dataset( { "x_and_y": (("x", "y"), np.random.randn(3, 5)), "x_only": ("x", np.random.randn(3)), }, coords=arr.coords, ) ds > 0 Datasets support most of the same methods found on data arrays: .. jupyter-execute:: ds.mean(dim="x") .. jupyter-execute:: abs(ds) Datasets also support NumPy ufuncs (requires NumPy v1.13 or newer), or alternatively you can use :py:meth:`~xarray.Dataset.map` to map a function to each variable in a dataset: .. jupyter-execute:: np.sin(ds) # equivalent to ds.map(np.sin) Datasets also use looping over variables for *broadcasting* in binary arithmetic. You can do arithmetic between any ``DataArray`` and a dataset: .. jupyter-execute:: ds + arr Arithmetic between two datasets matches data variables of the same name: .. jupyter-execute:: ds2 = xr.Dataset({"x_and_y": 0, "x_only": 100}) ds - ds2 Similarly to index based alignment, the result has the intersection of all matching data variables. .. _compute.wrapping-custom: Wrapping custom computation =========================== It doesn't always make sense to do computation directly with xarray objects: - In the inner loop of performance limited code, using xarray can add considerable overhead compared to using NumPy or native Python types. This is particularly true when working with scalars or small arrays (less than ~1e6 elements). Keeping track of labels and ensuring their consistency adds overhead, and xarray's core itself is not especially fast, because it's written in Python rather than a compiled language like C. Also, xarray's high level label-based APIs removes low-level control over how operations are implemented. - Even if speed doesn't matter, it can be important to wrap existing code, or to support alternative interfaces that don't use xarray objects. For these reasons, it is often well-advised to write low-level routines that work with NumPy arrays, and to wrap these routines to work with xarray objects. However, adding support for labels on both :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` can be a bit of a chore. To make this easier, xarray supplies the :py:func:`~xarray.apply_ufunc` helper function, designed for wrapping functions that support broadcasting and vectorization on unlabeled arrays in the style of a NumPy `universal function `_ ("ufunc" for short). ``apply_ufunc`` takes care of everything needed for an idiomatic xarray wrapper, including alignment, broadcasting, looping over ``Dataset`` variables (if needed), and merging of coordinates. In fact, many internal xarray functions/methods are written using ``apply_ufunc``. Simple functions that act independently on each value should work without any additional arguments: .. jupyter-execute:: squared_error = lambda x, y: (x - y) ** 2 arr1 = xr.DataArray([0, 1, 2, 3], dims="x") xr.apply_ufunc(squared_error, arr1, 1) For using more complex operations that consider some array values collectively, it's important to understand the idea of "core dimensions" from NumPy's `generalized ufuncs `_. Core dimensions are defined as dimensions that should *not* be broadcast over. Usually, they correspond to the fundamental dimensions over which an operation is defined, e.g., the summed axis in ``np.sum``. 
A good clue that core dimensions are needed is the presence of an ``axis`` argument on the corresponding NumPy function. With ``apply_ufunc``, core dimensions are recognized by name, and then moved to the last dimension of any input arguments before applying the given function. This means that for functions that accept an ``axis`` argument, you usually need to set ``axis=-1``. As an example, here is how we would wrap :py:func:`numpy.linalg.norm` to calculate the vector norm: .. code-block:: python def vector_norm(x, dim, ord=None): return xr.apply_ufunc( np.linalg.norm, x, input_core_dims=[[dim]], kwargs={"ord": ord, "axis": -1} ) .. jupyter-execute:: :hide-code: def vector_norm(x, dim, ord=None): return xr.apply_ufunc( np.linalg.norm, x, input_core_dims=[[dim]], kwargs={"ord": ord, "axis": -1} ) .. jupyter-execute:: vector_norm(arr1, dim="x") Because ``apply_ufunc`` follows a standard convention for ufuncs, it plays nicely with tools for building vectorized functions, like :py:func:`numpy.broadcast_arrays` and :py:class:`numpy.vectorize`. For high performance needs, consider using :doc:`Numba's vectorize and guvectorize `. In addition to wrapping functions, ``apply_ufunc`` can automatically parallelize many functions when using dask by setting ``dask='parallelized'``. See :ref:`dask.automatic-parallelization` for details. :py:func:`~xarray.apply_ufunc` also supports some advanced options for controlling alignment of variables and the form of the result. See the docstring for full details and more examples. xarray-2025.09.0/doc/user-guide/dask.rst000066400000000000000000000553011505620616400176420ustar00rootroot00000000000000.. currentmodule:: xarray .. _dask: Parallel Computing with Dask ============================ .. jupyter-execute:: # Note that it's not necessary to import dask to use xarray with dask. import numpy as np import pandas as pd import xarray as xr import bottleneck .. jupyter-execute:: :hide-code: import os np.random.seed(123456) # limit the amount of information printed to screen xr.set_options(display_expand_data=False) np.set_printoptions(precision=3, linewidth=100, threshold=10, edgeitems=2) ds = xr.Dataset( { "temperature": ( ("time", "latitude", "longitude"), np.random.randn(30, 180, 180), ), "time": pd.date_range("2015-01-01", periods=30), "longitude": np.arange(180), "latitude": np.arange(89.5, -90.5, -1), } ) ds.to_netcdf("example-data.nc") Xarray integrates with `Dask `__, a general purpose library for parallel computing, to handle larger-than-memory computations. If you’ve been using Xarray to read in large datasets or split up data across a number of files, you may already be using Dask: .. code-block:: python ds = xr.open_zarr("/path/to/data.zarr") timeseries = ds["temp"].mean(dim=["x", "y"]).compute() # Compute result Using Dask with Xarray feels similar to working with NumPy arrays, but on much larger datasets. The Dask integration is transparent, so you usually don’t need to manage the parallelism directly; Xarray and Dask handle these aspects behind the scenes. This makes it easy to write code that scales from small, in-memory datasets on a single machine to large datasets that are distributed across a cluster, with minimal code changes. Examples -------- If you're new to using Xarray with Dask, we recommend the `Xarray + Dask Tutorial `_. 
Here are some examples for using Xarray with Dask at scale: - `Zonal averaging with the NOAA National Water Model `_ - `CMIP6 Precipitation Frequency Analysis `_ - `Using Dask + Cloud Optimized GeoTIFFs `_ Find more examples at the `Project Pythia cookbook gallery `_. Using Dask with Xarray ---------------------- .. image:: ../_static/dask-array.svg :width: 50 % :align: right :alt: A Dask array Dask divides arrays into smaller parts called chunks. These chunks are small, manageable pieces of the larger dataset, that Dask is able to process in parallel (see the `Dask Array docs on chunks `_). Commonly chunks are set when reading data, but you can also set the chunksize manually at any point in your workflow using :py:meth:`Dataset.chunk` and :py:meth:`DataArray.chunk`. See :ref:`dask.chunks` for more. Xarray operations on Dask-backed arrays are lazy. This means computations are not executed immediately, but are instead queued up as tasks in a Dask graph. When a result is requested (e.g., for plotting, writing to disk, or explicitly computing), Dask executes the task graph. The computations are carried out in parallel, with each chunk being processed independently. This parallel execution is key to handling large datasets efficiently. Nearly all Xarray methods have been extended to work automatically with Dask Arrays. This includes things like indexing, concatenating, rechunking, grouped operations, etc. Common operations are covered in more detail in each of the sections below. .. _dask.io: Reading and writing data ~~~~~~~~~~~~~~~~~~~~~~~~ When reading data, Dask divides your dataset into smaller chunks. You can specify the size of chunks with the ``chunks`` argument. Specifying ``chunks="auto"`` will set the dask chunk sizes to be a multiple of the on-disk chunk sizes. This can be a good idea, but usually the appropriate dask chunk size will depend on your workflow. .. tab:: Zarr The `Zarr `_ format is ideal for working with large datasets. Each chunk is stored in a separate file, allowing parallel reading and writing with Dask. You can also use Zarr to read/write directly from cloud storage buckets (see the `Dask documentation on connecting to remote data `__) When you open a Zarr dataset with :py:func:`~xarray.open_zarr`, it is loaded as a Dask array by default (if Dask is installed):: ds = xr.open_zarr("path/to/directory.zarr") See :ref:`io.zarr` for more details. .. tab:: NetCDF Open a single netCDF file with :py:func:`~xarray.open_dataset` and supplying a ``chunks`` argument:: ds = xr.open_dataset("example-data.nc", chunks={"time": 10}) Or open multiple files in parallel with py:func:`~xarray.open_mfdataset`:: xr.open_mfdataset('my/files/*.nc', parallel=True) .. tip:: When reading in many netCDF files with py:func:`~xarray.open_mfdataset`, using ``engine="h5netcdf"`` can be faster than the default which uses the netCDF4 package. Save larger-than-memory netCDF files:: ds.to_netcdf("my-big-file.nc") Or set ``compute=False`` to return a dask.delayed object that can be computed later:: delayed_write = ds.to_netcdf("my-big-file.nc", compute=False) delayed_write.compute() .. note:: When using Dask’s distributed scheduler to write NETCDF4 files, it may be necessary to set the environment variable ``HDF5_USE_FILE_LOCKING=FALSE`` to avoid competing locks within the HDF5 SWMR file locking scheme. Note that writing netCDF files with Dask’s distributed scheduler is only supported for the netcdf4 backend. See :ref:`io.netcdf` for more details. .. 
tab:: HDF5 Open HDF5 files with :py:func:`~xarray.open_dataset`:: xr.open_dataset("/path/to/my/file.h5", chunks='auto') See :ref:`io.hdf5` for more details. .. tab:: GeoTIFF Open large geoTIFF files with rioxarray:: xds = rioxarray.open_rasterio("my-satellite-image.tif", chunks='auto') See :ref:`io.rasterio` for more details. Loading Dask Arrays ~~~~~~~~~~~~~~~~~~~ There are a few common cases where you may want to convert lazy Dask arrays into eager, in-memory Xarray data structures: - You want to inspect smaller intermediate results when working interactively or debugging - You've reduced the dataset (by filtering or with a groupby, for example) and now have something much smaller that fits in memory - You need to compute intermediate results since Dask is unable (or struggles) to perform a certain computation. The canonical example of this is normalizing a dataset, e.g., ``ds - ds.mean()``, when ``ds`` is larger than memory. Typically, you should either save ``ds`` to disk or compute ``ds.mean()`` eagerly. To do this, you can use :py:meth:`Dataset.compute` or :py:meth:`DataArray.compute`: .. jupyter-execute:: ds.compute() .. note:: Using :py:meth:`Dataset.compute` is preferred to :py:meth:`Dataset.load`, which changes the results in-place. You can also access :py:attr:`DataArray.values`, which will always be a NumPy array: .. jupyter-input:: ds.temperature.values .. jupyter-output:: array([[[ 4.691e-01, -2.829e-01, ..., -5.577e-01, 3.814e-01], [ 1.337e+00, -1.531e+00, ..., 8.726e-01, -1.538e+00], ... # truncated for brevity NumPy ufuncs like :py:func:`numpy.sin` transparently work on all xarray objects, including those that store lazy Dask arrays: .. jupyter-execute:: np.sin(ds) To access Dask arrays directly, use the :py:attr:`DataArray.data` attribute which exposes the DataArray's underlying array type. If you're using a Dask cluster, you can also use :py:meth:`Dataset.persist` for quickly accessing intermediate outputs. This is most helpful after expensive operations like rechunking or setting an index. It's a way of telling the cluster that it should start executing the computations that you have defined so far, and that it should try to keep those results in memory. You will get back a new Dask array that is semantically equivalent to your old array, but now points to running data. .. code-block:: python ds = ds.persist() .. tip:: Remember to save the dataset returned by persist! This is a common mistake. .. _dask.chunks: Chunking and performance ~~~~~~~~~~~~~~~~~~~~~~~~ The way a dataset is chunked can be critical to performance when working with large datasets. You'll want chunk sizes large enough to reduce the number of chunks that Dask has to think about (to reduce overhead from the task graph) but also small enough so that many of them can fit in memory at once. .. tip:: A good rule of thumb is to create arrays with a minimum chunk size of at least one million elements (e.g., a 1000x1000 matrix). With large arrays (10+ GB), you may need larger chunks. See `Choosing good chunk sizes in Dask `_. It can be helpful to choose chunk sizes based on your downstream analyses and to chunk as early as possible. Datasets with smaller chunks along the time axis, for example, can make time domain problems easier to parallelize since Dask can perform the same operation on each time chunk. If you're working with a large dataset with chunks that make downstream analyses challenging, you may need to rechunk your data. This is an expensive operation though, so is only recommended when needed. 
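As a rough sketch of what inspecting and adjusting chunks can look like in practice (the file name and chunk sizes here are purely illustrative; the available approaches are laid out in the list below):

.. code-block:: python

    # Illustrative only: read with small chunks along time, inspect them,
    # then rechunk for a spatially oriented analysis.
    ds = xr.open_dataset("example-data.nc", chunks={"time": 10})
    ds.chunksizes  # mapping from dimension name to chunk sizes

    # -1 means "one chunk spanning the whole dimension"
    ds_spatial = ds.chunk({"time": -1, "latitude": 45, "longitude": 45})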
You can chunk or rechunk a dataset by: - Specifying the ``chunks`` kwarg when reading in your dataset. If you know you'll want to do some spatial subsetting, for example, you could use ``chunks={'latitude': 10, 'longitude': 10}`` to specify small chunks across space. This can avoid loading subsets of data that span multiple chunks, thus reducing the number of file reads. Note that this will only work, though, for chunks that are similar to how the data is chunked on disk. Otherwise, it will be very slow and require a lot of network bandwidth. - Many array file formats are chunked on disk. You can specify ``chunks={}`` to have a single dask chunk map to a single on-disk chunk, and ``chunks="auto"`` to have a single dask chunk be a automatically chosen multiple of the on-disk chunks. - Using :py:meth:`Dataset.chunk` after you've already read in your dataset. For time domain problems, for example, you can use ``ds.chunk(time=TimeResampler())`` to rechunk according to a specified unit of time. ``ds.chunk(time=TimeResampler("MS"))``, for example, will set the chunks so that a month of data is contained in one chunk. For large-scale rechunking tasks (e.g., converting a simulation dataset stored with chunking only along time to a dataset with chunking only across space), consider writing another copy of your data on disk and/or using dedicated tools such as `Rechunker `_. .. _dask.automatic-parallelization: Parallelize custom functions with ``apply_ufunc`` and ``map_blocks`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Almost all of Xarray's built-in operations work on Dask arrays. If you want to use a function that isn't wrapped by Xarray, and have it applied in parallel on each block of your xarray object, you have three options: 1. Use :py:func:`~xarray.apply_ufunc` to apply functions that consume and return NumPy arrays. 2. Use :py:func:`~xarray.map_blocks`, :py:meth:`Dataset.map_blocks` or :py:meth:`DataArray.map_blocks` to apply functions that consume and return xarray objects. 3. Extract Dask Arrays from xarray objects with :py:attr:`DataArray.data` and use Dask directly. .. tip:: See the extensive Xarray tutorial on `apply_ufunc `_. ``apply_ufunc`` ############### :py:func:`~xarray.apply_ufunc` automates `embarrassingly parallel `__ "map" type operations where a function written for processing NumPy arrays should be repeatedly applied to Xarray objects containing Dask Arrays. It works similarly to :py:func:`dask.array.map_blocks` and :py:func:`dask.array.blockwise`, but without requiring an intermediate layer of abstraction. See the `Dask documentation `__ for more details. For the best performance when using Dask's multi-threaded scheduler, wrap a function that already releases the global interpreter lock, which fortunately already includes most NumPy and Scipy functions. Here we show an example using NumPy operations and a fast function from `bottleneck `__, which we use to calculate `Spearman's rank-correlation coefficient `__: .. 
code-block:: python def covariance_gufunc(x, y): return ( (x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True)) ).mean(axis=-1) def pearson_correlation_gufunc(x, y): return covariance_gufunc(x, y) / (x.std(axis=-1) * y.std(axis=-1)) def spearman_correlation_gufunc(x, y): x_ranks = bottleneck.rankdata(x, axis=-1) y_ranks = bottleneck.rankdata(y, axis=-1) return pearson_correlation_gufunc(x_ranks, y_ranks) def spearman_correlation(x, y, dim): return xr.apply_ufunc( spearman_correlation_gufunc, x, y, input_core_dims=[[dim], [dim]], dask="parallelized", output_dtypes=[float], ) The only aspect of this example that is different from standard usage of ``apply_ufunc()`` is that we needed to supply the ``output_dtypes`` arguments. (Read up on :ref:`compute.wrapping-custom` for an explanation of the "core dimensions" listed in ``input_core_dims``.) Our new ``spearman_correlation()`` function achieves near linear speedup when run on large arrays across the four cores on my laptop. It would also work as a streaming operation, when run on arrays loaded from disk: .. jupyter-input:: rs = np.random.default_rng(0) array1 = xr.DataArray(rs.randn(1000, 100000), dims=["place", "time"]) # 800MB array2 = array1 + 0.5 * rs.randn(1000, 100000) # using one core, on NumPy arrays %time _ = spearman_correlation(array1, array2, 'time') # CPU times: user 21.6 s, sys: 2.84 s, total: 24.5 s # Wall time: 24.9 s chunked1 = array1.chunk({"place": 10}) chunked2 = array2.chunk({"place": 10}) # using all my laptop's cores, with Dask r = spearman_correlation(chunked1, chunked2, "time").compute() %time _ = r.compute() # CPU times: user 30.9 s, sys: 1.74 s, total: 32.6 s # Wall time: 4.59 s One limitation of ``apply_ufunc()`` is that it cannot be applied to arrays with multiple chunks along a core dimension: .. jupyter-input:: spearman_correlation(chunked1, chunked2, "place") .. jupyter-output:: ValueError: dimension 'place' on 0th function argument to apply_ufunc with dask='parallelized' consists of multiple chunks, but is also a core dimension. To fix, rechunk into a single Dask array chunk along this dimension, i.e., ``.rechunk({'place': -1})``, but beware that this may significantly increase memory usage. This reflects the nature of core dimensions, in contrast to broadcast (non-core) dimensions that allow operations to be split into arbitrary chunks for application. .. tip:: When possible, it's recommended to use pre-existing ``dask.array`` functions, either with existing xarray methods or :py:func:`~xarray.apply_ufunc()` with ``dask='allowed'``. Dask can often have a more efficient implementation that makes use of the specialized structure of a problem, unlike the generic speedups offered by ``dask='parallelized'``. ``map_blocks`` ############## Functions that consume and return Xarray objects can be easily applied in parallel using :py:func:`map_blocks`. Your function will receive an Xarray Dataset or DataArray subset to one chunk along each chunked dimension. .. jupyter-execute:: ds.temperature This DataArray has 3 chunks each with length 10 along the time dimension. At compute time, a function applied with :py:func:`map_blocks` will receive a DataArray corresponding to a single block of shape 10x180x180 (time x latitude x longitude) with values loaded. The following snippet illustrates how to check the shape of the object received by the applied function. .. 
jupyter-execute:: def func(da): print(da.sizes) return da.time mapped = xr.map_blocks(func, ds.temperature) mapped Notice that the :py:meth:`map_blocks` call printed ``Frozen({'time': 0, 'latitude': 0, 'longitude': 0})`` to screen. ``func`` is received 0-sized blocks! :py:meth:`map_blocks` needs to know what the final result looks like in terms of dimensions, shapes etc. It does so by running the provided function on 0-shaped inputs (*automated inference*). This works in many cases, but not all. If automatic inference does not work for your function, provide the ``template`` kwarg (see :ref:`below `). In this case, automatic inference has worked so let's check that the result is as expected. .. jupyter-execute:: mapped.load(scheduler="single-threaded") mapped.identical(ds.time) Note that we use ``.load(scheduler="single-threaded")`` to execute the computation. This executes the Dask graph in serial using a for loop, but allows for printing to screen and other debugging techniques. We can easily see that our function is receiving blocks of shape 10x180x180 and the returned result is identical to ``ds.time`` as expected. Here is a common example where automated inference will not work. .. jupyter-execute:: :raises: def func(da): print(da.sizes) return da.isel(time=[1]) mapped = xr.map_blocks(func, ds.temperature) ``func`` cannot be run on 0-shaped inputs because it is not possible to extract element 1 along a dimension of size 0. In this case we need to tell :py:func:`map_blocks` what the returned result looks like using the ``template`` kwarg. ``template`` must be an xarray Dataset or DataArray (depending on what the function returns) with dimensions, shapes, chunk sizes, attributes, coordinate variables *and* data variables that look exactly like the expected result. The variables should be dask-backed and hence not incur much memory cost. .. _template-note: .. note:: Note that when ``template`` is provided, ``attrs`` from ``template`` are copied over to the result. Any ``attrs`` set in ``func`` will be ignored. .. jupyter-execute:: template = ds.temperature.isel(time=[1, 11, 21]) mapped = xr.map_blocks(func, ds.temperature, template=template) Notice that the 0-shaped sizes were not printed to screen. Since ``template`` has been provided :py:func:`map_blocks` does not need to infer it by running ``func`` on 0-shaped inputs. .. jupyter-execute:: mapped.identical(template) :py:func:`map_blocks` also allows passing ``args`` and ``kwargs`` down to the user function ``func``. ``func`` will be executed as ``func(block_xarray, *args, **kwargs)`` so ``args`` must be a list and ``kwargs`` must be a dictionary. .. jupyter-execute:: def func(obj, a, b=0): return obj + a + b mapped = ds.map_blocks(func, args=[10], kwargs={"b": 10}) expected = ds + 10 + 10 mapped.identical(expected) .. jupyter-execute:: :hide-code: ds.close() # Closes "example-data.nc". os.remove("example-data.nc") .. tip:: As :py:func:`map_blocks` loads each block into memory, reduce as much as possible objects consumed by user functions. For example, drop useless variables before calling ``func`` with :py:func:`map_blocks`. Deploying Dask -------------- By default, Dask uses the multi-threaded scheduler, which distributes work across multiple cores on a single machine and allows for processing some datasets that do not fit into memory. 
However, this has two limitations: - You are limited by the size of your hard drive - Downloading data can be slow and expensive Instead, it can be faster and cheaper to run your computations close to where your data is stored, distributed across many machines on a Dask cluster. Often, this means deploying Dask on HPC clusters or on the cloud. See the `Dask deployment documentation `__ for more details. Best Practices -------------- Dask is pretty easy to use but there are some gotchas, many of which are under active development. Here are some tips we have found through experience. We also recommend checking out the `Dask best practices `_. 1. Do your spatial and temporal indexing (e.g. ``.sel()`` or ``.isel()``) early, especially before calling ``resample()`` or ``groupby()``. Grouping and resampling triggers some computation on all the blocks, which in theory should commute with indexing, but this optimization hasn't been implemented in Dask yet. (See `Dask issue #746 `_). 2. More generally, ``groupby()`` is a costly operation and will perform a lot better if the ``flox`` package is installed. See the `flox documentation `_ for more. By default Xarray will use ``flox`` if installed. 3. Save intermediate results to disk as a netCDF files (using ``to_netcdf()``) and then load them again with ``open_dataset()`` for further computations. For example, if subtracting temporal mean from a dataset, save the temporal mean to disk before subtracting. Again, in theory, Dask should be able to do the computation in a streaming fashion, but in practice this is a fail case for the Dask scheduler, because it tries to keep every chunk of an array that it computes in memory. (See `Dask issue #874 `_) 4. Use the `Dask dashboard `_ to identify performance bottlenecks. Here's an example of a simplified workflow putting some of these tips together: .. code-block:: python ds = xr.open_zarr( # Since we're doing a spatial reduction, increase chunk size in x, y "my-data.zarr", chunks={"x": 100, "y": 100} ) time_subset = ds.sea_temperature.sel( time=slice("2020-01-01", "2020-12-31") # Filter early ) # faster resampling when flox is installed daily = ds.resample(time="D").mean() daily.load() # Pull smaller results into memory after reducing the dataset xarray-2025.09.0/doc/user-guide/data-structures.rst000066400000000000000000001040631505620616400220520ustar00rootroot00000000000000.. _data structures: Data Structures =============== .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr import matplotlib.pyplot as plt np.random.seed(123456) np.set_printoptions(threshold=10) %xmode minimal DataArray --------- :py:class:`xarray.DataArray` is xarray's implementation of a labeled, multi-dimensional array. It has several key properties: - ``values``: a :py:class:`numpy.ndarray` or :ref:`numpy-like array ` holding the array's values - ``dims``: dimension names for each axis (e.g., ``('x', 'y', 'z')``) - ``coords``: a dict-like container of arrays (*coordinates*) that label each point (e.g., 1-dimensional arrays of numbers, datetime objects or strings) - ``attrs``: :py:class:`dict` to hold arbitrary metadata (*attributes*) Xarray uses ``dims`` and ``coords`` to enable its core metadata aware operations. Dimensions provide names that xarray uses instead of the ``axis`` argument found in many numpy functions. 
Coordinates enable fast label based indexing and alignment, building on the functionality of the ``index`` found on a pandas :py:class:`~pandas.DataFrame` or :py:class:`~pandas.Series`. DataArray objects also can have a ``name`` and can hold arbitrary metadata in the form of their ``attrs`` property. Names and attributes are strictly for users and user-written code: xarray makes no attempt to interpret them, and propagates them only in unambiguous cases. For reading and writing attributes xarray relies on the capabilities of the supported backends. (see FAQ, :ref:`approach to metadata`). .. _creating a dataarray: Creating a DataArray ~~~~~~~~~~~~~~~~~~~~ The :py:class:`~xarray.DataArray` constructor takes: - ``data``: a multi-dimensional array of values (e.g., a numpy ndarray, a :ref:`numpy-like array `, :py:class:`~pandas.Series`, :py:class:`~pandas.DataFrame` or ``pandas.Panel``) - ``coords``: a list or dictionary of coordinates. If a list, it should be a list of tuples where the first element is the dimension name and the second element is the corresponding coordinate array_like object. - ``dims``: a list of dimension names. If omitted and ``coords`` is a list of tuples, dimension names are taken from ``coords``. - ``attrs``: a dictionary of attributes to add to the instance - ``name``: a string that names the instance .. jupyter-execute:: data = np.random.rand(4, 3) locs = ["IA", "IL", "IN"] times = pd.date_range("2000-01-01", periods=4) foo = xr.DataArray(data, coords=[times, locs], dims=["time", "space"]) foo Only ``data`` is required; all of other arguments will be filled in with default values: .. jupyter-execute:: xr.DataArray(data) As you can see, dimension names are always present in the xarray data model: if you do not provide them, defaults of the form ``dim_N`` will be created. However, coordinates are always optional, and dimensions do not have automatic coordinate labels. .. note:: This is different from pandas, where axes always have tick labels, which default to the integers ``[0, ..., n-1]``. Prior to xarray v0.9, xarray copied this behavior: default coordinates for each dimension would be created if coordinates were not supplied explicitly. This is no longer the case. Coordinates can be specified in the following ways: - A list of values with length equal to the number of dimensions, providing coordinate labels for each dimension. Each value must be of one of the following forms: * A :py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable` * A tuple of the form ``(dims, data[, attrs])``, which is converted into arguments for :py:class:`~xarray.Variable` * A pandas object or scalar value, which is converted into a ``DataArray`` * A 1D array or list, which is interpreted as values for a one dimensional coordinate variable along the same dimension as its name - A dictionary of ``{coord_name: coord}`` where values are of the same form as the list. Supplying coordinates as a dictionary allows other coordinates than those corresponding to dimensions (more on these later). If you supply ``coords`` as a dictionary, you must explicitly provide ``dims``. As a list of tuples: .. jupyter-execute:: xr.DataArray(data, coords=[("time", times), ("space", locs)]) As a dictionary: .. jupyter-execute:: xr.DataArray( data, coords={ "time": times, "space": locs, "const": 42, "ranking": ("space", [1, 2, 3]), }, dims=["time", "space"], ) As a dictionary with coords across multiple dimensions: .. 
jupyter-execute:: xr.DataArray( data, coords={ "time": times, "space": locs, "const": 42, "ranking": (("time", "space"), np.arange(12).reshape(4, 3)), }, dims=["time", "space"], ) If you create a ``DataArray`` by supplying a pandas :py:class:`~pandas.Series`, :py:class:`~pandas.DataFrame` or ``pandas.Panel``, any non-specified arguments in the ``DataArray`` constructor will be filled in from the pandas object: .. jupyter-execute:: df = pd.DataFrame({"x": [0, 1], "y": [2, 3]}, index=["a", "b"]) df.index.name = "abc" df.columns.name = "xyz" df .. jupyter-execute:: xr.DataArray(df) DataArray properties ~~~~~~~~~~~~~~~~~~~~ Let's take a look at the important properties on our array: .. jupyter-execute:: foo.values .. jupyter-execute:: foo.dims .. jupyter-execute:: foo.coords .. jupyter-execute:: foo.attrs .. jupyter-execute:: print(foo.name) You can modify ``values`` inplace: .. jupyter-execute:: foo.values = 1.0 * foo.values .. note:: The array values in a :py:class:`~xarray.DataArray` have a single (homogeneous) data type. To work with heterogeneous or structured data types in xarray, use coordinates, or put separate ``DataArray`` objects in a single :py:class:`~xarray.Dataset` (see below). Now fill in some of that missing metadata: .. jupyter-execute:: foo.name = "foo" foo.attrs["units"] = "meters" foo The :py:meth:`~xarray.DataArray.rename` method is another option, returning a new data array: .. jupyter-execute:: foo.rename("bar") DataArray Coordinates ~~~~~~~~~~~~~~~~~~~~~ The ``coords`` property is ``dict`` like. Individual coordinates can be accessed from the coordinates by name, or even by indexing the data array itself: .. jupyter-execute:: foo.coords["time"] .. jupyter-execute:: foo["time"] These are also :py:class:`~xarray.DataArray` objects, which contain tick-labels for each dimension. Coordinates can also be set or removed by using the dictionary like syntax: .. jupyter-execute:: foo["ranking"] = ("space", [1, 2, 3]) foo.coords .. jupyter-execute:: del foo["ranking"] foo.coords For more details, see :ref:`coordinates` below. Dataset ------- :py:class:`xarray.Dataset` is xarray's multi-dimensional equivalent of a :py:class:`~pandas.DataFrame`. It is a dict-like container of labeled arrays (:py:class:`~xarray.DataArray` objects) with aligned dimensions. It is designed as an in-memory representation of the data model from the `netCDF`__ file format. __ https://www.unidata.ucar.edu/software/netcdf/ In addition to the dict-like interface of the dataset itself, which can be used to access any variable in a dataset, datasets have four key properties: - ``dims``: a dictionary mapping from dimension names to the fixed length of each dimension (e.g., ``{'x': 6, 'y': 6, 'time': 8}``) - ``data_vars``: a dict-like container of DataArrays corresponding to variables - ``coords``: another dict-like container of DataArrays intended to label points used in ``data_vars`` (e.g., arrays of numbers, datetime objects or strings) - ``attrs``: :py:class:`dict` to hold arbitrary metadata The distinction between whether a variable falls in data or coordinates (borrowed from `CF conventions`_) is mostly semantic, and you can probably get away with ignoring it if you like: dictionary like access on a dataset will supply variables found in either category. However, xarray does make use of the distinction for indexing and computations. Coordinates indicate constant/fixed/independent quantities, unlike the varying/measured/dependent quantities that belong in data. .. 
_CF conventions: https://cfconventions.org/ Here is an example of how we might structure a dataset for a weather forecast: .. image:: ../_static/dataset-diagram.png In this example, it would be natural to call ``temperature`` and ``precipitation`` "data variables" and all the other arrays "coordinate variables" because they label the points along the dimensions. (see [1]_ for more background on this example). Creating a Dataset ~~~~~~~~~~~~~~~~~~ To make an :py:class:`~xarray.Dataset` from scratch, supply dictionaries for any variables (``data_vars``), coordinates (``coords``) and attributes (``attrs``). - ``data_vars`` should be a dictionary with each key as the name of the variable and each value as one of: * A :py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable` * A tuple of the form ``(dims, data[, attrs])``, which is converted into arguments for :py:class:`~xarray.Variable` * A pandas object, which is converted into a ``DataArray`` * A 1D array or list, which is interpreted as values for a one dimensional coordinate variable along the same dimension as its name - ``coords`` should be a dictionary of the same form as ``data_vars``. - ``attrs`` should be a dictionary. Let's create some fake data for the example we show above. In this example dataset, we will represent measurements of the temperature and pressure that were made under various conditions: * the measurements were made on four different days; * they were made at two separate locations, which we will represent using their latitude and longitude; and * they were made using instruments by three different manufacturers, which we will refer to as ``'manufac1'``, ``'manufac2'``, and ``'manufac3'``. .. jupyter-execute:: np.random.seed(0) temperature = 15 + 8 * np.random.randn(2, 3, 4) precipitation = 10 * np.random.rand(2, 3, 4) lon = [-99.83, -99.32] lat = [42.25, 42.21] instruments = ["manufac1", "manufac2", "manufac3"] time = pd.date_range("2014-09-06", periods=4) reference_time = pd.Timestamp("2014-09-05") # for real use cases, its good practice to supply array attributes such as # units, but we won't bother here for the sake of brevity ds = xr.Dataset( { "temperature": (["loc", "instrument", "time"], temperature), "precipitation": (["loc", "instrument", "time"], precipitation), }, coords={ "lon": (["loc"], lon), "lat": (["loc"], lat), "instrument": instruments, "time": time, "reference_time": reference_time, }, ) ds Here we pass :py:class:`xarray.DataArray` objects or a pandas object as values in the dictionary: .. jupyter-execute:: xr.Dataset(dict(bar=foo)) .. jupyter-execute:: xr.Dataset(dict(bar=foo.to_pandas())) Where a pandas object is supplied as a value, the names of its indexes are used as dimension names, and its data is aligned to any existing dimensions. You can also create an dataset from: - A :py:class:`pandas.DataFrame` or ``pandas.Panel`` along its columns and items respectively, by passing it into the :py:class:`~xarray.Dataset` directly - A :py:class:`pandas.DataFrame` with :py:meth:`Dataset.from_dataframe `, which will additionally handle MultiIndexes See :ref:`pandas` - A netCDF file on disk with :py:func:`~xarray.open_dataset`. See :ref:`io`. Dataset contents ~~~~~~~~~~~~~~~~ :py:class:`~xarray.Dataset` implements the Python mapping interface, with values given by :py:class:`xarray.DataArray` objects: .. jupyter-execute:: print("temperature" in ds) ds["temperature"] Valid keys include each listed coordinate and data variable. 
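Because a :py:class:`~xarray.Dataset` implements the mapping interface, the familiar dict-style access patterns also apply. A small sketch using the example dataset built above (note that coordinate variables such as ``lat`` are valid keys too):

.. code-block:: python

    "lat" in ds          # True: membership checks cover coordinates as well
    ds["lat"]            # coordinate variables come back as DataArray objects
    ds.get("pressure")   # dict-style .get() returns None for a missing key
    list(ds.data_vars)   # ['temperature', 'precipitation']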
Data and coordinate variables are also contained separately in the :py:attr:`~xarray.Dataset.data_vars` and :py:attr:`~xarray.Dataset.coords` dictionary-like attributes: .. jupyter-execute:: ds.data_vars .. jupyter-execute:: ds.coords Finally, like data arrays, datasets also store arbitrary metadata in the form of ``attributes``: .. jupyter-execute:: print(ds.attrs) ds.attrs["title"] = "example attribute" ds Xarray does not enforce any restrictions on attributes, but serialization to some file formats may fail if you use objects that are not strings, numbers or :py:class:`numpy.ndarray` objects. As a useful shortcut, you can use attribute style access for reading (but not setting) variables and attributes: .. jupyter-execute:: ds.temperature This is particularly useful in an exploratory context, because you can tab-complete these variable names with tools like IPython. .. _dictionary_like_methods: Dictionary like methods ~~~~~~~~~~~~~~~~~~~~~~~ We can update a dataset in-place using Python's standard dictionary syntax. For example, to create this example dataset from scratch, we could have written: .. jupyter-execute:: ds = xr.Dataset() ds["temperature"] = (("loc", "instrument", "time"), temperature) ds["temperature_double"] = (("loc", "instrument", "time"), temperature * 2) ds["precipitation"] = (("loc", "instrument", "time"), precipitation) ds.coords["lat"] = (("loc",), lat) ds.coords["lon"] = (("loc",), lon) ds.coords["time"] = pd.date_range("2014-09-06", periods=4) ds.coords["reference_time"] = pd.Timestamp("2014-09-05") To change the variables in a ``Dataset``, you can use all the standard dictionary methods, including ``values``, ``items``, ``__delitem__``, ``get`` and :py:meth:`~xarray.Dataset.update`. Note that assigning a ``DataArray`` or pandas object to a ``Dataset`` variable using ``__setitem__`` or ``update`` will :ref:`automatically align` the array(s) to the original dataset's indexes. You can copy a ``Dataset`` by calling the :py:meth:`~xarray.Dataset.copy` method. By default, the copy is shallow, so only the container will be copied: the arrays in the ``Dataset`` will still be stored in the same underlying :py:class:`numpy.ndarray` objects. You can copy all data by calling ``ds.copy(deep=True)``. .. _transforming datasets: Transforming datasets ~~~~~~~~~~~~~~~~~~~~~ In addition to dictionary-like methods (described above), xarray has additional methods (like pandas) for transforming datasets into new objects. For removing variables, you can select and drop an explicit list of variables by indexing with a list of names or using the :py:meth:`~xarray.Dataset.drop_vars` methods to return a new ``Dataset``. These operations keep around coordinates: .. jupyter-execute:: ds[["temperature"]] .. jupyter-execute:: ds[["temperature", "temperature_double"]] .. jupyter-execute:: ds.drop_vars("temperature") To remove a dimension, you can use :py:meth:`~xarray.Dataset.drop_dims` method. Any variables using that dimension are dropped: .. jupyter-execute:: ds.drop_dims("time") As an alternate to dictionary-like modifications, you can use :py:meth:`~xarray.Dataset.assign` and :py:meth:`~xarray.Dataset.assign_coords`. These methods return a new dataset with additional (or replaced) values: .. jupyter-execute:: ds.assign(temperature2=2 * ds.temperature) There is also the :py:meth:`~xarray.Dataset.pipe` method that allows you to use a method call with an external function (e.g., ``ds.pipe(func)``) instead of simply calling it (e.g., ``func(ds)``). 
This allows you to write pipelines for transforming your data (using "method chaining") instead of writing hard-to-follow nested function calls: .. jupyter-input:: # these lines are equivalent, but with pipe we can make the logic flow # entirely from left to right plt.plot((2 * ds.temperature.sel(loc=0)).mean("instrument")) (ds.temperature.sel(loc=0).pipe(lambda x: 2 * x).mean("instrument").pipe(plt.plot)) Both ``pipe`` and ``assign`` replicate the pandas methods of the same names (:py:meth:`DataFrame.pipe ` and :py:meth:`DataFrame.assign `). With xarray, there is no performance penalty for creating new datasets, even if variables are lazily loaded from a file on disk. Creating new objects instead of mutating existing objects often results in easier to understand code, so we encourage using this approach. Renaming variables ~~~~~~~~~~~~~~~~~~ Another useful option is the :py:meth:`~xarray.Dataset.rename` method to rename dataset variables: .. jupyter-execute:: ds.rename({"temperature": "temp", "precipitation": "precip"}) The related :py:meth:`~xarray.Dataset.swap_dims` method allows you to swap dimension and non-dimension variables: .. jupyter-execute:: ds.coords["day"] = ("time", [6, 7, 8, 9]) ds.swap_dims({"time": "day"}) DataTree -------- :py:class:`~xarray.DataTree` is ``xarray``'s highest-level data structure, able to organise heterogeneous data which could not be stored inside a single :py:class:`~xarray.Dataset` object. This includes representing the recursive structure of multiple `groups`_ within a netCDF file or `Zarr Store`_. .. _groups: https://www.unidata.ucar.edu/software/netcdf/workshops/2011/groups-types/GroupsIntro.html .. _Zarr Store: https://zarr.readthedocs.io/en/stable/tutorial.html#groups Each :py:class:`~xarray.DataTree` object (or "node") contains the same data that a single :py:class:`xarray.Dataset` would (i.e. :py:class:`~xarray.DataArray` objects stored under hashable keys), and so has the same key properties: - ``dims``: a dictionary mapping of dimension names to lengths, for the variables in this node, and this node's ancestors, - ``data_vars``: a dict-like container of DataArrays corresponding to variables in this node, - ``coords``: another dict-like container of DataArrays, corresponding to coordinate variables in this node, and this node's ancestors, - ``attrs``: dict to hold arbitrary metadata relevant to data in this node. A single :py:class:`~xarray.DataTree` object acts much like a single :py:class:`~xarray.Dataset` object, and has a similar set of dict-like methods defined upon it. However, :py:class:`~xarray.DataTree`\s can also contain other :py:class:`~xarray.DataTree` objects, so they can be thought of as nested dict-like containers of both :py:class:`xarray.DataArray`\s and :py:class:`~xarray.DataTree`\s. A single datatree object is known as a "node", and its position relative to other nodes is defined by two more key properties: - ``children``: A dictionary mapping from names to other :py:class:`~xarray.DataTree` objects, known as its "child nodes". - ``parent``: The single :py:class:`~xarray.DataTree` object whose children this datatree is a member of, known as its "parent node". Each child automatically knows about its parent node, and a node without a parent is known as a "root" node (represented by the ``parent`` attribute pointing to ``None``). Nodes can have multiple children, but as each child node has at most one parent, there can only ever be one root node in a given tree.
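To make the ``children``/``parent`` relationship concrete, here is a minimal sketch (the node names are made up for illustration) showing that each child automatically knows its parent, and that a node without a parent acts as the root:

.. code-block:: python

    root = xr.DataTree(name="root", children={"child": xr.DataTree()})

    root["child"].parent is root  # children are linked back to their parent -> True
    root.parent is None           # a node with no parent is the root node -> True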
The overall structure is technically a connected acyclic undirected rooted graph, otherwise known as a `"Tree" `_. :py:class:`~xarray.DataTree` objects can also optionally have a ``name`` as well as ``attrs``, just like a :py:class:`~xarray.DataArray`. Again these are not normally used unless explicitly accessed by the user. .. _creating a datatree: Creating a DataTree ~~~~~~~~~~~~~~~~~~~ One way to create a :py:class:`~xarray.DataTree` from scratch is to create each node individually, specifying the nodes' relationship to one another as you create each one. The :py:class:`~xarray.DataTree` constructor takes: - ``dataset``: The data that will be stored in this node, represented by a single :py:class:`xarray.Dataset`. - ``children``: The various child nodes (if there are any), given as a mapping from string keys to :py:class:`~xarray.DataTree` objects. - ``name``: A string to use as the name of this node. Let's make a single datatree node with some example data in it: .. jupyter-execute:: ds1 = xr.Dataset({"foo": "orange"}) dt = xr.DataTree(name="root", dataset=ds1) dt At this point we have created a single node datatree with no parent and no children. .. jupyter-execute:: print(dt.parent is None) dt.children We can add a second node to this tree, assigning it to the parent node ``dt``: .. jupyter-execute:: dataset2 = xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])}) dt2 = xr.DataTree(name="a", dataset=dataset2) # Add the child Datatree to the root node dt.children = {"child-node": dt2} dt More idiomatically you can create a tree from a dictionary of ``Datasets`` and ``DataTrees``. In this case we add a new node under ``dt["child-node"]`` by providing the explicit path under ``"child-node"`` as the dictionary key: .. jupyter-execute:: # create a third Dataset ds3 = xr.Dataset({"zed": np.nan}) # create a tree from a dictionary of DataTrees and Datasets dt = xr.DataTree.from_dict({"/": dt, "/child-node/new-zed-node": ds3}) We have created a tree with three nodes in it: .. jupyter-execute:: dt Consistency checks are enforced. For instance, if we try to create a cycle, where the root node is also a child of a descendant, the constructor will raise an (:py:class:`~xarray.InvalidTreeError`): .. jupyter-execute:: :raises: dt["child-node"].children = {"new-child": dt} Alternatively you can also create a :py:class:`~xarray.DataTree` object from: - A dictionary mapping directory-like paths to either :py:class:`~xarray.DataTree` nodes or data, using :py:meth:`xarray.DataTree.from_dict()`, - A well formed netCDF or Zarr file on disk with :py:func:`~xarray.open_datatree()`. See :ref:`reading and writing files `. For data files with groups that do not align see :py:func:`xarray.open_groups` or target each group individually :py:func:`xarray.open_dataset(group='groupname') `. For more information about coordinate alignment see :ref:`datatree-inheritance` DataTree Contents ~~~~~~~~~~~~~~~~~ Like :py:class:`~xarray.Dataset`, :py:class:`~xarray.DataTree` implements the python mapping interface, but with values given by either :py:class:`~xarray.DataArray` objects or other :py:class:`~xarray.DataTree` objects. .. jupyter-execute:: dt["child-node"] .. jupyter-execute:: dt["foo"] Iterating over keys will iterate over both the names of variables and child nodes. We can also access all the data in a single node, and its inherited coordinates, through a dataset-like view .. 
jupyter-execute:: dt["child-node"].dataset This demonstrates the fact that the data in any one node is equivalent to the contents of a single :py:class:`~xarray.Dataset` object. The :py:attr:`DataTree.dataset ` property returns an immutable view, but we can instead extract the node's data contents as a new and mutable :py:class:`~xarray.Dataset` object via :py:meth:`DataTree.to_dataset() `: .. jupyter-execute:: dt["child-node"].to_dataset() Like with :py:class:`~xarray.Dataset`, you can access the data and coordinate variables of a node separately via the :py:attr:`~xarray.DataTree.data_vars` and :py:attr:`~xarray.DataTree.coords` attributes: .. jupyter-execute:: dt["child-node"].data_vars .. jupyter-execute:: dt["child-node"].coords Dictionary-like methods ~~~~~~~~~~~~~~~~~~~~~~~ We can update a datatree in-place using Python's standard dictionary syntax, similar to how we can for Dataset objects. For example, to create this example DataTree from scratch, we could have written: .. jupyter-execute:: dt = xr.DataTree(name="root") dt["foo"] = "orange" dt["child-node"] = xr.DataTree( dataset=xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])}) ) dt["child-node/new-zed-node/zed"] = np.nan dt To change the variables in a node of a :py:class:`~xarray.DataTree`, you can use all the standard dictionary methods, including ``values``, ``items``, ``__delitem__``, ``get`` and :py:meth:`xarray.DataTree.update`. Note that assigning a :py:class:`~xarray.DataTree` object to a :py:class:`~xarray.DataTree` variable using ``__setitem__`` or :py:meth:`~xarray.DataTree.update` will :ref:`automatically align ` the array(s) to the original node's indexes. If you copy a :py:class:`~xarray.DataTree` using the :py:func:`copy` function or the :py:meth:`xarray.DataTree.copy` method, it will copy the subtree, meaning that node and all the children below it, but not the parents above it. Like for :py:class:`~xarray.Dataset`, this copy is shallow by default, but you can copy all the underlying data arrays by calling ``dt.copy(deep=True)``. .. _datatree-inheritance: DataTree Inheritance ~~~~~~~~~~~~~~~~~~~~ DataTree implements a simple inheritance mechanism. Coordinates, dimensions and their associated indices are propagated downward from the root node to all descendent nodes. Coordinate inheritance was inspired by the NetCDF-CF inherited dimensions, but DataTree's inheritance is slightly stricter yet easier to reason about. The constraint that this puts on a DataTree is that dimensions and indices that are inherited must be aligned with any direct descendant node's existing dimension or index. This allows descendants to use dimensions defined in ancestor nodes, without duplicating that information. But as a consequence, if a dimension-name is defined on a node and that same dimension-name exists in one of its ancestors, they must align (have the same index and size). Some examples: ..
jupyter-execute:: # Set up coordinates time = xr.DataArray(data=["2022-01", "2023-01"], dims="time") stations = xr.DataArray(data=list("abcdef"), dims="station") lon = [-100, -80, -60] lat = [10, 20, 30] # Set up fake data wind_speed = xr.DataArray(np.ones((2, 6)) * 2, dims=("time", "station")) pressure = xr.DataArray(np.ones((2, 6)) * 3, dims=("time", "station")) air_temperature = xr.DataArray(np.ones((2, 6)) * 4, dims=("time", "station")) dewpoint = xr.DataArray(np.ones((2, 6)) * 5, dims=("time", "station")) infrared = xr.DataArray(np.ones((2, 3, 3)) * 6, dims=("time", "lon", "lat")) true_color = xr.DataArray(np.ones((2, 3, 3)) * 7, dims=("time", "lon", "lat")) dt2 = xr.DataTree.from_dict( { "/": xr.Dataset( coords={"time": time}, ), "/weather": xr.Dataset( coords={"station": stations}, data_vars={ "wind_speed": wind_speed, "pressure": pressure, }, ), "/weather/temperature": xr.Dataset( data_vars={ "air_temperature": air_temperature, "dewpoint": dewpoint, }, ), "/satellite": xr.Dataset( coords={"lat": lat, "lon": lon}, data_vars={ "infrared": infrared, "true_color": true_color, }, ), }, ) dt2 Here there are four different coordinate variables, which apply to variables in the DataTree in different ways: - ``time`` is a shared coordinate used by both ``weather`` and ``satellite`` variables, - ``station`` is used only for ``weather`` variables, and - ``lat`` and ``lon`` are only used for ``satellite`` images. Coordinate variables are inherited by descendent nodes, which is only possible because variables at different levels of a hierarchical DataTree are always aligned. Placing the ``time`` variable at the root node automatically indicates that it applies to all descendent nodes. Similarly, ``station`` is in the base ``weather`` node, because it applies to all weather variables, both directly in ``weather`` and in the ``temperature`` sub-tree. Notice the inherited coordinates are explicitly shown in the tree representation under ``Inherited coordinates:``. .. jupyter-execute:: dt2["/weather"] Accessing any of the lower level trees through the :py:func:`.dataset ` property automatically includes coordinates from higher levels (e.g., ``time`` and ``station``): .. jupyter-execute:: dt2["/weather/temperature"].dataset Similarly, when you retrieve a Dataset through :py:func:`~xarray.DataTree.to_dataset`, the inherited coordinates are included by default unless you exclude them with the ``inherit`` flag: .. jupyter-execute:: dt2["/weather/temperature"].to_dataset() .. jupyter-execute:: dt2["/weather/temperature"].to_dataset(inherit=False) For more examples and further discussion see :ref:`alignment and coordinate inheritance `. .. _coordinates: Coordinates ----------- Coordinates are ancillary variables stored for ``DataArray`` and ``Dataset`` objects in the ``coords`` attribute: .. jupyter-execute:: ds.coords Unlike attributes, xarray *does* interpret and persist coordinates in operations that transform xarray objects. There are two types of coordinates in xarray: - **dimension coordinates** are one dimensional coordinates with a name equal to their sole dimension (marked by ``*`` when printing a dataset or data array). They are used for label based indexing and alignment, like the ``index`` found on a pandas :py:class:`~pandas.DataFrame` or :py:class:`~pandas.Series`. Indeed, these "dimension" coordinates use a :py:class:`pandas.Index` internally to store their values. - **non-dimension coordinates** are variables that contain coordinate data, but are not a dimension coordinate.
They can be multidimensional (see :ref:`/examples/multidimensional-coords.ipynb`), and there is no relationship between the name of a non-dimension coordinate and the name(s) of its dimension(s). Non-dimension coordinates can be useful for indexing or plotting; otherwise, xarray does not make any direct use of the values associated with them. They are not used for alignment or automatic indexing, nor are they required to match when doing arithmetic (see :ref:`coordinates math`). .. note:: Xarray's terminology differs from the `CF terminology`_, where the "dimension coordinates" are called "coordinate variables", and the "non-dimension coordinates" are called "auxiliary coordinate variables" (see :issue:`1295` for more details). .. _CF terminology: https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#terminology Modifying coordinates ~~~~~~~~~~~~~~~~~~~~~ To entirely add or remove coordinate arrays, you can use dictionary like syntax, as shown above. To convert back and forth between data and coordinates, you can use the :py:meth:`~xarray.Dataset.set_coords` and :py:meth:`~xarray.Dataset.reset_coords` methods: .. jupyter-execute:: ds.reset_coords() .. jupyter-execute:: ds.set_coords(["temperature", "precipitation"]) .. jupyter-execute:: ds["temperature"].reset_coords(drop=True) Notice that these operations skip coordinates with names given by dimensions, as used for indexing. This is mostly because we are not entirely sure how to design the interface around the fact that xarray cannot store a coordinate and a variable with the same name but different values in the same dictionary. But we do recognize that supporting something like this would be useful. Coordinates methods ~~~~~~~~~~~~~~~~~~~ ``Coordinates`` objects also have a few useful methods, mostly for converting them into dataset objects: .. jupyter-execute:: ds.coords.to_dataset() The merge method is particularly interesting, because it implements the same logic used for merging coordinates in arithmetic operations (see :ref:`compute`): .. jupyter-execute:: alt = xr.Dataset(coords={"z": [10], "lat": 0, "lon": 0}) ds.coords.merge(alt.coords) The ``coords.merge`` method may be useful if you want to implement your own binary operations that act on xarray objects. In the future, we hope to write more helper functions so that you can easily make your functions act like xarray's built-in arithmetic. Indexes ~~~~~~~ To convert a coordinate (or any ``DataArray``) into an actual :py:class:`pandas.Index`, use the :py:meth:`~xarray.DataArray.to_index` method: .. jupyter-execute:: ds["time"].to_index() A useful shortcut is the ``indexes`` property (on both ``DataArray`` and ``Dataset``), which lazily constructs a dictionary whose keys are given by each dimension and whose values are ``Index`` objects: .. jupyter-execute:: ds.indexes MultiIndex coordinates ~~~~~~~~~~~~~~~~~~~~~~ Xarray supports labeling coordinate values with a :py:class:`pandas.MultiIndex`: .. jupyter-execute:: midx = pd.MultiIndex.from_arrays( [["R", "R", "V", "V"], [0.1, 0.2, 0.7, 0.9]], names=("band", "wn") ) mda = xr.DataArray(np.random.rand(4), coords={"spec": midx}, dims="spec") mda For convenience, multi-index levels are directly accessible as "virtual" or "derived" coordinates (marked by ``-`` when printing a dataset or data array): .. jupyter-execute:: mda["band"] .. jupyter-execute:: mda.wn Indexing with multi-index levels is also possible using the ``sel`` method (see :ref:`multi-level indexing`).
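For example, selecting by level names works directly on the ``mda`` array built above (a brief sketch; the linked section covers this in more detail):

.. code-block:: python

    mda.sel(band="R")          # all wavenumbers in band "R"
    mda.sel(band="V", wn=0.7)  # a single element selected by two levels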
Unlike other coordinates, "virtual" level coordinates are not stored in the ``coords`` attribute of ``DataArray`` and ``Dataset`` objects (although they are shown when printing the ``coords`` attribute). Consequently, most of the coordinates related methods don't apply for them. It also can't be used to replace one particular level. Because in a ``DataArray`` or ``Dataset`` object each multi-index level is accessible as a "virtual" coordinate, its name must not conflict with the names of the other levels, coordinates and data variables of the same object. Even though xarray sets default names for multi-indexes with unnamed levels, it is recommended that you explicitly set the names of the levels. .. [1] Latitude and longitude are 2D arrays because the dataset uses `projected coordinates`__. ``reference_time`` refers to the reference time at which the forecast was made, rather than ``time`` which is the valid time for which the forecast applies. __ https://en.wikipedia.org/wiki/Map_projection xarray-2025.09.0/doc/user-guide/duckarrays.rst000066400000000000000000000252741505620616400210760ustar00rootroot00000000000000.. currentmodule:: xarray .. _userguide.duckarrays: Working with numpy-like arrays ============================== NumPy-like arrays (often known as :term:`duck array`\s) are drop-in replacements for the :py:class:`numpy.ndarray` class but with different features, such as propagating physical units or a different layout in memory. Xarray can often wrap these array types, allowing you to use labelled dimensions and indexes whilst benefiting from the additional features of these array libraries. Some numpy-like array types that xarray already has some support for: * `Cupy `_ - GPU support (see `cupy-xarray `_), * `Sparse `_ - for performant arrays with many zero elements, * `Pint `_ - for tracking the physical units of your data (see `pint-xarray `_), * `Dask `_ - parallel computing on larger-than-memory arrays (see :ref:`using dask with xarray `), * `Cubed `_ - another parallel computing framework that emphasises reliability (see `cubed-xarray `_). .. warning:: This feature should be considered somewhat experimental. Please report any bugs you find on `xarray’s issue tracker `_. .. note:: For information on wrapping dask arrays see :ref:`dask`. Whilst xarray wraps dask arrays in a similar way to that described on this page, chunked array types like :py:class:`dask.array.Array` implement additional methods that require slightly different user code (e.g. calling ``.chunk`` or ``.compute``). See the docs on :ref:`wrapping chunked arrays `. Why "duck"? ----------- Why is it also called a "duck" array? This comes from a common statement of object-oriented programming - "If it walks like a duck, and quacks like a duck, treat it like a duck". In other words, a library like xarray that is capable of using multiple different types of arrays does not have to explicitly check that each one it encounters is permitted (e.g. ``if dask``, ``if numpy``, ``if sparse`` etc.). Instead xarray can take the more permissive approach of simply treating the wrapped array as valid, attempting to call the relevant methods (e.g. ``.mean()``) and only raising an error if a problem occurs (e.g. the method is not found on the wrapped class). This is much more flexible, and allows objects and classes from different libraries to work together more easily. What is a numpy-like array? 
--------------------------- A "numpy-like array" (also known as a "duck array") is a class that contains array-like data, and implements key numpy-like functionality such as indexing, broadcasting, and computation methods. For example, the `sparse `_ library provides a sparse array type which is useful for representing nD array objects like sparse matrices in a memory-efficient manner. We can create a sparse array object (of the :py:class:`sparse.COO` type) from a numpy array like this: .. jupyter-execute:: from sparse import COO import xarray as xr import numpy as np %xmode minimal .. jupyter-execute:: x = np.eye(4, dtype=np.uint8) # create diagonal identity matrix s = COO.from_numpy(x) s This sparse object does not attempt to explicitly store every element in the array, only the non-zero elements. This approach is much more efficient for large arrays with only a few non-zero elements (such as tri-diagonal matrices). Sparse array objects can be converted back to a "dense" numpy array by calling :py:meth:`sparse.COO.todense`. Just like :py:class:`numpy.ndarray` objects, :py:class:`sparse.COO` arrays support indexing .. jupyter-execute:: s[1, 1] # diagonal elements should be ones .. jupyter-execute:: s[2, 3] # off-diagonal elements should be zero broadcasting, .. jupyter-execute:: x2 = np.zeros( (4, 1), dtype=np.uint8 ) # create second sparse array of different shape s2 = COO.from_numpy(x2) (s * s2) # multiplication requires broadcasting and various computation methods .. jupyter-execute:: s.sum(axis=1) This numpy-like array also supports calling so-called `numpy ufuncs `_ ("universal functions") on it directly: .. jupyter-execute:: np.sum(s, axis=1) Notice that in each case the API for calling the operation on the sparse array is identical to that of calling it on the equivalent numpy array - this is the sense in which the sparse array is "numpy-like". .. note:: For discussion on exactly which methods a class needs to implement to be considered "numpy-like", see :ref:`internals.duckarrays`. Wrapping numpy-like arrays in xarray ------------------------------------ :py:class:`DataArray`, :py:class:`Dataset`, and :py:class:`Variable` objects can wrap these numpy-like arrays. Constructing xarray objects which wrap numpy-like arrays ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The primary way to create an xarray object which wraps a numpy-like array is to pass that numpy-like array instance directly to the constructor of the xarray class. The :ref:`page on xarray data structures ` shows how :py:class:`DataArray` and :py:class:`Dataset` both accept data in various forms through their ``data`` argument, but in fact this data can also be any wrappable numpy-like array. For example, we can wrap the sparse array we created earlier inside a new DataArray object: .. jupyter-execute:: s_da = xr.DataArray(s, dims=["i", "j"]) s_da We can see what's inside - the printable representation of our xarray object (the repr) automatically uses the printable representation of the underlying wrapped array. Of course our sparse array object is still there underneath - it's stored under the ``.data`` attribute of the dataarray: .. jupyter-execute:: s_da.data Array methods ~~~~~~~~~~~~~ We saw above that numpy-like arrays provide numpy methods. Xarray automatically uses these when you call the corresponding xarray method: .. 
jupyter-execute:: s_da.sum(dim="j") Converting wrapped types ~~~~~~~~~~~~~~~~~~~~~~~~ If you want to change the type inside your xarray object you can use :py:meth:`DataArray.as_numpy`: .. jupyter-execute:: s_da.as_numpy() This returns a new :py:class:`DataArray` object, but now wrapping a normal numpy array. If instead you want to convert to numpy and return that numpy array you can use either :py:meth:`DataArray.to_numpy` or :py:meth:`DataArray.values`, where the former is strongly preferred. The difference is in the way they coerce to numpy - :py:meth:`~DataArray.values` always uses :py:func:`numpy.asarray` which will fail for some array types (e.g. ``cupy``), whereas :py:meth:`~DataArray.to_numpy` uses the correct method depending on the array type. .. jupyter-execute:: s_da.to_numpy() .. jupyter-execute:: :raises: s_da.values This illustrates the difference between :py:meth:`~DataArray.data` and :py:meth:`~DataArray.values`, which is sometimes a point of confusion for new xarray users. Explicitly: :py:meth:`DataArray.data` returns the underlying numpy-like array, regardless of type, whereas :py:meth:`DataArray.values` converts the underlying array to a numpy array before returning it. (This is another reason to use :py:meth:`~DataArray.to_numpy` over :py:meth:`~DataArray.values` - the intention is clearer.) Conversion to numpy as a fallback ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If a wrapped array does not implement the corresponding array method then xarray will often attempt to convert the underlying array to a numpy array so that the operation can be performed. You may want to watch out for this behavior, and report any instances in which it causes problems. Most of xarray's API does support using :term:`duck array` objects, but there are a few areas where the code will still convert to ``numpy`` arrays: - Dimension coordinates, and thus all indexing operations: * :py:meth:`Dataset.sel` and :py:meth:`DataArray.sel` * :py:meth:`Dataset.loc` and :py:meth:`DataArray.loc` * :py:meth:`Dataset.drop_sel` and :py:meth:`DataArray.drop_sel` * :py:meth:`Dataset.reindex`, :py:meth:`Dataset.reindex_like`, :py:meth:`DataArray.reindex` and :py:meth:`DataArray.reindex_like`: duck arrays in data variables and non-dimension coordinates won't be casted - Functions and methods that depend on external libraries or features of ``numpy`` not covered by ``__array_function__`` / ``__array_ufunc__``: * :py:meth:`Dataset.ffill` and :py:meth:`DataArray.ffill` (uses ``bottleneck``) * :py:meth:`Dataset.bfill` and :py:meth:`DataArray.bfill` (uses ``bottleneck``) * :py:meth:`Dataset.interp`, :py:meth:`Dataset.interp_like`, :py:meth:`DataArray.interp` and :py:meth:`DataArray.interp_like` (uses ``scipy``): duck arrays in data variables and non-dimension coordinates will be casted in addition to not supporting duck arrays in dimension coordinates * :py:meth:`Dataset.rolling` and :py:meth:`DataArray.rolling` (requires ``numpy>=1.20``) * :py:meth:`Dataset.rolling_exp` and :py:meth:`DataArray.rolling_exp` (uses ``numbagg``) * :py:meth:`Dataset.interpolate_na` and :py:meth:`DataArray.interpolate_na` (uses :py:class:`numpy.vectorize`) * :py:func:`apply_ufunc` with ``vectorize=True`` (uses :py:class:`numpy.vectorize`) - Incompatibilities between different :term:`duck array` libraries: * :py:meth:`Dataset.chunk` and :py:meth:`DataArray.chunk`: this fails if the data was not already chunked and the :term:`duck array` (e.g. a ``pint`` quantity) should wrap the new ``dask`` array; changing the chunk sizes works however. 
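If you are unsure whether a particular operation kept your duck array or silently converted it, one simple check (a sketch reusing the sparse-backed ``s_da`` from above) is to inspect the type of ``.data`` after the operation:

.. code-block:: python

    import sparse

    result = 2 * s_da  # scalar multiplication is supported by sparse directly
    type(result.data)                    # still the wrapped array type
    isinstance(result.data, sparse.COO)  # True if no conversion to numpy happened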
Extensions using duck arrays ---------------------------- Whilst the features above allow many numpy-like array libraries to be used pretty seamlessly with xarray, it often also makes sense to use an interfacing package to make certain tasks easier. For example the `pint-xarray package `_ offers a custom ``.pint`` accessor (see :ref:`internals.accessors`) which provides convenient access to information stored within the wrapped array (e.g. ``.units`` and ``.magnitude``), and makes creating wrapped pint arrays (and especially xarray-wrapping-pint-wrapping-dask arrays) simpler for the user. We maintain a list of libraries extending ``xarray`` to make working with particular wrapped duck arrays easier. If you know of more that aren't on this list please raise an issue to add them! - `pint-xarray `_ - `cupy-xarray `_ - `cubed-xarray `_ xarray-2025.09.0/doc/user-guide/ecosystem.rst000066400000000000000000000253411505620616400207340ustar00rootroot00000000000000.. _ecosystem: Xarray related projects ----------------------- Below is a list of existing open source projects that build functionality upon xarray. See also section :ref:`internals` for more details on how to build xarray extensions. We also maintain the `xarray-contrib `_ GitHub organization as a place to curate projects that build upon xarray. Geosciences ~~~~~~~~~~~ - `aospy `_: Automated analysis and management of gridded climate data. - `argopy `_: xarray-based Argo data access, manipulation and visualisation for standard users as well as Argo experts. - `cf_xarray `_: Provides an accessor (DataArray.cf or Dataset.cf) that allows you to interpret Climate and Forecast metadata convention attributes present on xarray objects. - `climpred `_: Analysis of ensemble forecast models for climate prediction. - `geocube `_: Tool to convert geopandas vector data into rasterized xarray data. - `GeoWombat `_: Utilities for analysis of remotely sensed and gridded raster data at scale (easily tame Landsat, Sentinel, Quickbird, and PlanetScope). - `grib2io `_: Utility to work with GRIB2 files including an xarray backend, DASK support for parallel reading in open_mfdataset, lazy loading of data, editing of GRIB2 attributes and GRIB2IO DataArray attrs, and spatial interpolation and reprojection of GRIB2 messages and GRIB2IO Datasets/DataArrays for both grid to grid and grid to stations. - `gsw-xarray `_: a wrapper around `gsw `_ that adds CF compliant attributes when possible, units, name. - `infinite-diff `_: xarray-based finite-differencing, focused on gridded climate/meteorology data - `marc_analysis `_: Analysis package for CESM/MARC experiments and output. - `MetPy `_: A collection of tools in Python for reading, visualizing, and performing calculations with weather data. - `MPAS-Analysis `_: Analysis for simulations produced with Model for Prediction Across Scales (MPAS) components and the Accelerated Climate Model for Energy (ACME). - `OGGM `_: Open Global Glacier Model - `Oocgcm `_: Analysis of large gridded geophysical datasets - `Open Data Cube `_: Analysis toolkit of continental scale Earth Observation data from satellites. - `Pangaea `_: xarray extension for gridded land surface & weather model output). - `Pangeo `_: A community effort for big data geoscience in the cloud. - `PyGDX `_: Python 3 package for accessing data stored in GAMS Data eXchange (GDX) files. Also uses a custom subclass. - `pyinterp `_: Python 3 package for interpolating geo-referenced data used in the field of geosciences. 
- `pyXpcm `_: xarray-based Profile Classification Modelling (PCM), mostly for ocean data. - `Regionmask `_: plotting and creation of masks of spatial regions - `rioxarray `_: geospatial xarray extension powered by rasterio - `salem `_: Adds geolocalised subsetting, masking, and plotting operations to xarray's data structures via accessors. - `SatPy `_ : Library for reading and manipulating meteorological remote sensing data and writing it to various image and data file formats. - `SARXarray `_: xarray extension for reading and processing large Synthetic Aperture Radar (SAR) data stacks. - `shxarray `_: Convert, filter,and map geodesy related spherical harmonic representations of gravity and terrestrial water storage through an xarray extension. - `Spyfit `_: FTIR spectroscopy of the atmosphere - `windspharm `_: Spherical harmonic wind analysis in Python. - `wradlib `_: An Open Source Library for Weather Radar Data Processing. - `wrf-python `_: A collection of diagnostic and interpolation routines for use with output of the Weather Research and Forecasting (WRF-ARW) Model. - `xarray-eopf `_: An xarray backend implementation for opening ESA EOPF data products in Zarr format. - `xarray-regrid `_: xarray extension for regridding rectilinear data. - `xarray-simlab `_: xarray extension for computer model simulations. - `xarray-spatial `_: Numba-accelerated raster-based spatial processing tools (NDVI, curvature, zonal-statistics, proximity, hillshading, viewshed, etc.) - `xarray-topo `_: xarray extension for topographic analysis and modelling. - `xbpch `_: xarray interface for bpch files. - `xCDAT `_: An extension of xarray for climate data analysis on structured grids. - `xclim `_: A library for calculating climate science indices with unit handling built from xarray and dask. - `xESMF `_: Universal regridder for geospatial data. - `xgcm `_: Extends the xarray data model to understand finite volume grid cells (common in General Circulation Models) and provides interpolation and difference operations for such grids. - `xmitgcm `_: a python package for reading `MITgcm `_ binary MDS files into xarray data structures. - `xnemogcm `_: a package to read `NEMO `_ output files and add attributes to interface with xgcm. Machine Learning ~~~~~~~~~~~~~~~~ - `ArviZ `_: Exploratory analysis of Bayesian models, built on top of xarray. - `Darts `_: User-friendly modern machine learning for time series in Python. - `Elm `_: Parallel machine learning on xarray data structures - `sklearn-xarray (1) `_: Combines scikit-learn and xarray (1). - `sklearn-xarray (2) `_: Combines scikit-learn and xarray (2). - `xbatcher `_: Batch Generation from Xarray Datasets. Other domains ~~~~~~~~~~~~~ - `ptsa `_: EEG Time Series Analysis - `pycalphad `_: Computational Thermodynamics in Python - `pyomeca `_: Python framework for biomechanical analysis - `movement `_: A Python toolbox for analysing animal body movements Extend xarray capabilities ~~~~~~~~~~~~~~~~~~~~~~~~~~ - `Collocate `_: Collocate xarray trajectories in arbitrary physical dimensions - `eofs `_: EOF analysis in Python. - `hypothesis-gufunc `_: Extension to hypothesis. Makes it easy to write unit tests with xarray objects as input. - `ntv-pandas `_ : A tabular analyzer and a semantic, compact and reversible converter for multidimensional and tabular data - `nxarray `_: NeXus input/output capability for xarray. - `xarray-compare `_: xarray extension for data comparison. - `xarray-dataclasses `_: xarray extension for typed DataArray and Dataset creation. 
- `xarray_einstats `_: Statistics, linear algebra and einops for xarray - `xarray_extras `_: Advanced algorithms for xarray objects (e.g. integrations/interpolations). - `xeofs `_: PCA/EOF analysis and related techniques, integrated with xarray and Dask for efficient handling of large-scale data. - `xpublish `_: Publish Xarray Datasets via a Zarr compatible REST API. - `xrft `_: Fourier transforms for xarray data. - `xr-scipy `_: A lightweight scipy wrapper for xarray. - `X-regression `_: Multiple linear regression from Statsmodels library coupled with Xarray library. - `xskillscore `_: Metrics for verifying forecasts. - `xyzpy `_: Easily generate high dimensional data, including parallelization. - `xarray-lmfit `_: xarray extension for curve fitting using `lmfit `_. Visualization ~~~~~~~~~~~~~ - `datashader `_, `geoviews `_, `holoviews `_, : visualization packages for large data. - `hvplot `_ : A high-level plotting API for the PyData ecosystem built on HoloViews. - `psyplot `_: Interactive data visualization with python. - `xarray-leaflet `_: An xarray extension for tiled map plotting based on ipyleaflet. - `xtrude `_: An xarray extension for 3D terrain visualization based on pydeck. - `pyvista-xarray `_: xarray DataArray accessor for 3D visualization with `PyVista `_ and DataSet engines for reading VTK data formats. Non-Python projects ~~~~~~~~~~~~~~~~~~~ - `xframe `_: C++ data structures inspired by xarray. - `AxisArrays `_, `NamedArrays `_ and `YAXArrays.jl `_: similar data structures for Julia. More projects can be found at the `"xarray" Github topic `_. xarray-2025.09.0/doc/user-guide/groupby.rst000066400000000000000000000251331505620616400204070ustar00rootroot00000000000000.. currentmodule:: xarray .. _groupby: GroupBy: Group and Bin Data --------------------------- Often we want to bin or group data, produce statistics (mean, variance) on the groups, and then return a reduced data set. To do this, Xarray supports `"group by"`__ operations with the same API as pandas to implement the `split-apply-combine`__ strategy: __ https://pandas.pydata.org/pandas-docs/stable/groupby.html __ https://www.jstatsoft.org/v40/i01/paper - Split your data into multiple independent groups. - Apply some function to each group. - Combine your groups back into a single data object. Group by operations work on both :py:class:`Dataset` and :py:class:`DataArray` objects. Most of the examples focus on grouping by a single one-dimensional variable, although support for grouping over a multi-dimensional variable has recently been implemented. Note that for one-dimensional data, it is usually faster to rely on pandas' implementation of the same pipeline. .. tip:: `Install the flox package `_ to substantially improve the performance of GroupBy operations, particularly with dask. flox `extends Xarray's in-built GroupBy capabilities `_ by allowing grouping by multiple variables, and lazy grouping by dask arrays. If installed, Xarray will automatically use flox by default. Split ~~~~~ Let's create a simple example dataset: .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 3))}, coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ) arr = ds["foo"] ds If we groupby the name of a variable or coordinate in a dataset (we can also use a DataArray directly), we get back a ``GroupBy`` object: .. 
jupyter-execute:: ds.groupby("letters") This object works very similarly to a pandas GroupBy object. You can view the group indices with the ``groups`` attribute: .. jupyter-execute:: ds.groupby("letters").groups You can also iterate over groups in ``(label, group)`` pairs: .. jupyter-execute:: list(ds.groupby("letters")) You can index out a particular group: .. jupyter-execute:: ds.groupby("letters")["b"] To group by multiple variables, see :ref:`this section `. Binning ~~~~~~~ Sometimes you don't want to use all the unique values to determine the groups but instead want to "bin" the data into coarser groups. You could always create a customized coordinate, but xarray facilitates this via the :py:meth:`Dataset.groupby_bins` method. .. jupyter-execute:: x_bins = [0, 25, 50] ds.groupby_bins("x", x_bins).groups The binning is implemented via :func:`pandas.cut`, whose documentation details how the bins are assigned. As seen in the example above, by default, the bins are labeled with strings using set notation to precisely identify the bin limits. To override this behavior, you can specify the bin labels explicitly. Here we choose ``float`` labels which identify the bin centers: .. jupyter-execute:: x_bin_labels = [12.5, 37.5] ds.groupby_bins("x", x_bins, labels=x_bin_labels).groups Apply ~~~~~ To apply a function to each group, you can use the flexible :py:meth:`core.groupby.DatasetGroupBy.map` method. The resulting objects are automatically concatenated back together along the group axis: .. jupyter-execute:: def standardize(x): return (x - x.mean()) / x.std() arr.groupby("letters").map(standardize) GroupBy objects also have a :py:meth:`core.groupby.DatasetGroupBy.reduce` method and methods like :py:meth:`core.groupby.DatasetGroupBy.mean` as shortcuts for applying an aggregation function: .. jupyter-execute:: arr.groupby("letters").mean(dim="x") Using a groupby is thus also a convenient shortcut for aggregating over all dimensions *other than* the provided one: .. jupyter-execute:: ds.groupby("x").std(...) .. note:: We use an ellipsis (`...`) here to indicate we want to reduce over all other dimensions First and last ~~~~~~~~~~~~~~ There are two special aggregation operations that are currently only found on groupby objects: first and last. These provide the first or last example of values for group along the grouped dimension: .. jupyter-execute:: ds.groupby("letters").first(...) By default, they skip missing values (control this with ``skipna``). Grouped arithmetic ~~~~~~~~~~~~~~~~~~ GroupBy objects also support a limited set of binary arithmetic operations, as a shortcut for mapping over all unique labels. Binary arithmetic is supported for ``(GroupBy, Dataset)`` and ``(GroupBy, DataArray)`` pairs, as long as the dataset or data array uses the unique grouped values as one of its index coordinates. For example: .. jupyter-execute:: alt = arr.groupby("letters").mean(...) alt .. jupyter-execute:: ds.groupby("letters") - alt This last line is roughly equivalent to the following:: results = [] for label, group in ds.groupby('letters'): results.append(group - alt.sel(letters=label)) xr.concat(results, dim='x') .. _groupby.multidim: Multidimensional Grouping ~~~~~~~~~~~~~~~~~~~~~~~~~ Many datasets have a multidimensional coordinate variable (e.g. longitude) which is different from the logical grid dimensions (e.g. nx, ny). Such variables are valid under the `CF conventions`__. 
Xarray supports groupby operations over multidimensional coordinate variables: __ https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_two_dimensional_latitude_longitude_coordinate_variables .. jupyter-execute:: da = xr.DataArray( [[0, 1], [2, 3]], coords={ "lon": (["ny", "nx"], [[30, 40], [40, 50]]), "lat": (["ny", "nx"], [[10, 10], [20, 20]]), }, dims=["ny", "nx"], ) da .. jupyter-execute:: da.groupby("lon").sum(...) .. jupyter-execute:: da.groupby("lon").map(lambda x: x - x.mean(), shortcut=False) Because multidimensional groups have the ability to generate a very large number of bins, coarse-binning via :py:meth:`Dataset.groupby_bins` may be desirable: .. jupyter-execute:: da.groupby_bins("lon", [0, 45, 50]).sum() These methods group by ``lon`` values. It is also possible to groupby each cell in a grid, regardless of value, by stacking multiple dimensions, applying your function, and then unstacking the result: .. jupyter-execute:: stacked = da.stack(gridcell=["ny", "nx"]) stacked.groupby("gridcell").sum(...).unstack("gridcell") Alternatively, you can groupby both ``lat`` and ``lon`` at the :ref:`same time `. .. _groupby.groupers: Grouper Objects ~~~~~~~~~~~~~~~ Both ``groupby_bins`` and ``resample`` are specializations of the core ``groupby`` operation for binning, and time resampling. Many problems demand more complex GroupBy application: for example, grouping by multiple variables with a combination of categorical grouping, binning, and resampling; or more specializations like spatial resampling; or more complex time grouping like special handling of seasons, or the ability to specify custom seasons. To handle these use-cases and more, Xarray is evolving to providing an extension point using ``Grouper`` objects. .. tip:: See the `grouper design`_ doc for more detail on the motivation and design ideas behind Grouper objects. .. _grouper design: https://github.com/pydata/xarray/blob/main/design_notes/grouper_objects.md For now Xarray provides three specialized Grouper objects: 1. :py:class:`groupers.UniqueGrouper` for categorical grouping 2. :py:class:`groupers.BinGrouper` for binned grouping 3. :py:class:`groupers.TimeResampler` for resampling along a datetime coordinate These provide functionality identical to the existing ``groupby``, ``groupby_bins``, and ``resample`` methods. That is, .. code-block:: python ds.groupby("x") is identical to .. code-block:: python from xarray.groupers import UniqueGrouper ds.groupby(x=UniqueGrouper()) Similarly, .. code-block:: python ds.groupby_bins("x", bins=bins) is identical to .. code-block:: python from xarray.groupers import BinGrouper ds.groupby(x=BinGrouper(bins)) and .. code-block:: python ds.resample(time="ME") is identical to .. code-block:: python from xarray.groupers import TimeResampler ds.resample(time=TimeResampler("ME")) The :py:class:`groupers.UniqueGrouper` accepts an optional ``labels`` kwarg that is not present in :py:meth:`DataArray.groupby` or :py:meth:`Dataset.groupby`. Specifying ``labels`` is required when grouping by a lazy array type (e.g. dask or cubed). The ``labels`` are used to construct the output coordinate (say for a reduction), and aggregations will only be run over the specified labels. You may use ``labels`` to also specify the ordering of groups to be used during iteration. The order will be preserved in the output. .. _groupby.multiple: Grouping by multiple variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use grouper objects to group by multiple dimensions: .. 
jupyter-execute:: from xarray.groupers import UniqueGrouper da.groupby(["lat", "lon"]).sum() The above is sugar for using ``UniqueGrouper`` objects directly: .. jupyter-execute:: da.groupby(lat=UniqueGrouper(), lon=UniqueGrouper()).sum() Different groupers can be combined to construct sophisticated GroupBy operations. .. jupyter-execute:: from xarray.groupers import BinGrouper ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum() Time Grouping and Resampling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. seealso:: See :ref:`resampling`. Shuffling ~~~~~~~~~ Shuffling is a generalization of sorting a DataArray or Dataset by another DataArray, named ``label`` for example, that follows from the idea of grouping by ``label``. Shuffling reorders the DataArray or the DataArrays in a Dataset such that all members of a group occur sequentially. For example, Shuffle the object using either :py:class:`DatasetGroupBy` or :py:class:`DataArrayGroupBy` as appropriate. .. jupyter-execute:: da = xr.DataArray( dims="x", data=[1, 2, 3, 4, 5, 6], coords={"label": ("x", "a b c a b c".split(" "))}, ) da.groupby("label").shuffle_to_chunks() For chunked array types (e.g. dask or cubed), shuffle may result in a more optimized communication pattern when compared to direct indexing by the appropriate indexer. Shuffling also makes GroupBy operations on chunked arrays an embarrassingly parallel problem, and may significantly improve workloads that use :py:meth:`DatasetGroupBy.map` or :py:meth:`DataArrayGroupBy.map`. xarray-2025.09.0/doc/user-guide/hierarchical-data.rst000066400000000000000000000760431505620616400222530ustar00rootroot00000000000000.. _userguide.hierarchical-data: Hierarchical data ================= .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) np.set_printoptions(threshold=10) %xmode minimal .. _why: Why Hierarchical Data? ---------------------- Many real-world datasets are composed of multiple differing components, and it can often be useful to think of these in terms of a hierarchy of related groups of data. Examples of data which one might want organise in a grouped or hierarchical manner include: - Simulation data at multiple resolutions, - Observational data about the same system but from multiple different types of sensors, - Mixed experimental and theoretical data, - A systematic study recording the same experiment but with different parameters, - Heterogeneous data, such as demographic and metereological data, or even any combination of the above. Often datasets like this cannot easily fit into a single :py:class:`~xarray.Dataset` object, or are more usefully thought of as groups of related :py:class:`~xarray.Dataset` objects. For this purpose we provide the :py:class:`xarray.DataTree` class. This page explains in detail how to understand and use the different features of the :py:class:`~xarray.DataTree` class for your own hierarchical data needs. .. _node relationships: Node Relationships ------------------ .. _creating a family tree: Creating a Family Tree ~~~~~~~~~~~~~~~~~~~~~~ The three main ways of creating a :py:class:`~xarray.DataTree` object are described briefly in :ref:`creating a datatree`. Here we go into more detail about how to create a tree node-by-node, using a famous family tree from the Simpsons cartoon as an example. Let's start by defining nodes representing the two siblings, Bart and Lisa Simpson: .. 
jupyter-execute:: bart = xr.DataTree(name="Bart") lisa = xr.DataTree(name="Lisa") Each of these node objects knows their own :py:class:`~xarray.DataTree.name`, but they currently have no relationship to one another. We can connect them by creating another node representing a common parent, Homer Simpson: .. jupyter-execute:: homer = xr.DataTree(name="Homer", children={"Bart": bart, "Lisa": lisa}) Here we set the children of Homer in the node's constructor. We now have a small family tree where we can see how these individual Simpson family members are related to one another: .. jupyter-execute:: print(homer) .. note:: We use ``print()`` above to show the compact tree hierarchy. :py:class:`~xarray.DataTree` objects also have an interactive HTML representation that is enabled by default in editors such as JupyterLab and VSCode. The HTML representation is especially helpful for larger trees and exploring new datasets, as it allows you to expand and collapse nodes. If you prefer the text representations you can also set ``xr.set_options(display_style="text")``. .. Comment:: may remove note and print()s after upstream theme changes https://github.com/pydata/pydata-sphinx-theme/pull/2187 The nodes representing Bart and Lisa are now connected - we can confirm their sibling rivalry by examining the :py:class:`~xarray.DataTree.siblings` property: .. jupyter-execute:: list(homer["Bart"].siblings) But oops, we forgot Homer's third daughter, Maggie! Let's add her by updating Homer's :py:class:`~xarray.DataTree.children` property to include her: .. jupyter-execute:: maggie = xr.DataTree(name="Maggie") homer.children = {"Bart": bart, "Lisa": lisa, "Maggie": maggie} print(homer) Let's check that Maggie knows who her Dad is: .. jupyter-execute:: maggie.parent.name That's good - updating the properties of our nodes does not break the internal consistency of our tree, as changes of parentage are automatically reflected on both nodes. These children obviously have another parent, Marge Simpson, but :py:class:`~xarray.DataTree` nodes can only have a maximum of one parent. Genealogical `family trees are not even technically trees `_ in the mathematical sense - the fact that distant relatives can mate makes them directed acyclic graphs. Trees of :py:class:`~xarray.DataTree` objects cannot represent this. Homer is currently listed as having no parent (the so-called "root node" of this tree), but we can update his :py:class:`~xarray.DataTree.parent` property: .. jupyter-execute:: abe = xr.DataTree(name="Abe") abe.children = {"Homer": homer} Abe is now the "root" of this tree, which we can see by examining the :py:class:`~xarray.DataTree.root` property of any node in the tree .. jupyter-execute:: maggie.root.name We can see the whole tree by printing Abe's node or just part of the tree by printing Homer's node: .. jupyter-execute:: print(abe) .. jupyter-execute:: print(abe["Homer"]) In episode 28, Abe Simpson reveals that he had another son, Herbert "Herb" Simpson. We can add Herbert to the family tree without displacing Homer by :py:meth:`~xarray.DataTree.assign`-ing another child to Abe: .. jupyter-execute:: herbert = xr.DataTree(name="Herb") abe = abe.assign({"Herbert": herbert}) print(abe) .. jupyter-execute:: print(abe["Herbert"].name) print(herbert.name) .. note:: This example shows a subtlety - the returned tree has Homer's brother listed as ``"Herbert"``, but the original node was named "Herb". 
Not only are names overridden when stored as keys like this, but the new node is a copy, so that the original node that was referenced is unchanged (i.e. ``herbert.name == "Herb"`` still). In other words, nodes are copied into trees, not inserted into them. This is intentional, and mirrors the behaviour when storing named :py:class:`~xarray.DataArray` objects inside datasets. Certain manipulations of our tree are forbidden, if they would create an inconsistent result. In episode 51 of the show Futurama, Philip J. Fry travels back in time and accidentally becomes his own Grandfather. If we try similar time-travelling hijinks with Homer, we get a :py:class:`~xarray.InvalidTreeError` raised: .. jupyter-execute:: :raises: abe["Homer"].children = {"Abe": abe} .. _evolutionary tree: Ancestry in an Evolutionary Tree ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Let's use a different example of a tree to discuss more complex relationships between nodes - the phylogenetic tree, or tree of life. .. jupyter-execute:: vertebrates = xr.DataTree.from_dict( { "/Sharks": None, "/Bony Skeleton/Ray-finned Fish": None, "/Bony Skeleton/Four Limbs/Amphibians": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Primates": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Rodents & Rabbits": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Dinosaurs": None, "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Birds": None, }, name="Vertebrae", ) primates = vertebrates["/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Primates"] dinosaurs = vertebrates[ "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Dinosaurs" ] We have used the :py:meth:`~xarray.DataTree.from_dict` constructor method as a preferred way to quickly create a whole tree, and :ref:`filesystem paths` (to be explained shortly) to select two nodes of interest. .. jupyter-execute:: print(vertebrates) This tree shows various families of species, grouped by their common features (making it technically a `"Cladogram" `_, rather than an evolutionary tree). Here both the species and the features used to group them are represented by :py:class:`~xarray.DataTree` node objects - there is no distinction in types of node. We can however get a list of only the nodes we used to represent species by using the fact that all those nodes have no children - they are "leaf nodes". We can check if a node is a leaf with :py:meth:`~xarray.DataTree.is_leaf`, and get a list of all leaves with the :py:class:`~xarray.DataTree.leaves` property: .. jupyter-execute:: print(primates.is_leaf) [node.name for node in vertebrates.leaves] Pretending that this is a true evolutionary tree for a moment, we can find the features of the evolutionary ancestors (so-called "ancestor" nodes), the distinguishing feature of the common ancestor of all vertebrate life (the root node), and even the distinguishing feature of the common ancestor of any two species (the common ancestor of two nodes): .. jupyter-execute:: print([node.name for node in reversed(primates.parents)]) print(primates.root.name) print(primates.find_common_ancestor(dinosaurs).name) We can only find a common ancestor between two nodes that lie in the same tree. If we try to find the common evolutionary ancestor between primates and an Alien species that has no relationship to Earth's evolutionary tree, an error will be raised. .. jupyter-execute:: :raises: alien = xr.DataTree(name="Xenomorph") primates.find_common_ancestor(alien) .. 
_navigating trees: Navigating Trees ---------------- There are various ways to access the different nodes in a tree. Properties ~~~~~~~~~~ We can navigate trees using the :py:class:`~xarray.DataTree.parent` and :py:class:`~xarray.DataTree.children` properties of each node, for example: .. jupyter-execute:: lisa.parent.children["Bart"].name but there are also more convenient ways to access nodes. Dictionary-like interface ~~~~~~~~~~~~~~~~~~~~~~~~~ Children are stored on each node as a key-value mapping from name to child node. They can be accessed and altered via the :py:class:`~xarray.DataTree.__getitem__` and :py:class:`~xarray.DataTree.__setitem__` syntax. In general :py:class:`~xarray.DataTree.DataTree` objects support almost the entire set of dict-like methods, including :py:meth:`~xarray.DataTree.keys`, :py:class:`~xarray.DataTree.values`, :py:class:`~xarray.DataTree.items`, :py:meth:`~xarray.DataTree.__delitem__` and :py:meth:`~xarray.DataTree.update`. .. jupyter-execute:: print(vertebrates["Bony Skeleton"]["Ray-finned Fish"]) Note that the dict-like interface combines access to child :py:class:`~xarray.DataTree` nodes and stored :py:class:`~xarray.DataArrays`, so if we have a node that contains both children and data, calling :py:meth:`~xarray.DataTree.keys` will list both names of child nodes and names of data variables: .. jupyter-execute:: dt = xr.DataTree( dataset=xr.Dataset({"foo": 0, "bar": 1}), children={"a": xr.DataTree(), "b": xr.DataTree()}, ) print(dt) list(dt.keys()) This also means that the names of variables and of child nodes must be different to one another. Attribute-like access ~~~~~~~~~~~~~~~~~~~~~ You can also select both variables and child nodes through dot indexing .. jupyter-execute:: print(dt.foo) print(dt.a) .. _filesystem paths: Filesystem-like Paths ~~~~~~~~~~~~~~~~~~~~~ Hierarchical trees can be thought of as analogous to file systems. Each node is like a directory, and each directory can contain both more sub-directories and data. .. note:: Future development will allow you to make the filesystem analogy concrete by using :py:func:`~xarray.DataTree.open_mfdatatree` or :py:func:`~xarray.DataTree.save_mfdatatree`. (`See related issue in GitHub `_) Datatree objects support a syntax inspired by unix-like filesystems, where the "path" to a node is specified by the keys of each intermediate node in sequence, separated by forward slashes. This is an extension of the conventional dictionary ``__getitem__`` syntax to allow navigation across multiple levels of the tree. Like with filepaths, paths within the tree can either be relative to the current node, e.g. .. jupyter-execute:: print(abe["Homer/Bart"].name) print(abe["./Homer/Bart"].name) # alternative syntax or relative to the root node. A path specified from the root (as opposed to being specified relative to an arbitrary node in the tree) is sometimes also referred to as a `"fully qualified name" `_, or as an "absolute path". The root node is referred to by ``"/"``, so the path from the root node to its grand-child would be ``"/child/grandchild"``, e.g. .. jupyter-execute:: # access lisa's sibling by a relative path. print(lisa["../Bart"]) # or from absolute path print(lisa["/Homer/Bart"]) Relative paths between nodes also support the ``"../"`` syntax to mean the parent of the current node. We can use this with ``__setitem__`` to add a missing entry to our evolutionary tree, but add it relative to a more familiar node of interest: .. 
jupyter-execute:: primates["../../Two Fenestrae/Crocodiles"] = xr.DataTree() print(vertebrates) Given two nodes in a tree, we can also find their relative path: .. jupyter-execute:: bart.relative_to(lisa) You can use this filepath feature to build a nested tree from a dictionary of filesystem-like paths and corresponding :py:class:`~xarray.Dataset` objects in a single step. If we have a dictionary where each key is a valid path, and each value is either valid data or ``None``, we can construct a complex tree quickly using the alternative constructor :py:meth:`~xarray.DataTree.from_dict()`: .. jupyter-execute:: d = { "/": xr.Dataset({"foo": "orange"}), "/a": xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])}), "/a/b": xr.Dataset({"zed": np.nan}), "a/c/d": None, } dt = xr.DataTree.from_dict(d) print(dt) .. note:: Notice that using the path-like syntax will also create any intermediate empty nodes necessary to reach the end of the specified path (i.e. the node labelled ``"/a/c"`` in this case.) This is to help avoid lots of redundant entries when creating deeply-nested trees using :py:meth:`xarray.DataTree.from_dict`. .. _iterating over trees: Iterating over trees ~~~~~~~~~~~~~~~~~~~~ You can iterate over every node in a tree using the :py:class:`~xarray.DataTree.subtree` property. This returns an iterable of nodes, which yields them in depth-first order. .. jupyter-execute:: for node in vertebrates.subtree: print(node.path) Similarly, :py:class:`~xarray.DataTree.subtree_with_keys` returns an iterable of relative paths and corresponding nodes. A very useful pattern is to iterate over :py:class:`~xarray.DataTree.subtree_with_keys` to manipulate nodes however you wish, then rebuild a new tree using :py:meth:`xarray.DataTree.from_dict()`. For example, we could keep only the nodes containing data by looping over all nodes, checking if they contain any data using :py:class:`~xarray.DataTree.has_data`, then rebuilding a new tree using only the paths of those nodes: .. jupyter-execute:: non_empty_nodes = { path: node.dataset for path, node in dt.subtree_with_keys if node.has_data } print(xr.DataTree.from_dict(non_empty_nodes)) You can see this tree is similar to the ``dt`` object above, except that it is missing the empty nodes ``a/c`` and ``a/c/d``. (If you want to keep the name of the root node, you will need to add the ``name`` kwarg to :py:class:`~xarray.DataTree.from_dict`, i.e. ``DataTree.from_dict(non_empty_nodes, name=dt.name)``.) .. _manipulating trees: Manipulating Trees ------------------ Subsetting Tree Nodes ~~~~~~~~~~~~~~~~~~~~~ We can subset our tree to select only nodes of interest in various ways. As on a real filesystem, matching nodes by common patterns in their paths is often useful. We can use :py:meth:`xarray.DataTree.match` for this: .. jupyter-execute:: dt = xr.DataTree.from_dict( { "/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None, } ) result = dt.match("*/B") print(result) We can also subset trees by the contents of the nodes. :py:meth:`xarray.DataTree.filter` retains only the nodes of a tree that meet a certain condition. For example, we could recreate the Simpson's family tree with the ages of each individual, then filter for only the adults: First let's recreate the tree but with an ``age`` data variable in every node: .. 
jupyter-execute:: simpsons = xr.DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Maggie": xr.Dataset({"age": 1}), }, name="Abe", ) print(simpsons) Now let's filter out the minors: .. jupyter-execute:: print(simpsons.filter(lambda node: node["age"] > 18)) The result is a new tree, containing only the nodes matching the condition. (Yes, under the hood :py:meth:`~xarray.DataTree.filter` is just syntactic sugar for the pattern we showed you in :ref:`iterating over trees` !) If you want to filter out empty nodes you can use :py:meth:`~xarray.DataTree.prune`. .. _Tree Contents: Tree Contents ------------- Hollow Trees ~~~~~~~~~~~~ A concept that can sometimes be useful is that of a "Hollow Tree", which means a tree with data stored only at the leaf nodes. This is useful because certain useful tree manipulation operations only make sense for hollow trees. You can check if a tree is a hollow tree by using the :py:class:`~xarray.DataTree.is_hollow` property. We can see that the Simpson's family is not hollow because the data variable ``"age"`` is present at some nodes which have children (i.e. Abe and Homer). .. jupyter-execute:: simpsons.is_hollow .. _tree computation: Computation ----------- :py:class:`~xarray.DataTree` objects are also useful for performing computations, not just for organizing data. Operations and Methods on Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To show how applying operations across a whole tree at once can be useful, let's first create a example scientific dataset. .. jupyter-execute:: def time_stamps(n_samples, T): """Create an array of evenly-spaced time stamps""" return xr.DataArray( data=np.linspace(0, 2 * np.pi * T, n_samples), dims=["time"] ) def signal_generator(t, f, A, phase): """Generate an example electrical-like waveform""" return A * np.sin(f * t.data + phase) time_stamps1 = time_stamps(n_samples=15, T=1.5) time_stamps2 = time_stamps(n_samples=10, T=1.0) voltages = xr.DataTree.from_dict( { "/oscilloscope1": xr.Dataset( { "potential": ( "time", signal_generator(time_stamps1, f=2, A=1.2, phase=0.5), ), "current": ( "time", signal_generator(time_stamps1, f=2, A=1.2, phase=1), ), }, coords={"time": time_stamps1}, ), "/oscilloscope2": xr.Dataset( { "potential": ( "time", signal_generator(time_stamps2, f=1.6, A=1.6, phase=0.2), ), "current": ( "time", signal_generator(time_stamps2, f=1.6, A=1.6, phase=0.7), ), }, coords={"time": time_stamps2}, ), } ) print(voltages) Most xarray computation methods also exist as methods on datatree objects, so you can for example take the mean value of these two timeseries at once: .. jupyter-execute:: print(voltages.mean(dim="time")) This works by mapping the standard :py:meth:`xarray.Dataset.mean()` method over the dataset stored in each node of the tree one-by-one. The arguments passed to the method are used for every node, so the values of the arguments you pass might be valid for one node and invalid for another .. jupyter-execute:: :raises: voltages.isel(time=12) Notice that the error raised helpfully indicates which node of the tree the operation failed on. Arithmetic Methods on Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Arithmetic methods are also implemented, so you can e.g. add a scalar to every dataset in the tree at once. For example, we can advance the timeline of the Simpsons by a decade just by .. 
jupyter-execute:: print(simpsons + 10) See that the same change (fast-forwarding by adding 10 years to the age of each character) has been applied to every node. Mapping Custom Functions Over Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can map custom computation over each node in a tree using :py:meth:`xarray.DataTree.map_over_datasets`. You can map any function, so long as it takes :py:class:`xarray.Dataset` objects as one (or more) of the input arguments, and returns one (or more) xarray datasets. .. note:: Functions passed to :py:func:`~xarray.DataTree.map_over_datasets` cannot alter nodes in-place. Instead they must return new :py:class:`xarray.Dataset` objects. For example, we can define a function to calculate the Root Mean Square of a timeseries .. jupyter-execute:: def rms(signal): return np.sqrt(np.mean(signal**2)) Then calculate the RMS value of these signals: .. jupyter-execute:: print(voltages.map_over_datasets(rms)) .. _multiple trees: We can also use :py:func:`~xarray.map_over_datasets` to apply a function over the data in multiple trees, by passing the trees as positional arguments. Operating on Multiple Trees --------------------------- The examples so far have involved mapping functions or methods over the nodes of a single tree, but we can generalize this to mapping functions over multiple trees at once. Iterating Over Multiple Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To iterate over the corresponding nodes in multiple trees, use :py:func:`~xarray.group_subtrees` instead of :py:class:`~xarray.DataTree.subtree_with_keys`. This combines well with :py:meth:`xarray.DataTree.from_dict()` to build a new tree: .. jupyter-execute:: dt1 = xr.DataTree.from_dict({"a": xr.Dataset({"x": 1}), "b": xr.Dataset({"x": 2})}) dt2 = xr.DataTree.from_dict( {"a": xr.Dataset({"x": 10}), "b": xr.Dataset({"x": 20})} ) result = {} for path, (node1, node2) in xr.group_subtrees(dt1, dt2): result[path] = node1.dataset + node2.dataset dt3 = xr.DataTree.from_dict(result) print(dt3) Alternatively, you apply a function directly to paired datasets at every node using :py:func:`xarray.map_over_datasets`: .. jupyter-execute:: dt3 = xr.map_over_datasets(lambda x, y: x + y, dt1, dt2) print(dt3) Comparing Trees for Isomorphism ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For it to make sense to map a single non-unary function over the nodes of multiple trees at once, each tree needs to have the same structure. Specifically two trees can only be considered similar, or "isomorphic", if the full paths to all of their descendent nodes are the same. Applying :py:func:`~xarray.group_subtrees` to trees with different structures raises :py:class:`~xarray.TreeIsomorphismError`: .. jupyter-execute:: :raises: tree = xr.DataTree.from_dict({"a": None, "a/b": None, "a/c": None}) simple_tree = xr.DataTree.from_dict({"a": None}) for _ in xr.group_subtrees(tree, simple_tree): ... We can explicitly also check if any two trees are isomorphic using the :py:meth:`~xarray.DataTree.isomorphic` method: .. jupyter-execute:: tree.isomorphic(simple_tree) Corresponding tree nodes do not need to have the same data in order to be considered isomorphic: .. jupyter-execute:: tree_with_data = xr.DataTree.from_dict({"a": xr.Dataset({"foo": 1})}) simple_tree.isomorphic(tree_with_data) They also do not need to define child nodes in the same order: .. 
jupyter-execute:: reordered_tree = xr.DataTree.from_dict({"a": None, "a/c": None, "a/b": None}) tree.isomorphic(reordered_tree) Arithmetic Between Multiple Trees ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Arithmetic operations like multiplication are binary operations, so as long as we have two isomorphic trees, we can do arithmetic between them. .. jupyter-execute:: currents = xr.DataTree.from_dict( { "/oscilloscope1": xr.Dataset( { "current": ( "time", signal_generator(time_stamps1, f=2, A=1.2, phase=1), ), }, coords={"time": time_stamps1}, ), "/oscilloscope2": xr.Dataset( { "current": ( "time", signal_generator(time_stamps2, f=1.6, A=1.6, phase=0.7), ), }, coords={"time": time_stamps2}, ), } ) print(currents) .. jupyter-execute:: currents.isomorphic(voltages) We could use this feature to quickly calculate the electrical power in our signal, P=IV. .. jupyter-execute:: power = currents * voltages print(power) .. _hierarchical-data.alignment-and-coordinate-inheritance: Alignment and Coordinate Inheritance ------------------------------------ .. _data-alignment: Data Alignment ~~~~~~~~~~~~~~ The data in different datatree nodes are not totally independent. In particular dimensions (and indexes) in child nodes must be exactly aligned with those in their parent nodes. Exact alignment means that shared dimensions must be the same length, and indexes along those dimensions must be equal. .. note:: If you were a previous user of the prototype `xarray-contrib/datatree `_ package, this is different from what you're used to! In that package the data model was that the data stored in each node actually was completely unrelated. The data model is now slightly stricter. This allows us to provide features like :ref:`coordinate-inheritance`. To demonstrate, let's first generate some example datasets which are not aligned with one another: .. jupyter-execute:: # (drop the attributes just to make the printed representation shorter) ds = xr.tutorial.open_dataset("air_temperature").drop_attrs() ds_daily = ds.resample(time="D").mean("time") ds_weekly = ds.resample(time="W").mean("time") ds_monthly = ds.resample(time="ME").mean("time") These datasets have different lengths along the ``time`` dimension, and are therefore not aligned along that dimension. .. jupyter-execute:: print(ds_daily.sizes) print(ds_weekly.sizes) print(ds_monthly.sizes) We cannot store these non-alignable variables on a single :py:class:`~xarray.Dataset` object, because they do not exactly align: .. jupyter-execute:: :raises: xr.align(ds_daily, ds_weekly, ds_monthly, join="exact") But we :ref:`previously said ` that multi-resolution data is a good use case for :py:class:`~xarray.DataTree`, so surely we should be able to store these in a single :py:class:`~xarray.DataTree`? If we first try to create a :py:class:`~xarray.DataTree` with these different-length time dimensions present in both parents and children, we will still get an alignment error: .. jupyter-execute:: :raises: xr.DataTree.from_dict({"daily": ds_daily, "daily/weekly": ds_weekly}) This is because DataTree checks that data in child nodes align exactly with their parents. .. note:: This requirement of aligned dimensions is similar to netCDF's concept of `inherited dimensions `_, as in netCDF-4 files dimensions are `visible to all child groups `_. This alignment check is performed up through the tree, all the way to the root, and so is therefore equivalent to requiring that this :py:func:`~xarray.align` command succeeds: .. 
code:: python xr.align(child.dataset, *(parent.dataset for parent in child.parents), join="exact") To represent our unalignable data in a single :py:class:`~xarray.DataTree`, we must instead place all variables which are a function of these different-length dimensions into nodes that are not direct descendents of one another, e.g. organize them as siblings. .. jupyter-execute:: dt = xr.DataTree.from_dict( {"daily": ds_daily, "weekly": ds_weekly, "monthly": ds_monthly} ) print(dt) Now we have a valid :py:class:`~xarray.DataTree` structure which contains all the data at each different time frequency, stored in a separate group. This is a useful way to organise our data because we can still operate on all the groups at once. For example we can extract all three timeseries at a specific lat-lon location: .. jupyter-execute:: dt_sel = dt.sel(lat=75, lon=300) print(dt_sel) or compute the standard deviation of each timeseries to find out how it varies with sampling frequency: .. jupyter-execute:: dt_std = dt.std(dim="time") print(dt_std) .. _coordinate-inheritance: Coordinate Inheritance ~~~~~~~~~~~~~~~~~~~~~~ Notice that in the trees we constructed above there is some redundancy - the ``lat`` and ``lon`` variables appear in each sibling group, but are identical across the groups. .. jupyter-execute:: dt We can use "Coordinate Inheritance" to define them only once in a parent group and remove this redundancy, whilst still being able to access those coordinate variables from the child groups. .. note:: This is also a new feature relative to the prototype `xarray-contrib/datatree `_ package. Let's instead place only the time-dependent variables in the child groups, and put the non-time-dependent ``lat`` and ``lon`` variables in the parent (root) group: .. jupyter-execute:: dt = xr.DataTree.from_dict( { "/": ds.drop_dims("time"), "daily": ds_daily.drop_vars(["lat", "lon"]), "weekly": ds_weekly.drop_vars(["lat", "lon"]), "monthly": ds_monthly.drop_vars(["lat", "lon"]), } ) dt This is preferred to the previous representation because it now makes it clear that all of these datasets share common spatial grid coordinates. Defining the common coordinates just once also ensures that the spatial coordinates for each group cannot become out of sync with one another during operations. We can still access the coordinates defined in the parent groups from any of the child groups as if they were actually present on the child groups: .. jupyter-execute:: dt.daily.coords .. jupyter-execute:: dt["daily/lat"] As we can still access them, we say that the ``lat`` and ``lon`` coordinates in the child groups have been "inherited" from their common parent group. If we print just one of the child nodes, it will still display inherited coordinates, but explicitly mark them as such: .. jupyter-execute:: dt["/daily"] This helps to differentiate which variables are defined on the datatree node that you are currently looking at, and which were defined somewhere above it. We can also still perform all the same operations on the whole tree: .. jupyter-execute:: dt.sel(lat=[75], lon=[300]) .. jupyter-execute:: dt.std(dim="time") xarray-2025.09.0/doc/user-guide/index.rst000066400000000000000000000015441505620616400200270ustar00rootroot00000000000000########### User Guide ########### In this user guide, you will find detailed descriptions and examples that describe many common tasks that you can accomplish with Xarray. .. toctree:: :maxdepth: 2 :caption: Data model terminology data-structures hierarchical-data dask .. 
toctree:: :maxdepth: 2 :caption: Core operations indexing combining reshaping computation groupby interpolation .. toctree:: :maxdepth: 2 :caption: I/O io complex-numbers .. toctree:: :maxdepth: 2 :caption: Visualization plotting .. toctree:: :maxdepth: 2 :caption: Interoperability pandas duckarrays ecosystem .. toctree:: :maxdepth: 2 :caption: Domain-specific workflows time-series weather-climate .. toctree:: :maxdepth: 2 :caption: Options and Testing options testing xarray-2025.09.0/doc/user-guide/indexing.rst000066400000000000000000000670171505620616400205340ustar00rootroot00000000000000.. _indexing: Indexing and selecting data =========================== .. jupyter-execute:: :hide-code: :hide-output: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) %xmode minimal Xarray offers extremely flexible indexing routines that combine the best features of NumPy and pandas for data selection. The most basic way to access elements of a :py:class:`~xarray.DataArray` object is to use Python's ``[]`` syntax, such as ``array[i, j]``, where ``i`` and ``j`` are both integers. As xarray objects can store coordinates corresponding to each dimension of an array, label-based indexing similar to ``pandas.DataFrame.loc`` is also possible. In label-based indexing, the element position ``i`` is automatically looked-up from the coordinate values. Dimensions of xarray objects have names, so you can also lookup the dimensions by name, instead of remembering their positional order. Quick overview -------------- In total, xarray supports four different kinds of indexing, as described below and summarized in this table: .. |br| raw:: html
+------------------+--------------+---------------------------------+--------------------------------+ | Dimension lookup | Index lookup | ``DataArray`` syntax | ``Dataset`` syntax | +==================+==============+=================================+================================+ | Positional | By integer | ``da[:, 0]`` | *not available* | +------------------+--------------+---------------------------------+--------------------------------+ | Positional | By label | ``da.loc[:, 'IA']`` | *not available* | +------------------+--------------+---------------------------------+--------------------------------+ | By name | By integer | ``da.isel(space=0)`` or |br| | ``ds.isel(space=0)`` or |br| | | | | ``da[dict(space=0)]`` | ``ds[dict(space=0)]`` | +------------------+--------------+---------------------------------+--------------------------------+ | By name | By label | ``da.sel(space='IA')`` or |br| | ``ds.sel(space='IA')`` or |br| | | | | ``da.loc[dict(space='IA')]`` | ``ds.loc[dict(space='IA')]`` | +------------------+--------------+---------------------------------+--------------------------------+ More advanced indexing is also possible for all the methods by supplying :py:class:`~xarray.DataArray` objects as indexer. See :ref:`vectorized_indexing` for the details. Positional indexing ------------------- Indexing a :py:class:`~xarray.DataArray` directly works (mostly) just like it does for numpy arrays, except that the returned object is always another DataArray: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) da[:2] .. jupyter-execute:: da[0, 0] .. jupyter-execute:: da[:, [2, 1]] Attributes are persisted in all indexing operations. .. warning:: Positional indexing deviates from the NumPy when indexing with multiple arrays like ``da[[0, 1], [0, 1]]``, as described in :ref:`vectorized_indexing`. Xarray also supports label-based indexing, just like pandas. Because we use a :py:class:`pandas.Index` under the hood, label based indexing is very fast. To do label based indexing, use the :py:attr:`~xarray.DataArray.loc` attribute: .. jupyter-execute:: da.loc["2000-01-01":"2000-01-02", "IA"] In this example, the selected is a subpart of the array in the range '2000-01-01':'2000-01-02' along the first coordinate ``time`` and with 'IA' value from the second coordinate ``space``. You can perform any of the `label indexing operations supported by pandas`__, including indexing with individual, slices and lists/arrays of labels, as well as indexing with boolean arrays. Like pandas, label based indexing in xarray is *inclusive* of both the start and stop bounds. __ https://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-label Setting values with label based indexing is also supported: .. jupyter-execute:: da.loc["2000-01-01", ["IL", "IN"]] = -10 da Indexing with dimension names ----------------------------- With the dimension names, we do not have to rely on dimension order and can use them explicitly to slice data. There are two ways to do this: 1. Use the :py:meth:`~xarray.DataArray.sel` and :py:meth:`~xarray.DataArray.isel` convenience methods: .. jupyter-execute:: # index by integer array indices da.isel(space=0, time=slice(None, 2)) .. jupyter-execute:: # index by dimension coordinate labels da.sel(time=slice("2000-01-01", "2000-01-02")) 2. Use a dictionary as the argument for array positional or label based array indexing: .. 
jupyter-execute:: # index by integer array indices da[dict(space=0, time=slice(None, 2))] .. jupyter-execute:: # index by dimension coordinate labels da.loc[dict(time=slice("2000-01-01", "2000-01-02"))] The arguments to these methods can be any objects that could index the array along the dimension given by the keyword, e.g., labels for an individual value, :py:class:`Python slice` objects or 1-dimensional arrays. .. note:: We would love to be able to do indexing with labeled dimension names inside brackets, but unfortunately, `Python does not yet support indexing with keyword arguments`__ like ``da[space=0]`` __ https://legacy.python.org/dev/peps/pep-0472/ .. _nearest neighbor lookups: Nearest neighbor lookups ------------------------ The label based selection methods :py:meth:`~xarray.Dataset.sel`, :py:meth:`~xarray.Dataset.reindex` and :py:meth:`~xarray.Dataset.reindex_like` all support ``method`` and ``tolerance`` keyword argument. The method parameter allows for enabling nearest neighbor (inexact) lookups by use of the methods ``'pad'``, ``'backfill'`` or ``'nearest'``: .. jupyter-execute:: da = xr.DataArray([1, 2, 3], [("x", [0, 1, 2])]) da.sel(x=[1.1, 1.9], method="nearest") .. jupyter-execute:: da.sel(x=0.1, method="backfill") .. jupyter-execute:: da.reindex(x=[0.5, 1, 1.5, 2, 2.5], method="pad") Tolerance limits the maximum distance for valid matches with an inexact lookup: .. jupyter-execute:: da.reindex(x=[1.1, 1.5], method="nearest", tolerance=0.2) The method parameter is not yet supported if any of the arguments to ``.sel()`` is a ``slice`` object: .. jupyter-execute:: :raises: da.sel(x=slice(1, 3), method="nearest") However, you don't need to use ``method`` to do inexact slicing. Slicing already returns all values inside the range (inclusive), as long as the index labels are monotonic increasing: .. jupyter-execute:: da.sel(x=slice(0.9, 3.1)) Indexing axes with monotonic decreasing labels also works, as long as the ``slice`` or ``.loc`` arguments are also decreasing: .. jupyter-execute:: reversed_da = da[::-1] reversed_da.loc[3.1:0.9] .. note:: If you want to interpolate along coordinates rather than looking up the nearest neighbors, use :py:meth:`~xarray.Dataset.interp` and :py:meth:`~xarray.Dataset.interp_like`. See :ref:`interpolation ` for the details. Dataset indexing ---------------- We can also use these methods to index all variables in a dataset simultaneously, returning a new dataset: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) ds = da.to_dataset(name="foo") ds.isel(space=[0], time=[0]) .. jupyter-execute:: ds.sel(time="2000-01-01") Positional indexing on a dataset is not supported because the ordering of dimensions in a dataset is somewhat ambiguous (it can vary between different arrays). However, you can do normal indexing with dimension names: .. jupyter-execute:: ds[dict(space=[0], time=[0])] .. jupyter-execute:: ds.loc[dict(time="2000-01-01")] Dropping labels and dimensions ------------------------------ The :py:meth:`~xarray.Dataset.drop_sel` method returns a new object with the listed index labels along a dimension dropped: .. jupyter-execute:: ds.drop_sel(space=["IN", "IL"]) ``drop_sel`` is both a ``Dataset`` and ``DataArray`` method. Use :py:meth:`~xarray.Dataset.drop_dims` to drop a full dimension from a Dataset. Any variables with these dimensions are also dropped: .. jupyter-execute:: ds.drop_dims("time") .. 
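Two closely related methods are worth knowing about here: :py:meth:`~xarray.Dataset.drop_isel`, which drops entries by integer position instead of by label, and :py:meth:`~xarray.Dataset.drop_vars`, which removes whole variables by name. As a brief sketch reusing the dataset from above (the positions and names chosen here are only illustrative):

.. jupyter-execute::

    # drop the first entry along ``space`` by position rather than by label
    ds.drop_isel(space=[0])

.. jupyter-execute::

    # remove the data variable ``foo`` entirely
    ds.drop_vars("foo")

..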
_masking with where: Masking with ``where`` ---------------------- Indexing methods on xarray objects generally return a subset of the original data. However, it is sometimes useful to select an object with the same shape as the original data, but with some elements masked. To do this type of selection in xarray, use :py:meth:`~xarray.DataArray.where`: .. jupyter-execute:: da = xr.DataArray(np.arange(16).reshape(4, 4), dims=["x", "y"]) da.where(da.x + da.y < 4) This is particularly useful for ragged indexing of multi-dimensional data, e.g., to apply a 2D mask to an image. Note that ``where`` follows all the usual xarray broadcasting and alignment rules for binary operations (e.g., ``+``) between the object being indexed and the condition, as described in :ref:`compute`: .. jupyter-execute:: da.where(da.y < 2) By default ``where`` maintains the original size of the data. For cases where the selected data size is much smaller than the original data, use of the option ``drop=True`` clips coordinate elements that are fully masked: .. jupyter-execute:: da.where(da.y < 2, drop=True) .. _selecting values with isin: Selecting values with ``isin`` ------------------------------ To check whether elements of an xarray object contain a single object, you can compare with the equality operator ``==`` (e.g., ``arr == 3``). To check multiple values, use :py:meth:`~xarray.DataArray.isin`: .. jupyter-execute:: da = xr.DataArray([1, 2, 3, 4, 5], dims=["x"]) da.isin([2, 4]) :py:meth:`~xarray.DataArray.isin` works particularly well with :py:meth:`~xarray.DataArray.where` to support indexing by arrays that are not already labels of an array: .. jupyter-execute:: lookup = xr.DataArray([-1, -2, -3, -4, -5], dims=["x"]) da.where(lookup.isin([-2, -4]), drop=True) However, some caution is in order: when done repeatedly, this type of indexing is significantly slower than using :py:meth:`~xarray.DataArray.sel`. .. _vectorized_indexing: Vectorized Indexing ------------------- Like numpy and pandas, xarray supports indexing many array elements at once in a vectorized manner. If you only provide integers, slices, or unlabeled arrays (array without dimension names, such as ``np.ndarray``, ``list``, but not :py:meth:`~xarray.DataArray` or :py:meth:`~xarray.Variable`) indexing can be understood as orthogonally. Each indexer component selects independently along the corresponding dimension, similar to how vector indexing works in Fortran or MATLAB, or after using the :py:func:`numpy.ix_` helper: .. jupyter-execute:: da = xr.DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]}, ) da .. jupyter-execute:: da[[0, 2, 2], [1, 3]] For more flexibility, you can supply :py:meth:`~xarray.DataArray` objects as indexers. Dimensions on resultant arrays are given by the ordered union of the indexers' dimensions: .. jupyter-execute:: ind_x = xr.DataArray([0, 1], dims=["x"]) ind_y = xr.DataArray([0, 1], dims=["y"]) da[ind_x, ind_y] # orthogonal indexing Slices or sequences/arrays without named-dimensions are treated as if they have the same dimension which is indexed along: .. jupyter-execute:: # Because [0, 1] is used to index along dimension 'x', # it is assumed to have dimension 'x' da[[0, 1], ind_x] Furthermore, you can use multi-dimensional :py:meth:`~xarray.DataArray` as indexers, where the resultant array dimension is also determined by indexers' dimension: .. 
jupyter-execute:: ind = xr.DataArray([[0, 1], [0, 1]], dims=["a", "b"]) da[ind] Similar to how `NumPy's advanced indexing`_ works, vectorized indexing for xarray is based on our :ref:`broadcasting rules `. See :ref:`indexing.rules` for the complete specification. .. _NumPy's advanced indexing: https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing Vectorized indexing also works with ``isel``, ``loc``, and ``sel``: .. jupyter-execute:: ind = xr.DataArray([[0, 1], [0, 1]], dims=["a", "b"]) da.isel(y=ind) # same as da[:, ind] .. jupyter-execute:: ind = xr.DataArray([["a", "b"], ["b", "a"]], dims=["a", "b"]) da.loc[:, ind] # same as da.sel(y=ind) These methods may also be applied to ``Dataset`` objects .. jupyter-execute:: ds = da.to_dataset(name="bar") ds.isel(x=xr.DataArray([0, 1, 2], dims=["points"])) Vectorized indexing may be used to extract information from the nearest grid cells of interest, for example, the nearest climate model grid cells to a collection specified weather station latitudes and longitudes. To trigger vectorized indexing behavior you will need to provide the selection dimensions with a new shared output dimension name. In the example below, the selections of the closest latitude and longitude are renamed to an output dimension named "points": .. jupyter-execute:: ds = xr.tutorial.open_dataset("air_temperature") # Define target latitude and longitude (where weather stations might be) target_lon = xr.DataArray([200, 201, 202, 205], dims="points") target_lat = xr.DataArray([31, 41, 42, 42], dims="points") # Retrieve data at the grid cells nearest to the target latitudes and longitudes da = ds["air"].sel(lon=target_lon, lat=target_lat, method="nearest") da .. tip:: If you are lazily loading your data from disk, not every form of vectorized indexing is supported (or if supported, may not be supported efficiently). You may find increased performance by loading your data into memory first, e.g., with :py:meth:`~xarray.Dataset.load`. .. note:: If an indexer is a :py:meth:`~xarray.DataArray`, its coordinates should not conflict with the selected subpart of the target array (except for the explicitly indexed dimensions with ``.loc``/``.sel``). Otherwise, ``IndexError`` will be raised. .. _assigning_values: Assigning values with indexing ------------------------------ To select and assign values to a portion of a :py:meth:`~xarray.DataArray` you can use indexing with ``.loc`` : .. jupyter-execute:: ds = xr.tutorial.open_dataset("air_temperature") # add an empty 2D dataarray ds["empty"] = xr.full_like(ds.air.mean("time"), fill_value=0) # modify one grid point using loc() ds["empty"].loc[dict(lon=260, lat=30)] = 100 # modify a 2D region using loc() lc = ds.coords["lon"] la = ds.coords["lat"] ds["empty"].loc[ dict(lon=lc[(lc > 220) & (lc < 260)], lat=la[(la > 20) & (la < 60)]) ] = 100 or :py:meth:`~xarray.where`: .. jupyter-execute:: # modify one grid point using xr.where() ds["empty"] = xr.where( (ds.coords["lat"] == 20) & (ds.coords["lon"] == 260), 100, ds["empty"] ) # or modify a 2D region using xr.where() mask = ( (ds.coords["lat"] > 20) & (ds.coords["lat"] < 60) & (ds.coords["lon"] > 220) & (ds.coords["lon"] < 260) ) ds["empty"] = xr.where(mask, 100, ds["empty"]) Vectorized indexing can also be used to assign values to xarray object. .. jupyter-execute:: da = xr.DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]}, ) da .. jupyter-execute:: da[0] = -1 # assignment with broadcasting da .. 
jupyter-execute:: ind_x = xr.DataArray([0, 1], dims=["x"]) ind_y = xr.DataArray([0, 1], dims=["y"]) da[ind_x, ind_y] = -2 # assign -2 to (ix, iy) = (0, 0) and (1, 1) da .. jupyter-execute:: da[ind_x, ind_y] += 100 # increment is also possible da Like ``numpy.ndarray``, value assignment sometimes works differently from what one may expect. .. jupyter-execute:: da = xr.DataArray([0, 1, 2, 3], dims=["x"]) ind = xr.DataArray([0, 0, 0], dims=["x"]) da[ind] -= 1 da Where the 0th element will be subtracted 1 only once. This is because ``v[0] = v[0] - 1`` is called three times, rather than ``v[0] = v[0] - 1 - 1 - 1``. See `Assigning values to indexed arrays`__ for the details. __ https://numpy.org/doc/stable/user/basics.indexing.html#assigning-values-to-indexed-arrays .. note:: Dask array does not support value assignment (see :ref:`dask` for the details). .. note:: Coordinates in both the left- and right-hand-side arrays should not conflict with each other. Otherwise, ``IndexError`` will be raised. .. warning:: Do not try to assign values when using any of the indexing methods ``isel`` or ``sel``:: # DO NOT do this da.isel(space=0) = 0 Instead, values can be assigned using dictionary-based indexing:: da[dict(space=0)] = 0 Assigning values with the chained indexing using ``.sel`` or ``.isel`` fails silently. .. jupyter-execute:: da = xr.DataArray([0, 1, 2, 3], dims=["x"]) # DO NOT do this da.isel(x=[0, 1, 2])[1] = -1 da You can also assign values to all variables of a :py:class:`Dataset` at once: .. jupyter-execute:: :stderr: ds_org = xr.tutorial.open_dataset("eraint_uvz").isel( latitude=slice(56, 59), longitude=slice(255, 258), level=0 ) # set all values to 0 ds = xr.zeros_like(ds_org) ds .. jupyter-execute:: # by integer ds[dict(latitude=2, longitude=2)] = 1 ds["u"] .. jupyter-execute:: ds["v"] .. jupyter-execute:: # by label ds.loc[dict(latitude=47.25, longitude=[11.25, 12])] = 100 ds["u"] .. jupyter-execute:: # dataset as new values new_dat = ds_org.loc[dict(latitude=48, longitude=[11.25, 12])] new_dat .. jupyter-execute:: ds.loc[dict(latitude=47.25, longitude=[11.25, 12])] = new_dat ds["u"] The dimensions can differ between the variables in the dataset, but all variables need to have at least the dimensions specified in the indexer dictionary. The new values must be either a scalar, a :py:class:`DataArray` or a :py:class:`Dataset` itself that contains all variables that also appear in the dataset to be modified. .. _more_advanced_indexing: More advanced indexing ----------------------- The use of :py:meth:`~xarray.DataArray` objects as indexers enables very flexible indexing. The following is an example of the pointwise indexing: .. jupyter-execute:: da = xr.DataArray(np.arange(56).reshape((7, 8)), dims=["x", "y"]) da .. jupyter-execute:: da.isel(x=xr.DataArray([0, 1, 6], dims="z"), y=xr.DataArray([0, 1, 0], dims="z")) where three elements at ``(ix, iy) = ((0, 0), (1, 1), (6, 0))`` are selected and mapped along a new dimension ``z``. If you want to add a coordinate to the new dimension ``z``, you can supply a :py:class:`~xarray.DataArray` with a coordinate, .. jupyter-execute:: da.isel( x=xr.DataArray([0, 1, 6], dims="z", coords={"z": ["a", "b", "c"]}), y=xr.DataArray([0, 1, 0], dims="z"), ) Analogously, label-based pointwise-indexing is also possible by the ``.sel`` method: .. 
jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) times = xr.DataArray( pd.to_datetime(["2000-01-03", "2000-01-02", "2000-01-01"]), dims="new_time" ) da.sel(space=xr.DataArray(["IA", "IL", "IN"], dims=["new_time"]), time=times) .. _align and reindex: Align and reindex ----------------- Xarray's ``reindex``, ``reindex_like`` and ``align`` impose a ``DataArray`` or ``Dataset`` onto a new set of coordinates corresponding to dimensions. The original values are subset to the index labels still found in the new labels, and values corresponding to new labels not found in the original object are in-filled with ``NaN``. Xarray operations that combine multiple objects generally automatically align their arguments to share the same indexes. However, manual alignment can be useful for greater control and for increased performance. To reindex a particular dimension, use :py:meth:`~xarray.DataArray.reindex`: .. jupyter-execute:: da.reindex(space=["IA", "CA"]) The :py:meth:`~xarray.DataArray.reindex_like` method is a useful shortcut. To demonstrate, we will make a subset DataArray with new values: .. jupyter-execute:: foo = da.rename("foo") baz = (10 * da[:2, :2]).rename("baz") baz Reindexing ``foo`` with ``baz`` selects out the first two values along each dimension: .. jupyter-execute:: foo.reindex_like(baz) The opposite operation asks us to reindex to a larger shape, so we fill in the missing values with ``NaN``: .. jupyter-execute:: baz.reindex_like(foo) The :py:func:`~xarray.align` function lets us perform more flexible database-like ``'inner'``, ``'outer'``, ``'left'`` and ``'right'`` joins: .. jupyter-execute:: xr.align(foo, baz, join="inner") .. jupyter-execute:: xr.align(foo, baz, join="outer") Both ``reindex_like`` and ``align`` work interchangeably between :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects, and with any number of matching dimension names: .. jupyter-execute:: ds .. jupyter-execute:: ds.reindex_like(baz) .. jupyter-execute:: other = xr.DataArray(["a", "b", "c"], dims="other") # this is a no-op, because there are no shared dimension names ds.reindex_like(other) .. _indexing.missing_coordinates: Missing coordinate labels ------------------------- Coordinate labels for each dimension are optional (as of xarray v0.9). Label based indexing with ``.sel`` and ``.loc`` uses standard positional, integer-based indexing as a fallback for dimensions without a coordinate label: .. jupyter-execute:: da = xr.DataArray([1, 2, 3], dims="x") da.sel(x=[0, -1]) Alignment between xarray objects where one or both do not have coordinate labels succeeds only if all dimensions of the same name have the same length. Otherwise, it raises an informative error: .. jupyter-execute:: :raises: xr.align(da, da[:2]) Underlying Indexes ------------------ Xarray uses the :py:class:`pandas.Index` internally to perform indexing operations. If you need to access the underlying indexes, they are available through the :py:attr:`~xarray.DataArray.indexes` attribute. .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 3), [ ("time", pd.date_range("2000-01-01", periods=4)), ("space", ["IA", "IL", "IN"]), ], ) da .. jupyter-execute:: da.indexes .. jupyter-execute:: da.indexes["time"] Use :py:meth:`~xarray.DataArray.get_index` to get an index for a dimension, falling back to a default :py:class:`pandas.RangeIndex` if it has no coordinate labels: .. 
jupyter-execute:: da = xr.DataArray([1, 2, 3], dims="x") da .. jupyter-execute:: da.get_index("x") .. _copies_vs_views: Copies vs. Views ---------------- Whether array indexing returns a view or a copy of the underlying data depends on the nature of the labels. For positional (integer) indexing, xarray follows the same `rules`_ as NumPy: * Positional indexing with only integers and slices returns a view. * Positional indexing with arrays or lists returns a copy. The rules for label based indexing are more complex: * Label-based indexing with only slices returns a view. * Label-based indexing with arrays returns a copy. * Label-based indexing with scalars returns a view or a copy, depending upon if the corresponding positional indexer can be represented as an integer or a slice object. The exact rules are determined by pandas. Whether data is a copy or a view is more predictable in xarray than in pandas, so unlike pandas, xarray does not produce `SettingWithCopy warnings`_. However, you should still avoid assignment with chained indexing. Note that other operations (such as :py:meth:`~xarray.DataArray.values`) may also return views rather than copies. .. _SettingWithCopy warnings: https://pandas.pydata.org/pandas-docs/stable/indexing.html#returning-a-view-versus-a-copy .. _rules: https://numpy.org/doc/stable/user/basics.copies.html .. _multi-level indexing: Multi-level indexing -------------------- Just like pandas, advanced indexing on multi-level indexes is possible with ``loc`` and ``sel``. You can slice a multi-index by providing multiple indexers, i.e., a tuple of slices, labels, list of labels, or any selector allowed by pandas: .. jupyter-execute:: midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two")) mda = xr.DataArray(np.random.rand(6, 3), [("x", midx), ("y", range(3))]) mda .. jupyter-execute:: mda.sel(x=(list("ab"), [0])) You can also select multiple elements by providing a list of labels or tuples or a slice of tuples: .. jupyter-execute:: mda.sel(x=[("a", 0), ("b", 1)]) Additionally, xarray supports dictionaries: .. jupyter-execute:: mda.sel(x={"one": "a", "two": 0}) For convenience, ``sel`` also accepts multi-index levels directly as keyword arguments: .. jupyter-execute:: mda.sel(one="a", two=0) Note that using ``sel`` it is not possible to mix a dimension indexer with level indexers for that dimension (e.g., ``mda.sel(x={'one': 'a'}, two=0)`` will raise a ``ValueError``). Like pandas, xarray handles partial selection on multi-index (level drop). As shown below, it also renames the dimension / coordinate when the multi-index is reduced to a single index. .. jupyter-execute:: mda.loc[{"one": "a"}, ...] Unlike pandas, xarray does not guess whether you provide index levels or dimensions when using ``loc`` in some ambiguous cases. For example, for ``mda.loc[{'one': 'a', 'two': 0}]`` and ``mda.loc['a', 0]`` xarray always interprets ('one', 'two') and ('a', 0) as the names and labels of the 1st and 2nd dimension, respectively. You must specify all dimensions or use the ellipsis in the ``loc`` specifier, e.g. in the example above, ``mda.loc[{'one': 'a', 'two': 0}, :]`` or ``mda.loc[('a', 0), ...]``. .. _indexing.rules: Indexing rules -------------- Here we describe the full rules xarray uses for vectorized indexing. Note that this is for the purposes of explanation: for the sake of efficiency and to support various backends, the actual implementation is different. 0. (Only for label based indexing.) 
Look up positional indexes along each dimension from the corresponding :py:class:`pandas.Index`. 1. A full slice object ``:`` is inserted for each dimension without an indexer. 2. ``slice`` objects are converted into arrays, given by ``np.arange(*slice.indices(...))``. 3. Assume dimension names for array indexers without dimensions, such as ``np.ndarray`` and ``list``, from the dimensions to be indexed along. For example, ``v.isel(x=[0, 1])`` is understood as ``v.isel(x=xr.DataArray([0, 1], dims=['x']))``. 4. For each variable in a ``Dataset`` or ``DataArray`` (the array and its coordinates): a. Broadcast all relevant indexers based on their dimension names (see :ref:`compute.broadcasting` for full details). b. Index the underling array by the broadcast indexers, using NumPy's advanced indexing rules. 5. If any indexer DataArray has coordinates and no coordinate with the same name exists, attach them to the indexed object. .. note:: Only 1-dimensional boolean arrays can be used as indexers. xarray-2025.09.0/doc/user-guide/interpolation.rst000066400000000000000000000247601505620616400216140ustar00rootroot00000000000000.. _interp: Interpolating data ================== .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr import matplotlib.pyplot as plt np.random.seed(123456) Xarray offers flexible interpolation routines, which have a similar interface to our :ref:`indexing `. .. note:: ``interp`` requires ``scipy`` installed. Scalar and 1-dimensional interpolation -------------------------------------- Interpolating a :py:class:`~xarray.DataArray` works mostly like labeled indexing of a :py:class:`~xarray.DataArray`, .. jupyter-execute:: da = xr.DataArray( np.sin(0.3 * np.arange(12).reshape(4, 3)), [("time", np.arange(4)), ("space", [0.1, 0.2, 0.3])], ) # label lookup da.sel(time=3) .. jupyter-execute:: # interpolation da.interp(time=2.5) Similar to the indexing, :py:meth:`~xarray.DataArray.interp` also accepts an array-like, which gives the interpolated result as an array. .. jupyter-execute:: # label lookup da.sel(time=[2, 3]) .. jupyter-execute:: # interpolation da.interp(time=[2.5, 3.5]) To interpolate data with a :py:doc:`numpy.datetime64 ` coordinate you can pass a string. .. jupyter-execute:: da_dt64 = xr.DataArray( [1, 3], [("time", pd.date_range("1/1/2000", "1/3/2000", periods=2))] ) da_dt64.interp(time="2000-01-02") The interpolated data can be merged into the original :py:class:`~xarray.DataArray` by specifying the time periods required. .. jupyter-execute:: da_dt64.interp(time=pd.date_range("1/1/2000", "1/3/2000", periods=3)) Interpolation of data indexed by a :py:class:`~xarray.CFTimeIndex` is also allowed. See :ref:`CFTimeIndex` for examples. .. note:: Currently, our interpolation only works for regular grids. Therefore, similarly to :py:meth:`~xarray.DataArray.sel`, only 1D coordinates along a dimension can be used as the original coordinate to be interpolated. Multi-dimensional Interpolation ------------------------------- Like :py:meth:`~xarray.DataArray.sel`, :py:meth:`~xarray.DataArray.interp` accepts multiple coordinates. In this case, multidimensional interpolation is carried out. .. jupyter-execute:: # label lookup da.sel(time=2, space=0.1) .. jupyter-execute:: # interpolation da.interp(time=2.5, space=0.15) Array-like coordinates are also accepted: .. jupyter-execute:: # label lookup da.sel(time=[2, 3], space=[0.1, 0.2]) .. 
jupyter-execute:: # interpolation da.interp(time=[1.5, 2.5], space=[0.15, 0.25]) :py:meth:`~xarray.DataArray.interp_like` method is a useful shortcut. This method interpolates an xarray object onto the coordinates of another xarray object. For example, if we want to compute the difference between two :py:class:`~xarray.DataArray` s (``da`` and ``other``) staying on slightly different coordinates, .. jupyter-execute:: other = xr.DataArray( np.sin(0.4 * np.arange(9).reshape(3, 3)), [("time", [0.9, 1.9, 2.9]), ("space", [0.15, 0.25, 0.35])], ) it might be a good idea to first interpolate ``da`` so that it will stay on the same coordinates of ``other``, and then subtract it. :py:meth:`~xarray.DataArray.interp_like` can be used for such a case, .. jupyter-execute:: # interpolate da along other's coordinates interpolated = da.interp_like(other) interpolated It is now possible to safely compute the difference ``other - interpolated``. Interpolation methods --------------------- We use either :py:class:`scipy.interpolate.interp1d` or special interpolants from :py:class:`scipy.interpolate` for 1-dimensional interpolation (see :py:meth:`~xarray.Dataset.interp`). For multi-dimensional interpolation, an attempt is first made to decompose the interpolation in a series of 1-dimensional interpolations, in which case the relevant 1-dimensional interpolator is used. If a decomposition cannot be made (e.g. with advanced interpolation), :py:func:`scipy.interpolate.interpn` is used. The interpolation method can be specified by the optional ``method`` argument. .. jupyter-execute:: da = xr.DataArray( np.sin(np.linspace(0, 2 * np.pi, 10)), dims="x", coords={"x": np.linspace(0, 1, 10)}, ) da.plot.line("o", label="original") da.interp(x=np.linspace(0, 1, 100)).plot.line(label="linear (default)") da.interp(x=np.linspace(0, 1, 100), method="cubic").plot.line(label="cubic") plt.legend(); Additional keyword arguments can be passed to scipy's functions. .. jupyter-execute:: # fill 0 for the outside of the original coordinates. da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={"fill_value": 0.0}) .. jupyter-execute:: # 1-dimensional extrapolation da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={"fill_value": "extrapolate"}) .. jupyter-execute:: # multi-dimensional extrapolation da = xr.DataArray( np.sin(0.3 * np.arange(12).reshape(4, 3)), [("time", np.arange(4)), ("space", [0.1, 0.2, 0.3])], ) da.interp( time=4, space=np.linspace(-0.1, 0.5, 10), kwargs={"fill_value": "extrapolate"} ) Advanced Interpolation ---------------------- :py:meth:`~xarray.DataArray.interp` accepts :py:class:`~xarray.DataArray` as similar to :py:meth:`~xarray.DataArray.sel`, which enables us more advanced interpolation. Based on the dimension of the new coordinate passed to :py:meth:`~xarray.DataArray.interp`, the dimension of the result are determined. For example, if you want to interpolate a two dimensional array along a particular dimension, as illustrated below, you can pass two 1-dimensional :py:class:`~xarray.DataArray` s with a common dimension as new coordinate. .. image:: ../_static/advanced_selection_interpolation.svg :height: 200px :width: 400 px :alt: advanced indexing and interpolation :align: center For example: .. jupyter-execute:: da = xr.DataArray( np.sin(0.3 * np.arange(20).reshape(5, 4)), [("x", np.arange(5)), ("y", [0.1, 0.2, 0.3, 0.4])], ) # advanced indexing x = xr.DataArray([0, 2, 4], dims="z") y = xr.DataArray([0.1, 0.2, 0.3], dims="z") da.sel(x=x, y=y) .. 
jupyter-execute:: # advanced interpolation, without extrapolation x = xr.DataArray([0.5, 1.5, 2.5, 3.5], dims="z") y = xr.DataArray([0.15, 0.25, 0.35, 0.45], dims="z") da.interp(x=x, y=y) where values on the original coordinates ``(x, y) = ((0.5, 0.15), (1.5, 0.25), (2.5, 0.35), (3.5, 0.45))`` are obtained by the 2-dimensional interpolation and mapped along a new dimension ``z``. Since no keyword arguments are passed to the interpolation routine, no extrapolation is performed resulting in a ``nan`` value. If you want to add a coordinate to the new dimension ``z``, you can supply :py:class:`~xarray.DataArray` s with a coordinate. Extrapolation can be achieved by passing additional arguments to SciPy's ``interpnd`` function, .. jupyter-execute:: x = xr.DataArray([0.5, 1.5, 2.5, 3.5], dims="z", coords={"z": ["a", "b", "c", "d"]}) y = xr.DataArray( [0.15, 0.25, 0.35, 0.45], dims="z", coords={"z": ["a", "b", "c", "d"]} ) da.interp(x=x, y=y, kwargs={"fill_value": None}) For the details of the advanced indexing, see :ref:`more advanced indexing `. Interpolating arrays with NaN ----------------------------- Our :py:meth:`~xarray.DataArray.interp` works with arrays with NaN the same way that `scipy.interpolate.interp1d `_ and `scipy.interpolate.interpn `_ do. ``linear`` and ``nearest`` methods return arrays including NaN, while other methods such as ``cubic`` or ``quadratic`` return all NaN arrays. .. jupyter-execute:: da = xr.DataArray([0, 2, np.nan, 3, 3.25], dims="x", coords={"x": range(5)}) da.interp(x=[0.5, 1.5, 2.5]) .. jupyter-execute:: da.interp(x=[0.5, 1.5, 2.5], method="cubic") To avoid this, you can drop NaN by :py:meth:`~xarray.DataArray.dropna`, and then make the interpolation .. jupyter-execute:: dropped = da.dropna("x") dropped .. jupyter-execute:: dropped.interp(x=[0.5, 1.5, 2.5], method="cubic") If NaNs are distributed randomly in your multidimensional array, dropping all the columns containing more than one NaNs by :py:meth:`~xarray.DataArray.dropna` may lose a significant amount of information. In such a case, you can fill NaN by :py:meth:`~xarray.DataArray.interpolate_na`, which is similar to :py:meth:`pandas.Series.interpolate`. .. jupyter-execute:: filled = da.interpolate_na(dim="x") filled This fills NaN by interpolating along the specified dimension. After filling NaNs, you can interpolate: .. jupyter-execute:: filled.interp(x=[0.5, 1.5, 2.5], method="cubic") For the details of :py:meth:`~xarray.DataArray.interpolate_na`, see :ref:`Missing values `. Example ------- Let's see how :py:meth:`~xarray.DataArray.interp` works on real data. .. jupyter-execute:: # Raw data ds = xr.tutorial.open_dataset("air_temperature").isel(time=0) fig, axes = plt.subplots(ncols=2, figsize=(10, 4)) ds.air.plot(ax=axes[0]) axes[0].set_title("Raw data") # Interpolated data new_lon = np.linspace(ds.lon[0].item(), ds.lon[-1].item(), ds.sizes["lon"] * 4) new_lat = np.linspace(ds.lat[0].item(), ds.lat[-1].item(), ds.sizes["lat"] * 4) dsi = ds.interp(lat=new_lat, lon=new_lon) dsi.air.plot(ax=axes[1]) axes[1].set_title("Interpolated data"); Our advanced interpolation can be used to remap the data to the new coordinate. Consider the new coordinates x and z on the two dimensional plane. The remapping can be done as follows .. 
jupyter-execute:: # new coordinate x = np.linspace(240, 300, 100) z = np.linspace(20, 70, 100) # relation between new and original coordinates lat = xr.DataArray(z, dims=["z"], coords={"z": z}) lon = xr.DataArray( (x[:, np.newaxis] - 270) / np.cos(z * np.pi / 180) + 270, dims=["x", "z"], coords={"x": x, "z": z}, ) fig, axes = plt.subplots(ncols=2, figsize=(10, 4)) ds.air.plot(ax=axes[0]) # draw the new coordinate on the original coordinates. for idx in [0, 33, 66, 99]: axes[0].plot(lon.isel(x=idx), lat, "--k") for idx in [0, 33, 66, 99]: axes[0].plot(*xr.broadcast(lon.isel(z=idx), lat.isel(z=idx)), "--k") axes[0].set_title("Raw data") dsi = ds.interp(lon=lon, lat=lat) dsi.air.plot(ax=axes[1]) axes[1].set_title("Remapped data"); xarray-2025.09.0/doc/user-guide/io.rst000066400000000000000000001611641505620616400173340ustar00rootroot00000000000000.. currentmodule:: xarray .. _io: Reading and writing files ========================= Xarray supports direct serialization and IO to several file formats, from simple :ref:`io.pickle` files to the more flexible :ref:`io.netcdf` format (recommended). .. jupyter-execute:: :hide-code: import os import iris import ncdata.iris_xarray import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) You can read different types of files in ``xr.open_dataset`` by specifying the engine to be used: .. code:: python xr.open_dataset("example.nc", engine="netcdf4") The "engine" provides a set of instructions that tells xarray how to read the data and pack them into a ``Dataset`` (or ``Dataarray``). These instructions are stored in an underlying "backend". Xarray comes with several backends that cover many common data formats. Many more backends are available via external libraries, or you can `write your own `_. This diagram aims to help you determine - based on the format of the file you'd like to read - which type of backend you're using and how to use it. Text and boxes are clickable for more information. Following the diagram is detailed information on many popular backends. You can learn more about using and developing backends in the `Xarray tutorial JupyterBook `_. .. _comment: mermaid Flowcharg "link" text gets secondary color background, SVG icon fill gets primary color .. raw:: html .. mermaid:: :config: {"theme":"base","themeVariables":{"fontSize":"20px","primaryColor":"#fff","primaryTextColor":"#fff","primaryBorderColor":"#59c7d6","lineColor":"#e28126","secondaryColor":"#767985"}} :alt: Flowchart illustrating how to choose the right backend engine to read your data flowchart LR built-in-eng["`**Is your data stored in one of these formats?** - netCDF4 - netCDF3 - Zarr - DODS/OPeNDAP - HDF5 `"] built-in("`**You're in luck!** Xarray bundles a backend to automatically read these formats. Open data using xr.open_dataset(). We recommend explicitly setting engine='xxxx' for faster loading.`") installed-eng["""One of these formats? - GRIB - TileDB - GeoTIFF, JPEG-2000, etc. (via GDAL) - Sentinel-1 SAFE """] installed("""Install the linked backend library and use it with xr.open_dataset(file, engine='xxxx').""") other["`**Options:** - Look around to see if someone has created an Xarray backend for your format! 
- Create your own backend - Convert your data to a supported format `"] built-in-eng -->|Yes| built-in built-in-eng -->|No| installed-eng installed-eng -->|Yes| installed installed-eng -->|No| other click built-in-eng "https://docs.xarray.dev/en/stable/get-help/faq.html#how-do-i-open-format-x-file-as-an-xarray-dataset" classDef quesNodefmt font-size:12pt,fill:#0e4666,stroke:#59c7d6,stroke-width:3 class built-in-eng,installed-eng quesNodefmt classDef ansNodefmt font-size:12pt,fill:#4a4a4a,stroke:#17afb4,stroke-width:3 class built-in,installed,other ansNodefmt linkStyle default font-size:18pt,stroke-width:4 .. _io.netcdf: netCDF ------ The recommended way to store xarray data structures is `netCDF`__, which is a binary file format for self-described datasets that originated in the geosciences. Xarray is based on the netCDF data model, so netCDF files on disk directly correspond to :py:class:`Dataset` objects (more accurately, a group in a netCDF file directly corresponds to a :py:class:`Dataset` object. See :ref:`io.netcdf_groups` for more.) NetCDF is supported on almost all platforms, and parsers exist for the vast majority of scientific programming languages. Recent versions of netCDF are based on the even more widely used HDF5 file-format. __ https://www.unidata.ucar.edu/software/netcdf/ .. tip:: If you aren't familiar with this data format, the `netCDF FAQ`_ is a good place to start. .. _netCDF FAQ: https://www.unidata.ucar.edu/software/netcdf/docs/faq.html#What-Is-netCDF Reading and writing netCDF files with xarray requires scipy, h5netcdf, or the `netCDF4-Python`__ library to be installed. SciPy only supports reading and writing of netCDF V3 files. __ https://github.com/Unidata/netcdf4-python We can save a Dataset to disk using the :py:meth:`Dataset.to_netcdf` method: .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 5))}, coords={ "x": [10, 20, 30, 40], "y": pd.date_range("2000-01-01", periods=5), "z": ("x", list("abcd")), }, ) ds.to_netcdf("saved_on_disk.nc") By default, the file is saved as netCDF4 (assuming netCDF4-Python is installed). You can control the format and engine used to write the file with the ``format`` and ``engine`` arguments. .. tip:: Using the `h5netcdf `_ package by passing ``engine='h5netcdf'`` to :py:meth:`open_dataset` can sometimes be quicker than the default ``engine='netcdf4'`` that uses the `netCDF4 `_ package. We can load netCDF files to create a new Dataset using :py:func:`open_dataset`: .. jupyter-execute:: ds_disk = xr.open_dataset("saved_on_disk.nc") ds_disk .. jupyter-execute:: :hide-code: # Close "saved_on_disk.nc", but retain the file until after closing or deleting other # datasets that will refer to it. ds_disk.close() Similarly, a DataArray can be saved to disk using the :py:meth:`DataArray.to_netcdf` method, and loaded from disk using the :py:func:`open_dataarray` function. As netCDF files correspond to :py:class:`Dataset` objects, these functions internally convert the ``DataArray`` to a ``Dataset`` before saving, and then convert back when loading, ensuring that the ``DataArray`` that is loaded is always exactly the same as the one that was saved. A dataset can also be loaded or written to a specific group within a netCDF file. To load from a group, pass a ``group`` keyword argument to the ``open_dataset`` function. The group can be specified as a path-like string, e.g., to access subgroup 'bar' within group 'foo' pass '/foo/bar' as the ``group`` argument. 
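For example, a minimal sketch of reading a single group (the file name and group path here are hypothetical):

.. code:: python

    # hypothetical file "example-groups.nc" containing the subgroup "/foo/bar"
    ds_bar = xr.open_dataset("example-groups.nc", group="/foo/bar")
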
When writing multiple groups in one file, pass ``mode='a'`` to ``to_netcdf`` to ensure that each call does not delete the file. .. tip:: It is recommended to use :py:class:`~xarray.DataTree` to represent hierarchical data, and to use the :py:meth:`xarray.DataTree.to_netcdf` method when writing hierarchical data to a netCDF file. Data is *always* loaded lazily from netCDF files. You can manipulate, slice and subset Dataset and DataArray objects, and no array values are loaded into memory until you try to perform some sort of actual computation. For an example of how these lazy arrays work, see the OPeNDAP section below. There may be minor differences in the :py:class:`Dataset` object returned when reading a NetCDF file with different engines. It is important to note that when you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. .. tip:: Xarray's lazy loading of remote or on-disk datasets is often but not always desirable. Before performing computationally intense operations, it is often a good idea to load a Dataset (or DataArray) entirely into memory by invoking the :py:meth:`Dataset.load` method. Datasets have a :py:meth:`Dataset.close` method to close the associated netCDF file. However, it's often cleaner to use a ``with`` statement: .. jupyter-execute:: # this automatically closes the dataset after use with xr.open_dataset("saved_on_disk.nc") as ds: print(ds.keys()) Although xarray provides reasonable support for incremental reads of files on disk, it does not support incremental writes, which can be a useful strategy for dealing with datasets too big to fit into memory. Instead, xarray integrates with dask.array (see :ref:`dask`), which provides a fully featured engine for streaming computation. It is possible to append or overwrite netCDF variables using the ``mode='a'`` argument. When using this option, all variables in the dataset will be written to the original netCDF file, regardless if they exist in the original dataset. .. _io.netcdf_groups: Groups ~~~~~~ Whilst netCDF groups can only be loaded individually as ``Dataset`` objects, a whole file of many nested groups can be loaded as a single :py:class:`xarray.DataTree` object. To open a whole netCDF file as a tree of groups use the :py:func:`xarray.open_datatree` function. To save a DataTree object as a netCDF file containing many groups, use the :py:meth:`xarray.DataTree.to_netcdf` method. .. _netcdf.root_group.note: .. note:: Due to file format specifications the on-disk root group name is always ``"/"``, overriding any given ``DataTree`` root node name. .. _netcdf.group.warning: .. warning:: ``DataTree`` objects do not follow the exact same data model as netCDF files, which means that perfect round-tripping is not always possible. In particular in the netCDF data model dimensions are entities that can exist regardless of whether any variable possesses them. This is in contrast to `xarray's data model `_ (and hence :ref:`DataTree's data model `) in which the dimensions of a (Dataset/Tree) object are simply the set of dimensions present across all variables in that dataset. This means that if a netCDF file contains dimensions but no variables which possess those dimensions, these dimensions will not be present when that file is opened as a DataTree object. Saving this DataTree object to file will therefore not preserve these "unused" dimensions. .. 
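Putting the two functions mentioned above together, a minimal sketch of a whole-file round trip (the file names are hypothetical):

.. code:: python

    # hypothetical nested file; every netCDF group becomes a node of the tree
    tree = xr.open_datatree("nested-groups.nc")
    tree.to_netcdf("nested-groups-copy.nc")
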
_io.encoding: Reading encoded data ~~~~~~~~~~~~~~~~~~~~ NetCDF files follow some conventions for encoding datetime arrays (as numbers with a "units" attribute) and for packing and unpacking data (as described by the "scale_factor" and "add_offset" attributes). If the argument ``decode_cf=True`` (default) is given to :py:func:`open_dataset`, xarray will attempt to automatically decode the values in the netCDF objects according to `CF conventions`_. Sometimes this will fail, for example, if a variable has an invalid "units" or "calendar" attribute. For these cases, you can turn this decoding off manually. .. _CF conventions: https://cfconventions.org/ You can view this encoding information (among others) in the :py:attr:`DataArray.encoding` and :py:attr:`DataArray.encoding` attributes: .. jupyter-execute:: ds_disk["y"].encoding .. jupyter-execute:: ds_disk.encoding Note that all operations that manipulate variables other than indexing will remove encoding information. In some cases it is useful to intentionally reset a dataset's original encoding values. This can be done with either the :py:meth:`Dataset.drop_encoding` or :py:meth:`DataArray.drop_encoding` methods. .. jupyter-execute:: ds_no_encoding = ds_disk.drop_encoding() ds_no_encoding.encoding .. _combining multiple files: Reading multi-file datasets ........................... NetCDF files are often encountered in collections, e.g., with different files corresponding to different model runs or one file per timestamp. Xarray can straightforwardly combine such files into a single Dataset by making use of :py:func:`concat`, :py:func:`merge`, :py:func:`combine_nested` and :py:func:`combine_by_coords`. For details on the difference between these functions see :ref:`combining data`. Xarray includes support for manipulating datasets that don't fit into memory with dask_. If you have dask installed, you can open multiple files simultaneously in parallel using :py:func:`open_mfdataset`:: xr.open_mfdataset('my/files/*.nc', parallel=True) This function automatically concatenates and merges multiple files into a single xarray dataset. It is the recommended way to open multiple files with xarray. For more details on parallel reading, see :ref:`combining.multi`, :ref:`dask.io` and a `blog post`_ by Stephan Hoyer. :py:func:`open_mfdataset` takes many kwargs that allow you to control its behaviour (for e.g. ``parallel``, ``combine``, ``compat``, ``join``, ``concat_dim``). See its docstring for more details. .. note:: A common use-case involves a dataset distributed across a large number of files with each file containing a large number of variables. Commonly, a few of these variables need to be concatenated along a dimension (say ``"time"``), while the rest are equal across the datasets (ignoring floating point differences). The following command with suitable modifications (such as ``parallel=True``) works well with such datasets:: xr.open_mfdataset('my/files/*.nc', concat_dim="time", combine="nested", data_vars='minimal', coords='minimal', compat='override') This command concatenates variables along the ``"time"`` dimension, but only those that already contain the ``"time"`` dimension (``data_vars='minimal', coords='minimal'``). Variables that lack the ``"time"`` dimension are taken from the first dataset (``compat='override'``). .. _dask: https://www.dask.org .. 
_blog post: https://stephanhoyer.com/2015/06/11/xray-dask-out-of-core-labeled-arrays/ Sometimes multi-file datasets are not conveniently organized for easy use of :py:func:`open_mfdataset`. One can use the ``preprocess`` argument to provide a function that takes a dataset and returns a modified Dataset. :py:func:`open_mfdataset` will call ``preprocess`` on every dataset (corresponding to each file) prior to combining them. If :py:func:`open_mfdataset` does not meet your needs, other approaches are possible. The general pattern for parallel reading of multiple files using dask, modifying those datasets and then combining into a single ``Dataset`` is:: def modify(ds): # modify ds here return ds # this is basically what open_mfdataset does open_kwargs = dict(decode_cf=True, decode_times=False) open_tasks = [dask.delayed(xr.open_dataset)(f, **open_kwargs) for f in file_names] tasks = [dask.delayed(modify)(task) for task in open_tasks] datasets = dask.compute(tasks) # get a list of xarray.Datasets combined = xr.combine_nested(datasets) # or some combination of concat, merge As an example, here's how we could approximate ``MFDataset`` from the netCDF4 library:: from glob import glob import xarray as xr def read_netcdfs(files, dim): # glob expands paths with * to a list of files, like the unix shell paths = sorted(glob(files)) datasets = [xr.open_dataset(p) for p in paths] combined = xr.concat(datasets, dim) return combined combined = read_netcdfs('/all/my/files/*.nc', dim='time') This function will work in many cases, but it's not very robust. First, it never closes files, which means it will fail if you need to load more than a few thousand files. Second, it assumes that you want all the data from each file and that it can all fit into memory. In many situations, you only need a small subset or an aggregated summary of the data from each file. Here's a slightly more sophisticated example of how to remedy these deficiencies:: def read_netcdfs(files, dim, transform_func=None): def process_one_path(path): # use a context manager, to ensure the file gets closed after use with xr.open_dataset(path) as ds: # transform_func should do some sort of selection or # aggregation if transform_func is not None: ds = transform_func(ds) # load all data from the transformed dataset, to ensure we can # use it after closing each original file ds.load() return ds paths = sorted(glob(files)) datasets = [process_one_path(p) for p in paths] combined = xr.concat(datasets, dim) return combined # here we suppose we only care about the combined mean of each file; # you might also use indexing operations like .sel to subset datasets combined = read_netcdfs('/all/my/files/*.nc', dim='time', transform_func=lambda ds: ds.mean()) This pattern works well and is very robust. We've used similar code to process tens of thousands of files constituting 100s of GB of data. .. _io.netcdf.writing_encoded: Writing encoded data ~~~~~~~~~~~~~~~~~~~~ Conversely, you can customize how xarray writes netCDF files on disk by providing explicit encodings for each dataset variable. The ``encoding`` argument takes a dictionary with variable names as keys and variable specific encodings as values. These encodings are saved as attributes on the netCDF variables on disk, which allows xarray to faithfully read encoded data back into memory. 
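For example, a minimal sketch of supplying per-variable encodings (the dataset and target values are arbitrary; the individual options are described in the sections below):

.. code:: python

    # write "foo" as 16-bit integers with a precision of 0.1, using -9999 for NaN
    ds = xr.Dataset({"foo": ("x", np.random.rand(10))})
    ds.to_netcdf(
        "encoded.nc",
        encoding={"foo": {"dtype": "int16", "scale_factor": 0.1, "_FillValue": -9999}},
    )
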
It is important to note that using encodings is entirely optional: if you do not supply any of these encoding options, xarray will write data to disk using a default encoding, or the options in the ``encoding`` attribute, if set. This works perfectly fine in most cases, but encoding can be useful for additional control, especially for enabling compression. In the file on disk, these encodings are saved as attributes on each variable, which allow xarray and other CF-compliant tools for working with netCDF files to correctly read the data. Scaling and type conversions ............................ These encoding options (based on `CF Conventions on packed data`_) work on any version of the netCDF file format: - ``dtype``: Any valid NumPy dtype or string convertible to a dtype, e.g., ``'int16'`` or ``'float32'``. This controls the type of the data written on disk. - ``_FillValue``: Values of ``NaN`` in xarray variables are remapped to this value when saved on disk. This is important when converting floating point with missing values to integers on disk, because ``NaN`` is not a valid value for integer dtypes. By default, variables with float types are attributed a ``_FillValue`` of ``NaN`` in the output file, unless explicitly disabled with an encoding ``{'_FillValue': None}``. - ``scale_factor`` and ``add_offset``: Used to convert from encoded data on disk to to the decoded data in memory, according to the formula ``decoded = scale_factor * encoded + add_offset``. Please note that ``scale_factor`` and ``add_offset`` must be of same type and determine the type of the decoded data. These parameters can be fruitfully combined to compress discretized data on disk. For example, to save the variable ``foo`` with a precision of 0.1 in 16-bit integers while converting ``NaN`` to ``-9999``, we would use ``encoding={'foo': {'dtype': 'int16', 'scale_factor': 0.1, '_FillValue': -9999}}``. Compression and decompression with such discretization is extremely fast. .. _CF Conventions on packed data: https://cfconventions.org/cf-conventions/cf-conventions.html#packed-data .. _io.string-encoding: String encoding ............... Xarray can write unicode strings to netCDF files in two ways: - As variable length strings. This is only supported on netCDF4 (HDF5) files. - By encoding strings into bytes, and writing encoded bytes as a character array. The default encoding is UTF-8. By default, we use variable length strings for compatible files and fall-back to using encoded character arrays. Character arrays can be selected even for netCDF4 files by setting the ``dtype`` field in ``encoding`` to ``S1`` (corresponding to NumPy's single-character bytes dtype). If character arrays are used: - The string encoding that was used is stored on disk in the ``_Encoding`` attribute, which matches an ad-hoc convention `adopted by the netCDF4-Python library `_. At the time of this writing (October 2017), a standard convention for indicating string encoding for character arrays in netCDF files was `still under discussion `_. Technically, you can use `any string encoding recognized by Python `_ if you feel the need to deviate from UTF-8, by setting the ``_Encoding`` field in ``encoding``. But `we don't recommend it `_. - The character dimension name can be specified by the ``char_dim_name`` field of a variable's ``encoding``. If the name of the character dimension is not specified, the default is ``f'string{data.shape[-1]}'``. 
When decoding character arrays from existing files, the ``char_dim_name`` is added to the variables ``encoding`` to preserve if encoding happens, but the field can be edited by the user. .. warning:: Missing values in bytes or unicode string arrays (represented by ``NaN`` in xarray) are currently written to disk as empty strings ``''``. This means missing values will not be restored when data is loaded from disk. This behavior is likely to change in the future (:issue:`1647`). Unfortunately, explicitly setting a ``_FillValue`` for string arrays to handle missing values doesn't work yet either, though we also hope to fix this in the future. Chunk based compression ....................... ``zlib``, ``complevel``, ``fletcher32``, ``contiguous`` and ``chunksizes`` can be used for enabling netCDF4/HDF5's chunk based compression, as described in the `documentation for createVariable`_ for netCDF4-Python. This only works for netCDF4 files and thus requires using ``format='netCDF4'`` and either ``engine='netcdf4'`` or ``engine='h5netcdf'``. .. _documentation for createVariable: https://unidata.github.io/netcdf4-python/#netCDF4.Dataset.createVariable Chunk based gzip compression can yield impressive space savings, especially for sparse data, but it comes with significant performance overhead. HDF5 libraries can only read complete chunks back into memory, and maximum decompression speed is in the range of 50-100 MB/s. Worse, HDF5's compression and decompression currently cannot be parallelized with dask. For these reasons, we recommend trying discretization based compression (described above) first. Time units .......... The ``units`` and ``calendar`` attributes control how xarray serializes ``datetime64`` and ``timedelta64`` arrays to datasets on disk as numeric values. The ``units`` encoding should be a string like ``'days since 1900-01-01'`` for ``datetime64`` data or a string like ``'days'`` for ``timedelta64`` data. ``calendar`` should be one of the calendar types supported by netCDF4-python: ``'standard'``, ``'gregorian'``, ``'proleptic_gregorian'``, ``'noleap'``, ``'365_day'``, ``'360_day'``, ``'julian'``, ``'all_leap'``, ``'366_day'``. By default, xarray uses the ``'proleptic_gregorian'`` calendar and units of the smallest time difference between values, with a reference time of the first time value. .. _io.coordinates: Coordinates ........... You can control the ``coordinates`` attribute written to disk by specifying ``DataArray.encoding["coordinates"]``. If not specified, xarray automatically sets ``DataArray.encoding["coordinates"]`` to a space-delimited list of names of coordinate variables that share dimensions with the ``DataArray`` being written. This allows perfect roundtripping of xarray datasets but may not be desirable. When an xarray ``Dataset`` contains non-dimensional coordinates that do not share dimensions with any of the variables, these coordinate variable names are saved under a "global" ``"coordinates"`` attribute. This is not CF-compliant but again facilitates roundtripping of xarray datasets. Invalid netCDF files ~~~~~~~~~~~~~~~~~~~~ The library ``h5netcdf`` allows writing some dtypes that aren't allowed in netCDF4 (see `h5netcdf documentation `_). This feature is available through :py:meth:`DataArray.to_netcdf` and :py:meth:`Dataset.to_netcdf` when used with ``engine="h5netcdf"`` and currently raises a warning unless ``invalid_netcdf=True`` is set. .. warning:: Note that this produces a file that is likely to be not readable by other netCDF libraries! .. 
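For instance, a minimal sketch of writing complex-valued data, which the netCDF4 standard itself does not allow (the variable is arbitrary and h5netcdf must be installed):

.. code:: python

    # complex dtypes are not valid netCDF-4; h5netcdf can write them when
    # invalid_netcdf=True is passed
    ds = xr.Dataset({"signal": ("x", np.arange(4) + 1j)})
    ds.to_netcdf("complex.nc", engine="h5netcdf", invalid_netcdf=True)
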
_io.hdf5: HDF5 ---- `HDF5`_ is both a file format and a data model for storing information. HDF5 stores data hierarchically, using groups to create a nested structure. HDF5 is a more general version of the netCDF4 data model, so the nested structure is one of many similarities between the two data formats. Reading HDF5 files in xarray requires the ``h5netcdf`` engine, which can be installed with ``conda install h5netcdf``. Once installed we can use xarray to open HDF5 files: .. code:: python xr.open_dataset("/path/to/my/file.h5") The similarities between HDF5 and netCDF4 mean that HDF5 data can be written with the same :py:meth:`Dataset.to_netcdf` method as used for netCDF4 data: .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 5))}, coords={ "x": [10, 20, 30, 40], "y": pd.date_range("2000-01-01", periods=5), "z": ("x", list("abcd")), }, ) ds.to_netcdf("saved_on_disk.h5") Groups ~~~~~~ If you have multiple or highly nested groups, xarray by default may not read the group that you want. A particular group of an HDF5 file can be specified using the ``group`` argument: .. code:: python xr.open_dataset("/path/to/my/file.h5", group="/my/group") While xarray cannot interrogate an HDF5 file to determine which groups are available, the HDF5 Python reader `h5py`_ can be used instead. Natively the xarray data structures can only handle one level of nesting, organized as DataArrays inside of Datasets. If your HDF5 file has additional levels of hierarchy you can only access one group and a time and will need to specify group names. .. _HDF5: https://hdfgroup.github.io/hdf5/index.html .. _h5py: https://www.h5py.org/ .. _io.zarr: Zarr ---- `Zarr`_ is a Python package that provides an implementation of chunked, compressed, N-dimensional arrays. Zarr has the ability to store arrays in a range of ways, including in memory, in files, and in cloud-based object storage such as `Amazon S3`_ and `Google Cloud Storage`_. Xarray's Zarr backend allows xarray to leverage these capabilities, including the ability to store and analyze datasets far too large fit onto disk (particularly :ref:`in combination with dask `). Xarray can't open just any zarr dataset, because xarray requires special metadata (attributes) describing the dataset dimensions and coordinates. At this time, xarray can only open zarr datasets with these special attributes, such as zarr datasets written by xarray, `netCDF `_, or `GDAL `_. For implementation details, see :ref:`zarr_encoding`. To write a dataset with zarr, we use the :py:meth:`Dataset.to_zarr` method. To write to a local directory, we pass a path to a directory: .. jupyter-execute:: :hide-code: ! rm -rf path/to/directory.zarr .. jupyter-execute:: :stderr: ds = xr.Dataset( {"foo": (("x", "y"), np.random.rand(4, 5))}, coords={ "x": [10, 20, 30, 40], "y": pd.date_range("2000-01-01", periods=5), "z": ("x", list("abcd")), }, ) ds.to_zarr("path/to/directory.zarr", zarr_format=2, consolidated=False) (The suffix ``.zarr`` is optional--just a reminder that a zarr store lives there.) If the directory does not exist, it will be created. If a zarr store is already present at that path, an error will be raised, preventing it from being overwritten. To override this behavior and overwrite an existing store, add ``mode='w'`` when invoking :py:meth:`~Dataset.to_zarr`. DataArrays can also be saved to disk using the :py:meth:`DataArray.to_zarr` method, and loaded from disk using the :py:func:`open_dataarray` function with ``engine='zarr'``. 
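For example, a minimal sketch of that round trip (the store name is arbitrary):

.. code:: python

    # a named DataArray round-trips through a zarr store
    da = xr.DataArray(np.arange(5), dims="x", name="foo")
    da.to_zarr("da_store.zarr", mode="w")
    da_back = xr.open_dataarray("da_store.zarr", engine="zarr")
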
Similar to :py:meth:`DataArray.to_netcdf`, :py:meth:`DataArray.to_zarr` will convert the ``DataArray`` to a ``Dataset`` before saving, and then convert back when loading, ensuring that the ``DataArray`` that is loaded is always exactly the same as the one that was saved. .. note:: xarray does not write `NCZarr `_ attributes. Therefore, NCZarr data must be opened in read-only mode. To store variable length strings, convert them to object arrays first with ``dtype=object``. To read back a zarr dataset that has been created this way, we use the :py:func:`open_zarr` method: .. jupyter-execute:: ds_zarr = xr.open_zarr("path/to/directory.zarr", consolidated=False) ds_zarr Cloud Storage Buckets ~~~~~~~~~~~~~~~~~~~~~ It is possible to read and write xarray datasets directly from / to cloud storage buckets using zarr. This example uses the `gcsfs`_ package to provide an interface to `Google Cloud Storage`_. General `fsspec`_ URLs, those that begin with ``s3://`` or ``gcs://`` for example, are parsed and the store set up for you automatically when reading. You should include any arguments to the storage backend as the key ```storage_options``, part of ``backend_kwargs``. .. code:: python ds_gcs = xr.open_dataset( "gcs:///path.zarr", backend_kwargs={ "storage_options": {"project": "", "token": None} }, engine="zarr", ) This also works with ``open_mfdataset``, allowing you to pass a list of paths or a URL to be interpreted as a glob string. For writing, you may either specify a bucket URL or explicitly set up a ``zarr.abc.store.Store`` instance, as follows: .. tab:: URL .. code:: python # write to the bucket via GCS URL ds.to_zarr("gs://") # read it back ds_gcs = xr.open_zarr("gs://") .. tab:: fsspec .. code:: python import gcsfs import zarr # manually manage the cloud filesystem connection -- useful, for example, # when you need to manage permissions to cloud resources fs = gcsfs.GCSFileSystem(project="", token=None) zstore = zarr.storage.FsspecStore(fs, path="") # write to the bucket ds.to_zarr(store=zstore) # read it back ds_gcs = xr.open_zarr(zstore) .. tab:: obstore .. code:: python import obstore import zarr # alternatively, obstore offers a modern, performant interface for # cloud buckets gcsstore = obstore.store.GCSStore( "", prefix="", skip_signature=True ) zstore = zarr.store.ObjectStore(gcsstore) # write to the bucket ds.to_zarr(store=zstore) # read it back ds_gcs = xr.open_zarr(zstore) .. _fsspec: https://filesystem-spec.readthedocs.io/en/latest/ .. _obstore: https://developmentseed.org/obstore/latest/ .. _Zarr: https://zarr.readthedocs.io/ .. _Amazon S3: https://aws.amazon.com/s3/ .. _Google Cloud Storage: https://cloud.google.com/storage/ .. _gcsfs: https://github.com/fsspec/gcsfs .. _io.zarr.distributed_writes: Distributed writes ~~~~~~~~~~~~~~~~~~ Xarray will natively use dask to write in parallel to a zarr store, which should satisfy most moderately sized datasets. For more flexible parallelization, we can use ``region`` to write to limited regions of arrays in an existing Zarr store. To scale this up to writing large datasets, first create an initial Zarr store without writing all of its array data. This can be done by first creating a ``Dataset`` with dummy values stored in :ref:`dask `, and then calling ``to_zarr`` with ``compute=False`` to write only metadata (including ``attrs``) to Zarr: .. jupyter-execute:: :hide-code: ! rm -rf path/to/directory.zarr .. 
jupyter-execute:: import dask.array # The values of this dask array are entirely irrelevant; only the dtype, # shape and chunks are used dummies = dask.array.zeros(30, chunks=10) ds = xr.Dataset({"foo": ("x", dummies)}, coords={"x": np.arange(30)}) path = "path/to/directory.zarr" # Now we write the metadata without computing any array values ds.to_zarr(path, compute=False, consolidated=False) Now, a Zarr store with the correct variable shapes and attributes exists that can be filled out by subsequent calls to ``to_zarr``. Setting ``region="auto"`` will open the existing store and determine the correct alignment of the new data with the existing dimensions, or as an explicit mapping from dimension names to Python ``slice`` objects indicating where the data should be written (in index space, not label space), e.g., .. jupyter-execute:: # For convenience, we'll slice a single dataset, but in the real use-case # we would create them separately possibly even from separate processes. ds = xr.Dataset({"foo": ("x", np.arange(30))}, coords={"x": np.arange(30)}) # Any of the following region specifications are valid ds.isel(x=slice(0, 10)).to_zarr(path, region="auto", consolidated=False) ds.isel(x=slice(10, 20)).to_zarr(path, region={"x": "auto"}, consolidated=False) ds.isel(x=slice(20, 30)).to_zarr(path, region={"x": slice(20, 30)}, consolidated=False) Concurrent writes with ``region`` are safe as long as they modify distinct chunks in the underlying Zarr arrays (or use an appropriate ``lock``). As a safety check to make it harder to inadvertently override existing values, if you set ``region`` then *all* variables included in a Dataset must have dimensions included in ``region``. Other variables (typically coordinates) need to be explicitly dropped and/or written in a separate calls to ``to_zarr`` with ``mode='a'``. Zarr Compressors and Filters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are many different `options for compression and filtering possible with zarr `_. These options can be passed to the ``to_zarr`` method as variable encoding. For example: .. jupyter-execute:: :hide-code: ! rm -rf foo.zarr .. jupyter-execute:: import zarr from zarr.codecs import BloscCodec compressor = BloscCodec(cname="zstd", clevel=3, shuffle="shuffle") ds.to_zarr("foo.zarr", consolidated=False, encoding={"foo": {"compressors": [compressor]}}) .. note:: Not all native zarr compression and filtering options have been tested with xarray. .. _io.zarr.appending: Modifying existing Zarr stores ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xarray supports several ways of incrementally writing variables to a Zarr store. These options are useful for scenarios when it is infeasible or undesirable to write your entire dataset at once. 1. Use ``mode='a'`` to add or overwrite entire variables, 2. Use ``append_dim`` to resize and append to existing variables, and 3. Use ``region`` to write to limited regions of existing arrays. .. tip:: For ``Dataset`` objects containing dask arrays, a single call to ``to_zarr()`` will write all of your data in parallel. .. warning:: Alignment of coordinates is currently not checked when modifying an existing Zarr store. It is up to the user to ensure that coordinates are consistent. To add or overwrite entire variables, simply call :py:meth:`~Dataset.to_zarr` with ``mode='a'`` on a Dataset containing the new variables, passing in an existing Zarr store or path to a Zarr store. To resize and then append values along an existing dimension in a store, set ``append_dim``. 
This is a good option if data always arrives in a particular order, e.g., for time-stepping a simulation: .. jupyter-execute:: :hide-code: ! rm -rf path/to/directory.zarr .. jupyter-execute:: ds1 = xr.Dataset( {"foo": (("x", "y", "t"), np.random.rand(4, 5, 2))}, coords={ "x": [10, 20, 30, 40], "y": [1, 2, 3, 4, 5], "t": pd.date_range("2001-01-01", periods=2), }, ) ds1.to_zarr("path/to/directory.zarr", consolidated=False) .. jupyter-execute:: ds2 = xr.Dataset( {"foo": (("x", "y", "t"), np.random.rand(4, 5, 2))}, coords={ "x": [10, 20, 30, 40], "y": [1, 2, 3, 4, 5], "t": pd.date_range("2001-01-03", periods=2), }, ) ds2.to_zarr("path/to/directory.zarr", append_dim="t", consolidated=False) .. _io.zarr.writing_chunks: Specifying chunks in a zarr store ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Chunk sizes may be specified in one of three ways when writing to a zarr store: 1. Manual chunk sizing through the use of the ``encoding`` argument in :py:meth:`Dataset.to_zarr`: 2. Automatic chunking based on chunks in dask arrays 3. Default chunk behavior determined by the zarr library The resulting chunks will be determined based on the order of the above list; dask chunks will be overridden by manually-specified chunks in the encoding argument, and the presence of either dask chunks or chunks in the ``encoding`` attribute will supersede the default chunking heuristics in zarr. Importantly, this logic applies to every array in the zarr store individually, including coordinate arrays. Therefore, if a dataset contains one or more dask arrays, it may still be desirable to specify a chunk size for the coordinate arrays (for example, with a chunk size of ``-1`` to include the full coordinate). To specify chunks manually using the ``encoding`` argument, provide a nested dictionary with the structure ``{'variable_or_coord_name': {'chunks': chunks_tuple}}``. .. note:: The positional ordering of the chunks in the encoding argument must match the positional ordering of the dimensions in each array. Watch out for arrays with differently-ordered dimensions within a single Dataset. For example, let's say we're working with a dataset with dimensions ``('time', 'x', 'y')``, a variable ``Tair`` which is chunked in ``x`` and ``y``, and two multi-dimensional coordinates ``xc`` and ``yc``: .. jupyter-execute:: ds = xr.tutorial.open_dataset("rasm") ds["Tair"] = ds["Tair"].chunk({"x": 100, "y": 100}) ds These multi-dimensional coordinates are only two-dimensional and take up very little space on disk or in memory, yet when writing to disk the default zarr behavior is to split them into chunks: .. jupyter-execute:: ds.to_zarr("path/to/directory.zarr", consolidated=False, mode="w") !tree -I zarr.json path/to/directory.zarr This may cause unwanted overhead on some systems, such as when reading from a cloud storage provider. To disable this chunking, we can specify a chunk size equal to the shape of each coordinate array in the ``encoding`` argument: .. jupyter-execute:: ds.to_zarr( "path/to/directory.zarr", encoding={"xc": {"chunks": ds.xc.shape}, "yc": {"chunks": ds.yc.shape}}, consolidated=False, mode="w", ) !tree -I zarr.json path/to/directory.zarr The number of chunks on Tair matches our dask chunks, while there is now only a single chunk in the directory stores of each coordinate. Groups ~~~~~~ Nested groups in zarr stores can be represented by loading the store as a :py:class:`xarray.DataTree` object, similarly to netCDF. To open a whole zarr store as a tree of groups use the :py:func:`open_datatree` function. 
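For example (the store name here is hypothetical):

.. code:: python

    # hypothetical nested zarr store; each group becomes a node of the tree
    tree = xr.open_datatree("nested-store.zarr", engine="zarr")
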
To save a ``DataTree`` object as a zarr store containing many groups, use the :py:meth:`xarray.DataTree.to_zarr()` method. .. note:: Note that perfect round-tripping should always be possible with a zarr store (:ref:`unlike for netCDF files `), as zarr does not support "unused" dimensions. For the root group the same restrictions (:ref:`as for netCDF files `) apply. Due to file format specifications the on-disk root group name is always ``"/"`` overriding any given ``DataTree`` root node name. .. _io.zarr.consolidated_metadata: Consolidated Metadata ~~~~~~~~~~~~~~~~~~~~~ Xarray needs to read all of the zarr metadata when it opens a dataset. In some storage mediums, such as with cloud object storage (e.g. `Amazon S3`_), this can introduce significant overhead, because two separate HTTP calls to the object store must be made for each variable in the dataset. By default Xarray uses a feature called *consolidated metadata*, storing all metadata for the entire dataset with a single key (by default called ``.zmetadata``). This typically drastically speeds up opening the store. (For more information on this feature, consult the `zarr docs on consolidating metadata `_.) By default, xarray writes consolidated metadata and attempts to read stores with consolidated metadata, falling back to use non-consolidated metadata for reads. Because this fall-back option is so much slower, xarray issues a ``RuntimeWarning`` with guidance when reading with consolidated metadata fails: Failed to open Zarr store with consolidated metadata, falling back to try reading non-consolidated metadata. This is typically much slower for opening a dataset. To silence this warning, consider: 1. Consolidating metadata in this existing store with :py:func:`zarr.consolidate_metadata`. 2. Explicitly setting ``consolidated=False``, to avoid trying to read consolidate metadata. 3. Explicitly setting ``consolidated=True``, to raise an error in this case instead of falling back to try reading non-consolidated metadata. Fill Values ~~~~~~~~~~~ Zarr arrays have a ``fill_value`` that is used for chunks that were never written to disk. For the Zarr version 2 format, Xarray will set ``fill_value`` to be equal to the CF/NetCDF ``"_FillValue"``. This is ``np.nan`` by default for floats, and unset otherwise. Note that the Zarr library will set a default ``fill_value`` if not specified (usually ``0``). For the Zarr version 3 format, ``_FillValue`` and ```fill_value`` are decoupled. So you can set ``fill_value`` in ``encoding`` as usual. Note that at read-time, you can control whether ``_FillValue`` is masked using the ``mask_and_scale`` kwarg; and whether Zarr's ``fill_value`` is treated as synonymous with ``_FillValue`` using the ``use_zarr_fill_value_as_mask`` kwarg to :py:func:`xarray.open_zarr`. .. _io.kerchunk: Kerchunk -------- `Kerchunk `_ is a Python library that allows you to access chunked and compressed data formats (such as NetCDF3, NetCDF4, HDF5, GRIB2, TIFF & FITS), many of which are primary data formats for many data archives, by viewing the whole archive as an ephemeral `Zarr`_ dataset which allows for parallel, chunk-specific access. Instead of creating a new copy of the dataset in the Zarr spec/format or downloading the files locally, Kerchunk reads through the data archive and extracts the byte range and compression information of each chunk and saves as a ``reference``. These references are then saved as ``json`` files or ``parquet`` (more efficient) for later use. 
You can view some of these stored in the ``references`` directory `here `_. .. note:: These references follow this `specification `_. Packages like `kerchunk`_ and `virtualizarr `_ help in creating and reading these references. Reading these data archives becomes really easy with ``kerchunk`` in combination with ``xarray``, especially when these archives are large in size. A single combined reference can refer to thousands of the original data files present in these archives. You can view the whole dataset with from this combined reference using the above packages. The following example shows opening a single ``json`` reference to the ``saved_on_disk.h5`` file created above. If the file were instead stored remotely (e.g. ``s3://saved_on_disk.h5``) you can use ``storage_options`` that are used to `configure fsspec `_: .. jupyter-execute:: ds_kerchunked = xr.open_dataset( "./combined.json", engine="kerchunk", storage_options={}, ) ds_kerchunked .. note:: You can refer to the `project pythia kerchunk cookbook `_ and the `pangeo guide on kerchunk `_ for more information. .. _io.iris: Iris ---- The Iris_ tool allows easy reading of common meteorological and climate model formats (including GRIB and UK MetOffice PP files) into ``Cube`` objects which are in many ways very similar to ``DataArray`` objects, while enforcing a CF-compliant data model. DataArray ``to_iris`` and ``from_iris`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If iris is installed, xarray can convert a ``DataArray`` into a ``Cube`` using :py:meth:`DataArray.to_iris`: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4, 5), dims=["x", "y"], coords=dict(x=[10, 20, 30, 40], y=pd.date_range("2000-01-01", periods=5)), ) cube = da.to_iris() print(cube) Conversely, we can create a new ``DataArray`` object from a ``Cube`` using :py:meth:`DataArray.from_iris`: .. jupyter-execute:: da_cube = xr.DataArray.from_iris(cube) da_cube Ncdata ~~~~~~ Ncdata_ provides more sophisticated means of transferring data, including entire datasets. It uses the file saving and loading functions in both projects to provide a more "correct" translation between them, but still with very low overhead and not using actual disk files. Here we load an xarray dataset and convert it to Iris cubes: .. jupyter-execute:: :stderr: ds = xr.tutorial.open_dataset("air_temperature_gradient") cubes = ncdata.iris_xarray.cubes_from_xarray(ds) print(cubes) .. jupyter-execute:: print(cubes[1]) And we can convert the cubes back to an xarray dataset: .. jupyter-execute:: # ensure dataset-level and variable-level attributes loaded correctly iris.FUTURE.save_split_attrs = True ds = ncdata.iris_xarray.cubes_to_xarray(cubes) ds Ncdata can also adjust file data within load and save operations, to fix data loading problems or provide exact save formatting without needing to modify files on disk. See for example : `ncdata usage examples`_ .. _Iris: https://scitools-iris.readthedocs.io .. _Ncdata: https://ncdata.readthedocs.io/en/latest/index.html .. _ncdata usage examples: https://github.com/pp-mo/ncdata/tree/v0.1.2?tab=readme-ov-file#correct-a-miscoded-attribute-in-iris-input OPeNDAP ------- Xarray includes support for `OPeNDAP`__ (via the netCDF4 library or Pydap), which lets us access large datasets over HTTP. __ https://www.opendap.org/ For example, we can open a connection to GBs of weather data produced by the `PRISM`__ project, and hosted by `IRI`__ at Columbia: __ https://www.prism.oregonstate.edu/ __ https://iri.columbia.edu/ .. 
jupyter-input:: remote_data = xr.open_dataset( "http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods", decode_times=False, ) remote_data .. jupyter-output:: Dimensions: (T: 1422, X: 1405, Y: 621) Coordinates: * X (X) float32 -125.0 -124.958 -124.917 -124.875 -124.833 -124.792 -124.75 ... * T (T) float32 -779.5 -778.5 -777.5 -776.5 -775.5 -774.5 -773.5 -772.5 -771.5 ... * Y (Y) float32 49.9167 49.875 49.8333 49.7917 49.75 49.7083 49.6667 49.625 ... Data variables: ppt (T, Y, X) float64 ... tdmean (T, Y, X) float64 ... tmax (T, Y, X) float64 ... tmin (T, Y, X) float64 ... Attributes: Conventions: IRIDL expires: 1375315200 .. TODO: update this example to show off decode_cf? .. note:: Like many real-world datasets, this dataset does not entirely follow `CF conventions`_. Unexpected formats will usually cause xarray's automatic decoding to fail. The way to work around this is to either set ``decode_cf=False`` in ``open_dataset`` to turn off all use of CF conventions, or by only disabling the troublesome parser. In this case, we set ``decode_times=False`` because the time axis here provides the calendar attribute in a format that xarray does not expect (the integer ``360`` instead of a string like ``'360_day'``). We can select and slice this data any number of times, and nothing is loaded over the network until we look at particular values: .. jupyter-input:: tmax = remote_data["tmax"][:500, ::3, ::3] tmax .. jupyter-output:: [48541500 values with dtype=float64] Coordinates: * Y (Y) float32 49.9167 49.7917 49.6667 49.5417 49.4167 49.2917 ... * X (X) float32 -125.0 -124.875 -124.75 -124.625 -124.5 -124.375 ... * T (T) float32 -779.5 -778.5 -777.5 -776.5 -775.5 -774.5 -773.5 ... Attributes: pointwidth: 120 standard_name: air_temperature units: Celsius_scale expires: 1443657600 .. jupyter-input:: # the data is downloaded automatically when we make the plot tmax[0].plot() .. image:: ../_static/opendap-prism-tmax.png Some servers require authentication before we can access the data. Pydap uses a `Requests`__ session object (which the user can pre-define), and this session object can recover `authentication`__` credentials from a locally stored ``.netrc`` file. For example, to connect to a server that requires NASA's URS authentication, with the username/password credentials stored on a locally accessible ``.netrc``, access to OPeNDAP data should be as simple as this:: import xarray as xr import requests my_session = requests.Session() ds_url = 'https://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc' ds = xr.open_dataset(ds_url, session=my_session, engine="pydap") Moreover, a bearer token header can be included in a `Requests`__ session object, allowing for token-based authentication which OPeNDAP servers can use to avoid some redirects. Lastly, OPeNDAP servers may provide endpoint URLs for different OPeNDAP protocols, DAP2 and DAP4. To specify which protocol between the two options to use, you can replace the scheme of the url with the name of the protocol. For example:: # dap2 url ds_url = 'dap2://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc' # dap4 url ds_url = 'dap4://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc' While most OPeNDAP servers implement DAP2, not all servers implement DAP4. It is recommended to check if the URL you are using `supports DAP4`__ by checking the URL on a browser. __ https://docs.python-requests.org __ https://pydap.github.io/pydap/en/notebooks/Authentication.html __ https://pydap.github.io/pydap/en/faqs/dap2_or_dap4_url.html .. 
_io.pickle: Pickle ------ The simplest way to serialize an xarray object is to use Python's built-in pickle module: .. jupyter-execute:: import pickle # use the highest protocol (-1) because it is way faster than the default # text based pickle format pkl = pickle.dumps(ds, protocol=-1) pickle.loads(pkl) Pickling is important because it doesn't require any external libraries and lets you use xarray objects with Python modules like :py:mod:`multiprocessing` or :ref:`Dask `. However, pickling is **not recommended for long-term storage**. Restoring a pickle requires that the internal structure of the types for the pickled data remain unchanged. Because the internal design of xarray is still being refined, we make no guarantees (at this point) that objects pickled with this version of xarray will work in future versions. .. note:: When pickling an object opened from a NetCDF file, the pickle file will contain a reference to the file on disk. If you want to store the actual array values, load it into memory first with :py:meth:`Dataset.load` or :py:meth:`Dataset.compute`. .. _dictionary io: Dictionary ---------- We can convert a ``Dataset`` (or a ``DataArray``) to a dict using :py:meth:`Dataset.to_dict`: .. jupyter-execute:: ds = xr.Dataset({"foo": ("x", np.arange(30))}) d = ds.to_dict() d We can create a new xarray object from a dict using :py:meth:`Dataset.from_dict`: .. jupyter-execute:: ds_dict = xr.Dataset.from_dict(d) ds_dict Dictionary support allows for flexible use of xarray objects. It doesn't require external libraries and dicts can easily be pickled, or converted to json, or geojson. All the values are converted to lists, so dicts might be quite large. To export just the dataset schema without the data itself, use the ``data=False`` option: .. jupyter-execute:: ds.to_dict(data=False) .. jupyter-execute:: :hide-code: # We're now done with the dataset named `ds`. Although the `with` statement closed # the dataset, displaying the unpickled pickle of `ds` re-opened "saved_on_disk.nc". # However, `ds` (rather than the unpickled dataset) refers to the open file. Delete # `ds` to close the file. del ds for f in ["saved_on_disk.nc", "saved_on_disk.h5"]: if os.path.exists(f): os.remove(f) This can be useful for generating indices of dataset contents to expose to search indices or other automated data discovery tools. .. _io.rasterio: Rasterio -------- GDAL readable raster data using `rasterio`_ such as GeoTIFFs can be opened using the `rioxarray`_ extension. `rioxarray`_ can also handle geospatial related tasks such as re-projecting and clipping. .. jupyter-input:: import rioxarray rds = rioxarray.open_rasterio("RGB.byte.tif") rds .. jupyter-output:: [1703814 values with dtype=uint8] Coordinates: * band (band) int64 1 2 3 * y (y) float64 2.827e+06 2.826e+06 ... 2.612e+06 2.612e+06 * x (x) float64 1.021e+05 1.024e+05 ... 3.389e+05 3.392e+05 spatial_ref int64 0 Attributes: STATISTICS_MAXIMUM: 255 STATISTICS_MEAN: 29.947726688477 STATISTICS_MINIMUM: 0 STATISTICS_STDDEV: 52.340921626611 transform: (300.0379266750948, 0.0, 101985.0, 0.0, -300.0417827... _FillValue: 0.0 scale_factor: 1.0 add_offset: 0.0 grid_mapping: spatial_ref .. jupyter-input:: rds.rio.crs # CRS.from_epsg(32618) rds4326 = rds.rio.reproject("epsg:4326") rds4326.rio.crs # CRS.from_epsg(4326) rds4326.rio.to_raster("RGB.byte.4326.tif") .. _rasterio: https://rasterio.readthedocs.io/en/latest/ .. _rioxarray: https://corteva.github.io/rioxarray/stable/ .. 
_test files: https://github.com/rasterio/rasterio/blob/master/tests/data/RGB.byte.tif .. _pyproj: https://github.com/pyproj4/pyproj .. _io.cfgrib: .. jupyter-execute:: :hide-code: import shutil shutil.rmtree("foo.zarr") shutil.rmtree("path/to/directory.zarr") GRIB format via cfgrib ---------------------- Xarray supports reading GRIB files via ECMWF cfgrib_ python driver, if it is installed. To open a GRIB file supply ``engine='cfgrib'`` to :py:func:`open_dataset` after installing cfgrib_: .. jupyter-input:: ds_grib = xr.open_dataset("example.grib", engine="cfgrib") We recommend installing cfgrib via conda:: conda install -c conda-forge cfgrib .. _cfgrib: https://github.com/ecmwf/cfgrib CSV and other formats supported by pandas ----------------------------------------- For more options (tabular formats and CSV files in particular), consider exporting your objects to pandas and using its broad range of `IO tools`_. For CSV files, one might also consider `xarray_extras`_. .. _xarray_extras: https://xarray-extras.readthedocs.io/en/latest/api/csv.html .. _IO tools: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html Third party libraries --------------------- More formats are supported by extension libraries: - `xarray-mongodb `_: Store xarray objects on MongoDB xarray-2025.09.0/doc/user-guide/options.rst000066400000000000000000000015141505620616400204100ustar00rootroot00000000000000.. currentmodule:: xarray .. _options: Configuration ============= Xarray offers a small number of configuration options through :py:func:`set_options`. With these, you can 1. Control the ``repr``: - ``display_expand_attrs`` - ``display_expand_coords`` - ``display_expand_data`` - ``display_expand_data_vars`` - ``display_max_rows`` - ``display_style`` 2. Control behaviour during operations: ``arithmetic_join``, ``keep_attrs``, ``use_bottleneck``. 3. Control colormaps for plots:``cmap_divergent``, ``cmap_sequential``. 4. Aspects of file reading: ``file_cache_maxsize``, ``warn_on_unclosed_files``. You can set these options either globally :: xr.set_options(arithmetic_join="exact") or locally as a context manager: :: with xr.set_options(arithmetic_join="exact"): # do operation here pass xarray-2025.09.0/doc/user-guide/pandas.rst000066400000000000000000000232611505620616400201660ustar00rootroot00000000000000.. currentmodule:: xarray .. _pandas: =================== Working with pandas =================== One of the most important features of xarray is the ability to convert to and from :py:mod:`pandas` objects to interact with the rest of the PyData ecosystem. For example, for plotting labeled data, we highly recommend using the `visualization built in to pandas itself`__ or provided by the pandas aware libraries such as `Seaborn`__. __ https://pandas.pydata.org/pandas-docs/stable/visualization.html __ https://seaborn.pydata.org/ .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) Hierarchical and tidy data ~~~~~~~~~~~~~~~~~~~~~~~~~~ Tabular data is easiest to work with when it meets the criteria for `tidy data`__: * Each column holds a different variable. * Each rows holds a different observation. __ https://www.jstatsoft.org/v59/i10/ In this "tidy data" format, we can represent any :py:class:`Dataset` and :py:class:`DataArray` in terms of :py:class:`~pandas.DataFrame` and :py:class:`~pandas.Series`, respectively (and vice-versa). 
The representation works by flattening non-coordinates to 1D, and turning the tensor product of coordinate indexes into a :py:class:`pandas.MultiIndex`. Dataset and DataFrame --------------------- To convert any dataset to a ``DataFrame`` in tidy form, use the :py:meth:`Dataset.to_dataframe()` method: .. jupyter-execute:: ds = xr.Dataset( {"foo": (("x", "y"), np.random.randn(2, 3))}, coords={ "x": [10, 20], "y": ["a", "b", "c"], "along_x": ("x", np.random.randn(2)), "scalar": 123, }, ) ds .. jupyter-execute:: df = ds.to_dataframe() df We see that each variable and coordinate in the Dataset is now a column in the DataFrame, with the exception of indexes which are in the index. To convert the ``DataFrame`` to any other convenient representation, use ``DataFrame`` methods like :py:meth:`~pandas.DataFrame.reset_index`, :py:meth:`~pandas.DataFrame.stack` and :py:meth:`~pandas.DataFrame.unstack`. For datasets containing dask arrays where the data should be lazily loaded, see the :py:meth:`Dataset.to_dask_dataframe()` method. To create a ``Dataset`` from a ``DataFrame``, use the :py:meth:`Dataset.from_dataframe` class method or the equivalent :py:meth:`pandas.DataFrame.to_xarray` method: .. jupyter-execute:: xr.Dataset.from_dataframe(df) Notice that the dimensions of variables in the ``Dataset`` have now expanded after the round-trip conversion to a ``DataFrame``. This is because every object in a ``DataFrame`` must have the same indices, so we need to broadcast the data of each array to the full size of the new ``MultiIndex``. Likewise, all the coordinates (other than indexes) ended up as variables, because pandas does not distinguish non-index coordinates. DataArray and Series -------------------- ``DataArray`` objects have a complementary representation in terms of a :py:class:`~pandas.Series`. Using a Series preserves the ``Dataset`` to ``DataArray`` relationship, because ``DataFrames`` are dict-like containers of ``Series``. The methods are very similar to those for working with DataFrames: .. jupyter-execute:: s = ds["foo"].to_series() s .. jupyter-execute:: # or equivalently, with Series.to_xarray() xr.DataArray.from_series(s) Both the ``from_series`` and ``from_dataframe`` methods use reindexing, so they work even if the hierarchical index is not a full tensor product: .. jupyter-execute:: s[::2] .. jupyter-execute:: s[::2].to_xarray() Lossless and reversible conversion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The previous ``Dataset`` example shows that the conversion is not reversible (lossy roundtrip) and that the size of the ``Dataset`` increases. Particularly after a roundtrip, the following deviations are noted: - a non-dimension Dataset ``coordinate`` is converted into ``variable`` - a non-dimension DataArray ``coordinate`` is not converted - ``dtype`` is not always the same (e.g. "str" is converted to "object") - ``attrs`` metadata is not conserved To avoid these problems, the third-party `ntv-pandas `__ library offers lossless and reversible conversions between ``Dataset``/ ``DataArray`` and pandas ``DataFrame`` objects. This solution is particularly interesting for converting any ``DataFrame`` into a ``Dataset`` (the converter finds the multidimensional structure hidden by the tabular structure). The `ntv-pandas examples `__ show how to improve the conversion for the previous ``Dataset`` example and for more complex examples. 
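As a small illustration of the first point above, a sketch of the lossy round trip using the ``ds`` defined earlier in this section:

.. code:: python

    # the non-dimension coordinate "along_x" comes back as a data variable
    ds_roundtrip = xr.Dataset.from_dataframe(ds.to_dataframe())
    "along_x" in ds_roundtrip.coords  # False
    "along_x" in ds_roundtrip.data_vars  # True
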
Multi-dimensional data ~~~~~~~~~~~~~~~~~~~~~~ Tidy data is great, but it sometimes you want to preserve dimensions instead of automatically stacking them into a ``MultiIndex``. :py:meth:`DataArray.to_pandas()` is a shortcut that lets you convert a DataArray directly into a pandas object with the same dimensionality, if available in pandas (i.e., a 1D array is converted to a :py:class:`~pandas.Series` and 2D to :py:class:`~pandas.DataFrame`): .. jupyter-execute:: arr = xr.DataArray( np.random.randn(2, 3), coords=[("x", [10, 20]), ("y", ["a", "b", "c"])] ) df = arr.to_pandas() df To perform the inverse operation of converting any pandas objects into a data array with the same shape, simply use the :py:class:`DataArray` constructor: .. jupyter-execute:: xr.DataArray(df) Both the ``DataArray`` and ``Dataset`` constructors directly convert pandas objects into xarray objects with the same shape. This means that they preserve all use of multi-indexes: .. jupyter-execute:: index = pd.MultiIndex.from_arrays( [["a", "a", "b"], [0, 1, 2]], names=["one", "two"] ) df = pd.DataFrame({"x": 1, "y": 2}, index=index) ds = xr.Dataset(df) ds However, you will need to set dimension names explicitly, either with the ``dims`` argument on in the ``DataArray`` constructor or by calling :py:class:`~Dataset.rename` on the new object. .. _panel transition: Transitioning from pandas.Panel to xarray ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``Panel``, pandas' data structure for 3D arrays, was always a second class data structure compared to the Series and DataFrame. To allow pandas developers to focus more on its core functionality built around the DataFrame, pandas removed ``Panel`` in favor of directing users who use multi-dimensional arrays to xarray. Xarray has most of ``Panel``'s features, a more explicit API (particularly around indexing), and the ability to scale to >3 dimensions with the same interface. As discussed in the :ref:`data structures section of the docs `, there are two primary data structures in xarray: ``DataArray`` and ``Dataset``. You can imagine a ``DataArray`` as a n-dimensional pandas ``Series`` (i.e. a single typed array), and a ``Dataset`` as the ``DataFrame`` equivalent (i.e. a dict of aligned ``DataArray`` objects). So you can represent a Panel, in two ways: - As a 3-dimensional ``DataArray``, - Or as a ``Dataset`` containing a number of 2-dimensional DataArray objects. Let's take a look: .. jupyter-execute:: data = np.random.default_rng(0).random((2, 3, 4)) items = list("ab") major_axis = list("mno") minor_axis = pd.date_range(start="2000", periods=4, name="date") With old versions of pandas (prior to 0.25), this could stored in a ``Panel``: .. jupyter-input:: pd.Panel(data, items, major_axis, minor_axis) .. jupyter-output:: Dimensions: 2 (items) x 3 (major_axis) x 4 (minor_axis) Items axis: a to b Major_axis axis: m to o Minor_axis axis: 2000-01-01 00:00:00 to 2000-01-04 00:00:00 To put this data in a ``DataArray``, write: .. jupyter-execute:: array = xr.DataArray(data, [items, major_axis, minor_axis]) array As you can see, there are three dimensions (each is also a coordinate). Two of the axes of were unnamed, so have been assigned ``dim_0`` and ``dim_1`` respectively, while the third retains its name ``date``. You can also easily convert this data into ``Dataset``: .. jupyter-execute:: array.to_dataset(dim="dim_0") Here, there are two data variables, each representing a DataFrame on panel's ``items`` axis, and labeled as such. 
Each variable is a 2D array of the respective values along the ``items`` dimension. While the xarray docs are relatively complete, a few items stand out for Panel users: - A DataArray's data is stored as a numpy array, and so can only contain a single type. As a result, a Panel that contains :py:class:`~pandas.DataFrame` objects with multiple types will be converted to ``dtype=object``. A ``Dataset`` of multiple ``DataArray`` objects each with its own dtype will allow original types to be preserved. - :ref:`Indexing ` is similar to pandas, but more explicit and leverages xarray's naming of dimensions. - Because of those features, making much higher dimensional data is very practical. - Variables in ``Dataset`` objects can use a subset of its dimensions. For example, you can have one dataset with Person x Score x Time, and another with Person x Score. - You can use coordinates are used for both dimensions and for variables which _label_ the data variables, so you could have a coordinate Age, that labelled the Person dimension of a Dataset of Person x Score x Time. While xarray may take some getting used to, it's worth it! If anything is unclear, please `post an issue on GitHub `__ or `StackOverflow `__, and we'll endeavor to respond to the specific case or improve the general docs. xarray-2025.09.0/doc/user-guide/plotting.rst000066400000000000000000000666101505620616400205650ustar00rootroot00000000000000.. currentmodule:: xarray .. _plotting: Plotting ======== Introduction ------------ Labeled data enables expressive computations. These same labels can also be used to easily create informative plots. Xarray's plotting capabilities are centered around :py:class:`DataArray` objects. To plot :py:class:`Dataset` objects simply access the relevant DataArrays, i.e. ``dset['var1']``. Dataset specific plotting routines are also available (see :ref:`plot-dataset`). Here we focus mostly on arrays 2d or larger. If your data fits nicely into a pandas DataFrame then you're better off using one of the more developed tools there. Xarray plotting functionality is a thin wrapper around the popular `matplotlib `_ library. Matplotlib syntax and function names were copied as much as possible, which makes for an easy transition between the two. Matplotlib must be installed before xarray can plot. To use xarray's plotting capabilities with time coordinates containing ``cftime.datetime`` objects `nc-time-axis `_ v1.3.0 or later needs to be installed. For more extensive plotting applications consider the following projects: - `Seaborn `_: "provides a high-level interface for drawing attractive statistical graphics." Integrates well with pandas. - `HoloViews `_ and `GeoViews `_: "Composable, declarative data structures for building even complex visualizations easily." Includes native support for xarray objects. - `hvplot `_: ``hvplot`` makes it very easy to produce dynamic plots (backed by ``Holoviews`` or ``Geoviews``) by adding a ``hvplot`` accessor to DataArrays. - `Cartopy `_: Provides cartographic tools. Imports ~~~~~~~ .. jupyter-execute:: :hide-code: # Use defaults so we don't get gridlines in generated docs import matplotlib as mpl mpl.rcdefaults() The following imports are necessary for all of the examples. .. jupyter-execute:: import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import pandas as pd import xarray as xr For these examples we'll use the North American air temperature dataset. .. jupyter-execute:: airtemps = xr.tutorial.open_dataset("air_temperature") airtemps .. 
jupyter-execute:: # Convert to celsius air = airtemps.air - 273.15 # copy attributes to get nice figure labels and change Kelvin to Celsius air.attrs = airtemps.air.attrs air.attrs["units"] = "deg C" .. note:: Until :issue:`1614` is solved, you might need to copy over the metadata in ``attrs`` to get informative figure labels (as was done above). DataArrays ---------- One Dimension ~~~~~~~~~~~~~ ================ Simple Example ================ The simplest way to make a plot is to call the :py:func:`DataArray.plot()` method. .. jupyter-execute:: air1d = air.isel(lat=10, lon=10) air1d.plot(); Xarray uses the coordinate name along with metadata ``attrs.long_name``, ``attrs.standard_name``, ``DataArray.name`` and ``attrs.units`` (if available) to label the axes. The names ``long_name``, ``standard_name`` and ``units`` are copied from the `CF-conventions spec `_. When choosing names, the order of precedence is ``long_name``, ``standard_name`` and finally ``DataArray.name``. The y-axis label in the above plot was constructed from the ``long_name`` and ``units`` attributes of ``air1d``. .. jupyter-execute:: air1d.attrs ====================== Additional Arguments ====================== Additional arguments are passed directly to the matplotlib function which does the work. For example, :py:func:`xarray.plot.line` calls matplotlib.pyplot.plot_ passing in the index and the array values as x and y, respectively. So to make a line plot with blue triangles a matplotlib format string can be used: .. _matplotlib.pyplot.plot: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot .. jupyter-execute:: air1d[:200].plot.line("b-^"); .. note:: Not all xarray plotting methods support passing positional arguments to the wrapped matplotlib functions, but they do all support keyword arguments. Keyword arguments work the same way, and are more explicit. .. jupyter-execute:: air1d[:200].plot.line(color="purple", marker="o"); ========================= Adding to Existing Axis ========================= To add the plot to an existing axis pass in the axis as a keyword argument ``ax``. This works for all xarray plotting methods. In this example ``axs`` is an array consisting of the left and right axes created by ``plt.subplots``. .. jupyter-execute:: fig, axs = plt.subplots(ncols=2) print(axs) air1d.plot(ax=axs[0]) air1d.plot.hist(ax=axs[1]); On the right is a histogram created by :py:func:`xarray.plot.hist`. .. _plotting.figsize: ============================= Controlling the figure size ============================= You can pass a ``figsize`` argument to all xarray's plotting methods to control the figure size. For convenience, xarray's plotting methods also support the ``aspect`` and ``size`` arguments which control the size of the resulting image via the formula ``figsize = (aspect * size, size)``: .. jupyter-execute:: air1d.plot(aspect=2, size=3); This feature also works with :ref:`plotting.faceting`. For facet plots, ``size`` and ``aspect`` refer to a single panel (so that ``aspect * size`` gives the width of each facet in inches), while ``figsize`` refers to the entire figure (as for matplotlib's ``figsize`` argument). .. note:: If ``figsize`` or ``size`` are used, a new figure is created, so this is mutually exclusive with the ``ax`` argument. .. note:: The convention used by xarray (``figsize = (aspect * size, size)``) is borrowed from seaborn: it is therefore `not equivalent to matplotlib's`_. .. _not equivalent to matplotlib's: https://github.com/mwaskom/seaborn/issues/746 .. 
_plotting.multiplelines: ========================= Determine x-axis values ========================= Per default dimension coordinates are used for the x-axis (here the time coordinates). However, you can also use non-dimension coordinates, MultiIndex levels, and dimensions without coordinates along the x-axis. To illustrate this, let's calculate a 'decimal day' (epoch) from the time and assign it as a non-dimension coordinate: .. jupyter-execute:: decimal_day = (air1d.time - air1d.time[0]) / pd.Timedelta("1d") air1d_multi = air1d.assign_coords(decimal_day=("time", decimal_day.data)) air1d_multi To use ``'decimal_day'`` as x coordinate it must be explicitly specified: .. jupyter-execute:: air1d_multi.plot(x="decimal_day"); Creating a new MultiIndex named ``'date'`` from ``'time'`` and ``'decimal_day'``, it is also possible to use a MultiIndex level as x-axis: .. jupyter-execute:: air1d_multi = air1d_multi.set_index(date=("time", "decimal_day")) air1d_multi.plot(x="decimal_day"); Finally, if a dataset does not have any coordinates it enumerates all data points: .. jupyter-execute:: air1d_multi = air1d_multi.drop_vars(["date", "time", "decimal_day"]) air1d_multi.plot(); The same applies to 2D plots below. ==================================================== Multiple lines showing variation along a dimension ==================================================== It is possible to make line plots of two-dimensional data by calling :py:func:`xarray.plot.line` with appropriate arguments. Consider the 3D variable ``air`` defined above. We can use line plots to check the variation of air temperature at three different latitudes along a longitude line: .. jupyter-execute:: air.isel(lon=10, lat=[19, 21, 22]).plot.line(x="time"); It is required to explicitly specify either 1. ``x``: the dimension to be used for the x-axis, or 2. ``hue``: the dimension you want to represent by multiple lines. Thus, we could have made the previous plot by specifying ``hue='lat'`` instead of ``x='time'``. If required, the automatic legend can be turned off using ``add_legend=False``. Alternatively, ``hue`` can be passed directly to :py:func:`xarray.plot.line` as ``air.isel(lon=10, lat=[19,21,22]).plot.line(hue='lat')``. ======================== Dimension along y-axis ======================== It is also possible to make line plots such that the data are on the x-axis and a dimension is on the y-axis. This can be done by specifying the appropriate ``y`` keyword argument. .. jupyter-execute:: air.isel(time=10, lon=[10, 11]).plot(y="lat", hue="lon"); ============ Step plots ============ As an alternative, also a step plot similar to matplotlib's ``plt.step`` can be made using 1D data. .. jupyter-execute:: air1d[:20].plot.step(where="mid"); The argument ``where`` defines where the steps should be placed, options are ``'pre'`` (default), ``'post'``, and ``'mid'``. This is particularly handy when plotting data grouped with :py:meth:`Dataset.groupby_bins`. .. jupyter-execute:: air_grp = air.mean(["time", "lon"]).groupby_bins("lat", [0, 23.5, 66.5, 90]) air_mean = air_grp.mean() air_std = air_grp.std() air_mean.plot.step() (air_mean + air_std).plot.step(ls=":") (air_mean - air_std).plot.step(ls=":") plt.ylim(-20, 30) plt.title("Zonal mean temperature"); In this case, the actual boundaries of the bins are used and the ``where`` argument is ignored. Other axes kwargs ~~~~~~~~~~~~~~~~~ The keyword arguments ``xincrease`` and ``yincrease`` let you control the axes direction. .. 
jupyter-execute:: air.isel(time=10, lon=[10, 11]).plot.line( y="lat", hue="lon", xincrease=False, yincrease=False ); In addition, one can use ``xscale, yscale`` to set axes scaling; ``xticks, yticks`` to set axes ticks and ``xlim, ylim`` to set axes limits. These accept the same values as the matplotlib methods ``ax.set_(x,y)scale()``, ``ax.set_(x,y)ticks()``, ``ax.set_(x,y)lim()``, respectively. Two Dimensions ~~~~~~~~~~~~~~ ================ Simple Example ================ The default method :py:meth:`DataArray.plot` calls :py:func:`xarray.plot.pcolormesh` by default when the data is two-dimensional. .. jupyter-execute:: air2d = air.isel(time=500) air2d.plot(); All 2d plots in xarray allow the use of the keyword arguments ``yincrease`` and ``xincrease``. .. jupyter-execute:: air2d.plot(yincrease=False); .. note:: We use :py:func:`xarray.plot.pcolormesh` as the default two-dimensional plot method because it is more flexible than :py:func:`xarray.plot.imshow`. However, for large arrays, ``imshow`` can be much faster than ``pcolormesh``. If speed is important to you and you are plotting a regular mesh, consider using ``imshow``. ================ Missing Values ================ Xarray plots data with :ref:`missing_values`. .. jupyter-execute:: bad_air2d = air2d.copy() bad_air2d[dict(lat=slice(0, 10), lon=slice(0, 25))] = np.nan bad_air2d.plot(); ======================== Nonuniform Coordinates ======================== It's not necessary for the coordinates to be evenly spaced. Both :py:func:`xarray.plot.pcolormesh` (default) and :py:func:`xarray.plot.contourf` can produce plots with nonuniform coordinates. .. jupyter-execute:: b = air2d.copy() # Apply a nonlinear transformation to one of the coords b.coords["lat"] = np.log(b.coords["lat"]) b.plot(); ==================== Other types of plot ==================== There are several other options for plotting 2D data. Contour plot using :py:meth:`DataArray.plot.contour()` .. jupyter-execute:: air2d.plot.contour(); Filled contour plot using :py:meth:`DataArray.plot.contourf()` .. jupyter-execute:: air2d.plot.contourf(); Surface plot using :py:meth:`DataArray.plot.surface()` .. jupyter-execute:: # transpose just to make the example look a bit nicer air2d.T.plot.surface(); ==================== Calling Matplotlib ==================== Since this is a thin wrapper around matplotlib, all the functionality of matplotlib is available. .. jupyter-execute:: air2d.plot(cmap=plt.cm.Blues) plt.title("These colors prove North America\nhas fallen in the ocean") plt.ylabel("latitude") plt.xlabel("longitude"); .. note:: Xarray methods update label information and generally play around with the axes. So any kind of updates to the plot should be done *after* the call to the xarray's plot. In the example below, ``plt.xlabel`` effectively does nothing, since ``d_ylog.plot()`` updates the xlabel. .. jupyter-execute:: plt.xlabel("Never gonna see this.") air2d.plot(); =========== Colormaps =========== Xarray borrows logic from Seaborn to infer what kind of color map to use. For example, consider the original data in Kelvins rather than Celsius: .. jupyter-execute:: airtemps.air.isel(time=0).plot(); The Celsius data contain 0, so a diverging color map was used. The Kelvins do not have 0, so the default color map was used. .. _robust-plotting: ======== Robust ======== Outliers often have an extreme effect on the output of the plot. Here we add two bad data points. This affects the color scale, washing out the plot. .. 
jupyter-execute:: air_outliers = airtemps.air.isel(time=0).copy() air_outliers[0, 0] = 100 air_outliers[-1, -1] = 400 air_outliers.plot(); This plot shows that we have outliers. The easy way to visualize the data without the outliers is to pass the parameter ``robust=True``. This will use the 2nd and 98th percentiles of the data to compute the color limits. .. jupyter-execute:: air_outliers.plot(robust=True); Observe that the ranges of the color bar have changed. The arrows on the color bar indicate that the colors include data points outside the bounds. ==================== Discrete Colormaps ==================== It is often useful, when visualizing 2d data, to use a discrete colormap, rather than the default continuous colormaps that matplotlib uses. The ``levels`` keyword argument can be used to generate plots with discrete colormaps. For example, to make a plot with 8 discrete color intervals: .. jupyter-execute:: air2d.plot(levels=8); It is also possible to use a list of levels to specify the boundaries of the discrete colormap: .. jupyter-execute:: air2d.plot(levels=[0, 12, 18, 30]); You can also specify a list of discrete colors through the ``colors`` argument: .. jupyter-execute:: flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"] air2d.plot(levels=[0, 12, 18, 30], colors=flatui); Finally, if you have `Seaborn `_ installed, you can also specify a seaborn color palette to the ``cmap`` argument. Note that ``levels`` *must* be specified with seaborn color palettes if using ``imshow`` or ``pcolormesh`` (but not with ``contour`` or ``contourf``, since levels are chosen automatically). .. jupyter-execute:: air2d.plot(levels=10, cmap="husl"); .. _plotting.faceting: Faceting ~~~~~~~~ Faceting here refers to splitting an array along one or two dimensions and plotting each group. Xarray's basic plotting is useful for plotting two dimensional arrays. What about three or four dimensional arrays? That's where facets become helpful. The general approach to plotting here is called β€œsmall multiples”, where the same kind of plot is repeated multiple times, and the specific use of small multiples to display the same relationship conditioned on one or more other variables is often called a β€œtrellis plot”. Consider the temperature data set. There are 4 observations per day for two years which makes for 2920 values along the time dimension. One way to visualize this data is to make a separate plot for each time period. The faceted dimension should not have too many values; faceting on the time dimension will produce 2920 plots. That's too much to be helpful. To handle this situation try performing an operation that reduces the size of the data in some way. For example, we could compute the average air temperature for each month and reduce the size of this dimension from 2920 -> 12. A simpler way is to just take a slice on that dimension. So let's use a slice to pick 6 times throughout the first year. .. jupyter-execute:: t = air.isel(time=slice(0, 365 * 4, 250)) t.coords ================ Simple Example ================ The easiest way to create faceted plots is to pass in ``row`` or ``col`` arguments to the xarray plotting methods/functions. This returns a :py:class:`xarray.plot.FacetGrid` object. .. jupyter-execute:: g_simple = t.plot(x="lon", y="lat", col="time", col_wrap=3); Faceting also works for line plots. .. 
jupyter-execute:: g_simple_line = t.isel(lat=slice(0, None, 4)).plot( x="lon", hue="lat", col="time", col_wrap=3 ); =============== 4 dimensional =============== For 4 dimensional arrays we can use the rows and columns of the grids. Here we create a 4 dimensional array by taking the original data and adding a fixed amount. Now we can see how the temperature maps would compare if one were much hotter. .. jupyter-execute:: t2 = t.isel(time=slice(0, 2)) t4d = xr.concat([t2, t2 + 40], pd.Index(["normal", "hot"], name="fourth_dim")) # This is a 4d array t4d.coords t4d.plot(x="lon", y="lat", col="time", row="fourth_dim"); ================ Other features ================ Faceted plotting supports other arguments common to xarray 2d plots. .. jupyter-execute:: hasoutliers = t.isel(time=slice(0, 5)).copy() hasoutliers[0, 0, 0] = -100 hasoutliers[-1, -1, -1] = 400 g = hasoutliers.plot.pcolormesh( x="lon", y="lat", col="time", col_wrap=3, robust=True, cmap="viridis", cbar_kwargs={"label": "this has outliers"}, ) =================== FacetGrid Objects =================== The object returned, ``g`` in the above examples, is a :py:class:`~xarray.plot.FacetGrid` object that links a :py:class:`DataArray` to a matplotlib figure with a particular structure. This object can be used to control the behavior of the multiple plots. It borrows an API and code from `Seaborn's FacetGrid `_. The structure is contained within the ``axs`` and ``name_dicts`` attributes, both 2d NumPy object arrays. .. jupyter-execute:: g.axs .. jupyter-execute:: g.name_dicts It's possible to select the :py:class:`xarray.DataArray` or :py:class:`xarray.Dataset` corresponding to the FacetGrid through the ``name_dicts``. .. jupyter-execute:: g.data.loc[g.name_dicts[0, 0]] Here is an example of using the lower level API and then modifying the axes after they have been plotted. .. jupyter-execute:: g = t.plot.imshow(x="lon", y="lat", col="time", col_wrap=3, robust=True) for i, ax in enumerate(g.axs.flat): ax.set_title("Air Temperature %d" % i) bottomright = g.axs[-1, -1] bottomright.annotate("bottom right", (240, 40)); :py:class:`~xarray.plot.FacetGrid` objects have methods that let you customize the automatically generated axis labels, axis ticks and plot titles. See :py:meth:`~xarray.plot.FacetGrid.set_titles`, :py:meth:`~xarray.plot.FacetGrid.set_xlabels`, :py:meth:`~xarray.plot.FacetGrid.set_ylabels` and :py:meth:`~xarray.plot.FacetGrid.set_ticks` for more information. Plotting functions can be applied to each subset of the data by calling :py:meth:`~xarray.plot.FacetGrid.map_dataarray` or to each subplot by calling :py:meth:`~xarray.plot.FacetGrid.map`. TODO: add an example of using the ``map`` method to plot dataset variables (e.g., with ``plt.quiver``). .. _plot-dataset: Datasets -------- Xarray has limited support for plotting Dataset variables against each other. Consider this dataset .. jupyter-execute:: ds = xr.tutorial.scatter_example_dataset(seed=42) ds Scatter ~~~~~~~ Let's plot the ``A`` DataArray as a function of the ``y`` coord .. jupyter-execute:: with xr.set_options(display_expand_data=False): display(ds.A) .. jupyter-execute:: ds.A.plot.scatter(x="y"); Same plot can be displayed using the dataset: .. jupyter-execute:: ds.plot.scatter(x="y", y="A"); Now suppose we want to scatter the ``A`` DataArray against the ``B`` DataArray .. jupyter-execute:: ds.plot.scatter(x="A", y="B"); The ``hue`` kwarg lets you vary the color by variable value .. 
jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="w"); You can force a legend instead of a colorbar by setting ``add_legend=True, add_colorbar=False``. .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="w", add_legend=True, add_colorbar=False); .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="w", add_legend=False, add_colorbar=True); The ``markersize`` kwarg lets you vary the point's size by variable value. You can additionally pass ``size_norm`` to control how the variable's values are mapped to point sizes. .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="y", markersize="z"); The ``z`` kwarg lets you plot the data along the z-axis as well. .. jupyter-execute:: ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x"); Faceting is also possible .. jupyter-execute:: ds.plot.scatter(x="A", y="B", hue="y", markersize="x", row="x", col="w"); And adding the z-axis .. jupyter-execute:: ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x", row="x", col="w"); For more advanced scatter plots, we recommend converting the relevant data variables to a pandas DataFrame and using the extensive plotting capabilities of ``seaborn``. Quiver ~~~~~~ Visualizing vector fields is supported with quiver plots: .. jupyter-execute:: ds.isel(w=1, z=1).plot.quiver(x="x", y="y", u="A", v="B"); where ``u`` and ``v`` denote the x and y direction components of the arrow vectors. Again, faceting is also possible: .. jupyter-execute:: ds.plot.quiver(x="x", y="y", u="A", v="B", col="w", row="z", scale=4); ``scale`` is required for faceted quiver plots. The scale determines the number of data units per arrow length unit, i.e. a smaller scale parameter makes the arrow longer. Streamplot ~~~~~~~~~~ Visualizing vector fields is also supported with streamline plots: .. jupyter-execute:: ds.isel(w=1, z=1).plot.streamplot(x="x", y="y", u="A", v="B"); where ``u`` and ``v`` denote the x and y direction components of the vectors tangent to the streamlines. Again, faceting is also possible: .. jupyter-execute:: ds.plot.streamplot(x="x", y="y", u="A", v="B", col="w", row="z"); .. _plot-maps: Maps ---- To follow this section you'll need to have Cartopy installed and working. This script will plot the air temperature on a map. .. jupyter-execute:: :stderr: air = xr.tutorial.open_dataset("air_temperature").air p = air.isel(time=0).plot( subplot_kws=dict(projection=ccrs.Orthographic(-80, 35), facecolor="gray"), transform=ccrs.PlateCarree(), ) p.axes.set_global() p.axes.coastlines(); When faceting on maps, the projection can be transferred to the ``plot`` function using the ``subplot_kws`` keyword. The axes for the subplots created by faceting are accessible in the object returned by ``plot``: .. jupyter-execute:: p = air.isel(time=[0, 4]).plot( transform=ccrs.PlateCarree(), col="time", subplot_kws={"projection": ccrs.Orthographic(-80, 35)}, ) for ax in p.axs.flat: ax.coastlines() ax.gridlines() Details ------- Ways to Use ~~~~~~~~~~~ There are three ways to use the xarray plotting functionality: 1. Use ``plot`` as a convenience method for a DataArray. 2. Access a specific plotting method from the ``plot`` attribute of a DataArray. 3. Directly from the xarray plot submodule. These are provided for user convenience; they all call the same code. .. jupyter-execute:: da = xr.DataArray(range(5)) fig, axs = plt.subplots(ncols=2, nrows=2) da.plot(ax=axs[0, 0]) da.plot.line(ax=axs[0, 1]) xr.plot.plot(da, ax=axs[1, 0]) xr.plot.line(da, ax=axs[1, 1]); Here the output is the same. 
Since the data is 1 dimensional the line plot was used. The convenience method :py:meth:`xarray.DataArray.plot` dispatches to an appropriate plotting function based on the dimensions of the ``DataArray`` and whether the coordinates are sorted and uniformly spaced. This table describes what gets plotted: =============== =========================== Dimensions Plotting function --------------- --------------------------- 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Coordinates ~~~~~~~~~~~ If you'd like to find out what's really going on in the coordinate system, read on. .. jupyter-execute:: a0 = xr.DataArray(np.zeros((4, 3, 2)), dims=("y", "x", "z"), name="temperature") a0[0, 0, 0] = 1 a = a0.isel(z=0) a The plot will produce an image corresponding to the values of the array. Hence the top left pixel will be a different color than the others. Before reading on, you may want to look at the coordinates and think carefully about what the limits, labels, and orientation for each of the axes should be. .. jupyter-execute:: a.plot(); It may seem strange that the values on the y axis are decreasing with -0.5 on the top. This is because the pixels are centered over their coordinates, and the axis labels and ranges correspond to the values of the coordinates. Multidimensional coordinates ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See also: :ref:`/examples/multidimensional-coords.ipynb`. You can plot irregular grids defined by multidimensional coordinates with xarray, but you'll have to tell the plot function to use these coordinates instead of the default ones: .. jupyter-execute:: lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4)) lon += lat / 10 lat += lon / 10 da = xr.DataArray( np.arange(20).reshape(4, 5), dims=["y", "x"], coords={"lat": (("y", "x"), lat), "lon": (("y", "x"), lon)}, ) da.plot.pcolormesh(x="lon", y="lat"); Note that in this case, xarray still follows the pixel centered convention. This might be undesirable in some cases, for example when your data is defined on a polar projection (:issue:`781`). This is why the default is to not follow this convention when plotting on a map: .. jupyter-execute:: :stderr: ax = plt.subplot(projection=ccrs.PlateCarree()) da.plot.pcolormesh(x="lon", y="lat", ax=ax) ax.scatter(lon, lat, transform=ccrs.PlateCarree()) ax.coastlines() ax.gridlines(draw_labels=True); You can however decide to infer the cell boundaries and use the ``infer_intervals`` keyword: .. jupyter-execute:: ax = plt.subplot(projection=ccrs.PlateCarree()) da.plot.pcolormesh(x="lon", y="lat", ax=ax, infer_intervals=True) ax.scatter(lon, lat, transform=ccrs.PlateCarree()) ax.coastlines() ax.gridlines(draw_labels=True); .. note:: The data model of xarray does not support datasets with `cell boundaries`_ yet. If you want to use these coordinates, you'll have to make the plots outside the xarray framework. .. _cell boundaries: https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#cell-boundaries One can also make line plots with multidimensional coordinates. In this case, ``hue`` must be a dimension name, not a coordinate name. .. jupyter-execute:: f, ax = plt.subplots(2, 1) da.plot.line(x="lon", hue="y", ax=ax[0]) da.plot.line(x="lon", hue="x", ax=ax[1]); xarray-2025.09.0/doc/user-guide/reshaping.rst000066400000000000000000000264151505620616400207040ustar00rootroot00000000000000.. 
_reshape: ############################### Reshaping and reorganizing data ############################### Reshaping and reorganizing data refers to the process of changing the structure or organization of data by modifying dimensions, array shapes, order of values, or indexes. Xarray provides several methods to accomplish these tasks. These methods are particularly useful for reshaping xarray objects for use in machine learning packages, such as scikit-learn, that usually require two-dimensional numpy arrays as inputs. Reshaping can also be required before passing data to external visualization tools, for example geospatial data might expect input organized into a particular format corresponding to stacks of satellite images. Importing the library --------------------- .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) # Use defaults so we don't get gridlines in generated docs import matplotlib as mpl mpl.rcdefaults() Reordering dimensions --------------------- To reorder dimensions on a :py:class:`~xarray.DataArray` or across all variables on a :py:class:`~xarray.Dataset`, use :py:meth:`~xarray.DataArray.transpose`. An ellipsis (`...`) can be used to represent all other dimensions: .. jupyter-execute:: ds = xr.Dataset({"foo": (("x", "y", "z"), [[[42]]]), "bar": (("y", "z"), [[24]])}) ds.transpose("y", "z", "x") # equivalent to ds.transpose(..., "x") .. jupyter-execute:: ds.transpose() # reverses all dimensions Expand and squeeze dimensions ----------------------------- To expand a :py:class:`~xarray.DataArray` or all variables on a :py:class:`~xarray.Dataset` along a new dimension, use :py:meth:`~xarray.DataArray.expand_dims` .. jupyter-execute:: expanded = ds.expand_dims("w") expanded This method attaches a new dimension with size 1 to all data variables. To remove such a size-1 dimension from the :py:class:`~xarray.DataArray` or :py:class:`~xarray.Dataset`, use :py:meth:`~xarray.DataArray.squeeze` .. jupyter-execute:: expanded.squeeze("w") Converting between datasets and arrays -------------------------------------- To convert from a Dataset to a DataArray, use :py:meth:`~xarray.Dataset.to_dataarray`: .. jupyter-execute:: arr = ds.to_dataarray() arr This method broadcasts all data variables in the dataset against each other, then concatenates them along a new dimension into a new array while preserving coordinates. To convert back from a DataArray to a Dataset, use :py:meth:`~xarray.DataArray.to_dataset`: .. jupyter-execute:: arr.to_dataset(dim="variable") The broadcasting behavior of ``to_dataarray`` means that the resulting array includes the union of data variable dimensions: .. jupyter-execute:: ds2 = xr.Dataset({"a": 0, "b": ("x", [3, 4, 5])}) # the input dataset has 4 elements ds2 .. jupyter-execute:: # the resulting array has 6 elements ds2.to_dataarray() Otherwise, the result could not be represented as an orthogonal array. If you use ``to_dataset`` without supplying the ``dim`` argument, the DataArray will be converted into a Dataset of one variable: .. jupyter-execute:: arr.to_dataset(name="combined") .. _reshape.stack: Stack and unstack ----------------- As part of xarray's nascent support for :py:class:`pandas.MultiIndex`, we have implemented :py:meth:`~xarray.DataArray.stack` and :py:meth:`~xarray.DataArray.unstack` method, for combining or splitting dimensions: .. 
jupyter-execute:: array = xr.DataArray( np.random.randn(2, 3), coords=[("x", ["a", "b"]), ("y", [0, 1, 2])] ) stacked = array.stack(z=("x", "y")) stacked .. jupyter-execute:: stacked.unstack("z") As elsewhere in xarray, an ellipsis (`...`) can be used to represent all unlisted dimensions: .. jupyter-execute:: stacked = array.stack(z=[..., "x"]) stacked These methods are modeled on the :py:class:`pandas.DataFrame` methods of the same name, although in xarray they always create new dimensions rather than adding to the existing index or columns. Like :py:meth:`DataFrame.unstack`, xarray's ``unstack`` always succeeds, even if the multi-index being unstacked does not contain all possible levels. Missing levels are filled in with ``NaN`` in the resulting object: .. jupyter-execute:: stacked2 = stacked[::2] stacked2 .. jupyter-execute:: stacked2.unstack("z") However, xarray's ``stack`` has an important difference from pandas: unlike pandas, it does not automatically drop missing values. Compare: .. jupyter-execute:: array = xr.DataArray([[np.nan, 1], [2, 3]], dims=["x", "y"]) array.stack(z=("x", "y")) .. jupyter-execute:: array.to_pandas().stack() We departed from pandas's behavior here because predictable shapes for new array dimensions is necessary for :ref:`dask`. .. _reshape.stacking_different: Stacking different variables together ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These stacking and unstacking operations are particularly useful for reshaping xarray objects for use in machine learning packages, such as `scikit-learn `_, that usually require two-dimensional numpy arrays as inputs. For datasets with only one variable, we only need ``stack`` and ``unstack``, but combining multiple variables in a :py:class:`xarray.Dataset` is more complicated. If the variables in the dataset have matching numbers of dimensions, we can call :py:meth:`~xarray.Dataset.to_dataarray` and then stack along the the new coordinate. But :py:meth:`~xarray.Dataset.to_dataarray` will broadcast the dataarrays together, which will effectively tile the lower dimensional variable along the missing dimensions. The method :py:meth:`xarray.Dataset.to_stacked_array` allows combining variables of differing dimensions without this wasteful copying while :py:meth:`xarray.DataArray.to_unstacked_dataset` reverses this operation. Just as with :py:meth:`xarray.Dataset.stack` the stacked coordinate is represented by a :py:class:`pandas.MultiIndex` object. These methods are used like this: .. jupyter-execute:: data = xr.Dataset( data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])}, coords={"y": ["u", "v", "w"]}, ) data .. jupyter-execute:: stacked = data.to_stacked_array("z", sample_dims=["x"]) stacked .. jupyter-execute:: unstacked = stacked.to_unstacked_dataset("z") unstacked In this example, ``stacked`` is a two dimensional array that we can easily pass to a scikit-learn or another generic numerical method. .. note:: Unlike with ``stack``, in ``to_stacked_array``, the user specifies the dimensions they **do not** want stacked. For a machine learning task, these unstacked dimensions can be interpreted as the dimensions over which samples are drawn, whereas the stacked coordinates are the features. Naturally, all variables should possess these sampling dimensions. .. 
_reshape.set_index: Set and reset index ------------------- Complementary to stack / unstack, xarray's ``.set_index``, ``.reset_index`` and ``.reorder_levels`` allow easy manipulation of ``DataArray`` or ``Dataset`` multi-indexes without modifying the data and its dimensions. You can create a multi-index from several 1-dimensional variables and/or coordinates using :py:meth:`~xarray.DataArray.set_index`: .. jupyter-execute:: da = xr.DataArray( np.random.rand(4), coords={ "band": ("x", ["a", "a", "b", "b"]), "wavenumber": ("x", np.linspace(200, 400, 4)), }, dims="x", ) da .. jupyter-execute:: mda = da.set_index(x=["band", "wavenumber"]) mda These coordinates can now be used for indexing, e.g., .. jupyter-execute:: mda.sel(band="a") Conversely, you can use :py:meth:`~xarray.DataArray.reset_index` to extract multi-index levels as coordinates (this is mainly useful for serialization): .. jupyter-execute:: mda.reset_index("x") :py:meth:`~xarray.DataArray.reorder_levels` allows changing the order of multi-index levels: .. jupyter-execute:: mda.reorder_levels(x=["wavenumber", "band"]) As of xarray v0.9 coordinate labels for each dimension are optional. You can also use ``.set_index`` / ``.reset_index`` to add / remove labels for one or several dimensions: .. jupyter-execute:: array = xr.DataArray([1, 2, 3], dims="x") array .. jupyter-execute:: array["c"] = ("x", ["a", "b", "c"]) array.set_index(x="c") .. jupyter-execute:: array = array.set_index(x="c") array = array.reset_index("x", drop=True) .. _reshape.shift_and_roll: Shift and roll -------------- To adjust coordinate labels, you can use the :py:meth:`~xarray.Dataset.shift` and :py:meth:`~xarray.Dataset.roll` methods: .. jupyter-execute:: array = xr.DataArray([1, 2, 3, 4], dims="x") array.shift(x=2) .. jupyter-execute:: array.roll(x=2, roll_coords=True) .. _reshape.sort: Sort ---- One may sort a DataArray/Dataset via :py:meth:`~xarray.DataArray.sortby` and :py:meth:`~xarray.Dataset.sortby`. The input can be an individual or list of 1D ``DataArray`` objects: .. jupyter-execute:: ds = xr.Dataset( { "A": (("x", "y"), [[1, 2], [3, 4]]), "B": (("x", "y"), [[5, 6], [7, 8]]), }, coords={"x": ["b", "a"], "y": [1, 0]}, ) dax = xr.DataArray([100, 99], [("x", [0, 1])]) day = xr.DataArray([90, 80], [("y", [0, 1])]) ds.sortby([day, dax]) As a shortcut, you can refer to existing coordinates by name: .. jupyter-execute:: ds.sortby("x") .. jupyter-execute:: ds.sortby(["y", "x"]) .. jupyter-execute:: ds.sortby(["y", "x"], ascending=False) .. _reshape.coarsen: Reshaping via coarsen --------------------- Whilst :py:class:`~xarray.DataArray.coarsen` is normally used for reducing your data's resolution by applying a reduction function (see the :ref:`page on computation`), it can also be used to reorganise your data without applying a computation via :py:meth:`~xarray.computation.rolling.DataArrayCoarsen.construct`. Taking our example tutorial air temperature dataset over the Northern US .. jupyter-execute:: air = xr.tutorial.open_dataset("air_temperature")["air"] air.isel(time=0).plot(x="lon", y="lat"); we can split this up into sub-regions of size ``(9, 18)`` points using :py:meth:`~xarray.computation.rolling.DataArrayCoarsen.construct`: .. jupyter-execute:: regions = air.coarsen(lat=9, lon=18, boundary="pad").construct( lon=("x_coarse", "x_fine"), lat=("y_coarse", "y_fine") ) with xr.set_options(display_expand_data=False): regions 9 new regions have been created, each of size 9 by 18 points. 
The ``boundary="pad"`` kwarg ensured that all regions are the same size even though the data does not evenly divide into these sizes. By plotting these 9 regions together via :ref:`faceting` we can see how they relate to the original data. .. jupyter-execute:: regions.isel(time=0).plot( x="x_fine", y="y_fine", col="x_coarse", row="y_coarse", yincrease=False ); We are now free to easily apply any custom computation to each coarsened region of our new dataarray. This would involve specifying that applied functions should act over the ``"x_fine"`` and ``"y_fine"`` dimensions, but broadcast over the ``"x_coarse"`` and ``"y_coarse"`` dimensions. xarray-2025.09.0/doc/user-guide/terminology.rst000066400000000000000000000353171505620616400212750ustar00rootroot00000000000000.. currentmodule:: xarray .. _terminology: Terminology =========== *Xarray terminology differs slightly from CF, mathematical conventions, and pandas; so we've put together a glossary of its terms. Here,* ``arr`` *refers to an xarray* :py:class:`DataArray` *in the examples. For more complete examples, please consult the relevant documentation.* .. jupyter-execute:: :hide-code: import numpy as np import xarray as xr .. glossary:: DataArray A multi-dimensional array with labeled or named dimensions. ``DataArray`` objects add metadata such as dimension names, coordinates, and attributes (defined below) to underlying "unlabeled" data structures such as numpy and Dask arrays. If its optional ``name`` property is set, it is a *named DataArray*. Dataset A dict-like collection of ``DataArray`` objects with aligned dimensions. Thus, most operations that can be performed on the dimensions of a single ``DataArray`` can be performed on a dataset. Datasets have data variables (see **Variable** below), dimensions, coordinates, and attributes. Variable A `NetCDF-like variable `_ consisting of dimensions, data, and attributes which describe a single array. The main functional difference between variables and numpy arrays is that numerical operations on variables implement array broadcasting by dimension name. Each ``DataArray`` has an underlying variable that can be accessed via ``arr.variable``. However, a variable is not fully described outside of either a ``Dataset`` or a ``DataArray``. .. note:: The :py:class:`Variable` class is low-level interface and can typically be ignored. However, the word "variable" appears often enough in the code and documentation that is useful to understand. Dimension In mathematics, the *dimension* of data is loosely the number of degrees of freedom for it. A *dimension axis* is a set of all points in which all but one of these degrees of freedom is fixed. We can think of each dimension axis as having a name, for example the "x dimension". In xarray, a ``DataArray`` object's *dimensions* are its named dimension axes ``da.dims``, and the name of the ``i``-th dimension is ``da.dims[i]``. If an array is created without specifying dimension names, the default dimension names will be ``dim_0``, ``dim_1``, and so forth. Coordinate An array that labels a dimension or set of dimensions of another ``DataArray``. In the usual one-dimensional case, the coordinate array's values can loosely be thought of as tick labels along a dimension. We distinguish :term:`Dimension coordinate` vs. :term:`Non-dimension coordinate` and :term:`Indexed coordinate` vs. :term:`Non-indexed coordinate`. A coordinate named ``x`` can be retrieved from ``arr.coords[x]``. 
A ``DataArray`` can have more coordinates than dimensions because a single dimension can be labeled by multiple coordinate arrays. However, only one coordinate array can be assigned as a particular dimension's dimension coordinate array. Dimension coordinate A one-dimensional coordinate array assigned to ``arr`` with both a name and dimension name in ``arr.dims``. Usually (but not always), a dimension coordinate is also an :term:`Indexed coordinate` so that it can be used for label-based indexing and alignment, like the index found on a :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. Non-dimension coordinate A coordinate array assigned to ``arr`` with a name in ``arr.coords`` but *not* in ``arr.dims``. These coordinates arrays can be one-dimensional or multidimensional, and they are useful for auxiliary labeling. As an example, multidimensional coordinates are often used in geoscience datasets when :doc:`the data's physical coordinates (such as latitude and longitude) differ from their logical coordinates <../examples/multidimensional-coords>`. Printing ``arr.coords`` will print all of ``arr``'s coordinate names, with the corresponding dimension(s) in parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``. Indexed coordinate A coordinate which has an associated :term:`Index`. Generally this means that the coordinate labels can be used for indexing (selection) and/or alignment. An indexed coordinate may have one or more arbitrary dimensions although in most cases it is also a :term:`Dimension coordinate`. It may or may not be grouped with other indexed coordinates depending on whether they share the same index. Indexed coordinates are marked by an asterisk ``*`` when printing a ``DataArray`` or ``Dataset``. Non-indexed coordinate A coordinate which has no associated :term:`Index`. It may still represent fixed labels along one or more dimensions but it cannot be used for label-based indexing and alignment. Index An *index* is a data structure optimized for efficient data selection and alignment within a discrete or continuous space that is defined by coordinate labels (unless it is a functional index). By default, Xarray creates a :py:class:`~xarray.indexes.PandasIndex` object (i.e., a :py:class:`pandas.Index` wrapper) for each :term:`Dimension coordinate`. For more advanced use cases (e.g., staggered or irregular grids, geospatial indexes), Xarray also accepts any instance of a specialized :py:class:`~xarray.indexes.Index` subclass that is associated to one or more arbitrary coordinates. The index associated with the coordinate ``x`` can be retrieved by ``arr.xindexes[x]`` (or ``arr.indexes["x"]`` if the index is convertible to a :py:class:`pandas.Index` object). If two coordinates ``x`` and ``y`` share the same index, ``arr.xindexes[x]`` and ``arr.xindexes[y]`` both return the same :py:class:`~xarray.indexes.Index` object. name The names of dimensions, coordinates, DataArray objects and data variables can be anything as long as they are :term:`hashable`. However, it is preferred to use :py:class:`str` typed names. scalar By definition, a scalar is not an :term:`array` and when converted to one, it has 0 dimensions. That means that, e.g., :py:class:`int`, :py:class:`float`, and :py:class:`str` objects are "scalar" while :py:class:`list` or :py:class:`tuple` are not. duck array `Duck arrays`__ are array implementations that behave like numpy arrays. They have to define the ``shape``, ``dtype`` and ``ndim`` properties. 
For integration with ``xarray``, the ``__array__``, ``__array_ufunc__`` and ``__array_function__`` protocols are also required. __ https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html Aligning Aligning refers to the process of ensuring that two or more DataArrays or Datasets have the same dimensions and coordinates, so that they can be combined or compared properly. .. jupyter-execute:: x = xr.DataArray( [[25, 35], [10, 24]], dims=("lat", "lon"), coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]}, ) y = xr.DataArray( [[20, 5], [7, 13]], dims=("lat", "lon"), coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]}, ) a, b = xr.align(x, y) # By default, an "inner join" is performed # so "a" is a copy of "x" where coordinates match "y" a Broadcasting A technique that allows operations to be performed on arrays with different shapes and dimensions. When performing operations on arrays with different shapes and dimensions, xarray will automatically attempt to broadcast the arrays to a common shape before the operation is applied. .. jupyter-execute:: # 'a' has shape (3,) and 'b' has shape (4,) a = xr.DataArray(np.array([1, 2, 3]), dims=["x"]) b = xr.DataArray(np.array([4, 5, 6, 7]), dims=["y"]) # 2D array with shape (3, 4) a + b Merging Merging is used to combine two or more Datasets or DataArrays that have different variables or coordinates along the same dimensions. When merging, xarray aligns the variables and coordinates of the different datasets along the specified dimensions and creates a new ``Dataset`` containing all the variables and coordinates. .. jupyter-execute:: # create two 1D arrays with names arr1 = xr.DataArray( [1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}, name="arr1" ) arr2 = xr.DataArray( [4, 5, 6], dims=["x"], coords={"x": [20, 30, 40]}, name="arr2" ) # merge the two arrays into a new dataset merged_ds = xr.Dataset({"arr1": arr1, "arr2": arr2}) merged_ds Concatenating Concatenating is used to combine two or more Datasets or DataArrays along a dimension. When concatenating, xarray arranges the datasets or dataarrays along a new dimension, and the resulting ``Dataset`` or ``Dataarray`` will have the same variables and coordinates along the other dimensions. .. jupyter-execute:: a = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y")) b = xr.DataArray([[5, 6], [7, 8]], dims=("x", "y")) c = xr.concat([a, b], dim="c") c Combining Combining is the process of arranging two or more DataArrays or Datasets into a single ``DataArray`` or ``Dataset`` using some combination of merging and concatenation operations. .. jupyter-execute:: ds1 = xr.Dataset( {"data": xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"))}, coords={"x": [1, 2], "y": [3, 4]}, ) ds2 = xr.Dataset( {"data": xr.DataArray([[5, 6], [7, 8]], dims=("x", "y"))}, coords={"x": [2, 3], "y": [4, 5]}, ) # combine the datasets combined_ds = xr.combine_by_coords([ds1, ds2], join="outer") combined_ds lazy Lazily-evaluated operations do not load data into memory until necessary. Instead of doing calculations right away, xarray lets you plan what calculations you want to do, like finding the average temperature in a dataset. This planning is called "lazy evaluation." Later, when you're ready to see the final result, you tell xarray, "Okay, go ahead and do those calculations now!" That's when xarray starts working through the steps you planned and gives you the answer you wanted. This lazy approach helps save time and memory because xarray only does the work when you actually need the results. 
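A minimal sketch of this behaviour (assuming the optional ``dask`` dependency is installed; the variable names here are purely illustrative):

.. code-block:: python

    import numpy as np
    import xarray as xr

    # Chunking the array makes subsequent operations lazy (dask-backed)
    arr = xr.DataArray(np.arange(12.0).reshape(3, 4), dims=("x", "y")).chunk({"x": 1})

    lazy_mean = arr.mean("y")     # only builds a task graph; nothing is computed yet
    result = lazy_mean.compute()  # the computation runs only when you ask for it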
labeled Labeled data has metadata describing the context of the data, not just the raw data values. This contextual information can be labels for array axes (i.e. dimension names) tick labels along axes (stored as Coordinate variables) or unique names for each array. These labels provide context and meaning to the data, making it easier to understand and work with. If you have temperature data for different cities over time. Using xarray, you can label the dimensions: one for cities and another for time. serialization Serialization is the process of converting your data into a format that makes it easy to save and share. When you serialize data in xarray, you're taking all those temperature measurements, along with their labels and other information, and turning them into a format that can be stored in a file or sent over the internet. xarray objects can be serialized into formats which store the labels alongside the data. Some supported serialization formats are files that can then be stored or transferred (e.g. netCDF), whilst others are protocols that allow for data access over a network (e.g. Zarr). indexing :ref:`Indexing` is how you select subsets of your data which you are interested in. - Label-based Indexing: Selecting data by passing a specific label and comparing it to the labels stored in the associated coordinates. You can use labels to specify what you want like "Give me the temperature for New York on July 15th." - Positional Indexing: You can use numbers to refer to positions in the data like "Give me the third temperature value" This is useful when you know the order of your data but don't need to remember the exact labels. - Slicing: You can take a "slice" of your data, like you might want all temperatures from July 1st to July 10th. xarray supports slicing for both positional and label-based indexing. DataTree A tree-like collection of ``Dataset`` objects. A *tree* is made up of one or more *nodes*, each of which can store the same information as a single ``Dataset`` (accessed via ``.dataset``). This data is stored in the same way as in a ``Dataset``, i.e. in the form of data :term:`variables`, :term:`dimensions`, :term:`coordinates`, and attributes. The nodes in a tree are linked to one another, and each node is its own instance of ``DataTree`` object. Each node can have zero or more *children* (stored in a dictionary-like manner under their corresponding *names*), and those child nodes can themselves have children. If a node is a child of another node that other node is said to be its *parent*. Nodes can have a maximum of one parent, and if a node has no parent it is said to be the *root* node of that *tree*. Subtree A section of a *tree*, consisting of a *node* along with all the child nodes below it (and the child nodes below them, i.e. all so-called *descendant* nodes). Excludes the parent node and all nodes above. Group Another word for a subtree, reflecting how the hierarchical structure of a ``DataTree`` allows for grouping related data together. Analogous to a single `netCDF group `_ or `Zarr group `_. xarray-2025.09.0/doc/user-guide/testing.rst000066400000000000000000000264001505620616400203730ustar00rootroot00000000000000.. _testing: Testing your code ================= .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) .. _testing.hypothesis: Hypothesis testing ------------------ .. note:: Testing with hypothesis is a fairly advanced topic. 
Before reading this section it is recommended that you take a look at our guide to xarray's :ref:`data structures`, are familiar with conventional unit testing in `pytest `_, and have seen the `hypothesis library documentation `_. `The hypothesis library `_ is a powerful tool for property-based testing. Instead of writing tests for one example at a time, it allows you to write tests parameterized by a source of many dynamically generated examples. For example you might have written a test which you wish to be parameterized by the set of all possible integers via :py:func:`hypothesis.strategies.integers()`. Property-based testing is extremely powerful, because (unlike more conventional example-based testing) it can find bugs that you did not even think to look for! Strategies ~~~~~~~~~~ Each source of examples is called a "strategy", and xarray provides a range of custom strategies which produce xarray data structures containing arbitrary data. You can use these to efficiently test downstream code, quickly ensuring that your code can handle xarray objects of all possible structures and contents. These strategies are accessible in the :py:mod:`xarray.testing.strategies` module, which provides .. currentmodule:: xarray .. autosummary:: testing.strategies.supported_dtypes testing.strategies.names testing.strategies.dimension_names testing.strategies.dimension_sizes testing.strategies.attrs testing.strategies.variables testing.strategies.unique_subset_of These build upon the numpy and array API strategies offered in :py:mod:`hypothesis.extra.numpy` and :py:mod:`hypothesis.extra.array_api`: .. jupyter-execute:: import hypothesis.extra.numpy as npst Generating Examples ~~~~~~~~~~~~~~~~~~~ To see an example of what each of these strategies might produce, you can call one followed by the ``.example()`` method, which is a general hypothesis method valid for all strategies. .. jupyter-execute:: import xarray.testing.strategies as xrst xrst.variables().example() .. jupyter-execute:: xrst.variables().example() .. jupyter-execute:: xrst.variables().example() You can see that calling ``.example()`` multiple times will generate different examples, giving you an idea of the wide range of data that the xarray strategies can generate. In your tests however you should not use ``.example()`` - instead you should parameterize your tests with the :py:func:`hypothesis.given` decorator: .. jupyter-execute:: from hypothesis import given .. jupyter-execute:: @given(xrst.variables()) def test_function_that_acts_on_variables(var): assert func(var) == ... Chaining Strategies ~~~~~~~~~~~~~~~~~~~ Xarray's strategies can accept other strategies as arguments, allowing you to customise the contents of the generated examples. .. jupyter-execute:: # generate a Variable containing an array with a complex number dtype, but all other details still arbitrary from hypothesis.extra.numpy import complex_number_dtypes xrst.variables(dtype=complex_number_dtypes()).example() This also works with custom strategies, or strategies defined in other packages. For example you could imagine creating a ``chunks`` strategy to specify particular chunking patterns for a dask-backed array. Fixing Arguments ~~~~~~~~~~~~~~~~ If you want to fix one aspect of the data structure, whilst allowing variation in the generated examples over all other aspects, then use :py:func:`hypothesis.strategies.just()`. .. 
jupyter-execute:: import hypothesis.strategies as st # Generates only variable objects with dimensions ["x", "y"] xrst.variables(dims=st.just(["x", "y"])).example() (This is technically another example of chaining strategies - :py:func:`hypothesis.strategies.just()` is simply a special strategy that just contains a single example.) To fix the length of dimensions you can instead pass ``dims`` as a mapping of dimension names to lengths (i.e. following xarray objects' ``.sizes()`` property), e.g. .. jupyter-execute:: # Generates only variables with dimensions ["x", "y"], of lengths 2 & 3 respectively xrst.variables(dims=st.just({"x": 2, "y": 3})).example() You can also use this to specify that you want examples which are missing some part of the data structure, for instance .. jupyter-execute:: # Generates a Variable with no attributes xrst.variables(attrs=st.just({})).example() Through a combination of chaining strategies and fixing arguments, you can specify quite complicated requirements on the objects your chained strategy will generate. .. jupyter-execute:: fixed_x_variable_y_maybe_z = st.fixed_dictionaries( {"x": st.just(2), "y": st.integers(3, 4)}, optional={"z": st.just(2)} ) fixed_x_variable_y_maybe_z.example() .. jupyter-execute:: special_variables = xrst.variables(dims=fixed_x_variable_y_maybe_z) special_variables.example() .. jupyter-execute:: special_variables.example() Here we have used one of hypothesis' built-in strategies :py:func:`hypothesis.strategies.fixed_dictionaries` to create a strategy which generates mappings of dimension names to lengths (i.e. the ``size`` of the xarray object we want). This particular strategy will always generate an ``x`` dimension of length 2, and a ``y`` dimension of length either 3 or 4, and will sometimes also generate a ``z`` dimension of length 2. By feeding this strategy for dictionaries into the ``dims`` argument of xarray's :py:func:`~st.variables` strategy, we can generate arbitrary :py:class:`~xarray.Variable` objects whose dimensions will always match these specifications. Generating Duck-type Arrays ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xarray objects don't have to wrap numpy arrays, in fact they can wrap any array type which presents the same API as a numpy array (so-called "duck array wrapping", see :ref:`wrapping numpy-like arrays `). Imagine we want to write a strategy which generates arbitrary ``Variable`` objects, each of which wraps a :py:class:`sparse.COO` array instead of a ``numpy.ndarray``. How could we do that? There are two ways: 1. Create a xarray object with numpy data and use the hypothesis' ``.map()`` method to convert the underlying array to a different type: .. jupyter-execute:: import sparse .. jupyter-execute:: def convert_to_sparse(var): return var.copy(data=sparse.COO.from_numpy(var.to_numpy())) .. jupyter-execute:: sparse_variables = xrst.variables(dims=xrst.dimension_names(min_dims=1)).map( convert_to_sparse ) sparse_variables.example() .. jupyter-execute:: sparse_variables.example() 2. Pass a function which returns a strategy which generates the duck-typed arrays directly to the ``array_strategy_fn`` argument of the xarray strategies: .. 
jupyter-execute:: import numpy as np def sparse_random_arrays(shape: tuple[int, ...]) -> st.SearchStrategy[sparse._coo.core.COO]: """Strategy which generates random sparse.COO arrays""" if shape is None: shape = npst.array_shapes() else: shape = st.just(shape) density = st.integers(min_value=0, max_value=1) # note sparse.random does not accept a dtype kwarg return st.builds(sparse.random, shape=shape, density=density) def sparse_random_arrays_fn( *, shape: tuple[int, ...], dtype: np.dtype ) -> st.SearchStrategy[sparse._coo.core.COO]: return sparse_random_arrays(shape=shape) .. jupyter-execute:: sparse_random_variables = xrst.variables( array_strategy_fn=sparse_random_arrays_fn, dtype=st.just(np.dtype("float64")) ) sparse_random_variables.example() Either approach is fine, but one may be more convenient than the other depending on the type of the duck array which you want to wrap. Compatibility with the Python Array API Standard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xarray aims to be compatible with any duck-array type that conforms to the `Python Array API Standard `_ (see our :ref:`docs on Array API Standard support `). .. warning:: The strategies defined in :py:mod:`testing.strategies` are **not** guaranteed to use array API standard-compliant dtypes by default. For example, arrays with the dtype ``np.dtype('float16')`` may be generated by :py:func:`testing.strategies.variables` (assuming the ``dtype`` kwarg was not explicitly passed), despite ``np.dtype('float16')`` not being in the array API standard. If the array type you want to generate has an array API-compliant top-level namespace (e.g. that which is conventionally imported as ``xp`` or similar), you can use this neat trick: .. jupyter-execute:: import numpy as xp # compatible in numpy 2.0 # use `import numpy.array_api as xp` in numpy>=1.23,<2.0 from hypothesis.extra.array_api import make_strategies_namespace xps = make_strategies_namespace(xp) xp_variables = xrst.variables( array_strategy_fn=xps.arrays, dtype=xps.scalar_dtypes(), ) xp_variables.example() Another array API-compliant duck array library would replace the import, e.g. ``import cupy as cp`` instead. Testing over Subsets of Dimensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A common task when testing xarray user code is checking that your function works for all valid input dimensions. We can chain strategies to achieve this, for which the helper strategy :py:func:`~testing.strategies.unique_subset_of` is useful. It works for lists of dimension names .. jupyter-execute:: dims = ["x", "y", "z"] xrst.unique_subset_of(dims).example() .. jupyter-execute:: xrst.unique_subset_of(dims).example() as well as for mappings of dimension names to sizes .. jupyter-execute:: dim_sizes = {"x": 2, "y": 3, "z": 4} xrst.unique_subset_of(dim_sizes).example() .. jupyter-execute:: xrst.unique_subset_of(dim_sizes).example() This is useful because operations like reductions can be performed over any subset of the xarray object's dimensions. For example, we can write a pytest test which checks that a reduction gives the expected result when applying that reduction along any possible valid subset of the Variable's dimensions. .. 
code-block:: python import numpy as np import numpy.testing as npt @given(st.data(), xrst.variables(dims=xrst.dimension_names(min_dims=1))) def test_mean(data, var): """Test that the mean of an xarray Variable is always equal to the mean of the underlying array.""" # specify arbitrary reduction along at least one dimension reduction_dims = data.draw(xrst.unique_subset_of(var.dims, min_size=1)) # create expected result (using nanmean because arrays with Nans will be generated) reduction_axes = tuple(var.get_axis_num(dim) for dim in reduction_dims) expected = np.nanmean(var.data, axis=reduction_axes) # assert property is always satisfied result = var.mean(dim=reduction_dims).data npt.assert_equal(expected, result) xarray-2025.09.0/doc/user-guide/time-series.rst000066400000000000000000000301321505620616400211410ustar00rootroot00000000000000.. currentmodule:: xarray .. _time-series: ================ Time series data ================ A major use case for xarray is multi-dimensional time-series data. Accordingly, we've copied many of the features that make working with time-series data in pandas such a joy to xarray. In most cases, we rely on pandas for the core functionality. .. jupyter-execute:: :hide-code: import numpy as np import pandas as pd import xarray as xr np.random.seed(123456) Creating datetime64 data ------------------------ Xarray uses the numpy dtypes :py:class:`numpy.datetime64` and :py:class:`numpy.timedelta64` with specified units (one of ``"s"``, ``"ms"``, ``"us"`` and ``"ns"``) to represent datetime data, which offer vectorized operations with numpy and smooth integration with pandas. To convert to or create regular arrays of :py:class:`numpy.datetime64` data, we recommend using :py:func:`pandas.to_datetime`, :py:class:`pandas.DatetimeIndex`, or :py:func:`xarray.date_range`: .. jupyter-execute:: pd.to_datetime(["2000-01-01", "2000-02-02"]) .. jupyter-execute:: pd.DatetimeIndex( ["2000-01-01 00:00:00", "2000-02-02 00:00:00"], dtype="datetime64[s]" ) .. jupyter-execute:: xr.date_range("2000-01-01", periods=365) .. jupyter-execute:: xr.date_range("2000-01-01", periods=365, unit="s") .. note:: Care has to be taken to create the output with the desired resolution. For :py:func:`pandas.date_range` the ``unit`` kwarg has to be specified, while for :py:func:`pandas.to_datetime` selecting the resolution is not possible at all. In that case :py:class:`pandas.DatetimeIndex` can be used directly. There is more in-depth information in section :ref:`internals.timecoding`. Alternatively, you can supply arrays of Python ``datetime`` objects. These get converted automatically when used as arguments in xarray objects (with us-resolution): .. jupyter-execute:: import datetime xr.Dataset({"time": datetime.datetime(2000, 1, 1)}) When reading or writing netCDF files, xarray automatically decodes datetime and timedelta arrays using `CF conventions`_ (that is, by using a ``units`` attribute like ``'days since 2000-01-01'``). .. _CF conventions: https://cfconventions.org .. note:: When decoding/encoding datetimes for non-standard calendars or for dates before `1582-10-15`_, xarray uses the `cftime`_ library by default. It was previously packaged with the ``netcdf4-python`` package under the name ``netcdftime`` but is now distributed separately. ``cftime`` is an :ref:`optional dependency` of xarray. .. _cftime: https://unidata.github.io/cftime .. _1582-10-15: https://en.wikipedia.org/wiki/Gregorian_calendar You can manually decode arrays in this form by passing a dataset to :py:func:`decode_cf`: .. 
jupyter-execute:: attrs = {"units": "hours since 2000-01-01"} ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)}) # Default decoding to 'ns'-resolution xr.decode_cf(ds) .. jupyter-execute:: # Decoding to 's'-resolution coder = xr.coders.CFDatetimeCoder(time_unit="s") xr.decode_cf(ds, decode_times=coder) From xarray 2025.01.2 the resolution of the dates can be one of ``"s"``, ``"ms"``, ``"us"`` or ``"ns"``. One limitation of using ``datetime64[ns]`` is that it limits the native representation of dates to those that fall between the years 1678 and 2262, which gets increased significantly with lower resolutions. When a store contains dates outside of these bounds (or dates < `1582-10-15`_ with a Gregorian, also known as standard, calendar), dates will be returned as arrays of :py:class:`cftime.datetime` objects and a :py:class:`CFTimeIndex` will be used for indexing. :py:class:`CFTimeIndex` enables most of the indexing functionality of a :py:class:`pandas.DatetimeIndex`. See :ref:`CFTimeIndex` for more information. Datetime indexing ----------------- Xarray borrows powerful indexing machinery from pandas (see :ref:`indexing`). This allows for several useful and succinct forms of indexing, particularly for ``datetime64`` data. For example, we support indexing with strings for single items and with the ``slice`` object: .. jupyter-execute:: time = pd.date_range("2000-01-01", freq="h", periods=365 * 24) ds = xr.Dataset({"foo": ("time", np.arange(365 * 24)), "time": time}) ds.sel(time="2000-01") .. jupyter-execute:: ds.sel(time=slice("2000-06-01", "2000-06-10")) You can also select a particular time by indexing with a :py:class:`datetime.time` object: .. jupyter-execute:: ds.sel(time=datetime.time(12)) For more details, read the pandas documentation and the section on :ref:`datetime_component_indexing` (i.e. using the ``.dt`` accessor). .. _dt_accessor: Datetime components ------------------- Similar to `pandas accessors`_, the components of datetime objects contained in a given ``DataArray`` can be quickly computed using a special ``.dt`` accessor. .. _pandas accessors: https://pandas.pydata.org/pandas-docs/stable/basics.html#basics-dt-accessors .. jupyter-execute:: time = pd.date_range("2000-01-01", freq="6h", periods=365 * 4) ds = xr.Dataset({"foo": ("time", np.arange(365 * 4)), "time": time}) ds.time.dt.hour .. jupyter-execute:: ds.time.dt.dayofweek The ``.dt`` accessor works on both coordinate dimensions as well as multi-dimensional data. Xarray also supports a notion of "virtual" or "derived" coordinates for `datetime components`__ implemented by pandas, including "year", "month", "day", "hour", "minute", "second", "dayofyear", "week", "dayofweek", "weekday" and "quarter": __ https://pandas.pydata.org/pandas-docs/stable/api.html#time-date-components .. jupyter-execute:: ds["time.month"] .. jupyter-execute:: ds["time.dayofyear"] For use as a derived coordinate, xarray adds ``'season'`` to the list of datetime components supported by pandas: .. jupyter-execute:: ds["time.season"] .. jupyter-execute:: ds["time"].dt.season The set of valid seasons consists of 'DJF', 'MAM', 'JJA' and 'SON', labeled by the first letters of the corresponding months. You can use these shortcuts with both Datasets and DataArray coordinates. In addition, xarray supports rounding operations ``floor``, ``ceil``, and ``round``. These operations require that you supply a `rounding frequency as a string argument.`__ __ https://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases .. 
jupyter-execute:: ds["time"].dt.floor("D") The ``.dt`` accessor can also be used to generate formatted datetime strings for arrays utilising the same formatting as the standard `datetime.strftime`_. .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior .. jupyter-execute:: ds["time"].dt.strftime("%a, %b %d %H:%M") .. _datetime_component_indexing: Indexing Using Datetime Components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can use use the ``.dt`` accessor when subsetting your data as well. For example, we can subset for the month of January using the following: .. jupyter-execute:: ds.isel(time=(ds.time.dt.month == 1)) You can also search for multiple months (in this case January through March), using ``isin``: .. jupyter-execute:: ds.isel(time=ds.time.dt.month.isin([1, 2, 3])) .. _resampling: Resampling and grouped operations --------------------------------- .. seealso:: For more generic documentation on grouping, see :ref:`groupby`. Datetime components couple particularly well with grouped operations for analyzing features that repeat over time. Here's how to calculate the mean by time of day: .. jupyter-execute:: ds.groupby("time.hour").mean() For upsampling or downsampling temporal resolutions, xarray offers a :py:meth:`Dataset.resample` method building on the core functionality offered by the pandas method of the same name. Resample uses essentially the same api as :py:meth:`pandas.DataFrame.resample` `in pandas`_. .. _in pandas: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling For example, we can downsample our dataset from hourly to 6-hourly: .. jupyter-execute:: ds.resample(time="6h") This will create a specialized :py:class:`~xarray.core.resample.DatasetResample` or :py:class:`~xarray.core.resample.DataArrayResample` object which saves information necessary for resampling. All of the reduction methods which work with :py:class:`Dataset` or :py:class:`DataArray` objects can also be used for resampling: .. jupyter-execute:: ds.resample(time="6h").mean() You can also supply an arbitrary reduction function to aggregate over each resampling group: .. jupyter-execute:: ds.resample(time="6h").reduce(np.mean) You can also resample on the time dimension while applying reducing along other dimensions at the same time by specifying the ``dim`` keyword argument .. code-block:: python ds.resample(time="6h").mean(dim=["time", "latitude", "longitude"]) For upsampling, xarray provides six methods: ``asfreq``, ``ffill``, ``bfill``, ``pad``, ``nearest`` and ``interpolate``. ``interpolate`` extends :py:func:`scipy.interpolate.interp1d` and supports all of its schemes. All of these resampling operations work on both Dataset and DataArray objects with an arbitrary number of dimensions. In order to limit the scope of the methods ``ffill``, ``bfill``, ``pad`` and ``nearest`` the ``tolerance`` argument can be set in coordinate units. Data that has indices outside of the given ``tolerance`` are set to ``NaN``. .. jupyter-execute:: ds.resample(time="1h").nearest(tolerance="1h") It is often desirable to center the time values after a resampling operation. That can be accomplished by updating the resampled dataset time coordinate values using time offset arithmetic via the :py:func:`pandas.tseries.frequencies.to_offset` function. .. jupyter-execute:: resampled_ds = ds.resample(time="6h").mean() offset = pd.tseries.frequencies.to_offset("6h") / 2 resampled_ds["time"] = resampled_ds.get_index("time") + offset resampled_ds .. 
seealso:: For more examples of using grouped operations on a time dimension, see :doc:`../examples/weather-data`. .. _seasonal_grouping: Handling Seasons ~~~~~~~~~~~~~~~~ Two extremely common time series operations are to group by seasons, and resample to a seasonal frequency. Xarray has historically supported some simple versions of these computations. For example, ``.groupby("time.season")`` (where the seasons are DJF, MAM, JJA, SON) and resampling to a seasonal frequency using Pandas syntax: ``.resample(time="QS-DEC")``. Quite commonly one wants more flexibility in defining seasons. For these use-cases, Xarray provides :py:class:`groupers.SeasonGrouper` and :py:class:`groupers.SeasonResampler`. .. currentmodule:: xarray.groupers .. jupyter-execute:: from xarray.groupers import SeasonGrouper ds.groupby(time=SeasonGrouper(["DJF", "MAM", "JJA", "SON"])).mean() Note how the seasons are in the specified order, unlike ``.groupby("time.season")`` where the seasons are sorted alphabetically. .. jupyter-execute:: ds.groupby("time.season").mean() :py:class:`SeasonGrouper` supports overlapping seasons: .. jupyter-execute:: ds.groupby(time=SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"])).mean() Skipping months is allowed: .. jupyter-execute:: ds.groupby(time=SeasonGrouper(["JJAS"])).mean() Use :py:class:`SeasonResampler` to specify custom seasons. .. jupyter-execute:: from xarray.groupers import SeasonResampler ds.resample(time=SeasonResampler(["DJF", "MAM", "JJA", "SON"])).mean() :py:class:`SeasonResampler` is smart enough to correctly handle years for seasons that span the end of the year (e.g. DJF). By default :py:class:`SeasonResampler` will skip any season that is incomplete (e.g. the first DJF season for a time series that starts in Jan). Pass the ``drop_incomplete=False`` kwarg to :py:class:`SeasonResampler` to disable this behaviour. .. jupyter-execute:: from xarray.groupers import SeasonResampler ds.resample( time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False) ).mean() Seasons need not be of the same length: .. jupyter-execute:: ds.resample(time=SeasonResampler(["JF", "MAM", "JJAS", "OND"])).mean() xarray-2025.09.0/doc/user-guide/weather-climate.rst000066400000000000000000000234571505620616400220020ustar00rootroot00000000000000.. currentmodule:: xarray .. _weather-climate: Weather and climate data ======================== .. jupyter-execute:: :hide-code: import xarray as xr import numpy as np Xarray can leverage metadata that follows the `Climate and Forecast (CF) conventions`_ if present. Examples include :ref:`automatic labelling of plots` with descriptive names and units if proper metadata is present and support for non-standard calendars used in climate science through the ``cftime`` module (explained in the :ref:`CFTimeIndex` section). There are also a number of :ref:`geosciences-focused projects that build on xarray`. .. _Climate and Forecast (CF) conventions: https://cfconventions.org .. _cf_variables: Related Variables ----------------- Several CF variable attributes contain lists of other variables associated with the variable with the attribute. 
A few of these are now parsed by xarray, with the attribute value popped to encoding on read and the variables in that value interpreted as non-dimension coordinates: - ``coordinates`` - ``bounds`` - ``grid_mapping`` - ``climatology`` - ``geometry`` - ``node_coordinates`` - ``node_count`` - ``part_node_count`` - ``interior_ring`` - ``cell_measures`` - ``formula_terms`` This decoding is controlled by the ``decode_coords`` kwarg to :py:func:`open_dataset` and :py:func:`open_mfdataset`. The CF attribute ``ancillary_variables`` was not included in the list due to the variables listed there being associated primarily with the variable with the attribute, rather than with the dimensions. .. _metpy_accessor: CF-compliant coordinate variables --------------------------------- `MetPy`_ adds a ``metpy`` accessor that allows accessing coordinates with appropriate CF metadata using generic names ``x``, ``y``, ``vertical`` and ``time``. There is also a ``cartopy_crs`` attribute that provides projection information, parsed from the appropriate CF metadata, as a `Cartopy`_ projection object. See the `metpy documentation`_ for more information. .. _`MetPy`: https://unidata.github.io/MetPy/dev/index.html .. _`metpy documentation`: https://unidata.github.io/MetPy/dev/tutorials/xarray_tutorial.html#coordinates .. _`Cartopy`: https://cartopy.readthedocs.io/stable/reference/crs.html .. _CFTimeIndex: Non-standard calendars and dates outside the precision range ------------------------------------------------------------ Through the standalone ``cftime`` library and a custom subclass of :py:class:`pandas.Index`, xarray supports a subset of the indexing functionality enabled through the standard :py:class:`pandas.DatetimeIndex` for dates from non-standard calendars commonly used in climate science or dates using a standard calendar, but outside the `precision range`_ and dates prior to `1582-10-15`_. .. note:: As of xarray version 0.11, by default, :py:class:`cftime.datetime` objects will be used to represent times (either in indexes, as a :py:class:`~xarray.CFTimeIndex`, or in data arrays with dtype object) if any of the following are true: - The dates are from a non-standard calendar - Any dates are outside the nanosecond-precision range (prior xarray version 2025.01.2) - Any dates are outside the time span limited by the resolution (from xarray version 2025.01.2) Otherwise pandas-compatible dates from a standard calendar will be represented with the ``np.datetime64[unit]`` data type (where unit can be one of ``"s"``, ``"ms"``, ``"us"``, ``"ns"``), enabling the use of a :py:class:`pandas.DatetimeIndex` or arrays with dtype ``np.datetime64[unit]`` and their full set of associated features. As of pandas version 2.0.0, pandas supports non-nanosecond precision datetime values. From xarray version 2025.01.2 on, non-nanosecond precision datetime values are also supported in xarray (this can be parameterized via :py:class:`~xarray.coders.CFDatetimeCoder` and ``decode_times`` kwarg). See also :ref:`internals.timecoding`. For example, you can create a DataArray indexed by a time coordinate with dates from a no-leap calendar and a :py:class:`~xarray.CFTimeIndex` will automatically be used: .. 
jupyter-execute:: from itertools import product from cftime import DatetimeNoLeap dates = [ DatetimeNoLeap(year, month, 1) for year, month in product(range(1, 3), range(1, 13)) ] da = xr.DataArray(np.arange(24), coords=[dates], dims=["time"], name="foo") Xarray also includes a :py:func:`~xarray.date_range` function, which enables creating a :py:class:`~xarray.CFTimeIndex` with regularly-spaced dates. For instance, we can create the same dates and DataArray we created above using (note that ``use_cftime=True`` is not mandatory to return a :py:class:`~xarray.CFTimeIndex` for non-standard calendars, but can be nice to use to be explicit): .. jupyter-execute:: dates = xr.date_range( start="0001", periods=24, freq="MS", calendar="noleap", use_cftime=True ) da = xr.DataArray(np.arange(24), coords=[dates], dims=["time"], name="foo") Mirroring pandas' method with the same name, :py:meth:`~xarray.infer_freq` allows one to infer the sampling frequency of a :py:class:`~xarray.CFTimeIndex` or a 1-D :py:class:`~xarray.DataArray` containing cftime objects. It also works transparently with ``np.datetime64`` and ``np.timedelta64`` data (with "s", "ms", "us" or "ns" resolution). .. jupyter-execute:: xr.infer_freq(dates) With :py:meth:`~xarray.CFTimeIndex.strftime` we can also easily generate formatted strings from the datetime values of a :py:class:`~xarray.CFTimeIndex` directly or through the ``dt`` accessor for a :py:class:`~xarray.DataArray` using the same formatting as the standard `datetime.strftime`_ convention. .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior .. jupyter-execute:: dates.strftime("%c") .. jupyter-execute:: da["time"].dt.strftime("%Y%m%d") Conversion between non-standard calendars and to/from pandas DatetimeIndexes is facilitated with the :py:meth:`xarray.Dataset.convert_calendar` method (also available as :py:meth:`xarray.DataArray.convert_calendar`). Here, like elsewhere in xarray, the ``use_cftime`` argument controls which datetime backend is used in the output. The default (``None``) is to use ``pandas`` when possible, i.e. when the calendar is ``standard``/``gregorian`` and dates starting with `1582-10-15`_. There is no such restriction when converting to a ``proleptic_gregorian`` calendar. .. _1582-10-15: https://en.wikipedia.org/wiki/Gregorian_calendar .. jupyter-execute:: dates = xr.date_range( start="2001", periods=24, freq="MS", calendar="noleap", use_cftime=True ) da_nl = xr.DataArray(np.arange(24), coords=[dates], dims=["time"], name="foo") da_std = da_nl.convert_calendar("standard", use_cftime=True) The data is unchanged, only the timestamps are modified. Further options are implemented for the special ``"360_day"`` calendar and for handling missing dates. There is also :py:meth:`xarray.Dataset.interp_calendar` (and :py:meth:`xarray.DataArray.interp_calendar`) for interpolating data between calendars. For data indexed by a :py:class:`~xarray.CFTimeIndex`, xarray currently supports: - `Partial datetime string indexing`_: .. jupyter-execute:: da.sel(time="0001") .. jupyter-execute:: da.sel(time=slice("0001-05", "0002-02")) .. note:: For specifying full or partial datetime strings in cftime indexing, xarray supports two versions of the `ISO 8601 standard`_, the basic pattern (YYYYMMDDhhmmss) or the extended pattern (YYYY-MM-DDThh:mm:ss), as well as the default cftime string format (YYYY-MM-DD hh:mm:ss). 
This is somewhat more restrictive than pandas; in other words, some datetime strings that would be valid for a :py:class:`pandas.DatetimeIndex` are not valid for an :py:class:`~xarray.CFTimeIndex`. - Access of basic datetime components via the ``dt`` accessor (in this case just "year", "month", "day", "hour", "minute", "second", "microsecond", "season", "dayofyear", "dayofweek", and "days_in_month") with the addition of "calendar", absent from pandas: .. jupyter-execute:: da.time.dt.year .. jupyter-execute:: da.time.dt.month .. jupyter-execute:: da.time.dt.season .. jupyter-execute:: da.time.dt.dayofyear .. jupyter-execute:: da.time.dt.dayofweek .. jupyter-execute:: da.time.dt.days_in_month .. jupyter-execute:: da.time.dt.calendar - Rounding of datetimes to fixed frequencies via the ``dt`` accessor: .. jupyter-execute:: da.time.dt.ceil("3D").head() .. jupyter-execute:: da.time.dt.floor("5D").head() .. jupyter-execute:: da.time.dt.round("2D").head() - Group-by operations based on datetime accessor attributes (e.g. by month of the year): .. jupyter-execute:: da.groupby("time.month").sum() - Interpolation using :py:class:`cftime.datetime` objects: .. jupyter-execute:: da.interp(time=[DatetimeNoLeap(1, 1, 15), DatetimeNoLeap(1, 2, 15)]) - Interpolation using datetime strings: .. jupyter-execute:: da.interp(time=["0001-01-15", "0001-02-15"]) - Differentiation: .. jupyter-execute:: da.differentiate("time") - Serialization: .. jupyter-execute:: da.to_netcdf("example-no-leap.nc") reopened = xr.open_dataset("example-no-leap.nc") reopened .. jupyter-execute:: :hide-code: import os reopened.close() os.remove("example-no-leap.nc") - And resampling along the time dimension for data indexed by a :py:class:`~xarray.CFTimeIndex`: .. jupyter-execute:: da.resample(time="81min", closed="right", label="right", offset="3min").mean() .. _precision range: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timestamp-limitations .. _ISO 8601 standard: https://en.wikipedia.org/wiki/ISO_8601 .. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#partial-string-indexing xarray-2025.09.0/doc/videos.yml000066400000000000000000000046571505620616400161410ustar00rootroot00000000000000- title: "Xdev Python Tutorial Seminar Series 2022 Thinking with Xarray : High-level computation patterns" src: '' authors: - Deepak Cherian - title: "Xdev Python Tutorial Seminar Series 2021 seminar introducing xarray (2 of 2)" src: '' authors: - Anderson Banihirwe - title: "Xdev Python Tutorial Seminar Series 2021 seminar introducing xarray (1 of 2)" src: '' authors: - Anderson Banihirwe - title: "Xarray's 2020 virtual tutorial" src: '' authors: - Anderson Banihirwe - Deepak Cherian - Martin Durant - title: "Xarray's Tutorial presented at the 2020 SciPy Conference" src: ' ' authors: - Joe Hamman - Deepak Cherian - Ryan Abernathey - Stephan Hoyer - title: "Scipy 2015 talk introducing xarray to a general audience" src: '' authors: - Stephan Hoyer - title: " 2015 Unidata Users Workshop talk and tutorial with (`with answers`_) introducing xarray to users familiar with netCDF" src: '' authors: - Stephan Hoyer xarray-2025.09.0/doc/whats-new.rst000066400000000000000000015410641505620616400165730ustar00rootroot00000000000000.. currentmodule:: xarray .. _whats-new: What's New ========== .. 
_whats-new.2025.09.0: v2025.09.0 (September 2, 2025) ------------------------------ This release brings a number of small improvements and fixes, especially related to writing DataTree objects and netCDF files to disk. Thanks to the 13 contributors to this release: Benoit Bovy, DHRUVA KUMAR KAUSHAL, Deepak Cherian, Dhruva Kumar Kaushal, Giacomo Caria, Ian Hunt-Isaak, Illviljan, Justus Magin, Kai MΓΌhlbauer, Ruth Comer, Spencer Clark, Stephan Hoyer and Tom Nicholas New Features ~~~~~~~~~~~~ - Support rechunking by :py:class:`~xarray.groupers.SeasonResampler` for seasonal data analysis (:issue:`10425`, :pull:`10519`). By `Dhruva Kumar Kaushal `_. - Add convenience methods to :py:class:`~xarray.Coordinates` (:pull:`10318`) By `Justus Magin `_. - Added :py:func:`load_datatree` for loading ``DataTree`` objects into memory from disk. It has the same relationship to :py:func:`open_datatree`, as :py:func:`load_dataset` has to :py:func:`open_dataset`. By `Stephan Hoyer `_. - ``compute=False`` is now supported by :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr`. By `Stephan Hoyer `_. - ``open_dataset`` will now correctly infer a path ending in ``.zarr/`` as zarr By `Ian Hunt-Isaak `_. Breaking changes ~~~~~~~~~~~~~~~~ - Following pandas 3.0 (`pandas-dev/pandas#61985 `_), ``Day`` is no longer considered a ``Tick``-like frequency. Therefore non-``None`` values of ``offset`` and non-``"start_day"`` values of ``origin`` will have no effect when resampling to a daily frequency for objects indexed by a :py:class:`xarray.CFTimeIndex`. As in `pandas-dev/pandas#62101 `_ warnings will be emitted if non default values are provided in this context (:issue:`10640`, :pull:`10650`). By `Spencer Clark `_. - The default backend ``engine`` used by :py:meth:`Dataset.to_netcdf` and :py:meth:`DataTree.to_netcdf` is now chosen consistently with :py:func:`open_dataset` and :py:func:`open_datatree`, using whichever netCDF libraries are available and valid, and preferring netCDF4 to h5netcdf to scipy (:issue:`10654`). This will change the default backend in some edge cases (e.g., from scipy to netCDF4 when writing to a file-like object or bytes). To override these new defaults, set ``engine`` explicitly. By `Stephan Hoyer `_. - The return value of :py:meth:`Dataset.to_netcdf` without ``path`` is now a ``memoryview`` object instead of ``bytes`` (:pull:`10656`). This removes an unnecessary memory copy and ensures consistency when using either ``engine="scipy"`` or ``engine="h5netcdf"``. If you need a bytes object, simply wrap the return value of ``to_netcdf()`` with ``bytes()``. By `Stephan Hoyer `_. Bug fixes ~~~~~~~~~ - Fix contour plots not normalizing the colors correctly when using for example logarithmic norms. (:issue:`10551`, :pull:`10565`) By `Jimmy Westling `_. - Fix distribution of ``auto_complex`` keyword argument for open_datatree (:issue:`10631`, :pull:`10632`). By `Kai MΓΌhlbauer `_. - Warn instead of raise in case of misconfiguration of ``unlimited_dims`` originating from dataset.encoding, to prevent breaking users workflows (:issue:`10647`, :pull:`10648`). By `Kai MΓΌhlbauer `_. - :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr` now avoid redundant computation of Dask arrays with cross-group dependencies (:issue:`10637`). By `Stephan Hoyer `_. - :py:meth:`DataTree.to_netcdf` had h5netcdf hard-coded as default (:issue:`10654`). By `Stephan Hoyer `_. Internal Changes ~~~~~~~~~~~~~~~~ - Run ``TestNetCDF4Data`` as ``TestNetCDF4DataTree`` through ``open_datatree`` (:pull:`10632`). 
By `Kai MΓΌhlbauer `_. .. _whats-new.2025.08.0: v2025.08.0 (August 14, 2025) ---------------------------- This release brings the ability to load xarray objects asynchronously, write netCDF as bytes, fixes a number of bugs, and starts an important deprecation cycle for changing the default values of keyword arguments for various xarray combining functions. Thanks to the 24 contributors to this release: Alfonso Ladino, Brigitta SipΕ‘cz, Claude, Deepak Cherian, Dimitri Papadopoulos Orfanos, Eric Jansen, Ian Hunt-Isaak, Ilan Gold, Illviljan, Julia Signell, Justus Magin, Kai MΓΌhlbauer, Mathias Hauser, Matthew, Michael Niklas, Miguel Jimenez, Nick Hodgskin, Pratiman, Scott Staniewicz, Spencer Clark, Stephan Hoyer, Tom Nicholas, Yang Yang and jemmajeffree New Features ~~~~~~~~~~~~ - Added :py:meth:`DataTree.prune` method to remove empty nodes while preserving tree structure. Useful for cleaning up DataTree after time-based filtering operations (:issue:`10590`, :pull:`10598`). By `Alfonso Ladino `_. - Added new asynchronous loading methods :py:meth:`Dataset.load_async`, :py:meth:`DataArray.load_async`, :py:meth:`Variable.load_async`. Note that users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. (:issue:`10326`, :pull:`10327`) By `Tom Nicholas `_. - :py:meth:`DataTree.to_netcdf` can now write to a file-like object, or return bytes if called without a filepath. (:issue:`10570`) By `Matthew Willson `_. - Added exception handling for invalid files in :py:func:`open_mfdataset`. (:issue:`6736`) By `Pratiman Patel `_. Breaking changes ~~~~~~~~~~~~~~~~ - When writing to NetCDF files with groups, Xarray no longer redefines dimensions that have the same size in parent groups (:issue:`10241`). This conforms with `CF Conventions for group scrope `_ but may require adjustments for code that consumes NetCDF files produced by Xarray. By `Stephan Hoyer `_. Deprecations ~~~~~~~~~~~~ - Start a deprecation cycle for changing the default keyword arguments to :py:func:`concat`, :py:func:`merge`, :py:func:`combine_nested`, :py:func:`combine_by_coords`, and :py:func:`open_mfdataset`. Emits a :py:class:`FutureWarning` when using old defaults and new defaults would result in different behavior. Adds an option: ``use_new_combine_kwarg_defaults`` to opt in to new defaults immediately. New values are: - ``data_vars``: None which means ``all`` when concatenating along a new dimension, and ``"minimal"`` when concatenating along an existing dimension - ``coords``: "minimal" - ``compat``: "override" - ``join``: "exact" (:issue:`8778`, :issue:`1385`, :pull:`10062`). By `Julia Signell `_. Bug fixes ~~~~~~~~~ - Fix Pydap Datatree backend testing. Testing now compares elements of (unordered) two sets (before, lists) (:pull:`10525`). By `Miguel Jimenez-Urias `_. - Fix ``KeyError`` when passing a ``dim`` argument different from the default to ``convert_calendar`` (:pull:`10544`). By `Eric Jansen `_. - Fix transpose of boolean arrays read from disk. (:issue:`10536`) By `Deepak Cherian `_. - Fix detection of the ``h5netcdf`` backend. Xarray now selects ``h5netcdf`` if the default ``netCDF4`` engine is not available (:issue:`10401`, :pull:`10557`). By `Scott Staniewicz `_. - Fix :py:func:`merge` to prevent altering original object depending on join value (:pull:`10596`) By `Julia Signell `_. 
- Ensure ``unlimited_dims`` passed to :py:meth:`xarray.DataArray.to_netcdf`, :py:meth:`xarray.Dataset.to_netcdf` or :py:meth:`xarray.DataTree.to_netcdf` only contains dimensions present in the object; raise ``ValueError`` otherwise (:issue:`10549`, :pull:`10608`). By `Kai MΓΌhlbauer `_. Documentation ~~~~~~~~~~~~~ - Clarify lazy behaviour and eager loading for ``chunks=None`` in :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_dataarray`, :py:func:`~xarray.open_datatree`, :py:func:`~xarray.open_groups` and :py:func:`~xarray.open_zarr` (:issue:`10612`, :pull:`10627`). By `Kai MΓΌhlbauer `_. Performance ~~~~~~~~~~~ - Speed up non-numeric scalars when calling :py:meth:`Dataset.interp`. (:issue:`10054`, :pull:`10554`) By `Jimmy Westling `_. .. _whats-new.2025.07.1: v2025.07.1 (July 09, 2025) -------------------------- This release brings a lot of improvements to flexible indexes functionality, including new classes to ease building of new indexes with custom coordinate transforms (:py:class:`indexes.CoordinateTransformIndex`) and tree-like index structures (:py:class:`indexes.NDPointIndex`). See a `new gallery `_ showing off the possibilities enabled by flexible indexes. Thanks to the 7 contributors to this release: Benoit Bovy, Deepak Cherian, Dhruva Kumar Kaushal, Dimitri Papadopoulos Orfanos, Illviljan, Justus Magin and Tom Nicholas New Features ~~~~~~~~~~~~ - New :py:class:`xarray.indexes.NDPointIndex`, which by default uses :py:class:`scipy.spatial.KDTree` under the hood for the selection of irregular, n-dimensional data (:pull:`10478`). By `Benoit Bovy `_. - Allow skipping the creation of default indexes when opening datasets (:pull:`8051`). By `Benoit Bovy `_ and `Justus Magin `_. Bug fixes ~~~~~~~~~ - :py:meth:`Dataset.set_xindex` now raises a helpful error when a custom index creates extra variables that don't match the provided coordinate names, instead of silently ignoring them. The error message suggests using the factory method pattern with :py:meth:`xarray.Coordinates.from_xindex` and :py:meth:`Dataset.assign_coords` for advanced use cases (:issue:`10499`, :pull:`10503`). By `Dhruva Kumar Kaushal `_. Documentation ~~~~~~~~~~~~~ - A `new gallery `_ showing off the possibilities enabled by flexible indexes. Internal Changes ~~~~~~~~~~~~~~~~ - Refactored the ``PandasIndexingAdapter`` and ``CoordinateTransformIndexingAdapter`` internal indexing classes. Coordinate variables that wrap a :py:class:`pandas.RangeIndex`, a :py:class:`pandas.MultiIndex` or a :py:class:`xarray.indexes.CoordinateTransform` are now displayed as lazy variables in the Xarray data reprs (:pull:`10355`). By `Benoit Bovy `_. .. _whats-new.2025.07.0: v2025.07.0 (Jul 3, 2025) ------------------------ This release extends xarray's support for custom index classes, restores support for reading netCDF3 files with SciPy, updates minimum dependencies, and fixes a number of bugs. Thanks to the 17 contributors to this release: Bas Nijholt, Benoit Bovy, Deepak Cherian, Dhruva Kumar Kaushal, Dimitri Papadopoulos Orfanos, Ian Hunt-Isaak, Kai MΓΌhlbauer, Mathias Hauser, Maximilian Roos, Miguel Jimenez, Nick Hodgskin, Scott Henderson, Shuhao Cao, Spencer Clark, Stephan Hoyer, Tom Nicholas and Zsolt Cserna New Features ~~~~~~~~~~~~ - Expose :py:class:`~xarray.indexes.RangeIndex`, and :py:class:`~xarray.indexes.CoordinateTransformIndex` as public api under the ``xarray.indexes`` namespace. By `Deepak Cherian `_. - Support zarr-python's new ``.supports_consolidated_metadata`` store property (:pull:`10457``). 
by `Tom Nicholas `_. - Better error messages when encoding data to be written to disk fails (:pull:`10464`). By `Stephan Hoyer `_ Breaking changes ~~~~~~~~~~~~~~~~ The minimum versions of some dependencies were changed (:issue:`10417`, :pull:`10438`): By `Dhruva Kumar Kaushal `_. .. list-table:: :header-rows: 1 :widths: 30 20 20 * - Dependency - Old Version - New Version * - Python - 3.10 - 3.11 * - array-api-strict - 1.0 - 1.1 * - boto3 - 1.29 - 1.34 * - bottleneck - 1.3 - 1.4 * - cartopy - 0.22 - 0.23 * - dask-core - 2023.11 - 2024.6 * - distributed - 2023.11 - 2024.6 * - flox - 0.7 - 0.9 * - h5py - 3.8 - 3.11 * - hdf5 - 1.12 - 1.14 * - iris - 3.7 - 3.9 * - lxml - 4.9 - 5.1 * - matplotlib-base - 3.7 - 3.8 * - numba - 0.57 - 0.60 * - numbagg - 0.6 - 0.8 * - numpy - 1.24 - 1.26 * - packaging - 23.2 - 24.1 * - pandas - 2.1 - 2.2 * - pint - 0.22 - 0.24 * - pydap - N/A - 3.5 * - scipy - 1.11 - 1.13 * - sparse - 0.14 - 0.15 * - typing_extensions - 4.8 - Removed * - zarr - 2.16 - 2.18 Bug fixes ~~~~~~~~~ - Fix Pydap test_cmp_local_file for numpy 2.3.0 changes, 1. do always return arrays for all versions and 2. skip astype(str) for numpy >= 2.3.0 for expected data. (:pull:`10421`) By `Kai MΓΌhlbauer `_. - Fix the SciPy backend for netCDF3 files . (:issue:`8909`, :pull:`10376`) By `Deepak Cherian `_. - Check and fix character array string dimension names, issue warnings as needed (:issue:`6352`, :pull:`10395`). By `Kai MΓΌhlbauer `_. - Fix the error message of :py:func:`testing.assert_equal` when two different :py:class:`DataTree` objects are passed (:pull:`10440`). By `Mathias Hauser `_. - Fix :py:func:`testing.assert_equal` with ``check_dim_order=False`` for :py:class:`DataTree` objects (:pull:`10442`). By `Mathias Hauser `_. - Fix Pydap backend testing. Now test forces string arrays to dtype "S" (pydap converts them to unicode type by default). Removes conditional to numpy version. (:issue:`10261`, :pull:`10482`) By `Miguel Jimenez-Urias `_. - Fix attribute overwriting bug when decoding encoded :py:class:`numpy.timedelta64` values from disk with a dtype attribute (:issue:`10468`, :pull:`10469`). By `Spencer Clark `_. - Fix default ``"_FillValue"`` dtype coercion bug when encoding :py:class:`numpy.timedelta64` values to an on-disk format that only supports 32-bit integers (:issue:`10466`, :pull:`10469`). By `Spencer Clark `_. Internal Changes ~~~~~~~~~~~~~~~~ - Forward variable name down to coders for AbstractWritableDataStore.encode_variable and subclasses. (:pull:`10395`). By `Kai MΓΌhlbauer `_. .. _whats-new.2025.06.1: v2025.06.1 (Jun 11, 2025) ------------------------- This is quick bugfix release to remove an unintended dependency on ``typing_extensions``. Thanks to the 4 contributors to this release: Alex Merose, Deepak Cherian, Ilan Gold and Simon Perkins Bug fixes ~~~~~~~~~ - Remove dependency on ``typing_extensions`` (:pull:`10413`). By `Simon Perkins `_. .. _whats-new.2025.06.0: v2025.06.0 (Jun 10, 2025) ------------------------- This release brings HTML reprs to the documentation, fixes to flexible Xarray indexes, performance optimizations, more ergonomic seasonal grouping and resampling with new :py:class:`~xarray.groupers.SeasonGrouper` and :py:class:`~xarray.groupers.SeasonResampler` objects, and bugfixes. Thanks to the 33 contributors to this release: Andrecho, Antoine Gibek, Benoit Bovy, Brian Michell, Christine P. 
Chai, David Huard, Davis Bennett, Deepak Cherian, Dimitri Papadopoulos Orfanos, Elliott Sales de Andrade, Erik, Erik MΓ₯nsson, Giacomo Caria, Ilan Gold, Illviljan, Jesse Rusak, Jonathan Neuhauser, Justus Magin, Kai MΓΌhlbauer, Kimoon Han, Konstantin Ntokas, Mark Harfouche, Michael Niklas, Nick Hodgskin, Niko Sirmpilatze, Pascal Bourgault, Scott Henderson, Simon Perkins, Spencer Clark, Tom Vo, Trevor James Smith, joseph nowak and micguerr-bopen New Features ~~~~~~~~~~~~ - Switch docs to jupyter-execute sphinx extension for HTML reprs. (:issue:`3893`, :pull:`10383`) By `Scott Henderson `_. - Allow an Xarray index that uses multiple dimensions checking equality with another index for only a subset of those dimensions (i.e., ignoring the dimensions that are excluded from alignment). (:issue:`10243`, :pull:`10293`) By `Benoit Bovy `_. - New :py:class:`~xarray.groupers.SeasonGrouper` and :py:class:`~xarray.groupers.SeasonResampler` objects for ergonomic seasonal aggregation. See the docs on :ref:`seasonal_grouping` or `blog post `_ for more. By `Deepak Cherian `_. - Data corruption issues arising from misaligned Dask and Zarr chunks can now be prevented using the new ``align_chunks`` parameter in :py:meth:`~xarray.DataArray.to_zarr`. This option automatically rechunk the Dask array to align it with the Zarr storage chunks. For now, it is disabled by default, but this could change on the future. (:issue:`9914`, :pull:`10336`) By `Joseph Nowak `_. Documentation ~~~~~~~~~~~~~ - HTML reprs! By `Scott Henderson `_. Bug fixes ~~~~~~~~~ - Fix :py:class:`~xarray.groupers.BinGrouper` when ``labels`` is not specified (:issue:`10284`). By `Deepak Cherian `_. - Allow accessing arbitrary attributes on Pandas ExtensionArrays. By `Deepak Cherian `_. - Fix coding empty (zero-size) timedelta64 arrays, ``units`` taking precedence when encoding, fallback to default values when decoding (:issue:`10310`, :pull:`10313`). By `Kai MΓΌhlbauer `_. - Use dtype from intermediate sum instead of source dtype or "int" for casting of count when calculating mean in rolling for correct operations (preserve float dtypes, correct mean of bool arrays) (:issue:`10340`, :pull:`10341`). By `Kai MΓΌhlbauer `_. - Improve the html ``repr`` of Xarray objects (dark mode, icons and variable attribute / data dropdown sections). (:pull:`10353`, :pull:`10354`) By `Benoit Bovy `_. - Raise an error when attempting to encode :py:class:`numpy.datetime64` values prior to the Gregorian calendar reform date of 1582-10-15 with a ``"standard"`` or ``"gregorian"`` calendar. Previously we would warn and encode these as :py:class:`cftime.DatetimeGregorian` objects, but it is not clear that this is the user's intent, since this implicitly converts the calendar of the datetimes from ``"proleptic_gregorian"`` to ``"gregorian"`` and prevents round-tripping them as :py:class:`numpy.datetime64` values (:pull:`10352`). By `Spencer Clark `_. - Avoid unsafe casts from float to unsigned int in CFMaskCoder (:issue:`9815`, :pull:`9964`). By ` Elliott Sales de Andrade `_. Performance ~~~~~~~~~~~ - Lazily indexed arrays now use less memory to store keys by avoiding copies in :py:class:`~xarray.indexing.VectorizedIndexer` and :py:class:`~xarray.indexing.OuterIndexer` (:issue:`10316`). By `Jesse Rusak `_. - Fix performance regression in interp where more data was loaded than was necessary. (:issue:`10287`). By `Deepak Cherian `_. - Speed up encoding of :py:class:`cftime.datetime` objects by roughly a factor of three (:pull:`8324`). By `Antoine Gibek `_. .. 
_whats-new.2025.04.0: v2025.04.0 (Apr 29, 2025) ------------------------- This release brings bug fixes, better support for extension arrays including returning a :py:class:`pandas.IntervalArray` from ``groupby_bins``, and performance improvements. Thanks to the 24 contributors to this release: Alban Farchi, Andrecho, Benoit Bovy, Deepak Cherian, Dimitri Papadopoulos Orfanos, Florian Jetter, Giacomo Caria, Ilan Gold, Illviljan, Joren Hammudoglu, Julia Signell, Kai Muehlbauer, Kai MΓΌhlbauer, Mathias Hauser, Mattia Almansi, Michael Sumner, Miguel Jimenez, Nick Hodgskin (🦎 Vecko), Pascal Bourgault, Philip Chmielowiec, Scott Henderson, Spencer Clark, Stephan Hoyer and Tom Nicholas New Features ~~~~~~~~~~~~ - By default xarray now encodes :py:class:`numpy.timedelta64` values by converting to :py:class:`numpy.int64` values and storing ``"dtype"`` and ``"units"`` attributes consistent with the dtype of the in-memory :py:class:`numpy.timedelta64` values, e.g. ``"timedelta64[s]"`` and ``"seconds"`` for second-resolution timedeltas. These values will always be decoded to timedeltas without a warning moving forward. Timedeltas encoded via the previous approach can still be roundtripped exactly, but in the future will not be decoded by default (:issue:`1621`, :issue:`10099`, :pull:`10101`). By `Spencer Clark `_. - Added `scipy-stubs `_ to the ``xarray[types]`` dependencies. By `Joren Hammudoglu `_. - Added a :mod:`xarray.typing` module to expose selected public types for use in downstream libraries and static type checking. (:issue:`10179`, :pull:`10215`). By `Michele Guerreri `_. - Improved compatibility with OPeNDAP DAP4 data model for backend engine ``pydap``. This includes ``datatree`` support, and removing slashes from dimension names. By `Miguel Jimenez-Urias `_. - Allow assigning index coordinates with non-array dimension(s) in a :py:class:`DataArray` by overriding :py:meth:`Index.should_add_coord_to_array`. For example, this enables support for CF boundaries coordinate (e.g., ``time(time)`` and ``time_bnds(time, nbnd)``) in a DataArray (:pull:`10137`). By `Benoit Bovy `_. - Improved support pandas categorical extension as indices (i.e., :py:class:`pandas.IntervalIndex`). (:issue:`9661`, :pull:`9671`) By `Ilan Gold `_. - Improved checks and errors raised when trying to align objects with conflicting indexes. It is now possible to align objects each with multiple indexes sharing common dimension(s). (:issue:`7695`, :pull:`10251`) By `Benoit Bovy `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed ===================== ========= ======= Package Old New ===================== ========= ======= pydap 3.4 3.5.0 ===================== ========= ======= - Reductions with ``groupby_bins`` or those that involve :py:class:`xarray.groupers.BinGrouper` now return objects indexed by :py:meth:`pandas.IntervalArray` objects, instead of numpy object arrays containing tuples. This change enables interval-aware indexing of such Xarray objects. (:pull:`9671`). By `Ilan Gold `_. - Remove ``PandasExtensionArrayIndex`` from :py:attr:`xarray.Variable.data` when the attribute is a :py:class:`pandas.api.extensions.ExtensionArray` (:pull:`10263`). By `Ilan Gold `_. - The html and text ``repr`` for ``DataTree`` are now truncated. Up to 6 children are displayed for each node -- the first 3 and the last 3 children -- with a ``...`` between them. The number of children to include in the display is configurable via options. 
For instance use ``set_options(display_max_children=8)`` to display 8 children rather than the default 6. (:pull:`10139`) By `Julia Signell `_. Deprecations ~~~~~~~~~~~~ - The deprecation cycle for the ``eagerly_compute_group`` kwarg to ``groupby`` and ``groupby_bins`` is now complete. By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - :py:meth:`~xarray.Dataset.to_stacked_array` now uses dimensions in order of appearance. This fixes the issue where using :py:meth:`~xarray.Dataset.transpose` before :py:meth:`~xarray.Dataset.to_stacked_array` had no effect. (Mentioned in :issue:`9921`) - Enable ``keep_attrs`` in ``DatasetView.map`` relevant for :py:func:`map_over_datasets` (:pull:`10219`) By `Mathias Hauser `_. - Variables with no temporal dimension are left untouched by :py:meth:`~xarray.Dataset.convert_calendar`. (:issue:`10266`, :pull:`10268`) By `Pascal Bourgault `_. - Enable ``chunk_key_encoding`` in :py:meth:`~xarray.Dataset.to_zarr` for Zarr v2 Datasets (:pull:`10274`) By `BrianMichell `_. Documentation ~~~~~~~~~~~~~ - Fix references to core classes in docs (:issue:`10195`, :pull:`10207`). By `Mattia Almansi `_. - Fix references to point to updated pydap documentation (:pull:`10182`). By `Miguel Jimenez-Urias `_. - Switch to `pydata-sphinx-theme `_ from `sphinx-book-theme `_ (:pull:`8708`). By `Scott Henderson `_. - Add a dedicated 'Complex Numbers' sections to the User Guide (:issue:`10213`, :pull:`10235`). By `Andre Wendlinger `_. Internal Changes ~~~~~~~~~~~~~~~~ - Avoid stacking when grouping by a chunked array. This can be a large performance improvement. By `Deepak Cherian `_. - The implementation of ``Variable.set_dims`` has changed to use array indexing syntax instead of ``np.broadcast_to`` to perform dimension expansions where all new dimensions have a size of 1. This should improve compatibility with duck arrays that do not support broadcasting (:issue:`9462`, :pull:`10277`). By `Mark Harfouche `_. .. _whats-new.2025.03.1: v2025.03.1 (Mar 30, 2025) ------------------------- This release brings the ability to specify ``fill_value`` and ``write_empty_chunks`` for Zarr V3 stores, and a few bug fixes. Thanks to the 10 contributors to this release: Andrecho, Deepak Cherian, Ian Hunt-Isaak, Karl Krauth, Mathias Hauser, Maximilian Roos, Nick Hodgskin (🦎 Vecko), Spencer Clark, Tom Nicholas and wpbonelli. New Features ~~~~~~~~~~~~ - Allow setting a ``fill_value`` for Zarr format 3 arrays. Specify ``fill_value`` in ``encoding`` as usual. (:issue:`10064`). By `Deepak Cherian `_. - Added :py:class:`indexes.RangeIndex` as an alternative, memory saving Xarray index representing a 1-dimensional bounded interval with evenly spaced floating values (:issue:`8473`, :pull:`10076`). By `Benoit Bovy `_. Breaking changes ~~~~~~~~~~~~~~~~ - Explicitly forbid appending a :py:class:`~xarray.DataTree` to zarr using :py:meth:`~xarray.DataTree.to_zarr` with ``append_dim``, because the expected behaviour is currently undefined. (:issue:`9858`, :pull:`10156`) By `Tom Nicholas `_. Bug fixes ~~~~~~~~~ - Update the parameters of :py:meth:`~xarray.DataArray.to_zarr` to match :py:meth:`~xarray.Dataset.to_zarr`. This fixes the issue where using the ``zarr_version`` parameter would raise a deprecation warning telling the user to use a non-existent ``zarr_format`` parameter instead. (:issue:`10163`, :pull:`10164`) By `Karl Krauth `_. - :py:meth:`DataTree.sel` and :py:meth:`DataTree.isel` display the path of the first failed node again (:pull:`10154`). By `Mathias Hauser `_. 
- Fix grouped and resampled ``first``, ``last`` with datetimes (:issue:`10169`, :pull:`10173`) By `Deepak Cherian `_. - FacetGrid plots now include units in their axis labels when available (:issue:`10184`, :pull:`10185`) By `Andre Wendlinger `_. .. _whats-new.2025.03.0: v2025.03.0 (Mar 20, 2025) ------------------------- This release brings tested support for Python 3.13, support for reading Zarr V3 datasets into a :py:class:`~xarray.DataTree`, significant improvements to datetime & timedelta encoding/decoding, and improvements to the :py:class:`~xarray.DataTree` API; in addition to the usual bug fixes and other improvements. Thanks to the 26 contributors to this release: Alfonso Ladino, Benoit Bovy, Chuck Daniels, Deepak Cherian, Eni, Florian Jetter, Ian Hunt-Isaak, Jan, Joe Hamman, Josh Kihm, Julia Signell, Justus Magin, Kai MΓΌhlbauer, Kobe Vandelanotte, Mathias Hauser, Max Jones, Maximilian Roos, Oliver Watt-Meyer, Sam Levang, Sander van Rijn, Spencer Clark, Stephan Hoyer, Tom Nicholas, Tom White, Vecko and maddogghoek New Features ~~~~~~~~~~~~ - Added :py:meth:`tutorial.open_datatree` and :py:meth:`tutorial.load_datatree` By `Eni Awowale `_. - Added :py:meth:`DataTree.filter_like` to conveniently restructure a DataTree like another DataTree (:issue:`10096`, :pull:`10097`). By `Kobe Vandelanotte `_. - Added :py:meth:`Coordinates.from_xindex` as convenience for creating a new :py:class:`Coordinates` object directly from an existing Xarray index object if the latter supports it (:pull:`10000`) By `Benoit Bovy `_. - Allow kwargs in :py:meth:`DataTree.map_over_datasets` and :py:func:`map_over_datasets` (:issue:`10009`, :pull:`10012`). By `Kai MΓΌhlbauer `_. - support python 3.13 (no free-threading) (:issue:`9664`, :pull:`9681`) By `Justus Magin `_. - Added experimental support for coordinate transforms (not ready for public use yet!) (:pull:`9543`) By `Benoit Bovy `_. - Similar to our :py:class:`numpy.datetime64` encoding path, automatically modify the units when an integer dtype is specified during eager cftime encoding, but the specified units would not allow for an exact round trip (:pull:`9498`). By `Spencer Clark `_. - Support reading to `GPU memory with Zarr `_ (:pull:`10078`). By `Deepak Cherian `_. Performance ~~~~~~~~~~~ - :py:meth:`DatasetGroupBy.first` and :py:meth:`DatasetGroupBy.last` can now use ``flox`` if available. (:issue:`9647`) By `Deepak Cherian `_. Breaking changes ~~~~~~~~~~~~~~~~ - Rolled back code that would attempt to catch integer overflow when encoding times with small integer dtypes (:issue:`8542`), since it was inconsistent with xarray's handling of standard integers, and interfered with encoding times with small integer dtypes and missing values (:pull:`9498`). By `Spencer Clark `_. - Warn instead of raise if phony_dims are detected when using h5netcdf-backend and ``phony_dims=None`` (:issue:`10049`, :pull:`10058`) By `Kai MΓΌhlbauer `_. Deprecations ~~~~~~~~~~~~ - Deprecate :py:func:`~xarray.cftime_range` in favor of :py:func:`~xarray.date_range` with ``use_cftime=True`` (:issue:`9886`, :pull:`10024`). By `Josh Kihm `_. - Move from phony_dims=None to phony_dims="access" for h5netcdf-backend(:issue:`10049`, :pull:`10058`) By `Kai MΓΌhlbauer `_. Bug fixes ~~~~~~~~~ - Fix ``open_datatree`` incompatibilities with Zarr-Python V3 and refactor ``TestZarrDatatreeIO`` accordingly (:issue:`9960`, :pull:`10020`). By `Alfonso Ladino-Rincon `_. 
- Default to resolution-dependent optimal integer encoding units when saving chunked non-nanosecond :py:class:`numpy.datetime64` or :py:class:`numpy.timedelta64` arrays to disk. Previously units of "nanoseconds" were chosen by default, which are optimal for nanosecond-resolution times, but not for times with coarser resolution. By `Spencer Clark `_ (:pull:`10017`). - Use mean of min/max years as offset in calculation of datetime64 mean (:issue:`10019`, :pull:`10035`). By `Kai MΓΌhlbauer `_. - Fix ``DataArray().drop_attrs(deep=False)`` and add support for attrs to ``DataArray()._replace()``. (:issue:`10027`, :pull:`10030`). By `Jan Haacker `_. - Fix bug preventing encoding times with missing values with small integer dtype (:issue:`9134`, :pull:`9498`). By `Spencer Clark `_. - More robustly raise an error when lazily encoding times and an integer dtype is specified with units that do not allow for an exact round trip (:pull:`9498`). By `Spencer Clark `_. - Prevent false resolution change warnings from being emitted when decoding timedeltas encoded with floating point values, and make it clearer how to silence this warning message in the case that it is rightfully emitted (:issue:`10071`, :pull:`10072`). By `Spencer Clark `_. - Fix ``isel`` for multi-coordinate Xarray indexes (:issue:`10063`, :pull:`10066`). By `Benoit Bovy `_. - Fix dask tokenization when opening each node in :py:func:`xarray.open_datatree` (:issue:`10098`, :pull:`10100`). By `Sam Levang `_. - Improve handling of dtype and NaT when encoding/decoding masked and packaged datetimes and timedeltas (:issue:`8957`, :pull:`10050`). By `Kai MΓΌhlbauer `_. Documentation ~~~~~~~~~~~~~ - Better expose the :py:class:`Coordinates` class in API reference (:pull:`10000`) By `Benoit Bovy `_. .. _whats-new.2025.01.2: v2025.01.2 (Jan 31, 2025) ------------------------- This release brings non-nanosecond datetime and timedelta resolution to xarray, sharded reading in zarr, suggestion of correct names when trying to access non-existent data variables and bug fixes! Thanks to the 16 contributors to this release: Deepak Cherian, Elliott Sales de Andrade, Jacob Prince-Bieker, Jimmy Westling, Joe Hamman, Joseph Nowak, Justus Magin, Kai MΓΌhlbauer, Mattia Almansi, Michael Niklas, Roelof Rietbroek, Salaheddine EL FARISSI, Sam Levang, Spencer Clark, Stephan Hoyer and Tom Nicholas In the last couple of releases xarray has been prepared for allowing non-nanosecond datetime and timedelta resolution. The code had to be changed and adapted in numerous places, affecting especially the test suite. The documentation has been updated accordingly and a new internal chapter on :ref:`internals.timecoding` has been added. To make the transition as smooth as possible this is designed to be fully backwards compatible, keeping the current default of ``'ns'`` resolution on decoding. To opt-into decoding to other resolutions (``'us'``, ``'ms'`` or ``'s'``) an instance of the newly public :py:class:`coders.CFDatetimeCoder` class can be passed through the ``decode_times`` keyword argument (see also :ref:`internals.default_timeunit`): .. code-block:: python coder = xr.coders.CFDatetimeCoder(time_unit="s") ds = xr.open_dataset(filename, decode_times=coder) Similar control of the resolution of decoded timedeltas can be achieved through passing a :py:class:`coders.CFTimedeltaCoder` instance to the ``decode_timedelta`` keyword argument: .. 
code-block:: python coder = xr.coders.CFTimedeltaCoder(time_unit="s") ds = xr.open_dataset(filename, decode_timedelta=coder) though by default timedeltas will be decoded to the same ``time_unit`` as datetimes. There might slight changes when encoding/decoding times as some warning and error messages have been removed or rewritten. Xarray will now also allow non-nanosecond datetimes (with ``'us'``, ``'ms'`` or ``'s'`` resolution) when creating DataArray's from scratch, picking the lowest possible resolution: .. code:: python xr.DataArray(data=[np.datetime64("2000-01-01", "D")], dims=("time",)) In a future release the current default of ``'ns'`` resolution on decoding will eventually be deprecated. New Features ~~~~~~~~~~~~ - Relax nanosecond resolution restriction in CF time coding and permit :py:class:`numpy.datetime64` or :py:class:`numpy.timedelta64` dtype arrays with ``"s"``, ``"ms"``, ``"us"``, or ``"ns"`` resolution throughout xarray (:issue:`7493`, :pull:`9618`, :pull:`9977`, :pull:`9966`, :pull:`9999`). By `Kai MΓΌhlbauer `_ and `Spencer Clark `_. - Enable the ``compute=False`` option in :py:meth:`DataTree.to_zarr`. (:pull:`9958`). By `Sam Levang `_. - Improve the error message raised when no key is matching the available variables in a dataset. (:pull:`9943`) By `Jimmy Westling `_. - Added a ``time_unit`` argument to :py:meth:`CFTimeIndex.to_datetimeindex`. Note that in a future version of xarray, :py:meth:`CFTimeIndex.to_datetimeindex` will return a microsecond-resolution :py:class:`pandas.DatetimeIndex` instead of a nanosecond-resolution :py:class:`pandas.DatetimeIndex` (:pull:`9965`). By `Spencer Clark `_ and `Kai MΓΌhlbauer `_. - Adds shards to the list of valid_encodings in the zarr backend, so that sharded Zarr V3s can be written (:issue:`9947`, :pull:`9948`). By `Jacob Prince_Bieker `_ Deprecations ~~~~~~~~~~~~ - In a future version of xarray decoding of variables into :py:class:`numpy.timedelta64` values will be disabled by default. To silence warnings associated with this, set ``decode_timedelta`` to ``True``, ``False``, or a :py:class:`coders.CFTimedeltaCoder` instance when opening data (:issue:`1621`, :pull:`9966`). By `Spencer Clark `_. Bug fixes ~~~~~~~~~ - Fix :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` when the limit is bigger than the chunksize (:issue:`9939`). By `Joseph Nowak `_. - Fix issues related to Pandas v3 ("us" vs. "ns" for python datetime, copy on write) and handling of 0d-numpy arrays in datetime/timedelta decoding (:pull:`9953`). By `Kai MΓΌhlbauer `_. - Remove dask-expr from CI runs, add "pyarrow" dask dependency to windows CI runs, fix related tests (:issue:`9962`, :pull:`9971`). By `Kai MΓΌhlbauer `_. - Use zarr-fixture to prevent thread leakage errors (:pull:`9967`). By `Kai MΓΌhlbauer `_. - Fix weighted ``polyfit`` for arrays with more than two dimensions (:issue:`9972`, :pull:`9974`). By `Mattia Almansi `_. - Preserve order of variables in :py:func:`xarray.combine_by_coords` (:issue:`8828`, :pull:`9070`). By `Kai MΓΌhlbauer `_. - Cast ``numpy`` scalars to arrays in :py:meth:`NamedArray.from_arrays` (:issue:`10005`, :pull:`10008`) By `Justus Magin `_. Documentation ~~~~~~~~~~~~~ - A chapter on :ref:`internals.timecoding` is added to the internal section (:pull:`9618`). By `Kai MΓΌhlbauer `_. - Clarified xarray's policy on API stability in the FAQ. (:issue:`9854`, :pull:`9855`) By `Tom Nicholas `_. 
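The ``shards`` entry in the New Features above enables writing sharded Zarr V3 stores. A minimal sketch, assuming a recent zarr-python v3 installation; the store name and the chunk/shard sizes are illustrative only, not taken from the release notes:

.. code-block:: python

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"foo": ("x", np.arange(1_000))})
    # each shard bundles ten 10-element chunks into a single storage object
    ds.to_zarr(
        "sharded_example.zarr",
        zarr_format=3,
        encoding={"foo": {"chunks": (10,), "shards": (100,)}},
    )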
Internal Changes
~~~~~~~~~~~~~~~~

- Updated time coding tests to assert exact equality rather than equality with a tolerance, since xarray's minimum supported version of cftime is greater than 1.2.1 (:pull:`9961`). By `Spencer Clark `_.

.. _whats-new.2025.01.1:

v2025.01.1 (Jan 9, 2025)
------------------------

This is a quick release to bring compatibility with the Zarr V3 release. It also includes an update to the time decoding infrastructure as a step toward `enabling non-nanosecond datetime support `_!

New Features
~~~~~~~~~~~~

- Split out :py:class:`coders.CFDatetimeCoder` as public API in ``xr.coders``, make ``decode_times`` keyword argument consume :py:class:`coders.CFDatetimeCoder` (:pull:`9901`). By `Kai MΓΌhlbauer `_.

Deprecations
~~~~~~~~~~~~

- Time decoding related kwarg ``use_cftime`` is deprecated. Use keyword argument ``decode_times=CFDatetimeCoder(use_cftime=True)`` in :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_dataarray`, :py:func:`~xarray.open_datatree`, :py:func:`~xarray.open_groups`, :py:func:`~xarray.open_zarr` and :py:func:`~xarray.decode_cf` instead (:pull:`9901`). By `Kai MΓΌhlbauer `_.

.. _whats-new.2025.01.0:

v2025.01.0 (Jan 3, 2025)
-------------------------

This release brings much improved read performance with Zarr arrays (without consolidated metadata), better support for additional array types, as well as bugfixes and performance improvements.

Thanks to the 20 contributors to this release: Bruce Merry, Davis Bennett, Deepak Cherian, Dimitri Papadopoulos Orfanos, Florian Jetter, Illviljan, Janukan Sivajeyan, Justus Magin, Kai Germaschewski, Kai MΓΌhlbauer, Max Jones, Maximilian Roos, Michael Niklas, Patrick Peglar, Sam Levang, Scott Huberty, Spencer Clark, Stephan Hoyer, Tom Nicholas and Vecko

New Features
~~~~~~~~~~~~

- Improve the error message raised when using chunked-array methods if no chunk manager is available or if the requested chunk manager is missing (:pull:`9676`). By `Justus Magin `_.
- Better support wrapping additional array types (e.g. ``cupy`` or ``jax``) by calling generalized duck array operations throughout more xarray methods. (:issue:`7848`, :pull:`9798`). By `Sam Levang `_.
- Better performance for reading Zarr arrays in the ``ZarrStore`` class by caching the state of Zarr storage and avoiding redundant IO operations. By default, ``ZarrStore`` stores a snapshot of names and metadata of the in-scope Zarr arrays; this cache is then used when iterating over those Zarr arrays, which avoids IO operations and thereby reduces latency. (:issue:`9853`, :pull:`9861`). By `Davis Bennett `_.
- Add ``unit`` keyword argument to :py:func:`date_range` and ``microsecond`` parsing to the iso8601-parser (:pull:`9885`). By `Kai MΓΌhlbauer `_.

Breaking changes
~~~~~~~~~~~~~~~~

- Methods including ``dropna``, ``rank``, ``idxmax``, ``idxmin`` require non-dimension arguments to be passed as keyword arguments. The previous behavior, which allowed ``.idxmax('foo', 'all')``, was too easily confused with ``'all'`` being a dimension. The updated equivalent is ``.idxmax('foo', how='all')``. The previous behavior was deprecated in v2023.10.0. By `Maximilian Roos `_.

Deprecations
~~~~~~~~~~~~

- Finalize deprecation of ``closed`` parameters of :py:func:`cftime_range` and :py:func:`date_range` (:pull:`9882`). By `Kai MΓΌhlbauer `_.

Performance
~~~~~~~~~~~

- Better preservation of chunksizes in :py:meth:`Dataset.idxmin` and :py:meth:`Dataset.idxmax` (:issue:`9425`, :pull:`9800`). By `Deepak Cherian `_.
- Much better implementation of vectorized interpolation for dask arrays (:pull:`9881`). By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix type annotations for ``get_axis_num``. (:issue:`9822`, :pull:`9827`). By `Bruce Merry `_. - Fix unintended load on datasets when calling :py:meth:`DataArray.plot.scatter` (:pull:`9818`). By `Jimmy Westling `_. - Fix interpolation when non-numeric coordinate variables are present (:issue:`8099`, :issue:`9839`). By `Deepak Cherian `_. Internal Changes ~~~~~~~~~~~~~~~~ - Move non-CF related ``ensure_dtype_not_object`` from conventions to backends (:pull:`9828`). By `Kai MΓΌhlbauer `_. - Move handling of scalar datetimes into ``_possibly_convert_objects`` within ``as_compatible_data``. This is consistent with how lists of these objects will be converted (:pull:`9900`). By `Kai MΓΌhlbauer `_. - Move ISO-8601 parser from coding.cftimeindex to coding.times to make it available there (prevents circular import), add capability to parse negative and/or five-digit years (:pull:`9899`). By `Kai MΓΌhlbauer `_. - Refactor of time coding to prepare for relaxing nanosecond restriction (:pull:`9906`). By `Kai MΓΌhlbauer `_. .. _whats-new.2024.11.0: v.2024.11.0 (Nov 22, 2024) -------------------------- This release brings better support for wrapping JAX arrays and Astropy Quantity objects, :py:meth:`DataTree.persist`, algorithmic improvements to many methods with dask (:py:meth:`Dataset.polyfit`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, rolling reductions), and bug fixes. Thanks to the 22 contributors to this release: Benoit Bovy, Deepak Cherian, Dimitri Papadopoulos Orfanos, Holly Mandel, James Bourbeau, Joe Hamman, Justus Magin, Kai MΓΌhlbauer, Lukas Trippe, Mathias Hauser, Maximilian Roos, Michael Niklas, Pascal Bourgault, Patrick Hoefler, Sam Levang, Sarah Charlotte Johnson, Scott Huberty, Stephan Hoyer, Tom Nicholas, Virgile Andreani, joseph nowak and tvo New Features ~~~~~~~~~~~~ - Added :py:meth:`DataTree.persist` method (:issue:`9675`, :pull:`9682`). By `Sam Levang `_. - Added ``write_inherited_coords`` option to :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr` (:pull:`9677`). By `Stephan Hoyer `_. - Support lazy grouping by dask arrays, and allow specifying ordered groups with ``UniqueGrouper(labels=["a", "b", "c"])`` (:issue:`2852`, :issue:`757`). By `Deepak Cherian `_. - Add new ``automatic_rechunk`` kwarg to :py:meth:`DataArrayRolling.construct` and :py:meth:`DatasetRolling.construct`. This is only useful on ``dask>=2024.11.0`` (:issue:`9550`). By `Deepak Cherian `_. - Optimize ffill, bfill with dask when limit is specified (:pull:`9771`). By `Joseph Nowak `_, and `Patrick Hoefler `_. - Allow wrapping ``np.ndarray`` subclasses, e.g. ``astropy.units.Quantity`` (:issue:`9704`, :pull:`9760`). By `Sam Levang `_ and `Tien Vo `_. - Optimize :py:meth:`DataArray.polyfit` and :py:meth:`Dataset.polyfit` with dask, when used with arrays with more than two dimensions. (:issue:`5629`). By `Deepak Cherian `_. - Support for directly opening remote files as string paths (for example, ``s3://bucket/data.nc``) with ``fsspec`` when using the ``h5netcdf`` engine (:issue:`9723`, :pull:`9797`). By `James Bourbeau `_. - Re-implement the :py:mod:`ufuncs` module, which now dynamically dispatches to the underlying array's backend. Provides better support for certain wrapped array types like ``jax.numpy.ndarray``. (:issue:`7848`, :pull:`9776`). By `Sam Levang `_. - Speed up loading of large zarr stores using dask arrays. (:issue:`8902`) By `Deepak Cherian `_. 
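A minimal sketch of the ordered-groups feature above (``UniqueGrouper(labels=...)``); the variable names and values are made up for illustration:

.. code-block:: python

    import numpy as np
    import xarray as xr
    from xarray.groupers import UniqueGrouper

    ds = xr.Dataset(
        {"temp": ("time", np.arange(12.0))},
        coords={"label": ("time", np.array(list("abcabcabcabc")))},
    )
    # groups are returned in the order given by ``labels`` rather than sorted order
    result = ds.groupby(label=UniqueGrouper(labels=["c", "b", "a"])).mean()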
Breaking Changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed ===================== ========= ======= Package Old New ===================== ========= ======= boto3 1.28 1.29 dask-core 2023.9 2023.11 distributed 2023.9 2023.11 h5netcdf 1.2 1.3 numbagg 0.2.1 0.6 typing_extensions 4.7 4.8 ===================== ========= ======= Deprecations ~~~~~~~~~~~~ - Grouping by a chunked array (e.g. dask or cubed) currently eagerly loads that variable in to memory. This behaviour is deprecated. If eager loading was intended, please load such arrays manually using ``.load()`` or ``.compute()``. Else pass ``eagerly_compute_group=False``, and provide expected group labels using the ``labels`` kwarg to a grouper object such as :py:class:`grouper.UniqueGrouper` or :py:class:`grouper.BinGrouper`. Bug fixes ~~~~~~~~~ - Fix inadvertent deep-copying of child data in DataTree (:issue:`9683`, :pull:`9684`). By `Stephan Hoyer `_. - Avoid including parent groups when writing DataTree subgroups to Zarr or netCDF (:pull:`9682`). By `Stephan Hoyer `_. - Fix regression in the interoperability of :py:meth:`DataArray.polyfit` and :py:meth:`xr.polyval` for date-time coordinates. (:pull:`9691`). By `Pascal Bourgault `_. - Fix CF decoding of ``grid_mapping`` to allow all possible formats, add tests (:issue:`9761`, :pull:`9765`). By `Kai MΓΌhlbauer `_. - Add ``User-Agent`` to request-headers when retrieving tutorial data (:issue:`9774`, :pull:`9782`) By `Kai MΓΌhlbauer `_. Documentation ~~~~~~~~~~~~~ - Mention attribute peculiarities in docs/docstrings (:issue:`4798`, :pull:`9700`). By `Kai MΓΌhlbauer `_. Internal Changes ~~~~~~~~~~~~~~~~ - ``persist`` methods now route through the :py:class:`xr.namedarray.parallelcompat.ChunkManagerEntrypoint` (:pull:`9682`). By `Sam Levang `_. .. _whats-new.2024.10.0: v2024.10.0 (Oct 24th, 2024) --------------------------- This release brings official support for ``xarray.DataTree``, and compatibility with zarr-python v3! Aside from these two huge features, it also improves support for vectorised interpolation and fixes various bugs. Thanks to the 31 contributors to this release: Alfonso Ladino, DWesl, Deepak Cherian, Eni, Etienne Schalk, Holly Mandel, Ilan Gold, Illviljan, Joe Hamman, Justus Magin, Kai MΓΌhlbauer, Karl Krauth, Mark Harfouche, Martey Dodoo, Matt Savoie, Maximilian Roos, Patrick Hoefler, Peter Hill, Renat Sibgatulin, Ryan Abernathey, Spencer Clark, Stephan Hoyer, Tom Augspurger, Tom Nicholas, Vecko, Virgile Andreani, Yvonne FrΓΆhlich, carschandler, joseph nowak, mgunyho and owenlittlejohns New Features ~~~~~~~~~~~~ - ``DataTree`` related functionality is now exposed in the main ``xarray`` public API. This includes: ``xarray.DataTree``, ``xarray.open_datatree``, ``xarray.open_groups``, ``xarray.map_over_datasets``, ``xarray.group_subtrees``, ``xarray.register_datatree_accessor`` and ``xarray.testing.assert_isomorphic``. By `Owen Littlejohns `_, `Eni Awowale `_, `Matt Savoie `_, `Stephan Hoyer `_, `Tom Nicholas `_, `Justus Magin `_, and `Alfonso Ladino `_. - A migration guide for users of the prototype `xarray-contrib/datatree repository `_ has been added, and can be found in the ``DATATREE_MIGRATION_GUIDE.md`` file in the repository root. By `Tom Nicholas `_. - Support for Zarr-Python 3 (:issue:`95515`, :pull:`9552`). By `Tom Augspurger `_, `Ryan Abernathey `_ and `Joe Hamman `_. - Added zarr backends for :py:func:`open_groups` (:issue:`9430`, :pull:`9469`). By `Eni Awowale `_. 
- Added support for vectorized interpolation using additional interpolators from the ``scipy.interpolate`` module (:issue:`9049`, :pull:`9526`). By `Holly Mandel `_.
- Implement handling of complex numbers (netcdf4/h5netcdf) and enums (h5netcdf) (:issue:`9246`, :issue:`3297`, :pull:`9509`). By `Kai MΓΌhlbauer `_.
- Fix passing missing arguments when opening hdf5 and netCDF4 datatrees (:issue:`9427`, :pull:`9428`). By `Alfonso Ladino `_.

Bug fixes
~~~~~~~~~

- Raise an error on illegal path-like variable names when constructing a DataTree from a Dataset (:issue:`9339`, :pull:`9378`). By `Etienne Schalk `_.
- Work around `upstream pandas issue `_ to ensure that we can decode times encoded with small integer dtype values (e.g. ``np.int32``) in environments with NumPy 2.0 or greater without needing to fall back to cftime (:pull:`9518`). By `Spencer Clark `_.
- Fix bug when encoding times with missing values as floats in the case when the non-missing times could in theory be encoded with integers (:issue:`9488`, :pull:`9497`). By `Spencer Clark `_.
- Fix a few bugs affecting groupby reductions with ``flox``. (:issue:`8090`, :issue:`9398`, :issue:`9648`). By `Deepak Cherian `_.
- Fix the safe_chunks validation option on the to_zarr method (:issue:`5511`, :pull:`9559`). By `Joseph Nowak `_.
- Fix binning by multiple variables where some bins have no observations. (:issue:`9630`). By `Deepak Cherian `_.
- Fix issue where polyfit wouldn't handle non-dimension coordinates. (:issue:`4375`, :pull:`9369`) By `Karl Krauth `_.

Documentation
~~~~~~~~~~~~~

- Migrate documentation for ``datatree`` into main ``xarray`` documentation (:pull:`9033`). For information on previous ``datatree`` releases, please see: `datatree's historical release notes `_. By `Owen Littlejohns `_, `Matt Savoie `_, and `Tom Nicholas `_.

Internal Changes
~~~~~~~~~~~~~~~~

.. _whats-new.2024.09.0:

v2024.09.0 (Sept 11, 2024)
--------------------------

This release drops support for Python 3.9, and adds support for grouping by :ref:`multiple arrays `, while providing numerous performance improvements and bug fixes.

Thanks to the 33 contributors to this release: Alfonso Ladino, Andrew Scherer, Anurag Nayak, David Hoese, Deepak Cherian, Diogo Teles Sant'Anna, Dom, Elliott Sales de Andrade, Eni, Holly Mandel, Illviljan, Jack Kelly, Julius Busecke, Justus Magin, Kai MΓΌhlbauer, Manish Kumar Gupta, Matt Savoie, Maximilian Roos, Michele Claus, Miguel Jimenez, Niclas Rieger, Pascal Bourgault, Philip Chmielowiec, Spencer Clark, Stephan Hoyer, Tao Xin, Tiago Sanona, TimothyCera-NOAA, Tom Nicholas, Tom White, Virgile Andreani, oliverhiggs and tiago

New Features
~~~~~~~~~~~~

- Add :py:attr:`~core.accessor_dt.DatetimeAccessor.days_in_year` and :py:attr:`~core.accessor_dt.DatetimeAccessor.decimal_year` to the ``DatetimeAccessor`` on ``xr.DataArray``. (:pull:`9105`). By `Pascal Bourgault `_.

Performance
~~~~~~~~~~~

- Make chunk manager an option in ``set_options`` (:pull:`9362`). By `Tom White `_.
- Support for :ref:`grouping by multiple variables `. This is quite new, so please check your results and report bugs. Binary operations after grouping by multiple arrays are not supported yet. (:issue:`1056`, :issue:`9332`, :issue:`324`, :pull:`9372`). By `Deepak Cherian `_.
- Allow data variable specific ``constant_values`` in the dataset ``pad`` function (:pull:`9353`). By `Tiago Sanona `_.
- Speed up grouping by avoiding deep-copy of non-dimension coordinates (:issue:`9426`, :pull:`9393`) By `Deepak Cherian `_.

Breaking changes
~~~~~~~~~~~~~~~~

- Support for ``python 3.9`` has been dropped (:pull:`8937`)
- The minimum versions of some dependencies were changed

  ===================== ========= =======
  Package               Old       New
  ===================== ========= =======
  boto3                 1.26      1.28
  cartopy               0.21      0.22
  dask-core             2023.4    2023.9
  distributed           2023.4    2023.9
  h5netcdf              1.1       1.2
  iris                  3.4       3.7
  numba                 0.56      0.57
  numpy                 1.23      1.24
  pandas                2.0       2.1
  scipy                 1.10      1.11
  typing_extensions     4.5       4.7
  zarr                  2.14      2.16
  ===================== ========= =======

Bug fixes
~~~~~~~~~

- Fix bug with rechunking to a frequency when some periods contain no data (:issue:`9360`). By `Deepak Cherian `_.
- Fix bug causing ``DataTree.from_dict`` to be sensitive to insertion order (:issue:`9276`, :pull:`9292`). By `Tom Nicholas `_.
- Fix resampling error with monthly, quarterly, or yearly frequencies with cftime when the time bins straddle the date "0001-01-01". For example, this can happen in certain circumstances when the time coordinate contains the date "0001-01-01". (:issue:`9108`, :pull:`9116`) By `Spencer Clark `_ and `Deepak Cherian `_.
- Fix issue with passing parameters to ZarrStore.open_store when opening datatree in zarr format (:issue:`9376`, :pull:`9377`). By `Alfonso Ladino `_.
- Fix deprecation warning that was raised when calling ``np.array`` on an ``xr.DataArray`` in NumPy 2.0 (:issue:`9312`, :pull:`9393`) By `Andrew Scherer `_.
- Fix passing missing arguments when opening hdf5 and netCDF4 datatrees (:issue:`9427`, :pull:`9428`). By `Alfonso Ladino `_.
- Fix support for using ``pandas.DateOffset``, ``pandas.Timedelta``, and ``datetime.timedelta`` objects as ``resample`` frequencies (:issue:`9408`, :pull:`9413`). By `Oliver Higgs `_.

Internal Changes
~~~~~~~~~~~~~~~~

- Re-enable testing ``pydap`` backend with ``numpy>=2`` (:pull:`9391`). By `Miguel Jimenez `_.

.. _whats-new.2024.07.0:

v2024.07.0 (Jul 30, 2024)
-------------------------

This release extends the API for groupby operations with various `grouper objects `_, and includes improvements to the documentation and numerous bugfixes.

Thanks to the 22 contributors to this release: Alfonso Ladino, ChrisCleaner, David Hoese, Deepak Cherian, Dieter WerthmΓΌller, Illviljan, Jessica Scheick, Joel Jaeschke, Justus Magin, K. Arthur Endsley, Kai MΓΌhlbauer, Mark Harfouche, Martin Raspaud, Mathijs Verhaegh, Maximilian Roos, Michael Niklas, MichaΕ‚ GΓ³rny, Moritz Schreiber, Pontus Lurcock, Spencer Clark, Stephan Hoyer and Tom Nicholas

New Features
~~~~~~~~~~~~

- Use fastpath when grouping by either a monotonically increasing or decreasing variable in :py:class:`GroupBy` (:issue:`6220`, :pull:`7427`). By `Joel Jaeschke `_.
- Introduce new :py:class:`groupers.UniqueGrouper`, :py:class:`groupers.BinGrouper`, and :py:class:`groupers.TimeResampler` objects as a step towards supporting grouping by multiple variables. See the `docs `_ and the `grouper design doc `_ for more. (:issue:`6610`, :pull:`8840`). By `Deepak Cherian `_.
- Allow rechunking to a frequency using ``Dataset.chunk(time=TimeResampler("YE"))`` syntax. (:issue:`7559`, :pull:`9109`) Such rechunking allows many time domain analyses to be executed in an embarrassingly parallel fashion. By `Deepak Cherian `_.
- Allow per-variable specification of ``mask_and_scale``, ``decode_times``, ``decode_timedelta``, ``use_cftime`` and ``concat_characters`` params in :py:func:`~xarray.open_dataset` (:pull:`9218`). By `Mathijs Verhaegh `_.
- Allow chunking for arrays with duplicated dimension names (:issue:`8759`, :pull:`9099`). By `Martin Raspaud `_. - Extract the source url from fsspec objects (:issue:`9142`, :pull:`8923`). By `Justus Magin `_. - Add :py:meth:`DataArray.drop_attrs` & :py:meth:`Dataset.drop_attrs` methods, to return an object without ``attrs``. A ``deep`` parameter controls whether variables' ``attrs`` are also dropped. By `Maximilian Roos `_. (:pull:`8288`) - Added :py:func:`open_groups` for h5netcdf and netCDF4 backends (:issue:`9137`, :pull:`9243`). By `Eni Awowale `_. Breaking changes ~~~~~~~~~~~~~~~~ - The ``base`` and ``loffset`` parameters to :py:meth:`Dataset.resample` and :py:meth:`DataArray.resample` are now removed. These parameters have been deprecated since v2023.03.0. Using the ``origin`` or ``offset`` parameters is recommended as a replacement for using the ``base`` parameter and using time offset arithmetic is recommended as a replacement for using the ``loffset`` parameter. (:pull:`9233`) By `Deepak Cherian `_. - The ``squeeze`` kwarg to ``groupby`` is now ignored. This has been the source of some quite confusing behaviour and has been deprecated since v2024.01.0. ``groupby`` behavior is now always consistent with the existing ``.groupby(..., squeeze=False)`` behavior. No errors will be raised if ``squeeze=False``. (:pull:`9280`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix scatter plot broadcasting unnecessarily. (:issue:`9129`, :pull:`9206`) By `Jimmy Westling `_. - Don't convert custom indexes to ``pandas`` indexes when computing a diff (:pull:`9157`) By `Justus Magin `_. - Make :py:func:`testing.assert_allclose` work with numpy 2.0 (:issue:`9165`, :pull:`9166`). By `Pontus Lurcock `_. - Allow diffing objects with array attributes on variables (:issue:`9153`, :pull:`9169`). By `Justus Magin `_. - ``numpy>=2`` compatibility in the ``netcdf4`` backend (:pull:`9136`). By `Justus Magin `_ and `Kai MΓΌhlbauer `_. - Promote floating-point numeric datetimes before decoding (:issue:`9179`, :pull:`9182`). By `Justus Magin `_. - Address regression introduced in :pull:`9002` that prevented objects returned by :py:meth:`DataArray.convert_calendar` to be indexed by a time index in certain circumstances (:issue:`9138`, :pull:`9192`). By `Mark Harfouche `_ and `Spencer Clark `_. - Fix static typing of tolerance arguments by allowing ``str`` type (:issue:`8892`, :pull:`9194`). By `Michael Niklas `_. - Dark themes are now properly detected for ``html[data-theme=dark]``-tags (:pull:`9200`). By `Dieter WerthmΓΌller `_. - Reductions no longer fail for ``np.complex_`` dtype arrays when numbagg is installed. (:pull:`9210`) By `Maximilian Roos `_. Documentation ~~~~~~~~~~~~~ - Adds intro to backend section of docs, including a flow-chart to navigate types of backends (:pull:`9175`). By `Jessica Scheick `_. - Adds a flow-chart diagram to help users navigate help resources (:discussion:`8990`, :pull:`9147`). By `Jessica Scheick `_. - Improvements to Zarr & chunking docs (:pull:`9139`, :pull:`9140`, :pull:`9132`) By `Maximilian Roos `_. - Fix copybutton for multi line examples and double digit ipython cell numbers (:pull:`9264`). By `Moritz Schreiber `_. Internal Changes ~~~~~~~~~~~~~~~~ - Enable typing checks of pandas (:pull:`9213`). By `Michael Niklas `_. .. _whats-new.2024.06.0: v2024.06.0 (Jun 13, 2024) ------------------------- This release brings various performance optimizations and compatibility with the upcoming numpy 2.0 release. 
Thanks to the 22 contributors to this release: Alfonso Ladino, David Hoese, Deepak Cherian, Eni Awowale, Ilan Gold, Jessica Scheick, Joe Hamman, Justus Magin, Kai MΓΌhlbauer, Mark Harfouche, Mathias Hauser, Matt Savoie, Maximilian Roos, Mike Thramann, Nicolas Karasiak, Owen Littlejohns, Paul Ockenfuß, Philippe THOMY, Scott Henderson, Spencer Clark, Stephan Hoyer and Tom Nicholas Performance ~~~~~~~~~~~ - Small optimization to the netCDF4 and h5netcdf backends (:issue:`9058`, :pull:`9067`). By `Deepak Cherian `_. - Small optimizations to help reduce indexing speed of datasets (:pull:`9002`). By `Mark Harfouche `_. - Performance improvement in ``open_datatree`` method for Zarr, netCDF4 and h5netcdf backends (:issue:`8994`, :pull:`9014`). By `Alfonso Ladino `_. Bug fixes ~~~~~~~~~ - Preserve conversion of timezone-aware pandas Datetime arrays to numpy object arrays (:issue:`9026`, :pull:`9042`). By `Ilan Gold `_. - :py:meth:`DataArrayResample.interpolate` and :py:meth:`DatasetResample.interpolate` method now support arbitrary kwargs such as ``order`` for polynomial interpolation (:issue:`8762`). By `Nicolas Karasiak `_. Documentation ~~~~~~~~~~~~~ - Add link to CF Conventions on packed data and sentence on type determination in the I/O user guide (:issue:`9041`, :pull:`9045`). By `Kai MΓΌhlbauer `_. Internal Changes ~~~~~~~~~~~~~~~~ - Migrates remainder of ``io.py`` to ``xarray/core/datatree_io.py`` and ``TreeAttrAccessMixin`` into ``xarray/core/common.py`` (:pull:`9011`). By `Owen Littlejohns `_ and `Tom Nicholas `_. - Compatibility with numpy 2 (:issue:`8844`, :pull:`8854`, :pull:`8946`). By `Justus Magin `_ and `Stephan Hoyer `_. .. _whats-new.2024.05.0: v2024.05.0 (May 12, 2024) ------------------------- This release brings support for pandas ExtensionArray objects, optimizations when reading Zarr, the ability to concatenate datasets without pandas indexes, more compatibility fixes for the upcoming numpy 2.0, and the migration of most of the xarray-datatree project code into xarray ``main``! Thanks to the 18 contributors to this release: Aimilios Tsouvelekakis, Andrey Akinshin, Deepak Cherian, Eni Awowale, Ilan Gold, Illviljan, Justus Magin, Mark Harfouche, Matt Savoie, Maximilian Roos, Noah C. Benson, Pascal Bourgault, Ray Bell, Spencer Clark, Tom Nicholas, ignamv, owenlittlejohns, and saschahofmann. New Features ~~~~~~~~~~~~ - New "random" method for converting to and from 360_day calendars (:pull:`8603`). By `Pascal Bourgault `_. - Xarray now makes a best attempt not to coerce :py:class:`pandas.api.extensions.ExtensionArray` to a numpy array by supporting 1D ``ExtensionArray`` objects internally where possible. Thus, :py:class:`Dataset` objects initialized with a ``pd.Categorical``, for example, will retain the object. However, one cannot do operations that are not possible on the ``ExtensionArray`` then, such as broadcasting. (:issue:`5287`, :issue:`8463`, :pull:`8723`) By `Ilan Gold `_. - :py:func:`testing.assert_allclose` / :py:func:`testing.assert_equal` now accept a new argument ``check_dims="transpose"``, controlling whether a transposed array is considered equal. (:issue:`5733`, :pull:`8991`) By `Ignacio Martinez Vazquez `_. - Added the option to avoid automatically creating 1D pandas indexes in :py:meth:`Dataset.expand_dims()`, by passing the new kwarg ``create_index_for_new_dim=False``. (:pull:`8960`) By `Tom Nicholas `_. - Avoid automatically re-creating 1D pandas indexes in :py:func:`concat()`. 
Also added option to avoid creating 1D indexes for new dimension coordinates by passing the new kwarg ``create_index_for_new_dim=False``. (:issue:`8871`, :pull:`8872`) By `Tom Nicholas `_. Breaking changes ~~~~~~~~~~~~~~~~ - The PyNIO backend has been deleted (:issue:`4491`, :pull:`7301`). By `Deepak Cherian `_. - The minimum versions of some dependencies were changed, in particular our minimum supported pandas version is now Pandas 2. ===================== ========= ======= Package Old New ===================== ========= ======= dask-core 2022.12 2023.4 distributed 2022.12 2023.4 h5py 3.7 3.8 matplotlib-base 3.6 3.7 packaging 22.0 23.1 pandas 1.5 2.0 pydap 3.3 3.4 sparse 0.13 0.14 typing_extensions 4.4 4.5 zarr 2.13 2.14 ===================== ========= ======= Bug fixes ~~~~~~~~~ - Following `an upstream bug fix `_ to :py:func:`pandas.date_range`, date ranges produced by :py:func:`xarray.cftime_range` with negative frequencies will now fall fully within the bounds of the provided start and end dates (:pull:`8999`). By `Spencer Clark `_. Internal Changes ~~~~~~~~~~~~~~~~ - Enforces failures on CI when tests raise warnings from within xarray (:pull:`8974`) By `Maximilian Roos `_ - Migrates ``formatting_html`` functionality for ``DataTree`` into ``xarray/core`` (:pull:`8930`) By `Eni Awowale `_, `Julia Signell `_ and `Tom Nicholas `_. - Migrates ``datatree_mapping`` functionality into ``xarray/core`` (:pull:`8948`) By `Matt Savoie `_ `Owen Littlejohns `_ and `Tom Nicholas `_. - Migrates ``extensions``, ``formatting`` and ``datatree_render`` functionality for ``DataTree`` into ``xarray/core``. Also migrates ``testing`` functionality into ``xarray/testing/assertions`` for ``DataTree``. (:pull:`8967`) By `Owen Littlejohns `_ and `Tom Nicholas `_. - Migrates ``ops.py`` functionality into ``xarray/core/datatree_ops.py`` (:pull:`8976`) By `Matt Savoie `_ and `Tom Nicholas `_. - Migrates ``iterator`` functionality into ``xarray/core`` (:pull:`8879`) By `Owen Littlejohns `_, `Matt Savoie `_ and `Tom Nicholas `_. - ``transpose``, ``set_dims``, ``stack`` & ``unstack`` now use a ``dim`` kwarg rather than ``dims`` or ``dimensions``. This is the final change to make xarray methods consistent with their use of ``dim``. Using the existing kwarg will raise a warning. By `Maximilian Roos `_ .. _whats-new.2024.03.0: v2024.03.0 (Mar 29, 2024) ------------------------- This release brings performance improvements for grouped and resampled quantile calculations, CF decoding improvements, minor optimizations to distributed Zarr writes, and compatibility fixes for Numpy 2.0 and Pandas 3.0. Thanks to the 18 contributors to this release: Anderson Banihirwe, Christoph Hasse, Deepak Cherian, Etienne Schalk, Justus Magin, Kai MΓΌhlbauer, Kevin Schwarzwald, Mark Harfouche, Martin, Matt Savoie, Maximilian Roos, Ray Bell, Roberto Chang, Spencer Clark, Tom Nicholas, crusaderky, owenlittlejohns, saschahofmann New Features ~~~~~~~~~~~~ - Partial writes to existing chunks with ``region`` or ``append_dim`` will now raise an error (unless ``safe_chunks=False``); previously an error would only be raised on new variables. (:pull:`8459`, :issue:`8371`, :issue:`8882`) By `Maximilian Roos `_. - Grouped and resampling quantile calculations now use the vectorized algorithm in ``flox>=0.9.4`` if present. By `Deepak Cherian `_. - Do not broadcast in arithmetic operations when global option ``arithmetic_broadcast=False`` (:issue:`6806`, :pull:`8784`). By `Etienne Schalk `_ and `Deepak Cherian `_. 
- Add the ``.oindex`` property to Explicitly Indexed Arrays for orthogonal indexing functionality. (:issue:`8238`, :pull:`8750`) By `Anderson Banihirwe `_. - Add the ``.vindex`` property to Explicitly Indexed Arrays for vectorized indexing functionality. (:issue:`8238`, :pull:`8780`) By `Anderson Banihirwe `_. - Expand use of ``.oindex`` and ``.vindex`` properties. (:pull:`8790`) By `Anderson Banihirwe `_ and `Deepak Cherian `_. - Allow creating :py:class:`xr.Coordinates` objects with no indexes (:pull:`8711`) By `Benoit Bovy `_ and `Tom Nicholas `_. - Enable plotting of ``datetime.dates``. (:issue:`8866`, :pull:`8873`) By `Sascha Hofmann `_. Breaking changes ~~~~~~~~~~~~~~~~ - Don't allow overwriting index variables with ``to_zarr`` region writes. (:issue:`8589`, :pull:`8876`). By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - The default ``freq`` parameter in :py:meth:`xr.date_range` and :py:meth:`xr.cftime_range` is set to ``'D'`` only if ``periods``, ``start``, or ``end`` are ``None`` (:issue:`8770`, :pull:`8774`). By `Roberto Chang `_. - Ensure that non-nanosecond precision :py:class:`numpy.datetime64` and :py:class:`numpy.timedelta64` values are cast to nanosecond precision values when used in :py:meth:`DataArray.expand_dims` and ::py:meth:`Dataset.expand_dims` (:pull:`8781`). By `Spencer Clark `_. - CF conform handling of ``_FillValue``/``missing_value`` and ``dtype`` in ``CFMaskCoder``/``CFScaleOffsetCoder`` (:issue:`2304`, :issue:`5597`, :issue:`7691`, :pull:`8713`, see also discussion in :pull:`7654`). By `Kai MΓΌhlbauer `_. - Do not cast ``_FillValue``/``missing_value`` in ``CFMaskCoder`` if ``_Unsigned`` is provided (:issue:`8844`, :pull:`8852`). - Adapt handling of copy keyword argument for numpy >= 2.0dev (:issue:`8844`, :pull:`8851`, :pull:`8865`). By `Kai MΓΌhlbauer `_. - Import trapz/trapezoid depending on numpy version (:issue:`8844`, :pull:`8865`). By `Kai MΓΌhlbauer `_. - Warn and return bytes undecoded in case of UnicodeDecodeError in h5netcdf-backend (:issue:`5563`, :pull:`8874`). By `Kai MΓΌhlbauer `_. - Fix bug incorrectly disallowing creation of a dataset with a multidimensional coordinate variable with the same name as one of its dims. (:issue:`8884`, :pull:`8886`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Migrates ``treenode`` functionality into ``xarray/core`` (:pull:`8757`) By `Matt Savoie `_ and `Tom Nicholas `_. - Migrates ``datatree`` functionality into ``xarray/core``. (:pull:`8789`) By `Owen Littlejohns `_, `Matt Savoie `_ and `Tom Nicholas `_. .. _whats-new.2024.02.0: v2024.02.0 (Feb 19, 2024) ------------------------- This release brings size information to the text ``repr``, changes to the accepted frequency strings, and various bug fixes. Thanks to our 12 contributors: Anderson Banihirwe, Deepak Cherian, Eivind Jahren, Etienne Schalk, Justus Magin, Marco Wolsza, Mathias Hauser, Matt Savoie, Maximilian Roos, Rambaud Pierrick, Tom Nicholas New Features ~~~~~~~~~~~~ - Added a simple ``nbytes`` representation in DataArrays and Dataset ``repr``. (:issue:`8690`, :pull:`8702`). By `Etienne Schalk `_. - Allow negative frequency strings (e.g. ``"-1YE"``). These strings are for example used in :py:func:`date_range`, and :py:func:`cftime_range` (:pull:`8651`). By `Mathias Hauser `_. - Add :py:meth:`NamedArray.expand_dims`, :py:meth:`NamedArray.permute_dims` and :py:meth:`NamedArray.broadcast_to` (:pull:`8380`) By `Anderson Banihirwe `_. - Xarray now defers to `flox's heuristics `_ to set the default ``method`` for groupby problems. 
This only applies to ``flox>=0.9``. By `Deepak Cherian `_. - All ``quantile`` methods (e.g. :py:meth:`DataArray.quantile`) now use ``numbagg`` for the calculation of nanquantiles (i.e., ``skipna=True``) if it is installed. This is currently limited to the linear interpolation method (`method='linear'`). (:issue:`7377`, :pull:`8684`) By `Marco Wolsza `_. Breaking changes ~~~~~~~~~~~~~~~~ - :py:func:`infer_freq` always returns the frequency strings as defined in pandas 2.2 (:issue:`8612`, :pull:`8627`). By `Mathias Hauser `_. Deprecations ~~~~~~~~~~~~ - The ``dt.weekday_name`` parameter wasn't functional on modern pandas versions and has been removed. (:issue:`8610`, :pull:`8664`) By `Sam Coleman `_. Bug fixes ~~~~~~~~~ - Fixed a regression that prevented multi-index level coordinates being serialized after resetting or dropping the multi-index (:issue:`8628`, :pull:`8672`). By `Benoit Bovy `_. - Fix bug with broadcasting when wrapping array API-compliant classes. (:issue:`8665`, :pull:`8669`) By `Tom Nicholas `_. - Ensure :py:meth:`DataArray.unstack` works when wrapping array API-compliant classes. (:issue:`8666`, :pull:`8668`) By `Tom Nicholas `_. - Fix negative slicing of Zarr arrays without dask installed. (:issue:`8252`) By `Deepak Cherian `_. - Preserve chunks when writing time-like variables to zarr by enabling lazy CF encoding of time-like variables (:issue:`7132`, :issue:`8230`, :issue:`8432`, :pull:`8575`). By `Spencer Clark `_ and `Mattia Almansi `_. - Preserve chunks when writing time-like variables to zarr by enabling their lazy encoding (:issue:`7132`, :issue:`8230`, :issue:`8432`, :pull:`8253`, :pull:`8575`; see also discussion in :pull:`8253`). By `Spencer Clark `_ and `Mattia Almansi `_. - Raise an informative error if dtype encoding of time-like variables would lead to integer overflow or unsafe conversion from floating point to integer values (:issue:`8542`, :pull:`8575`). By `Spencer Clark `_. - Raise an error when unstacking a MultiIndex that has duplicates as this would lead to silent data loss (:issue:`7104`, :pull:`8737`). By `Mathias Hauser `_. Documentation ~~~~~~~~~~~~~ - Fix ``variables`` arg typo in ``Dataset.sortby()`` docstring (:issue:`8663`, :pull:`8670`) By `Tom Vo `_. - Fixed documentation where the use of the depreciated pandas frequency string prevented the documentation from being built. (:pull:`8638`) By `Sam Coleman `_. Internal Changes ~~~~~~~~~~~~~~~~ - ``DataArray.dt`` now raises an ``AttributeError`` rather than a ``TypeError`` when the data isn't datetime-like. (:issue:`8718`, :pull:`8724`) By `Maximilian Roos `_. - Move ``parallelcompat`` and ``chunk managers`` modules from ``xarray/core`` to ``xarray/namedarray``. (:pull:`8319`) By `Tom Nicholas `_ and `Anderson Banihirwe `_. - Imports ``datatree`` repository and history into internal location. (:pull:`8688`) By `Matt Savoie `_, `Justus Magin `_ and `Tom Nicholas `_. - Adds :py:func:`open_datatree` into ``xarray/backends`` (:pull:`8697`) By `Matt Savoie `_ and `Tom Nicholas `_. - Refactor :py:meth:`xarray.core.indexing.DaskIndexingAdapter.__getitem__` to remove an unnecessary rewrite of the indexer key (:issue:`8377`, :pull:`8758`) By `Anderson Banihirwe `_. .. _whats-new.2024.01.1: v2024.01.1 (23 Jan, 2024) ------------------------- This release is to fix a bug with the rendering of the documentation, but it also includes changes to the handling of pandas frequency strings. 
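A minimal illustration of the pandas 2.2-style frequency strings discussed below (the dates are arbitrary):

.. code-block:: python

    import xarray as xr

    # "YE" (year end) replaces the deprecated "Y" / "A" frequency strings
    times = xr.date_range("2000-01-01", periods=3, freq="YE")
    xr.infer_freq(times)  # "YE"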
Breaking changes ~~~~~~~~~~~~~~~~ - Following pandas, :py:meth:`infer_freq` will return ``"YE"``, instead of ``"Y"`` (formerly ``"A"``). This is to be consistent with the deprecation of the latter frequency string in pandas 2.2. This is a follow up to :pull:`8415` (:issue:`8612`, :pull:`8642`). By `Mathias Hauser `_. Deprecations ~~~~~~~~~~~~ - Following pandas, the frequency string ``"Y"`` (formerly ``"A"``) is deprecated in favor of ``"YE"``. These strings are used, for example, in :py:func:`date_range`, :py:func:`cftime_range`, :py:meth:`DataArray.resample`, and :py:meth:`Dataset.resample` among others (:issue:`8612`, :pull:`8629`). By `Mathias Hauser `_. Documentation ~~~~~~~~~~~~~ - Pin ``sphinx-book-theme`` to ``1.0.1`` to fix a rendering issue with the sidebar in the docs. (:issue:`8619`, :pull:`8632`) By `Tom Nicholas `_. .. _whats-new.2024.01.0: v2024.01.0 (17 Jan, 2024) ------------------------- This release brings support for weights in correlation and covariance functions, a new ``DataArray.cumulative`` aggregation, improvements to ``xr.map_blocks``, an update to our minimum dependencies, and various bugfixes. Thanks to our 17 contributors to this release: Abel Aoun, Deepak Cherian, Illviljan, Johan Mathe, Justus Magin, Kai MΓΌhlbauer, LlorenΓ§ LledΓ³, Mark Harfouche, Markel, Mathias Hauser, Maximilian Roos, Michael Niklas, Niclas Rieger, SΓ©bastien Celles, Tom Nicholas, Trinh Quoc Anh, and crusaderky. New Features ~~~~~~~~~~~~ - :py:meth:`xr.cov` and :py:meth:`xr.corr` now support using weights (:issue:`8527`, :pull:`7392`). By `LlorenΓ§ LledΓ³ `_. - Accept the compression arguments new in netCDF 1.6.0 in the netCDF4 backend. See `netCDF4 documentation `_ for details. Note that some new compression filters needs plugins to be installed which may not be available in all netCDF distributions. By `Markel GarcΓ­a-DΓ­ez `_. (:issue:`6929`, :pull:`7551`) - Add :py:meth:`DataArray.cumulative` & :py:meth:`Dataset.cumulative` to compute cumulative aggregations, such as ``sum``, along a dimension β€” for example ``da.cumulative('time').sum()``. This is similar to pandas' ``.expanding``, and mostly equivalent to ``.cumsum`` methods, or to :py:meth:`DataArray.rolling` with a window length equal to the dimension size. By `Maximilian Roos `_. (:pull:`8512`) - Decode/Encode netCDF4 enums and store the enum definition in dataarrays' dtype metadata. If multiple variables share the same enum in netCDF4, each dataarray will have its own enum definition in their respective dtype metadata. By `Abel Aoun `_. (:issue:`8144`, :pull:`8147`) Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed (:pull:`8586`): ===================== ========= ======== Package Old New ===================== ========= ======== cartopy 0.20 0.21 dask-core 2022.7 2022.12 distributed 2022.7 2022.12 flox 0.5 0.7 iris 3.2 3.4 matplotlib-base 3.5 3.6 numpy 1.22 1.23 numba 0.55 0.56 packaging 21.3 22.0 seaborn 0.11 0.12 scipy 1.8 1.10 typing_extensions 4.3 4.4 zarr 2.12 2.13 ===================== ========= ======== Deprecations ~~~~~~~~~~~~ - The ``squeeze`` kwarg to GroupBy is now deprecated. (:issue:`2157`, :pull:`8507`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Support non-string hashable dimensions in :py:class:`xarray.DataArray` (:issue:`8546`, :pull:`8559`). By `Michael Niklas `_. - Reverse index output of bottleneck's rolling move_argmax/move_argmin functions (:issue:`8541`, :pull:`8552`). By `Kai MΓΌhlbauer `_. 
- Vendor ``SerializableLock`` from dask and use as default lock for netcdf4 backends (:issue:`8442`, :pull:`8571`). By `Kai MΓΌhlbauer `_. - Add tests and fixes for empty :py:class:`CFTimeIndex`, including broken html repr (:issue:`7298`, :pull:`8600`). By `Mathias Hauser `_. Internal Changes ~~~~~~~~~~~~~~~~ - The implementation of :py:func:`map_blocks` has changed to minimize graph size and duplication of data. This should be a strict improvement even though the graphs are not always embarrassingly parallel any more. Please open an issue if you spot a regression. (:pull:`8412`, :issue:`8409`). By `Deepak Cherian `_. - Remove null values before plotting. (:pull:`8535`). By `Jimmy Westling `_. - Redirect cumulative reduction functions internally through the :py:class:`ChunkManagerEntryPoint`, potentially allowing :py:meth:`~xarray.DataArray.ffill` and :py:meth:`~xarray.DataArray.bfill` to use non-dask chunked array types. (:pull:`8019`) By `Tom Nicholas `_. .. _whats-new.2023.12.0: v2023.12.0 (2023 Dec 08) ------------------------ This release brings new `hypothesis `_ strategies for testing, significantly faster rolling aggregations as well as ``ffill`` and ``bfill`` with ``numbagg``, a new :py:meth:`Dataset.eval` method, and improvements to reading and writing Zarr arrays (including a new ``"a-"`` mode). Thanks to our 16 contributors: Anderson Banihirwe, Ben Mares, Carl Andersson, Deepak Cherian, Doug Latornell, Gregorio L. Trevisan, Illviljan, Jens Hedegaard Nielsen, Justus Magin, Mathias Hauser, Max Jones, Maximilian Roos, Michael Niklas, Patrick Hoefler, Ryan Abernathey, Tom Nicholas New Features ~~~~~~~~~~~~ - Added hypothesis strategies for generating :py:class:`xarray.Variable` objects containing arbitrary data, useful for parametrizing downstream tests. Accessible under :py:mod:`testing.strategies`, and documented in a new page on testing in the User Guide. (:issue:`6911`, :pull:`8404`) By `Tom Nicholas `_. - :py:meth:`rolling` uses `numbagg `_ for most of its computations by default. Numbagg is up to 5x faster than bottleneck where parallelization is possible. Where parallelization isn't possible β€” for example a 1D array β€” it's about the same speed as bottleneck, and 2-5x faster than pandas' default functions. (:pull:`8493`). numbagg is an optional dependency, so requires installing separately. - Use a concise format when plotting datetime arrays. (:pull:`8449`). By `Jimmy Westling `_. - Avoid overwriting unchanged existing coordinate variables when appending with :py:meth:`Dataset.to_zarr` by setting ``mode='a-'``. By `Ryan Abernathey `_ and `Deepak Cherian `_. - :py:meth:`~xarray.DataArray.rank` now operates on dask-backed arrays, assuming the core dim has exactly one chunk. (:pull:`8475`). By `Maximilian Roos `_. - Add a :py:meth:`Dataset.eval` method, similar to the pandas' method of the same name. (:pull:`7163`). This is currently marked as experimental and doesn't yet support the ``numexpr`` engine. - :py:meth:`Dataset.drop_vars` & :py:meth:`DataArray.drop_vars` allow passing a callable, similar to :py:meth:`Dataset.where` & :py:meth:`Dataset.sortby` & others. (:pull:`8511`). By `Maximilian Roos `_. Breaking changes ~~~~~~~~~~~~~~~~ - Explicitly warn when creating xarray objects with repeated dimension names. Such objects will also now raise when :py:meth:`DataArray.get_axis_num` is called, which means many functions will raise. This latter change is technically a breaking change, but whilst allowed, this behaviour was never actually supported! 
(:issue:`3731`, :pull:`8491`) By `Tom Nicholas `_. Deprecations ~~~~~~~~~~~~ - As part of an effort to standardize the API, we're renaming the ``dims`` keyword arg to ``dim`` for the minority of functions which current use ``dims``. This started with :py:func:`xarray.dot` & :py:meth:`DataArray.dot` and we'll gradually roll this out across all functions. The warnings are currently ``PendingDeprecationWarning``, which are silenced by default. We'll convert these to ``DeprecationWarning`` in a future release. By `Maximilian Roos `_. - Raise a ``FutureWarning`` warning that the type of :py:meth:`Dataset.dims` will be changed from a mapping of dimension names to lengths to a set of dimension names. This is to increase consistency with :py:meth:`DataArray.dims`. To access a mapping of dimension names to lengths please use :py:meth:`Dataset.sizes`. The same change also applies to ``DatasetGroupBy.dims``. (:issue:`8496`, :pull:`8500`) By `Tom Nicholas `_. - :py:meth:`Dataset.drop` & :py:meth:`DataArray.drop` are now deprecated, since pending deprecation for several years. :py:meth:`DataArray.drop_sel` & :py:meth:`DataArray.drop_var` replace them for labels & variables respectively. (:pull:`8497`) By `Maximilian Roos `_. Bug fixes ~~~~~~~~~ - Fix dtype inference for ``pd.CategoricalIndex`` when categories are backed by a ``pd.ExtensionDtype`` (:pull:`8481`) - Fix writing a variable that requires transposing when not writing to a region (:pull:`8484`) By `Maximilian Roos `_. - Static typing of ``p0`` and ``bounds`` arguments of :py:func:`xarray.DataArray.curvefit` and :py:func:`xarray.Dataset.curvefit` was changed to ``Mapping`` (:pull:`8502`). By `Michael Niklas `_. - Fix typing of :py:func:`xarray.DataArray.to_netcdf` and :py:func:`xarray.Dataset.to_netcdf` when ``compute`` is evaluated to bool instead of a Literal (:pull:`8268`). By `Jens Hedegaard Nielsen `_. Documentation ~~~~~~~~~~~~~ - Added illustration of updating the time coordinate values of a resampled dataset using time offset arithmetic. This is the recommended technique to replace the use of the deprecated ``loffset`` parameter in ``resample`` (:pull:`8479`). By `Doug Latornell `_. - Improved error message when attempting to get a variable which doesn't exist from a Dataset. (:pull:`8474`) By `Maximilian Roos `_. - Fix default value of ``combine_attrs`` in :py:func:`xarray.combine_by_coords` (:pull:`8471`) By `Gregorio L. Trevisan `_. Internal Changes ~~~~~~~~~~~~~~~~ - :py:meth:`DataArray.bfill` & :py:meth:`DataArray.ffill` now use numbagg `_ by default, which is up to 5x faster where parallelization is possible. (:pull:`8339`) By `Maximilian Roos `_. - Update mypy version to 1.7 (:issue:`8448`, :pull:`8501`). By `Michael Niklas `_. .. _whats-new.2023.11.0: v2023.11.0 (Nov 16, 2023) ------------------------- .. tip:: `This is our 10th year anniversary release! `_ Thank you for your love and support. This release brings the ability to use ``opt_einsum`` for :py:func:`xarray.dot` by default, support for auto-detecting ``region`` when writing partial datasets to Zarr, and the use of h5py drivers with ``h5netcdf``. Thanks to the 19 contributors to this release: Aman Bagrecha, Anderson Banihirwe, Ben Mares, Deepak Cherian, Dimitri Papadopoulos Orfanos, Ezequiel Cimadevilla Alvarez, Illviljan, Justus Magin, Katelyn FitzGerald, Kai Muehlbauer, Martin Durant, Maximilian Roos, Metamess, Sam Levang, Spencer Clark, Tom Nicholas, mgunyho, templiert New Features ~~~~~~~~~~~~ - Use `opt_einsum `_ for :py:func:`xarray.dot` by default if installed. 
By `Deepak Cherian `_. (:issue:`7764`, :pull:`8373`). - Add ``DataArray.dt.total_seconds()`` method to match the Pandas API. (:pull:`8435`). By `Ben Mares `_. - Allow passing ``region="auto"`` in :py:meth:`Dataset.to_zarr` to automatically infer the region to write in the original store. Also implement automatic transpose when dimension order does not match the original store. (:issue:`7702`, :issue:`8421`, :pull:`8434`). By `Sam Levang `_. - Allow the usage of h5py drivers (eg: ros3) via h5netcdf (:pull:`8360`). By `Ezequiel Cimadevilla `_. - Enable VLEN string fill_values, preserve VLEN string dtypes (:issue:`1647`, :issue:`7652`, :issue:`7868`, :pull:`7869`). By `Kai MΓΌhlbauer `_. Breaking changes ~~~~~~~~~~~~~~~~ - drop support for `cdms2 `_. Please use `xcdat `_ instead (:pull:`8441`). By `Justus Magin `_. - Following pandas, :py:meth:`infer_freq` will return ``"Y"``, ``"YS"``, ``"QE"``, ``"ME"``, ``"h"``, ``"min"``, ``"s"``, ``"ms"``, ``"us"``, or ``"ns"`` instead of ``"A"``, ``"AS"``, ``"Q"``, ``"M"``, ``"H"``, ``"T"``, ``"S"``, ``"L"``, ``"U"``, or ``"N"``. This is to be consistent with the deprecation of the latter frequency strings (:issue:`8394`, :pull:`8415`). By `Spencer Clark `_. - Bump minimum tested pint version to ``>=0.22``. By `Deepak Cherian `_. - Minimum supported versions for the following packages have changed: ``h5py >=3.7``, ``h5netcdf>=1.1``. By `Kai MΓΌhlbauer `_. Deprecations ~~~~~~~~~~~~ - The PseudoNetCDF backend has been removed. By `Deepak Cherian `_. - Supplying dimension-ordered sequences to :py:meth:`DataArray.chunk` & :py:meth:`Dataset.chunk` is deprecated in favor of supplying a dictionary of dimensions, or a single ``int`` or ``"auto"`` argument covering all dimensions. Xarray favors using dimensions names rather than positions, and this was one place in the API where dimension positions were used. (:pull:`8341`) By `Maximilian Roos `_. - Following pandas, the frequency strings ``"A"``, ``"AS"``, ``"Q"``, ``"M"``, ``"H"``, ``"T"``, ``"S"``, ``"L"``, ``"U"``, and ``"N"`` are deprecated in favor of ``"Y"``, ``"YS"``, ``"QE"``, ``"ME"``, ``"h"``, ``"min"``, ``"s"``, ``"ms"``, ``"us"``, and ``"ns"``, respectively. These strings are used, for example, in :py:func:`date_range`, :py:func:`cftime_range`, :py:meth:`DataArray.resample`, and :py:meth:`Dataset.resample` among others (:issue:`8394`, :pull:`8415`). By `Spencer Clark `_. - Rename :py:meth:`Dataset.to_array` to :py:meth:`Dataset.to_dataarray` for consistency with :py:meth:`DataArray.to_dataset` & :py:func:`open_dataarray` functions. This is a "soft" deprecation β€” the existing methods work and don't raise any warnings, given the relatively small benefits of the change. By `Maximilian Roos `_. - Finally remove ``keep_attrs`` kwarg from :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample`. These were deprecated a long time ago. By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Port `bug fix from pandas `_ to eliminate the adjustment of resample bin edges in the case that the resampling frequency has units of days and is greater than one day (e.g. ``"2D"``, ``"3D"`` etc.) and the ``closed`` argument is set to ``"right"`` to xarray's implementation of resample for data indexed by a :py:class:`CFTimeIndex` (:pull:`8393`). By `Spencer Clark `_. - Fix to once again support date offset strings as input to the loffset parameter of resample and test this functionality (:pull:`8422`, :issue:`8399`). By `Katelyn FitzGerald `_. 
- Fix a bug where :py:meth:`DataArray.to_dataset` silently drops a variable if a coordinate with the same name already exists (:pull:`8433`, :issue:`7823`). By `AndrΓ‘s GunyhΓ³ `_. - Fix for :py:meth:`DataArray.to_zarr` & :py:meth:`Dataset.to_zarr` to close the created zarr store when passing a path with ``.zip`` extension (:pull:`8425`). By `Carl Andersson `_. Documentation ~~~~~~~~~~~~~ - Small updates to documentation on distributed writes: See :ref:`io.zarr.appending` to Zarr. By `Deepak Cherian `_. .. _whats-new.2023.10.1: v2023.10.1 (19 Oct, 2023) ------------------------- This release updates our minimum numpy version in ``pyproject.toml`` to 1.22, consistent with our documentation below. .. _whats-new.2023.10.0: v2023.10.0 (19 Oct, 2023) ------------------------- This release brings performance enhancements to reading Zarr datasets, the ability to use `numbagg `_ for reductions, an expansion in API for ``rolling_exp``, fixes two regressions with datetime decoding, and many other bugfixes and improvements. Groupby reductions will also use ``numbagg`` if ``flox>=0.8.1`` and ``numbagg`` are both installed. Thanks to our 13 contributors: Anderson Banihirwe, Bart Schilperoort, Deepak Cherian, Illviljan, Kai MΓΌhlbauer, Mathias Hauser, Maximilian Roos, Michael Niklas, Pieter Eendebak, Simon HΓΈxbro Hansen, Spencer Clark, Tom White, olimcc New Features ~~~~~~~~~~~~ - Support high-performance reductions with `numbagg `_. This is enabled by default if ``numbagg`` is installed. By `Deepak Cherian `_. (:pull:`8316`) - Add ``corr``, ``cov``, ``std`` & ``var`` to ``.rolling_exp``. By `Maximilian Roos `_. (:pull:`8307`) - :py:meth:`DataArray.where` & :py:meth:`Dataset.where` accept a callable for the ``other`` parameter, passing the object as the only argument. Previously, this was only valid for the ``cond`` parameter. (:issue:`8255`) By `Maximilian Roos `_. - ``.rolling_exp`` functions can now take a ``min_weight`` parameter, to only output values when there are sufficient recent non-nan values. ``numbagg>=0.3.1`` is required. (:pull:`8285`) By `Maximilian Roos `_. - :py:meth:`DataArray.sortby` & :py:meth:`Dataset.sortby` accept a callable for the ``variables`` parameter, passing the object as the only argument. By `Maximilian Roos `_. - ``.rolling_exp`` functions can now operate on dask-backed arrays, assuming the core dim has exactly one chunk. (:pull:`8284`). By `Maximilian Roos `_. Breaking changes ~~~~~~~~~~~~~~~~ - Made more arguments keyword-only (e.g. ``keep_attrs``, ``skipna``) for many :py:class:`xarray.DataArray` and :py:class:`xarray.Dataset` methods (:pull:`6403`). By `Mathias Hauser `_. - :py:meth:`Dataset.to_zarr` & :py:meth:`DataArray.to_zarr` require keyword arguments after the initial 7 positional arguments. By `Maximilian Roos `_. Deprecations ~~~~~~~~~~~~ - Rename :py:meth:`Dataset.reset_encoding` & :py:meth:`DataArray.reset_encoding` to :py:meth:`Dataset.drop_encoding` & :py:meth:`DataArray.drop_encoding` for consistency with other ``drop`` & ``reset`` methods β€” ``drop`` generally removes something, while ``reset`` generally resets to some default or standard value. (:pull:`8287`, :issue:`8259`) By `Maximilian Roos `_. Bug fixes ~~~~~~~~~ - :py:meth:`DataArray.rename` & :py:meth:`Dataset.rename` would emit a warning when the operation was a no-op. (:issue:`8266`) By `Simon Hansen `_. - Fixed a regression introduced in the previous release checking time-like units when encoding/decoding masked data (:issue:`8269`, :pull:`8277`). By `Kai MΓΌhlbauer `_. 
- Fix datetime encoding precision loss regression introduced in the previous release for datetimes encoded with units requiring floating point values, and a reference date not equal to the first value of the datetime array (:issue:`8271`, :pull:`8272`). By `Spencer Clark `_. - Fix excess metadata requests when using a Zarr store. Prior to this, metadata was re-read every time data was retrieved from the array, now metadata is retrieved only once when they array is initialized. (:issue:`8290`, :pull:`8297`). By `Oliver McCormack `_. - Fix to_zarr ending in a ReadOnlyError when consolidated metadata was used and the write_empty_chunks was provided. (:issue:`8323`, :pull:`8326`) By `Matthijs Amesz `_. Documentation ~~~~~~~~~~~~~ - Added page on the interoperability of xarray objects. (:pull:`7992`) By `Tom Nicholas `_. - Added xarray-regrid to the list of xarray related projects (:pull:`8272`). By `Bart Schilperoort `_. Internal Changes ~~~~~~~~~~~~~~~~ - More improvements to support the Python `array API standard `_ by using duck array ops in more places in the codebase. (:pull:`8267`) By `Tom White `_. .. _whats-new.2023.09.0: v2023.09.0 (Sep 26, 2023) ------------------------- This release continues work on the new :py:class:`xarray.Coordinates` object, allows to provide ``preferred_chunks`` when reading from netcdf files, enables :py:func:`xarray.apply_ufunc` to handle missing core dimensions and fixes several bugs. Thanks to the 24 contributors to this release: Alexander Fischer, Amrest Chinkamol, Benoit Bovy, Darsh Ranjan, Deepak Cherian, Gianfranco Costamagna, Gregorio L. Trevisan, Illviljan, Joe Hamman, JR, Justus Magin, Kai MΓΌhlbauer, Kian-Meng Ang, Kyle Sunden, Martin Raspaud, Mathias Hauser, Mattia Almansi, Maximilian Roos, AndrΓ‘s GunyhΓ³, Michael Niklas, Richard Kleijn, Riulinchen, Tom Nicholas and Wiktor KraΕ›nicki. We welcome the following new contributors to Xarray!: Alexander Fischer, Amrest Chinkamol, Darsh Ranjan, Gianfranco Costamagna, Gregorio L. Trevisan, Kian-Meng Ang, Riulinchen and Wiktor KraΕ›nicki. New Features ~~~~~~~~~~~~ - Added the :py:meth:`Coordinates.assign` method that can be used to combine different collections of coordinates prior to assign them to a Dataset or DataArray (:pull:`8102`) at once. By `BenoΓt Bovy `_. - Provide ``preferred_chunks`` for data read from netcdf files (:issue:`1440`, :pull:`7948`). By `Martin Raspaud `_. - Added ``on_missing_core_dims`` to :py:meth:`apply_ufunc` to allow for copying or dropping a :py:class:`Dataset`'s variables with missing core dimensions (:pull:`8138`). By `Maximilian Roos `_. Breaking changes ~~~~~~~~~~~~~~~~ - The :py:class:`Coordinates` constructor now creates a (pandas) index by default for each dimension coordinate. To keep the previous behavior (no index created), pass an empty dictionary to ``indexes``. The constructor now also extracts and add the indexes from another :py:class:`Coordinates` object passed via ``coords`` (:pull:`8107`). By `BenoΓt Bovy `_. - Static typing of ``xlim`` and ``ylim`` arguments in plotting functions now must be ``tuple[float, float]`` to align with matplotlib requirements. (:issue:`7802`, :pull:`8030`). By `Michael Niklas `_. Deprecations ~~~~~~~~~~~~ - Deprecate passing a :py:class:`pandas.MultiIndex` object directly to the :py:class:`Dataset` and :py:class:`DataArray` constructors as well as to :py:meth:`Dataset.assign` and :py:meth:`Dataset.assign_coords`. 
A new Xarray :py:class:`Coordinates` object has to be created first using :py:meth:`Coordinates.from_pandas_multiindex` (:pull:`8094`). By `BenoΓt Bovy `_. Bug fixes ~~~~~~~~~ - Improved static typing of reduction methods (:pull:`6746`). By `Richard Kleijn `_. - Fix bug where empty attrs would generate inconsistent tokens (:issue:`6970`, :pull:`8101`). By `Mattia Almansi `_. - Improved handling of multi-coordinate indexes when updating coordinates, including bug fixes (and improved warnings for deprecated features) for pandas multi-indexes (:pull:`8094`). By `BenoΓt Bovy `_. - Fixed a bug in :py:func:`merge` with ``compat='minimal'`` where the coordinate names were not updated properly internally (:issue:`7405`, :issue:`7588`, :pull:`8104`). By `BenoΓt Bovy `_. - Fix bug where :py:class:`DataArray` instances on the right-hand side of :py:meth:`DataArray.__setitem__` lose dimension names (:issue:`7030`, :pull:`8067`). By `Darsh Ranjan `_. - Return ``float64`` in presence of ``NaT`` in :py:class:`~core.accessor_dt.DatetimeAccessor` and special case ``NaT`` handling in :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar` (:issue:`7928`, :pull:`8084`). By `Kai MΓΌhlbauer `_. - Fix :py:meth:`~computation.rolling.DatasetRolling.construct` with stride on Datasets without indexes. (:issue:`7021`, :pull:`7578`). By `Amrest Chinkamol `_ and `Michael Niklas `_. - Calling plot with kwargs ``col``, ``row`` or ``hue`` no longer squeezes dimensions passed via these arguments (:issue:`7552`, :pull:`8174`). By `Wiktor KraΕ›nicki `_. - Fixed a bug where casting from ``float`` to ``int64`` (undefined for ``NaN``) led to varying issues (:issue:`7817`, :issue:`7942`, :issue:`7790`, :issue:`6191`, :issue:`7096`, :issue:`1064`, :pull:`7827`). By `Kai MΓΌhlbauer `_. - Fixed a bug where inaccurate ``coordinates`` silently failed to decode variable (:issue:`1809`, :pull:`8195`). By `Kai MΓΌhlbauer `_ - ``.rolling_exp`` functions no longer mistakenly lose non-dimensioned coords (:issue:`6528`, :pull:`8114`). By `Maximilian Roos `_. - In the event that user-provided datetime64/timedelta64 units and integer dtype encoding parameters conflict with each other, override the units to preserve an integer dtype for most faithful serialization to disk (:issue:`1064`, :pull:`8201`). By `Kai MΓΌhlbauer `_. - Static typing of dunder ops methods (like :py:meth:`DataArray.__eq__`) has been fixed. Remaining issues are upstream problems (:issue:`7780`, :pull:`8204`). By `Michael Niklas `_. - Fix type annotation for ``center`` argument of plotting methods (like :py:meth:`xarray.plot.dataarray_plot.pcolormesh`) (:pull:`8261`). By `Pieter Eendebak `_. Documentation ~~~~~~~~~~~~~ - Make documentation of :py:meth:`DataArray.where` clearer (:issue:`7767`, :pull:`7955`). By `Riulinchen `_. Internal Changes ~~~~~~~~~~~~~~~~ - Many error messages related to invalid dimensions or coordinates now always show the list of valid dims/coords (:pull:`8079`). By `AndrΓ‘s GunyhΓ³ `_. - Refactor of encoding and decoding times/timedeltas to preserve nanosecond resolution in arrays that contain missing values (:pull:`7827`). By `Kai MΓΌhlbauer `_. - Transition ``.rolling_exp`` functions to use ``.apply_ufunc`` internally rather than ``.reduce``, as the start of a broader effort to move non-reducing functions away from ```.reduce``, (:pull:`8114`). By `Maximilian Roos `_. - Test range of fill_value's in test_interpolate_pd_compat (:issue:`8146`, :pull:`8189`). By `Kai MΓΌhlbauer `_. .. 
.. _whats-new.2023.08.0:

v2023.08.0 (Aug 18, 2023)
-------------------------

This release brings changes to minimum dependencies, allows reading of datasets where a
dimension name is associated with a multidimensional variable (e.g. finite volume ocean
model output), and introduces a new :py:class:`xarray.Coordinates` object.

Thanks to the 16 contributors to this release: Anderson Banihirwe, Articoking, Benoit Bovy,
Deepak Cherian, Harshitha, Ian Carroll, Joe Hamman, Justus Magin, Peter Hill, Rachel Wegener,
Riley Kuttruff, Thomas Nicholas, Tom Nicholas, ilgast, quantsnus, vallirep

Announcements
~~~~~~~~~~~~~

The :py:class:`xarray.Variable` class is being refactored out to a new project titled
'namedarray'. See the `design doc `_ for more details. Reach out to us on this
`discussion topic <https://github.com/pydata/xarray/discussions/8080>`_ if you have any
thoughts.

New Features
~~~~~~~~~~~~

- :py:class:`Coordinates` can now be constructed independently of any Dataset or
  DataArray (it is also returned by the :py:attr:`Dataset.coords` and
  :py:attr:`DataArray.coords` properties). ``Coordinates`` objects are useful for
  passing both coordinate variables and indexes to new Dataset / DataArray objects,
  e.g., via their constructor or via :py:meth:`Dataset.assign_coords`. We may also
  wrap coordinate variables in a ``Coordinates`` object in order to skip the automatic
  creation of (pandas) indexes for dimension coordinates.
  The :py:class:`Coordinates.from_pandas_multiindex` constructor may be used to create
  coordinates directly from a :py:class:`pandas.MultiIndex` object (it is preferred
  over passing it directly as coordinate data, which may be deprecated soon).
  Like Dataset and DataArray objects, ``Coordinates`` objects may now be used in
  :py:func:`align` and :py:func:`merge`.
  (:issue:`6392`, :pull:`7368`).
  By `Benoît Bovy `_.
- Visually group together coordinates with the same indexes in the index section of the
  text repr (:pull:`7225`). By `Justus Magin `_.
- Allow creating Xarray objects where a multidimensional variable shares its name with
  a dimension. Examples include output from finite volume models like FVCOM.
  (:issue:`2233`, :pull:`7989`)
  By `Deepak Cherian `_ and `Benoit Bovy `_.
- When outputting :py:class:`Dataset` objects as Zarr via :py:meth:`Dataset.to_zarr`,
  users can now specify that chunks that will contain no valid data will not be written.
  Originally, this could be done by specifying ``"write_empty_chunks": True`` in the
  ``encoding`` parameter; however, this setting would not carry over when appending new
  data to an existing dataset. (:issue:`8009`) Requires ``zarr>=2.11``.

Breaking changes
~~~~~~~~~~~~~~~~

- The minimum versions of some dependencies were changed (:pull:`8022`):

  ===================== ========= ========
  Package               Old       New
  ===================== ========= ========
  boto3                 1.20      1.24
  cftime                1.5       1.6
  dask-core             2022.1    2022.7
  distributed           2022.1    2022.7
  h5netcdf              0.13      1.0
  iris                  3.1       3.2
  lxml                  4.7       4.9
  netcdf4               1.5.7     1.6.0
  numpy                 1.21      1.22
  pint                  0.18      0.19
  pydap                 3.2       3.3
  rasterio              1.2       1.3
  scipy                 1.7       1.8
  toolz                 0.11      0.12
  typing_extensions     4.0       4.3
  zarr                  2.10      2.12
  numbagg               0.1       0.2.1
  ===================== ========= ========

Documentation
~~~~~~~~~~~~~

- Added page on the internal design of xarray objects.
  (:pull:`7991`) By `Tom Nicholas `_.
- Added examples to docstrings of :py:meth:`Dataset.assign_attrs`, :py:meth:`Dataset.broadcast_equals`,
  :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`,
  :py:meth:`Dataset.drop_vars` (:issue:`6793`, :pull:`7937`)
  By `Harshitha `_.
- Add docstrings for the :py:class:`Index` base class and add some documentation on how to
  create custom, Xarray-compatible indexes (:pull:`6975`)
  By `Benoît Bovy `_.
- Added a page clarifying the role of Xarray core team members.
  (:pull:`7999`) By `Tom Nicholas `_.
- Fixed broken links in "See also" section of :py:meth:`Dataset.count` (:issue:`8055`, :pull:`8057`)
  By `Articoking `_.
- Extended the glossary by adding terms Aligning, Broadcasting, Merging, Concatenating,
  Combining, lazy, labeled, serialization, indexing (:issue:`3355`, :pull:`7732`)
  By `Harshitha `_.

Internal Changes
~~~~~~~~~~~~~~~~

- :py:func:`as_variable` now consistently includes the variable name in any exceptions
  raised. (:pull:`7995`). By `Peter Hill `_.
- :py:func:`encode_dataset_coordinates` now sorts coordinates automatically assigned to
  ``coordinates`` attributes during serialization (:issue:`8026`, :pull:`8034`).
  By `Ian Carroll `_.

.. _whats-new.2023.07.0:

v2023.07.0 (July 17, 2023)
--------------------------

This release brings improvements to the documentation on wrapping numpy-like arrays,
improved docstrings, and bug fixes.

Deprecations
~~~~~~~~~~~~

- ``hue_style`` is being deprecated for scatter plots. (:issue:`7907`, :pull:`7925`).
  By `Jimmy Westling `_.

Bug fixes
~~~~~~~~~

- Ensure no forward slashes in variable and dimension names for HDF5-based engines.
  (:issue:`7943`, :pull:`7953`) By `Kai Mühlbauer `_.

Documentation
~~~~~~~~~~~~~

- Added examples to docstrings of :py:meth:`Dataset.assign_attrs`, :py:meth:`Dataset.broadcast_equals`,
  :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`,
  :py:meth:`Dataset.drop_vars` (:issue:`6793`, :pull:`7937`)
  By `Harshitha `_.
- Added page on wrapping chunked numpy-like arrays as alternatives to dask arrays.
  (:pull:`7951`) By `Tom Nicholas `_.
- Expanded the page on wrapping numpy-like "duck" arrays.
  (:pull:`7911`) By `Tom Nicholas `_.
- Added examples to docstrings of :py:meth:`Dataset.isel`, :py:meth:`Dataset.reduce`,
  :py:meth:`Dataset.argmin`, :py:meth:`Dataset.argmax` (:issue:`6793`, :pull:`7881`)
  By `Harshitha `_.

Internal Changes
~~~~~~~~~~~~~~~~

- Allow chunked non-dask arrays (i.e. Cubed arrays) in groupby operations. (:pull:`7941`)
  By `Tom Nicholas `_.

.. _whats-new.2023.06.0:

v2023.06.0 (June 21, 2023)
--------------------------

This release adds features to ``curvefit``, improves the performance of concatenation, and
fixes various bugs.

Thanks to our 13 contributors to this release: Anderson Banihirwe, Deepak Cherian,
dependabot[bot], Illviljan, Juniper Tyree, Justus Magin, Martin Fleischmann, Mattia Almansi,
mgunyho, Rutger van Haasteren, Thomas Nicholas, Tom Nicholas, Tom White.

New Features
~~~~~~~~~~~~

- Added support for multidimensional initial guess and bounds in :py:meth:`DataArray.curvefit`
  (:issue:`7768`, :pull:`7821`).
  By `András Gunyhó `_.
- Add an ``errors`` option to :py:meth:`Dataset.curvefit` that allows returning NaN for the
  parameters and covariances of failed fits, rather than failing the whole series of fits
  (:issue:`6317`, :pull:`7891`); see the usage sketch below.
  By `Dominik Stańczak `_ and `András Gunyhó `_.

Breaking changes
~~~~~~~~~~~~~~~~

Deprecations
~~~~~~~~~~~~

- Deprecate the `cdms2 `_ conversion methods (:pull:`7876`)
  By `Justus Magin `_.
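For the new ``curvefit`` ``errors`` option above, a minimal usage sketch (illustrative only; it
assumes SciPy is installed for the underlying least-squares fit, and the ``exponential`` model
and toy data are made up for the example)::

    import numpy as np
    import xarray as xr


    def exponential(t, a, tau):
        return a * np.exp(-t / tau)


    t = np.linspace(0, 10, 50)
    da = xr.DataArray(
        3.0 * np.exp(-t / 2.0) + 0.1 * np.random.randn(50),
        dims="t",
        coords={"t": t},
    )

    # errors="ignore" returns NaN parameters and covariances for fits that
    # fail to converge, instead of raising and aborting the whole operation.
    fit = da.curvefit("t", exponential, errors="ignore")
    print(fit.curvefit_coefficients)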
Performance ~~~~~~~~~~~ - Improve concatenation performance (:issue:`7833`, :pull:`7824`). By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Fix bug where weighted ``polyfit`` were changing the original object (:issue:`5644`, :pull:`7900`). By `Mattia Almansi `_. - Don't call ``CachingFileManager.__del__`` on interpreter shutdown (:issue:`7814`, :pull:`7880`). By `Justus Magin `_. - Preserve vlen dtype for empty string arrays (:issue:`7328`, :pull:`7862`). By `Tom White `_ and `Kai MΓΌhlbauer `_. - Ensure dtype of reindex result matches dtype of the original DataArray (:issue:`7299`, :pull:`7917`) By `Anderson Banihirwe `_. - Fix bug where a zero-length zarr ``chunk_store`` was ignored as if it was ``None`` (:pull:`7923`) By `Juniper Tyree `_. Documentation ~~~~~~~~~~~~~ Internal Changes ~~~~~~~~~~~~~~~~ - Minor improvements to support of the python `array api standard `_, internally using the function ``xp.astype()`` instead of the method ``arr.astype()``, as the latter is not in the standard. (:pull:`7847`) By `Tom Nicholas `_. - Xarray now uploads nightly wheels to https://pypi.anaconda.org/scientific-python-nightly-wheels/simple/ (:issue:`7863`, :pull:`7865`). By `Martin Fleischmann `_. - Stop uploading development wheels to TestPyPI (:pull:`7889`) By `Justus Magin `_. - Added an exception catch for ``AttributeError`` along with ``ImportError`` when duck typing the dynamic imports in pycompat.py. This catches some name collisions between packages. (:issue:`7870`, :pull:`7874`) .. _whats-new.2023.05.0: v2023.05.0 (May 18, 2023) ------------------------- This release adds some new methods and operators, updates our deprecation policy for python versions, fixes some bugs with groupby, and introduces experimental support for alternative chunked parallel array computation backends via a new plugin system! **Note:** If you are using a locally-installed development version of xarray then pulling the changes from this release may require you to re-install. This avoids an error where xarray cannot detect dask via the new entrypoints system introduced in :pull:`7019`. See :issue:`7856` for details. Thanks to our 14 contributors: Alan Brammer, crusaderky, David Stansby, dcherian, Deeksha, Deepak Cherian, Illviljan, James McCreight, Joe Hamman, Justus Magin, Kyle Sunden, Max Hollmann, mgunyho, and Tom Nicholas New Features ~~~~~~~~~~~~ - Added new method :py:meth:`DataArray.to_dask_dataframe`, convert a dataarray into a dask dataframe (:issue:`7409`). By `Deeksha `_. - Add support for lshift and rshift binary operators (``<<``, ``>>``) on :py:class:`xr.DataArray` of type :py:class:`int` (:issue:`7727` , :pull:`7741`). By `Alan Brammer `_. - Keyword argument ``data='array'`` to both :py:meth:`xarray.Dataset.to_dict` and :py:meth:`xarray.DataArray.to_dict` will now return data as the underlying array type. Python lists are returned for ``data='list'`` or ``data=True``. Supplying ``data=False`` only returns the schema without data. ``encoding=True`` returns the encoding dictionary for the underlying variable also. (:issue:`1599`, :pull:`7739`) . By `James McCreight `_. Breaking changes ~~~~~~~~~~~~~~~~ - adjust the deprecation policy for python to once again align with NEP-29 (:issue:`7765`, :pull:`7793`) By `Justus Magin `_. Performance ~~~~~~~~~~~ - Optimize ``.dt `` accessor performance with ``CFTimeIndex``. (:pull:`7796`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix ``as_compatible_data`` for masked float arrays, now always creates a copy when mask is present (:issue:`2377`, :pull:`7788`). 
By `Max Hollmann `_. - Fix groupby binary ops when grouped array is subset relative to other. (:issue:`7797`). By `Deepak Cherian `_. - Fix groupby sum, prod for all-NaN groups with ``flox``. (:issue:`7808`). By `Deepak Cherian `_. Internal Changes ~~~~~~~~~~~~~~~~ - Experimental support for wrapping chunked array libraries other than dask. A new ABC is defined - :py:class:`xr.namedarray.parallelcompat.ChunkManagerEntrypoint` - which can be subclassed and then registered by alternative chunked array implementations. (:issue:`6807`, :pull:`7019`) By `Tom Nicholas `_. .. _whats-new.2023.04.2: v2023.04.2 (April 20, 2023) --------------------------- This is a patch release to fix a bug with binning (:issue:`7766`) Bug fixes ~~~~~~~~~ - Fix binning when ``labels`` is specified. (:issue:`7766`). By `Deepak Cherian `_. Documentation ~~~~~~~~~~~~~ - Added examples to docstrings for :py:meth:`xarray.core.accessor_str.StringAccessor` methods. (:pull:`7669`) . By `Mary Gathoni `_. .. _whats-new.2023.04.1: v2023.04.1 (April 18, 2023) --------------------------- This is a patch release to fix a bug with binning (:issue:`7759`) Bug fixes ~~~~~~~~~ - Fix binning by unsorted arrays. (:issue:`7759`) .. _whats-new.2023.04.0: v2023.04.0 (April 14, 2023) --------------------------- This release includes support for pandas v2, allows refreshing of backend engines in a session, and removes deprecated backends for ``rasterio`` and ``cfgrib``. Thanks to our 19 contributors: Chinemere, Tom Coleman, Deepak Cherian, Harshitha, Illviljan, Jessica Scheick, Joe Hamman, Justus Magin, Kai MΓΌhlbauer, Kwonil-Kim, Mary Gathoni, Michael Niklas, Pierre, Scott Henderson, Shreyal Gupta, Spencer Clark, mccloskey, nishtha981, veenstrajelmer We welcome the following new contributors to Xarray!: Mary Gathoni, Harshitha, veenstrajelmer, Chinemere, nishtha981, Shreyal Gupta, Kwonil-Kim, mccloskey. New Features ~~~~~~~~~~~~ - New methods to reset an objects encoding (:py:meth:`Dataset.reset_encoding`, :py:meth:`DataArray.reset_encoding`). (:issue:`7686`, :pull:`7689`). By `Joe Hamman `_. - Allow refreshing backend engines with :py:meth:`xarray.backends.refresh_engines` (:issue:`7478`, :pull:`7523`). By `Michael Niklas `_. - Added ability to save ``DataArray`` objects directly to Zarr using :py:meth:`~xarray.DataArray.to_zarr`. (:issue:`7692`, :pull:`7693`) . By `Joe Hamman `_. Breaking changes ~~~~~~~~~~~~~~~~ - Remove deprecated rasterio backend in favor of rioxarray (:pull:`7392`). By `Scott Henderson `_. Deprecations ~~~~~~~~~~~~ Performance ~~~~~~~~~~~ - Optimize alignment with ``join="exact", copy=False`` by avoiding copies. (:pull:`7736`) By `Deepak Cherian `_. - Avoid unnecessary copies of ``CFTimeIndex``. (:pull:`7735`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Fix :py:meth:`xr.polyval` with non-system standard integer coeffs (:pull:`7619`). By `Shreyal Gupta `_ and `Michael Niklas `_. - Improve error message when trying to open a file which you do not have permission to read (:issue:`6523`, :pull:`7629`). By `Thomas Coleman `_. - Proper plotting when passing :py:class:`~matplotlib.colors.BoundaryNorm` type argument in :py:meth:`DataArray.plot`. (:issue:`4061`, :issue:`7014`,:pull:`7553`) By `Jelmer Veenstra `_. - Ensure the formatting of time encoding reference dates outside the range of nanosecond-precision datetimes remains the same under pandas version 2.0.0 (:issue:`7420`, :pull:`7441`). By `Justus Magin `_ and `Spencer Clark `_. 
- Various ``dtype`` related fixes needed to support ``pandas>=2.0`` (:pull:`7724`) By `Justus Magin `_. - Preserve boolean dtype within encoding (:issue:`7652`, :pull:`7720`). By `Kai MΓΌhlbauer `_ Documentation ~~~~~~~~~~~~~ - Update FAQ page on how do I open format X file as an xarray dataset? (:issue:`1285`, :pull:`7638`) using :py:func:`~xarray.open_dataset` By `Harshitha `_ , `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Don't assume that arrays read from disk will be Numpy arrays. This is a step toward enabling reads from a Zarr store using the `Kvikio `_ or `TensorStore `_ libraries. (:pull:`6874`). By `Deepak Cherian `_. - Remove internal support for reading GRIB files through the ``cfgrib`` backend. ``cfgrib`` now uses the external backend interface, so no existing code should break. By `Deepak Cherian `_. - Implement CF coding functions in ``VariableCoders`` (:pull:`7719`). By `Kai MΓΌhlbauer `_ - Added a config.yml file with messages for the welcome bot when a Github user creates their first ever issue or pull request or has their first PR merged. (:issue:`7685`, :pull:`7685`) By `Nishtha P `_. - Ensure that only nanosecond-precision :py:class:`pd.Timestamp` objects continue to be used internally under pandas version 2.0.0. This is mainly to ease the transition to this latest version of pandas. It should be relaxed when addressing :issue:`7493`. By `Spencer Clark `_ (:issue:`7707`, :pull:`7731`). .. _whats-new.2023.03.0: v2023.03.0 (March 22, 2023) --------------------------- This release brings many bug fixes, and some new features. The maximum pandas version is pinned to ``<2`` until we can support the new pandas datetime types. Thanks to our 19 contributors: Abel Aoun, Alex Goodman, Deepak Cherian, Illviljan, Jody Klymak, Joe Hamman, Justus Magin, Mary Gathoni, Mathias Hauser, Mattia Almansi, Mick, Oriol Abril-Pla, Patrick Hoefler, Paul Ockenfuß, Pierre, Shreyal Gupta, Spencer Clark, Tom Nicholas, Tom Vo New Features ~~~~~~~~~~~~ - Fix :py:meth:`xr.cov` and :py:meth:`xr.corr` now support complex valued arrays (:issue:`7340`, :pull:`7392`). By `Michael Niklas `_. - Allow indexing along unindexed dimensions with dask arrays (:issue:`2511`, :issue:`4276`, :issue:`4663`, :pull:`5873`). By `Abel Aoun `_ and `Deepak Cherian `_. - Support dask arrays in ``first`` and ``last`` reductions. By `Deepak Cherian `_. - Improved performance in ``open_dataset`` for datasets with large object arrays (:issue:`7484`, :pull:`7494`). By `Alex Goodman `_ and `Deepak Cherian `_. Breaking changes ~~~~~~~~~~~~~~~~ Deprecations ~~~~~~~~~~~~ - Following pandas, the ``base`` and ``loffset`` parameters of :py:meth:`xr.DataArray.resample` and :py:meth:`xr.Dataset.resample` have been deprecated and will be removed in a future version of xarray. Using the ``origin`` or ``offset`` parameters is recommended as a replacement for using the ``base`` parameter and using time offset arithmetic is recommended as a replacement for using the ``loffset`` parameter (:pull:`8459`). By `Spencer Clark `_. Bug fixes ~~~~~~~~~ - Improve error message when using in :py:meth:`Dataset.drop_vars` to state which variables can't be dropped. (:pull:`7518`) By `Tom Nicholas `_. - Require to explicitly defining optional dimensions such as hue and markersize for scatter plots. (:issue:`7314`, :pull:`7277`). By `Jimmy Westling `_. - Fix matplotlib raising a UserWarning when plotting a scatter plot with an unfilled marker (:issue:`7313`, :pull:`7318`). By `Jimmy Westling `_. 
- Fix issue with ``max_gap`` in ``interpolate_na``, when applied to multidimensional arrays.
  (:issue:`7597`, :pull:`7598`).
  By `Paul Ockenfuß `_.
- Fix :py:meth:`DataArray.plot.pcolormesh` which now works if one of the coordinates has str dtype
  (:issue:`6775`, :pull:`7612`).
  By `Michael Niklas `_.

Documentation
~~~~~~~~~~~~~

- Clarify language in contributor's guide (:issue:`7495`, :pull:`7595`)
  By `Tom Nicholas `_.

Internal Changes
~~~~~~~~~~~~~~~~

- Pin pandas to ``<2``. By `Deepak Cherian `_.

.. _whats-new.2023.02.0:

v2023.02.0 (Feb 7, 2023)
------------------------

This release brings a major upgrade to :py:func:`xarray.concat`, many bug fixes,
and a bump in supported dependency versions.

Thanks to our 11 contributors: Aron Gergely, Deepak Cherian, Illviljan, James Bourbeau,
Joe Hamman, Justus Magin, Hauke Schulz, Kai Mühlbauer, Ken Mankoff, Spencer Clark, Tom Nicholas.

Breaking changes
~~~~~~~~~~~~~~~~

- Support for ``python 3.8`` has been dropped and the minimum versions of some
  dependencies were changed (:pull:`7461`):

  ===================== ========= ========
  Package               Old       New
  ===================== ========= ========
  python                3.8       3.9
  numpy                 1.20      1.21
  pandas                1.3       1.4
  dask                  2021.11   2022.1
  distributed           2021.11   2022.1
  h5netcdf              0.11      0.13
  lxml                  4.6       4.7
  numba                 0.54      0.55
  ===================== ========= ========

Deprecations
~~~~~~~~~~~~

- Following pandas, the ``closed`` parameters of :py:func:`cftime_range` and
  :py:func:`date_range` are deprecated in favor of the ``inclusive`` parameters,
  and will be removed in a future version of xarray (:issue:`6985`, :pull:`7373`).
  By `Spencer Clark `_.

Bug fixes
~~~~~~~~~

- :py:func:`xarray.concat` can now concatenate variables present in some datasets but
  not others (:issue:`508`, :pull:`7400`).
  By `Kai Mühlbauer `_ and `Scott Chamberlin `_.
- Handle ``keep_attrs`` option in binary operators of :py:meth:`Dataset` (:issue:`7390`, :pull:`7391`).
  By `Aron Gergely `_.
- Improve error message when using dask in :py:func:`apply_ufunc` with ``output_sizes`` not supplied.
  (:pull:`7509`)
  By `Tom Nicholas `_.
- :py:func:`xarray.Dataset.to_zarr` now drops variable encodings that have been added by xarray
  while reading a dataset. (:issue:`7129`, :pull:`7500`).
  By `Hauke Schulz `_.

Documentation
~~~~~~~~~~~~~

- Mention the `flox package `_ in GroupBy documentation and docstrings.
  By `Deepak Cherian `_.

.. _whats-new.2023.01.0:

v2023.01.0 (Jan 17, 2023)
-------------------------

This release includes a number of bug fixes.

Thanks to the 14 contributors to this release: Aron Gergely, Benoit Bovy, Deepak Cherian,
Ian Carroll, Illviljan, Joe Hamman, Justus Magin, Mark Harfouche, Matthew Roeschke,
Paige Martin, Pierre, Sam Levang, Tom White, stefank0.

Breaking changes
~~~~~~~~~~~~~~~~

- :py:meth:`CFTimeIndex.get_loc` has removed the ``method`` and ``tolerance`` keyword arguments.
  Use ``.get_indexer([key], method=..., tolerance=...)`` instead (:pull:`7361`).
  By `Matthew Roeschke `_.

Bug fixes
~~~~~~~~~

- Avoid in-memory broadcasting when converting to a dask dataframe
  using ``.to_dask_dataframe()`` (:issue:`6811`, :pull:`7472`).
  By `Jimmy Westling `_.
- Accessing the property ``.nbytes`` of a DataArray, or Variable no longer
  accidentally triggers loading the variable into memory.
- Allow numpy-only objects in :py:func:`where` when ``keep_attrs=True`` (:issue:`7362`, :pull:`7364`).
  By `Sam Levang `_.
- Add a ``keep_attrs`` parameter to :py:meth:`Dataset.pad`, :py:meth:`DataArray.pad`, and
  :py:meth:`Variable.pad` (:pull:`7267`).
  By `Justus Magin `_.
- Fixed performance regression in alignment between indexed and non-indexed objects of the same shape (:pull:`7382`). By `BenoΓt Bovy `_. - Preserve original dtype on accessing MultiIndex levels (:issue:`7250`, :pull:`7393`). By `Ian Carroll `_. Internal Changes ~~~~~~~~~~~~~~~~ - Add the pre-commit hook ``absolufy-imports`` to convert relative xarray imports to absolute imports (:pull:`7204`, :pull:`7370`). By `Jimmy Westling `_. .. _whats-new.2022.12.0: v2022.12.0 (2022 Dec 2) ----------------------- This release includes a number of bug fixes and experimental support for Zarr V3. Thanks to the 16 contributors to this release: Deepak Cherian, Francesco Zanetta, Gregory Lee, Illviljan, Joe Hamman, Justus Magin, Luke Conibear, Mark Harfouche, Mathias Hauser, Mick, Mike Taves, Sam Levang, Spencer Clark, Tom Nicholas, Wei Ji, templiert New Features ~~~~~~~~~~~~ - Enable using ``offset`` and ``origin`` arguments in :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` (:issue:`7266`, :pull:`7284`). By `Spencer Clark `_. - Add experimental support for Zarr's in-progress V3 specification. (:pull:`6475`). By `Gregory Lee `_ and `Joe Hamman `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed (:pull:`7300`): ========================== ========= ======== Package Old New ========================== ========= ======== boto 1.18 1.20 cartopy 0.19 0.20 distributed 2021.09 2021.11 dask 2021.09 2021.11 h5py 3.1 3.6 hdf5 1.10 1.12 matplotlib-base 3.4 3.5 nc-time-axis 1.3 1.4 netcdf4 1.5.3 1.5.7 packaging 20.3 21.3 pint 0.17 0.18 pseudonetcdf 3.1 3.2 typing_extensions 3.10 4.0 ========================== ========= ======== Deprecations ~~~~~~~~~~~~ - The PyNIO backend has been deprecated (:issue:`4491`, :pull:`7301`). By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - Fix handling of coordinate attributes in :py:func:`where`. (:issue:`7220`, :pull:`7229`) By `Sam Levang `_. - Import ``nc_time_axis`` when needed (:issue:`7275`, :pull:`7276`). By `Michael Niklas `_. - Fix static typing of :py:meth:`xr.polyval` (:issue:`7312`, :pull:`7315`). By `Michael Niklas `_. - Fix multiple reads on fsspec S3 files by resetting file pointer to 0 when reading file streams (:issue:`6813`, :pull:`7304`). By `David Hoese `_ and `Wei Ji Leong `_. - Fix :py:meth:`Dataset.assign_coords` resetting all dimension coordinates to default (pandas) index (:issue:`7346`, :pull:`7347`). By `BenoΓt Bovy `_. Documentation ~~~~~~~~~~~~~ - Add example of reading and writing individual groups to a single netCDF file to I/O docs page. (:pull:`7338`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ .. _whats-new.2022.11.0: v2022.11.0 (Nov 4, 2022) ------------------------ This release brings a number of bugfixes and documentation improvements. Both text and HTML reprs now have a new "Indexes" section, which we expect will help with development of new Index objects. This release also features more support for the Python Array API. Many thanks to the 16 contributors to this release: Daniel Goman, Deepak Cherian, Illviljan, Jessica Scheick, Justus Magin, Mark Harfouche, Maximilian Roos, Mick, Patrick Naylor, Pierre, Spencer Clark, Stephan Hoyer, Tom Nicholas, Tom White New Features ~~~~~~~~~~~~ - Add static typing to plot accessors (:issue:`6949`, :pull:`7052`). By `Michael Niklas `_. - Display the indexes in a new section of the text and HTML reprs (:pull:`6795`, :pull:`7183`, :pull:`7185`) By `Justus Magin `_ and `BenoΓt Bovy `_. 
- Added methods :py:meth:`DataArrayGroupBy.cumprod` and :py:meth:`DatasetGroupBy.cumprod`. (:pull:`5816`) By `Patrick Naylor `_ Breaking changes ~~~~~~~~~~~~~~~~ - ``repr(ds)`` may not show the same result because it doesn't load small, lazy data anymore. Use ``ds.head().load()`` when wanting to see just a sample of the data. (:issue:`6722`, :pull:`7203`). By `Jimmy Westling `_. - Many arguments of plotmethods have been made keyword-only. - ``xarray.plot.plot`` module renamed to ``xarray.plot.dataarray_plot`` to prevent shadowing of the ``plot`` method. (:issue:`6949`, :pull:`7052`). By `Michael Niklas `_. Deprecations ~~~~~~~~~~~~ - Positional arguments for all plot methods have been deprecated (:issue:`6949`, :pull:`7052`). By `Michael Niklas `_. - ``xarray.plot.FacetGrid.axes`` has been renamed to ``xarray.plot.FacetGrid.axs`` because it's not clear if ``axes`` refers to single or multiple ``Axes`` instances. This aligns with ``matplotlib.pyplot.subplots``. (:pull:`7194`) By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Explicitly opening a file multiple times (e.g., after modifying it on disk) now reopens the file from scratch for h5netcdf and scipy netCDF backends, rather than reusing a cached version (:issue:`4240`, :issue:`4862`). By `Stephan Hoyer `_. - Fixed bug where :py:meth:`Dataset.coarsen.construct` would demote non-dimension coordinates to variables. (:pull:`7233`) By `Tom Nicholas `_. - Raise a TypeError when trying to plot empty data (:issue:`7156`, :pull:`7228`). By `Michael Niklas `_. Documentation ~~~~~~~~~~~~~ - Improves overall documentation around available backends, including adding docstrings for :py:func:`xarray.backends.list_engines` Add :py:meth:`__str__` to surface the new :py:class:`BackendEntrypoint` ``description`` and ``url`` attributes. (:issue:`6577`, :pull:`7000`) By `Jessica Scheick `_. - Created docstring examples for :py:meth:`DataArray.cumsum`, :py:meth:`DataArray.cumprod`, :py:meth:`Dataset.cumsum`, :py:meth:`Dataset.cumprod`, :py:meth:`DatasetGroupBy.cumsum`, :py:meth:`DataArrayGroupBy.cumsum`. (:issue:`5816`, :pull:`7152`) By `Patrick Naylor `_ - Add example of using :py:meth:`DataArray.coarsen.construct` to User Guide. (:pull:`7192`) By `Tom Nicholas `_. - Rename ``axes`` to ``axs`` in plotting to align with ``matplotlib.pyplot.subplots``. (:pull:`7194`) By `Jimmy Westling `_. - Add documentation of specific BackendEntrypoints (:pull:`7200`). By `Michael Niklas `_. - Add examples to docstring for :py:meth:`DataArray.drop_vars`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interp_like`. (:issue:`6793`, :pull:`7123`) By `Daniel Goman `_. Internal Changes ~~~~~~~~~~~~~~~~ - Doctests fail on any warnings (:pull:`7166`) By `Maximilian Roos `_. - Improve import time by lazy loading ``dask.distributed`` (:pull:`7172`). - Explicitly specify ``longdouble=False`` in :py:func:`cftime.date2num` when encoding times to preserve existing behavior and prevent future errors when it is eventually set to ``True`` by default in cftime (:pull:`7171`). By `Spencer Clark `_. - Improved import time by lazily importing backend modules, matplotlib, dask.array and flox. (:issue:`6726`, :pull:`7179`) By `Michael Niklas `_. - Emit a warning under the development version of pandas when we convert non-nanosecond precision datetime or timedelta values to nanosecond precision. This was required in the past, because pandas previously was not compatible with non-nanosecond precision values. However pandas is currently working towards removing this restriction. 
When things stabilize in pandas we will likely consider relaxing this behavior in xarray as well (:issue:`7175`, :pull:`7201`). By `Spencer Clark `_. .. _whats-new.2022.10.0: v2022.10.0 (Oct 14 2022) ------------------------ This release brings numerous bugfixes, a change in minimum supported versions, and a new scatter plot method for DataArrays. Many thanks to 11 contributors to this release: Anderson Banihirwe, Benoit Bovy, Dan Adriaansen, Illviljan, Justus Magin, Lukas Bindreiter, Mick, Patrick Naylor, Spencer Clark, Thomas Nicholas New Features ~~~~~~~~~~~~ - Add scatter plot for datarrays. Scatter plots now also supports 3d plots with the z argument. (:pull:`6778`) By `Jimmy Westling `_. - Include the variable name in the error message when CF decoding fails to allow for easier identification of problematic variables (:issue:`7145`, :pull:`7147`). By `Spencer Clark `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed: ========================== ========= ======== Package Old New ========================== ========= ======== cftime 1.4 1.5 distributed 2021.08 2021.09 dask 2021.08 2021.09 iris 2.4 3.1 nc-time-axis 1.2 1.3 numba 0.53 0.54 numpy 1.19 1.20 pandas 1.2 1.3 packaging 20.0 21.0 scipy 1.6 1.7 sparse 0.12 0.13 typing_extensions 3.7 3.10 zarr 2.8 2.10 ========================== ========= ======== Bug fixes ~~~~~~~~~ - Remove nested function from :py:func:`open_mfdataset` to allow Dataset objects to be pickled. (:issue:`7109`, :pull:`7116`) By `Daniel Adriaansen `_. - Support for recursively defined Arrays. Fixes repr and deepcopy. (:issue:`7111`, :pull:`7112`) By `Michael Niklas `_. - Fixed :py:meth:`Dataset.transpose` to raise a more informative error. (:issue:`6502`, :pull:`7120`) By `Patrick Naylor `_ - Fix groupby on a multi-index level coordinate and fix :py:meth:`DataArray.to_index` for multi-index levels (convert to single index). (:issue:`6836`, :pull:`7105`) By `BenoΓt Bovy `_. - Support for open_dataset backends that return datasets containing multi-indexes (:issue:`7139`, :pull:`7150`) By `Lukas Bindreiter `_. .. _whats-new.2022.09.0: v2022.09.0 (September 30, 2022) ------------------------------- This release brings a large number of bugfixes and documentation improvements, as well as an external interface for setting custom indexes! Many thanks to our 40 contributors: Anderson Banihirwe, Andrew Ronald Friedman, Bane Sullivan, Benoit Bovy, ColemanTom, Deepak Cherian, Dimitri Papadopoulos Orfanos, Emma Marshall, Fabian Hofmann, Francesco Nattino, ghislainp, Graham Inggs, Hauke Schulz, Illviljan, James Bourbeau, Jody Klymak, Julia Signell, Justus Magin, Keewis, Ken Mankoff, Luke Conibear, Mathias Hauser, Max Jones, mgunyho, Michael Delgado, Mick, Mike Taves, Oliver Lopez, Patrick Naylor, Paul Hockett, Pierre Manchon, Ray Bell, Riley Brady, Sam Levang, Spencer Clark, Stefaan Lippens, Tom Nicholas, Tom White, Travis A. O'Brien, and Zachary Moon. New Features ~~~~~~~~~~~~ - Add :py:meth:`Dataset.set_xindex` and :py:meth:`Dataset.drop_indexes` and their DataArray counterpart for setting and dropping pandas or custom indexes given a set of arbitrary coordinates. (:pull:`6971`) By `BenoΓt Bovy `_ and `Justus Magin `_. - Enable taking the mean of dask-backed :py:class:`cftime.datetime` arrays (:pull:`6556`, :pull:`6940`). By `Deepak Cherian `_ and `Spencer Clark `_. Bug fixes ~~~~~~~~~ - Allow reading netcdf files where the 'units' attribute is a number. (:pull:`7085`) By `Ghislain Picard `_. - Allow decoding of 0 sized datetimes. 
(:issue:`1329`, :pull:`6882`) By `Deepak Cherian `_. - Make sure DataArray.name is always a string when used as label for plotting. (:issue:`6826`, :pull:`6832`) By `Jimmy Westling `_. - :py:attr:`DataArray.nbytes` now uses the ``nbytes`` property of the underlying array if available. (:pull:`6797`) By `Max Jones `_. - Rely on the array backend for string formatting. (:pull:`6823`). By `Jimmy Westling `_. - Fix incompatibility with numpy 1.20. (:issue:`6818`, :pull:`6821`) By `Michael Niklas `_. - Fix side effects on index coordinate metadata after aligning objects. (:issue:`6852`, :pull:`6857`) By `BenoΓt Bovy `_. - Make FacetGrid.set_titles send kwargs correctly using ``handle.update(kwargs)``. (:issue:`6839`, :pull:`6843`) By `Oliver Lopez `_. - Fix bug where index variables would be changed inplace. (:issue:`6931`, :pull:`6938`) By `Michael Niklas `_. - Allow taking the mean over non-time dimensions of datasets containing dask-backed cftime arrays. (:issue:`5897`, :pull:`6950`) By `Spencer Clark `_. - Harmonize returned multi-indexed indexes when applying ``concat`` along new dimension. (:issue:`6881`, :pull:`6889`) By `Fabian Hofmann `_. - Fix step plots with ``hue`` arg. (:pull:`6944`) By `AndrΓ‘s GunyhΓ³ `_. - Avoid use of random numbers in ``test_weighted.test_weighted_operations_nonequal_coords``. (:issue:`6504`, :pull:`6961`) By `Luke Conibear `_. - Fix multiple regression issues with :py:meth:`Dataset.set_index` and :py:meth:`Dataset.reset_index`. (:pull:`6992`) By `BenoΓt Bovy `_. - Raise a ``UserWarning`` when renaming a coordinate or a dimension creates a non-indexed dimension coordinate, and suggest the user creating an index either with ``swap_dims`` or ``set_index``. (:issue:`6607`, :pull:`6999`) By `BenoΓt Bovy `_. - Use ``keep_attrs=True`` in grouping and resampling operations by default. (:issue:`7012`) This means :py:attr:`Dataset.attrs` and :py:attr:`DataArray.attrs` are now preserved by default. By `Deepak Cherian `_. - ``Dataset.encoding['source']`` now exists when reading from a Path object. (:issue:`5888`, :pull:`6974`) By `Thomas Coleman `_. - Better dtype consistency for ``rolling.mean()``. (:issue:`7062`, :pull:`7063`) By `Sam Levang `_. - Allow writing NetCDF files including only dimensionless variables using the distributed or multiprocessing scheduler. (:issue:`7013`, :pull:`7040`) By `Francesco Nattino `_. - Fix deepcopy of attrs and encoding of DataArrays and Variables. (:issue:`2835`, :pull:`7089`) By `Michael Niklas `_. - Fix bug where subplot_kwargs were not working when plotting with figsize, size or aspect. (:issue:`7078`, :pull:`7080`) By `Michael Niklas `_. Documentation ~~~~~~~~~~~~~ - Update merge docstrings. (:issue:`6935`, :pull:`7033`) By `Zach Moon `_. - Raise a more informative error when trying to open a non-existent zarr store. (:issue:`6484`, :pull:`7060`) By `Sam Levang `_. - Added examples to docstrings for :py:meth:`DataArray.expand_dims`, :py:meth:`DataArray.drop_duplicates`, :py:meth:`DataArray.reset_coords`, :py:meth:`DataArray.equals`, :py:meth:`DataArray.identical`, :py:meth:`DataArray.broadcast_equals`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.dropna`, :py:meth:`DataArray.drop_isel`, :py:meth:`DataArray.drop_sel`, :py:meth:`DataArray.head`, :py:meth:`DataArray.tail`. (:issue:`5816`, :pull:`7088`) By `Patrick Naylor `_. - Add missing docstrings to various array properties. (:pull:`7090`) By `Tom Nicholas `_. 
Internal Changes ~~~~~~~~~~~~~~~~ - Added test for DataArray attrs deepcopy recursion/nested attrs. (:issue:`2835`, :pull:`7086`) By `Paul hockett `_. .. _whats-new.2022.06.0: v2022.06.0 (July 21, 2022) -------------------------- This release brings a number of bug fixes and improvements, most notably a major internal refactor of the indexing functionality, the use of `flox`_ in ``groupby`` operations, and experimental support for the new Python `Array API standard `_. It also stops testing support for the abandoned PyNIO. Much effort has been made to preserve backwards compatibility as part of the indexing refactor. We are aware of one `unfixed issue `_. Please also see the `whats-new.2022.06.0rc0`_ for a full list of changes. Many thanks to our 18 contributors: Bane Sullivan, Deepak Cherian, Dimitri Papadopoulos Orfanos, Emma Marshall, Hauke Schulz, Illviljan, Julia Signell, Justus Magin, Keewis, Mathias Hauser, Michael Delgado, Mick, Pierre Manchon, Ray Bell, Spencer Clark, Stefaan Lippens, Tom White, Travis A. O'Brien, New Features ~~~~~~~~~~~~ - Add :py:attr:`Dataset.dtypes`, :py:attr:`core.coordinates.DatasetCoordinates.dtypes`, :py:attr:`core.coordinates.DataArrayCoordinates.dtypes` properties: Mapping from variable names to dtypes. (:pull:`6706`) By `Michael Niklas `_. - Initial typing support for :py:meth:`groupby`, :py:meth:`rolling`, :py:meth:`rolling_exp`, :py:meth:`coarsen`, :py:meth:`weighted`, :py:meth:`resample`, (:pull:`6702`) By `Michael Niklas `_. - Experimental support for wrapping any array type that conforms to the python `array api standard `_. (:pull:`6804`) By `Tom White `_. - Allow string formatting of scalar DataArrays. (:pull:`5981`) By `fmaussion `_. Bug fixes ~~~~~~~~~ - :py:meth:`save_mfdataset` now passes ``**kwargs`` on to :py:meth:`Dataset.to_netcdf`, allowing the ``encoding`` and ``unlimited_dims`` options with :py:meth:`save_mfdataset`. (:issue:`6684`) By `Travis A. O'Brien `_. - Fix backend support of pydap versions <3.3.0 (:issue:`6648`, :pull:`6656`). By `Hauke Schulz `_. - :py:meth:`Dataset.where` with ``drop=True`` now behaves correctly with mixed dimensions. (:issue:`6227`, :pull:`6690`) By `Michael Niklas `_. - Accommodate newly raised ``OutOfBoundsTimedelta`` error in the development version of pandas when decoding times outside the range that can be represented with nanosecond-precision values (:issue:`6716`, :pull:`6717`). By `Spencer Clark `_. - :py:meth:`open_dataset` with dask and ``~`` in the path now resolves the home directory instead of raising an error. (:issue:`6707`, :pull:`6710`) By `Michael Niklas `_. - :py:meth:`DataArrayRolling.__iter__` with ``center=True`` now works correctly. (:issue:`6739`, :pull:`6744`) By `Michael Niklas `_. Internal Changes ~~~~~~~~~~~~~~~~ - ``xarray.core.groupby``, ``xarray.core.rolling``, ``xarray.core.rolling_exp``, ``xarray.core.weighted`` and ``xarray.core.resample`` modules are no longer imported by default. (:pull:`6702`) .. _whats-new.2022.06.0rc0: v2022.06.0rc0 (9 June 2022) --------------------------- This pre-release brings a number of bug fixes and improvements, most notably a major internal refactor of the indexing functionality and the use of `flox`_ in ``groupby`` operations. It also stops testing support for the abandoned PyNIO. 
Install it using :: mamba create -n python=3.10 xarray python -m pip install --pre --upgrade --no-deps xarray Many thanks to the 39 contributors: Abel Soares Siqueira, Alex Santana, Anderson Banihirwe, Benoit Bovy, Blair Bonnett, Brewster Malevich, brynjarmorka, Charles Stern, Christian Jauvin, Deepak Cherian, Emma Marshall, Fabien Maussion, Greg Behm, Guelate Seyo, Illviljan, Joe Hamman, Joseph K Aicher, Justus Magin, Kevin Paul, Louis Stenger, Mathias Hauser, Mattia Almansi, Maximilian Roos, Michael Bauer, Michael Delgado, Mick, ngam, Oleh Khoma, Oriol Abril-Pla, Philippe Blain, PLSeuJ, Sam Levang, Spencer Clark, Stan West, Thomas Nicholas, Thomas Vogt, Tom White, Xianxiang Li Known Regressions ~~~~~~~~~~~~~~~~~ - ``reset_coords(drop=True)`` does not create indexes (:issue:`6607`) New Features ~~~~~~~~~~~~ - The ``zarr`` backend is now able to read NCZarr. By `Mattia Almansi `_. - Add a weighted ``quantile`` method to :py:class:`.computation.weighted.DatasetWeighted` and :py:class:`~computation.weighted.DataArrayWeighted` (:pull:`6059`). By `Christian Jauvin `_ and `David Huard `_. - Add a ``create_index=True`` parameter to :py:meth:`Dataset.stack` and :py:meth:`DataArray.stack` so that the creation of multi-indexes is optional (:pull:`5692`). By `BenoΓt Bovy `_. - Multi-index levels are now accessible through their own, regular coordinates instead of virtual coordinates (:pull:`5692`). By `BenoΓt Bovy `_. - Add a ``display_values_threshold`` option to control the total number of array elements which trigger summarization rather than full repr in (numpy) array detailed views of the html repr (:pull:`6400`). By `BenoΓt Bovy `_. - Allow passing chunks in ``kwargs`` form to :py:meth:`Dataset.chunk`, :py:meth:`DataArray.chunk`, and :py:meth:`Variable.chunk`. (:pull:`6471`) By `Tom Nicholas `_. - Add :py:meth:`core.groupby.DatasetGroupBy.cumsum` and :py:meth:`core.groupby.DataArrayGroupBy.cumsum`. By `Vladislav Skripniuk `_ and `Deepak Cherian `_. (:pull:`3147`, :pull:`6525`, :issue:`3141`) - Expose ``inline_array`` kwarg from ``dask.array.from_array`` in :py:func:`open_dataset`, :py:meth:`Dataset.chunk`, :py:meth:`DataArray.chunk`, and :py:meth:`Variable.chunk`. (:pull:`6471`) - Expose the ``inline_array`` kwarg from :py:func:`dask.array.from_array` in :py:func:`open_dataset`, :py:meth:`Dataset.chunk`, :py:meth:`DataArray.chunk`, and :py:meth:`Variable.chunk`. (:pull:`6471`) By `Tom Nicholas `_. - :py:func:`polyval` now supports :py:class:`Dataset` and :py:class:`DataArray` args of any shape, is faster and requires less memory. (:pull:`6548`) By `Michael Niklas `_. - Improved overall typing. - :py:meth:`Dataset.to_dict` and :py:meth:`DataArray.to_dict` may now optionally include encoding attributes. (:pull:`6635`) By `Joe Hamman `_. - Upload development versions to `TestPyPI `_. By `Justus Magin `_. Breaking changes ~~~~~~~~~~~~~~~~ - PyNIO support is now untested. The minimum versions of some dependencies were changed: =============== ===== ==== Package Old New =============== ===== ==== cftime 1.2 1.4 dask 2.30 2021.4 distributed 2.30 2021.4 h5netcdf 0.8 0.11 matplotlib-base 3.3 3.4 numba 0.51 0.53 numpy 1.18 1.19 pandas 1.1 1.2 pint 0.16 0.17 rasterio 1.1 1.2 scipy 1.5 1.6 sparse 0.11 0.12 zarr 2.5 2.8 =============== ===== ==== - The Dataset and DataArray ``rename```` methods do not implicitly add or drop indexes. (:pull:`5692`). By `BenoΓt Bovy `_. - Many arguments like ``keep_attrs``, ``axis``, and ``skipna`` are now keyword only for all reduction operations like ``.mean``. 
By `Deepak Cherian `_, `Jimmy Westling `_. - Xarray's ufuncs have been removed, now that they can be replaced by numpy's ufuncs in all supported versions of numpy. By `Maximilian Roos `_. - :py:meth:`xr.polyval` now uses the ``coord`` argument directly instead of its index coordinate. (:pull:`6548`) By `Michael Niklas `_. Bug fixes ~~~~~~~~~ - :py:meth:`Dataset.to_zarr` now allows to write all attribute types supported by ``zarr-python``. By `Mattia Almansi `_. - Set ``skipna=None`` for all ``quantile`` methods (e.g. :py:meth:`Dataset.quantile`) and ensure it skips missing values for float dtypes (consistent with other methods). This should not change the behavior (:pull:`6303`). By `Mathias Hauser `_. - Many bugs fixed by the explicit indexes refactor, mainly related to multi-index (virtual) coordinates. See the corresponding pull-request on GitHub for more details. (:pull:`5692`). By `BenoΓt Bovy `_. - Fixed "unhashable type" error trying to read NetCDF file with variable having its 'units' attribute not ``str`` (e.g. ``numpy.ndarray``) (:issue:`6368`). By `Oleh Khoma `_. - Omit warning about specified dask chunks separating chunks on disk when the underlying array is empty (e.g., because of an empty dimension) (:issue:`6401`). By `Joseph K Aicher `_. - Fixed the poor html repr performance on large multi-indexes (:pull:`6400`). By `BenoΓt Bovy `_. - Allow fancy indexing of duck dask arrays along multiple dimensions. (:pull:`6414`) By `Justus Magin `_. - In the API for backends, support dimensions that express their preferred chunk sizes as a tuple of integers. (:issue:`6333`, :pull:`6334`) By `Stan West `_. - Fix bug in :py:func:`where` when passing non-xarray objects with ``keep_attrs=True``. (:issue:`6444`, :pull:`6461`) By `Sam Levang `_. - Allow passing both ``other`` and ``drop=True`` arguments to :py:meth:`DataArray.where` and :py:meth:`Dataset.where` (:pull:`6466`, :pull:`6467`). By `Michael Delgado `_. - Ensure dtype encoding attributes are not added or modified on variables that contain datetime-like values prior to being passed to :py:func:`xarray.conventions.decode_cf_variable` (:issue:`6453`, :pull:`6489`). By `Spencer Clark `_. - Dark themes are now properly detected in Furo-themed Sphinx documents (:issue:`6500`, :pull:`6501`). By `Kevin Paul `_. - :py:meth:`Dataset.isel`, :py:meth:`DataArray.isel` with ``drop=True`` works as intended with scalar :py:class:`DataArray` indexers. (:issue:`6554`, :pull:`6579`) By `Michael Niklas `_. - Fixed silent overflow issue when decoding times encoded with 32-bit and below unsigned integer data types (:issue:`6589`, :pull:`6598`). By `Spencer Clark `_. - Fixed ``.chunks`` loading lazy data (:issue:`6538`). By `Deepak Cherian `_. Documentation ~~~~~~~~~~~~~ - Revise the documentation for developers on specifying a backend's preferred chunk sizes. In particular, correct the syntax and replace lists with tuples in the examples. (:issue:`6333`, :pull:`6334`) By `Stan West `_. - Mention that :py:meth:`DataArray.rename` can rename coordinates. (:issue:`5458`, :pull:`6665`) By `Michael Niklas `_. - Added examples to :py:meth:`Dataset.thin` and :py:meth:`DataArray.thin` By `Emma Marshall `_. Performance ~~~~~~~~~~~ - GroupBy binary operations are now vectorized. Previously this involved looping over all groups. (:issue:`5804`, :pull:`6160`) By `Deepak Cherian `_. - Substantially improved GroupBy operations using `flox `_. This is auto-enabled when ``flox`` is installed. Use ``xr.set_options(use_flox=False)`` to use the old algorithm. 
(:issue:`4473`, :issue:`4498`, :issue:`659`, :issue:`2237`, :pull:`271`). By `Deepak Cherian `_, `Anderson Banihirwe `_, `Jimmy Westling `_. Internal Changes ~~~~~~~~~~~~~~~~ - Many internal changes due to the explicit indexes refactor. See the corresponding pull-request on GitHub for more details. (:pull:`5692`). By `BenoΓt Bovy `_. .. _whats-new.2022.03.0: v2022.03.0 (2 March 2022) ------------------------- This release brings a number of small improvements, as well as a move to `calendar versioning `_ (:issue:`6176`). Many thanks to the 16 contributors to the v2022.02.0 release! Aaron Spring, Alan D. Snow, Anderson Banihirwe, crusaderky, Illviljan, Joe Hamman, Jonas Gliß, Lukas Pilz, Martin Bergemann, Mathias Hauser, Maximilian Roos, Romain Caneill, Stan West, Stijn Van Hoey, Tobias KΓΆlling, and Tom Nicholas. New Features ~~~~~~~~~~~~ - Enabled multiplying tick offsets by floats. Allows ``float`` ``n`` in :py:meth:`CFTimeIndex.shift` if ``shift_freq`` is between ``Day`` and ``Microsecond``. (:issue:`6134`, :pull:`6135`). By `Aaron Spring `_. - Enable providing more keyword arguments to the ``pydap`` backend when reading OpenDAP datasets (:issue:`6274`). By `Jonas Gliß `_. - Allow :py:meth:`DataArray.drop_duplicates` to drop duplicates along multiple dimensions at once, and add :py:meth:`Dataset.drop_duplicates`. (:pull:`6307`) By `Tom Nicholas `_. Breaking changes ~~~~~~~~~~~~~~~~ - Renamed the ``interpolation`` keyword of all ``quantile`` methods (e.g. :py:meth:`DataArray.quantile`) to ``method`` for consistency with numpy v1.22.0 (:pull:`6108`). By `Mathias Hauser `_. Deprecations ~~~~~~~~~~~~ Bug fixes ~~~~~~~~~ - Variables which are chunked using dask in larger (but aligned) chunks than the target zarr chunk size can now be stored using ``to_zarr()`` (:pull:`6258`) By `Tobias KΓΆlling `_. - Multi-file datasets containing encoded :py:class:`cftime.datetime` objects can be read in parallel again (:issue:`6226`, :pull:`6249`, :pull:`6305`). By `Martin Bergemann `_ and `Stan West `_. Documentation ~~~~~~~~~~~~~ - Delete files of datasets saved to disk while building the documentation and enable building on Windows via ``sphinx-build`` (:pull:`6237`). By `Stan West `_. Internal Changes ~~~~~~~~~~~~~~~~ .. _whats-new.0.21.1: v0.21.1 (31 January 2022) ------------------------- This is a bugfix release to resolve (:issue:`6216`, :pull:`6207`). Bug fixes ~~~~~~~~~ - Add ``packaging`` as a dependency to Xarray (:issue:`6216`, :pull:`6207`). By `Sebastian Weigand `_ and `Joe Hamman `_. .. _whats-new.0.21.0: v0.21.0 (27 January 2022) ------------------------- Many thanks to the 20 contributors to the v0.21.0 release! Abel Aoun, Anderson Banihirwe, Ant Gib, Chris Roat, Cindy Chiao, Deepak Cherian, Dominik StaΕ„czak, Fabian Hofmann, Illviljan, Jody Klymak, Joseph K Aicher, Mark Harfouche, Mathias Hauser, Matthew Roeschke, Maximilian Roos, Michael Delgado, Pascal Bourgault, Pierre, Ray Bell, Romain Caneill, Tim Heap, Tom Nicholas, Zeb Nicholls, joseph nowak, keewis. New Features ~~~~~~~~~~~~ - New top-level function :py:func:`cross`. (:issue:`3279`, :pull:`5365`). By `Jimmy Westling `_. - ``keep_attrs`` support for :py:func:`where` (:issue:`4141`, :issue:`4682`, :pull:`4687`). By `Justus Magin `_. - Enable the limit option for dask array in the following methods :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` (:issue:`6112`) By `Joseph Nowak `_. 
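As a rough sketch of the new ``limit`` support on chunked data (assuming ``dask`` and
``bottleneck`` or ``numbagg`` are installed; the array here is a toy example)::

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        [0.0, np.nan, np.nan, np.nan, 4.0, np.nan],
        dims="time",
    ).chunk({"time": 2})

    # limit=1 forward-fills at most one step past each valid value, so the
    # longer gap keeps its remaining NaNs; this now also works when the data
    # are dask-backed rather than in memory.
    filled = da.ffill("time", limit=1).compute()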
Breaking changes ~~~~~~~~~~~~~~~~ - Rely on matplotlib's default datetime converters instead of pandas' (:issue:`6102`, :pull:`6109`). By `Jimmy Westling `_. - Improve repr readability when there are a large number of dimensions in datasets or dataarrays by wrapping the text once the maximum display width has been exceeded. (:issue:`5546`, :pull:`5662`) By `Jimmy Westling `_. Deprecations ~~~~~~~~~~~~ - Removed the lock kwarg from the zarr and pydap backends, completing the deprecation cycle started in :issue:`5256`. By `Tom Nicholas `_. - Support for ``python 3.7`` has been dropped. (:pull:`5892`) By `Jimmy Westling `_. Bug fixes ~~~~~~~~~ - Preserve chunks when creating a :py:class:`DataArray` from another :py:class:`DataArray` (:pull:`5984`). By `Fabian Hofmann `_. - Properly support :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` along chunked dimensions (:issue:`6112`). By `Joseph Nowak `_. - Subclasses of ``byte`` and ``str`` (e.g. ``np.str_`` and ``np.bytes_``) will now serialise to disk rather than raising a ``ValueError: unsupported dtype for netCDF4 variable: object`` as they did previously (:pull:`5264`). By `Zeb Nicholls `_. - Fix applying function with non-xarray arguments using :py:func:`xr.map_blocks`. By `Cindy Chiao `_. - No longer raise an error for an all-nan-but-one argument to :py:meth:`DataArray.interpolate_na` when using ``method='nearest'`` (:issue:`5994`, :pull:`6144`). By `Michael Delgado `_. - `dt.season `_ can now handle NaN and NaT. (:pull:`5876`). By `Pierre Loicq `_. - Determination of zarr chunks handles empty lists for encoding chunks or variable chunks that occurs in certain circumstances (:pull:`5526`). By `Chris Roat `_. Internal Changes ~~~~~~~~~~~~~~~~ - Replace ``distutils.version`` with ``packaging.version`` (:issue:`6092`). By `Mathias Hauser `_. - Removed internal checks for ``pd.Panel`` (:issue:`6145`). By `Matthew Roeschke `_. - Add ``pyupgrade`` pre-commit hook (:pull:`6152`). By `Maximilian Roos `_. .. _whats-new.0.20.2: v0.20.2 (9 December 2021) ------------------------- This is a bugfix release to resolve (:issue:`3391`, :issue:`5715`). It also includes performance improvements in unstacking to a ``sparse`` array and a number of documentation improvements. Many thanks to the 20 contributors: Aaron Spring, Alexandre Poux, Deepak Cherian, Enrico Minack, Fabien Maussion, Giacomo Caria, Gijom, Guillaume Maze, Illviljan, Joe Hamman, Joseph Hardin, Kai MΓΌhlbauer, Matt Henderson, Maximilian Roos, Michael Delgado, Robert Gieseke, Sebastian Weigand and Stephan Hoyer. Breaking changes ~~~~~~~~~~~~~~~~ - Use complex nan when interpolating complex values out of bounds by default (instead of real nan) (:pull:`6019`). By `Alexandre Poux `_. Performance ~~~~~~~~~~~ - Significantly faster unstacking to a ``sparse`` array. :pull:`5577` By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - :py:func:`xr.map_blocks` and :py:func:`xr.corr` now work when dask is not installed (:issue:`3391`, :issue:`5715`, :pull:`5731`). By `Gijom `_. - Fix plot.line crash for data of shape ``(1, N)`` in _title_for_slice on format_item (:pull:`5948`). By `Sebastian Weigand `_. - Fix a regression in the removal of duplicate backend entrypoints (:issue:`5944`, :pull:`5959`) By `Kai MΓΌhlbauer `_. - Fix an issue that datasets from being saved when time variables with units that ``cftime`` can parse but pandas can not were present (:pull:`6049`). By `Tim Heap `_. 
Documentation ~~~~~~~~~~~~~ - Better examples in docstrings for groupby and resampling reductions (:pull:`5871`). By `Deepak Cherian `_, `Maximilian Roos `_, `Jimmy Westling `_ . - Add list-like possibility for tolerance parameter in the reindex functions. By `Antoine Gibek `_, Internal Changes ~~~~~~~~~~~~~~~~ - Use ``importlib`` to replace functionality of ``pkg_resources`` in backend plugins tests. (:pull:`5959`). By `Kai MΓΌhlbauer `_. .. _whats-new.0.20.1: v0.20.1 (5 November 2021) ------------------------- This is a bugfix release to fix :issue:`5930`. Bug fixes ~~~~~~~~~ - Fix a regression in the detection of the backend entrypoints (:issue:`5930`, :pull:`5931`) By `Justus Magin `_. Documentation ~~~~~~~~~~~~~ - Significant improvements to :ref:`api`. By `Deepak Cherian `_. .. _whats-new.0.20.0: v0.20.0 (1 November 2021) ------------------------- This release brings improved support for pint arrays, methods for weighted standard deviation, variance, and sum of squares, the option to disable the use of the bottleneck library, significantly improved performance of unstack, as well as many bugfixes and internal changes. Many thanks to the 40 contributors to this release!: Aaron Spring, Akio Taniguchi, Alan D. Snow, arfy slowy, Benoit Bovy, Christian Jauvin, crusaderky, Deepak Cherian, Giacomo Caria, Illviljan, James Bourbeau, Joe Hamman, Joseph K Aicher, Julien Herzen, Kai MΓΌhlbauer, keewis, lusewell, Martin K. Scherer, Mathias Hauser, Max Grover, Maxime Liquet, Maximilian Roos, Mike Taves, Nathan Lis, pmav99, Pushkar Kopparla, Ray Bell, Rio McMahon, Scott Staniewicz, Spencer Clark, Stefan Bender, Taher Chegini, Thomas Nicholas, Tomas Chor, Tom Augspurger, Victor NegΓrneac, Zachary Blackwood, Zachary Moon, and Zeb Nicholls. New Features ~~~~~~~~~~~~ - Add ``std``, ``var``, ``sum_of_squares`` to :py:class:`~computation.weighted.DatasetWeighted` and :py:class:`~computation.weighted.DataArrayWeighted`. By `Christian Jauvin `_. - Added a :py:func:`get_options` method to xarray's root namespace (:issue:`5698`, :pull:`5716`) By `Pushkar Kopparla `_. - Xarray now does a better job rendering variable names that are long LaTeX sequences when plotting (:issue:`5681`, :pull:`5682`). By `Tomas Chor `_. - Add an option (``"use_bottleneck"``) to disable the use of ``bottleneck`` using :py:func:`set_options` (:pull:`5560`) By `Justus Magin `_. - Added ``**kwargs`` argument to :py:meth:`open_rasterio` to access overviews (:issue:`3269`). By `Pushkar Kopparla `_. - Added ``storage_options`` argument to :py:meth:`to_zarr` (:issue:`5601`, :pull:`5615`). By `Ray Bell `_, `Zachary Blackwood `_ and `Nathan Lis `_. - Added calendar utilities :py:func:`DataArray.convert_calendar`, :py:func:`DataArray.interp_calendar`, :py:func:`date_range`, :py:func:`date_range_like` and :py:attr:`DataArray.dt.calendar` (:issue:`5155`, :pull:`5233`). By `Pascal Bourgault `_. - Histogram plots are set with a title displaying the scalar coords if any, similarly to the other plots (:issue:`5791`, :pull:`5792`). By `Maxime Liquet `_. - Slice plots display the coords units in the same way as x/y/colorbar labels (:pull:`5847`). By `Victor NegΓrneac `_. - Added a new :py:attr:`Dataset.chunksizes`, :py:attr:`DataArray.chunksizes`, and :py:attr:`Variable.chunksizes` property, which will always return a mapping from dimension names to chunking pattern along that dimension, regardless of whether the object is a Dataset, DataArray, or Variable. (:issue:`5846`, :pull:`5900`) By `Tom Nicholas `_. 
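A small sketch of how the new ``chunksizes`` property behaves (illustrative only, assuming
``dask`` is installed so the data can be chunked)::

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"temp": (("x", "y"), np.zeros((100, 50)))}).chunk({"x": 25})

    # The same mapping from dimension name to chunk sizes is available on
    # Dataset, DataArray and Variable objects alike,
    # e.g. something like {'x': (25, 25, 25, 25), 'y': (50,)}.
    print(ds.chunksizes)
    print(ds["temp"].chunksizes)
    print(ds["temp"].variable.chunksizes)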
Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed: =============== ====== ==== Package Old New =============== ====== ==== cftime 1.1 1.2 dask 2.15 2.30 distributed 2.15 2.30 lxml 4.5 4.6 matplotlib-base 3.2 3.3 numba 0.49 0.51 numpy 1.17 1.18 pandas 1.0 1.1 pint 0.15 0.16 scipy 1.4 1.5 seaborn 0.10 0.11 sparse 0.8 0.11 toolz 0.10 0.11 zarr 2.4 2.5 =============== ====== ==== - The ``__repr__`` of a :py:class:`xarray.Dataset`'s ``coords`` and ``data_vars`` ignore ``xarray.set_option(display_max_rows=...)`` and show the full output when called directly as, e.g., ``ds.data_vars`` or ``print(ds.data_vars)`` (:issue:`5545`, :pull:`5580`). By `Stefan Bender `_. Deprecations ~~~~~~~~~~~~ - Deprecate :py:func:`open_rasterio` (:issue:`4697`, :pull:`5808`). By `Alan Snow `_. - Set the default argument for ``roll_coords`` to ``False`` for :py:meth:`DataArray.roll` and :py:meth:`Dataset.roll`. (:pull:`5653`) By `Tom Nicholas `_. - :py:meth:`xarray.open_mfdataset` will now error instead of warn when a value for ``concat_dim`` is passed alongside ``combine='by_coords'``. By `Tom Nicholas `_. Bug fixes ~~~~~~~~~ - Fix ZeroDivisionError from saving dask array with empty dimension (:issue:`5741`). By `Joseph K Aicher `_. - Fixed performance bug where ``cftime`` import attempted within various core operations if ``cftime`` not installed (:pull:`5640`). By `Luke Sewell `_ - Fixed bug when combining named DataArrays using :py:func:`combine_by_coords`. (:pull:`5834`). By `Tom Nicholas `_. - When a custom engine was used in :py:func:`~xarray.open_dataset` the engine wasn't initialized properly, causing missing argument errors or inconsistent method signatures. (:pull:`5684`) By `Jimmy Westling `_. - Numbers are properly formatted in a plot's title (:issue:`5788`, :pull:`5789`). By `Maxime Liquet `_. - Faceted plots will no longer raise a ``pint.UnitStrippedWarning`` when a ``pint.Quantity`` array is plotted, and will correctly display the units of the data in the colorbar (if there is one) (:pull:`5886`). By `Tom Nicholas `_. - With backends, check for path-like objects rather than ``pathlib.Path`` type, use ``os.fspath`` (:pull:`5879`). By `Mike Taves `_. - ``open_mfdataset()`` now accepts a single ``pathlib.Path`` object (:issue:`5881`). By `Panos Mavrogiorgos `_. - Improved performance of :py:meth:`Dataset.unstack` (:pull:`5906`). By `Tom Augspurger `_. Documentation ~~~~~~~~~~~~~ - Users are instructed to try ``use_cftime=True`` if a ``TypeError`` occurs when combining datasets and one of the types involved is a subclass of ``cftime.datetime`` (:pull:`5776`). By `Zeb Nicholls `_. - A clearer error is now raised if a user attempts to assign a Dataset to a single key of another Dataset. (:pull:`5839`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Explicit indexes refactor: avoid ``len(index)`` in ``map_blocks`` (:pull:`5670`). By `Deepak Cherian `_. - Explicit indexes refactor: decouple ``xarray.Index``` from ``xarray.Variable`` (:pull:`5636`). By `Benoit Bovy `_. - Fix ``Mapping`` argument typing to allow mypy to pass on ``str`` keys (:pull:`5690`). By `Maximilian Roos `_. - Annotate many of our tests, and fix some of the resulting typing errors. This will also mean our typing annotations are tested as part of CI. (:pull:`5728`). By `Maximilian Roos `_. - Improve the performance of reprs for large datasets or dataarrays. (:pull:`5661`) By `Jimmy Westling `_. - Use isort's ``float_to_top`` config. (:pull:`5695`). By `Maximilian Roos `_. 
- Remove use of the deprecated ``kind`` argument in :py:meth:`pandas.Index.get_slice_bound` inside :py:class:`xarray.CFTimeIndex` tests (:pull:`5723`). By `Spencer Clark `_. - Refactor ``xarray.core.duck_array_ops`` to no longer special-case dispatching to dask versions of functions when acting on dask arrays, instead relying numpy and dask's adherence to NEP-18 to dispatch automatically. (:pull:`5571`) By `Tom Nicholas `_. - Add an ASV benchmark CI and improve performance of the benchmarks (:pull:`5796`) By `Jimmy Westling `_. - Use ``importlib`` to replace functionality of ``pkg_resources`` such as version setting and loading of resources. (:pull:`5845`). By `Martin K. Scherer `_. .. _whats-new.0.19.0: v0.19.0 (23 July 2021) ---------------------- This release brings improvements to plotting of categorical data, the ability to specify how attributes are combined in xarray operations, a new high-level :py:func:`unify_chunks` function, as well as various deprecations, bug fixes, and minor improvements. Many thanks to the 29 contributors to this release!: Andrew Williams, Augustus, Aureliana Barghini, Benoit Bovy, crusaderky, Deepak Cherian, ellesmith88, Elliott Sales de Andrade, Giacomo Caria, github-actions[bot], Illviljan, Joeperdefloep, joooeey, Julia Kent, Julius Busecke, keewis, Mathias Hauser, Matthias GΓΆbel, Mattia Almansi, Maximilian Roos, Peter Andreas Entschev, Ray Bell, Sander, Santiago Soler, Sebastian, Spencer Clark, Stephan Hoyer, Thomas Hirtz, Thomas Nicholas. New Features ~~~~~~~~~~~~ - Allow passing argument ``missing_dims`` to :py:meth:`Variable.transpose` and :py:meth:`Dataset.transpose` (:issue:`5550`, :pull:`5586`) By `Giacomo Caria `_. - Allow passing a dictionary as coords to a :py:class:`DataArray` (:issue:`5527`, reverts :pull:`1539`, which had deprecated this due to python's inconsistent ordering in earlier versions). By `Sander van Rijn `_. - Added :py:meth:`Dataset.coarsen.construct`, :py:meth:`DataArray.coarsen.construct` (:issue:`5454`, :pull:`5475`). By `Deepak Cherian `_. - Xarray now uses consolidated metadata by default when writing and reading Zarr stores (:issue:`5251`). By `Stephan Hoyer `_. - New top-level function :py:func:`unify_chunks`. By `Mattia Almansi `_. - Allow assigning values to a subset of a dataset using positional or label-based indexing (:issue:`3015`, :pull:`5362`). By `Matthias GΓΆbel `_. - Attempting to reduce a weighted object over missing dimensions now raises an error (:pull:`5362`). By `Mattia Almansi `_. - Add ``.sum`` to :py:meth:`~xarray.DataArray.rolling_exp` and :py:meth:`~xarray.Dataset.rolling_exp` for exponentially weighted rolling sums. These require numbagg 0.2.1; (:pull:`5178`). By `Maximilian Roos `_. - :py:func:`xarray.cov` and :py:func:`xarray.corr` now lazily check for missing values if inputs are dask arrays (:issue:`4804`, :pull:`5284`). By `Andrew Williams `_. - Attempting to ``concat`` list of elements that are not all ``Dataset`` or all ``DataArray`` now raises an error (:issue:`5051`, :pull:`5425`). By `Thomas Hirtz `_. - allow passing a function to ``combine_attrs`` (:pull:`4896`). By `Justus Magin `_. - Allow plotting categorical data (:pull:`5464`). By `Jimmy Westling `_. - Allow removal of the coordinate attribute ``coordinates`` on variables by setting ``.attrs['coordinates']= None`` (:issue:`5510`). By `Elle Smith `_. - Added :py:meth:`DataArray.to_numpy`, :py:meth:`DataArray.as_numpy`, and :py:meth:`Dataset.as_numpy`. (:pull:`5568`). By `Tom Nicholas `_. 
- Units in plot labels are now automatically inferred from wrapped :py:meth:`pint.Quantity` arrays. (:pull:`5561`). By `Tom Nicholas `_. Breaking changes ~~~~~~~~~~~~~~~~ - The default ``mode`` for :py:meth:`Dataset.to_zarr` when ``region`` is set has changed to the new ``mode="r+"``, which only allows for overriding pre-existing array values. This is a safer default than the prior ``mode="a"``, and allows for higher performance writes (:pull:`5252`). By `Stephan Hoyer `_. - The main parameter to :py:func:`combine_by_coords` is renamed to ``data_objects`` instead of ``datasets`` so anyone calling this method using a named parameter will need to update the name accordingly (:issue:`3248`, :pull:`4696`). By `Augustus Ijams `_. Deprecations ~~~~~~~~~~~~ - Removed the deprecated ``dim`` kwarg to :py:func:`DataArray.integrate` (:pull:`5630`) - Removed the deprecated ``keep_attrs`` kwarg to :py:func:`DataArray.rolling` (:pull:`5630`) - Removed the deprecated ``keep_attrs`` kwarg to :py:func:`DataArray.coarsen` (:pull:`5630`) - Completed deprecation of passing an ``xarray.DataArray`` to :py:func:`Variable` - will now raise a ``TypeError`` (:pull:`5630`) Bug fixes ~~~~~~~~~ - Fix a minor incompatibility between partial datetime string indexing with a :py:class:`CFTimeIndex` and upcoming pandas version 1.3.0 (:issue:`5356`, :pull:`5359`). By `Spencer Clark `_. - Fix 1-level multi-index incorrectly converted to single index (:issue:`5384`, :pull:`5385`). By `Benoit Bovy `_. - Don't cast a duck array in a coordinate to :py:class:`numpy.ndarray` in :py:meth:`DataArray.differentiate` (:pull:`5408`) By `Justus Magin `_. - Fix the ``repr`` of :py:class:`Variable` objects with ``display_expand_data=True`` (:pull:`5406`) By `Justus Magin `_. - Plotting a pcolormesh with ``xscale="log"`` and/or ``yscale="log"`` works as expected after improving the way the interval breaks are generated (:issue:`5333`). By `Santiago Soler `_ - :py:func:`combine_by_coords` can now handle combining a list of unnamed ``DataArray`` as input (:issue:`3248`, :pull:`4696`). By `Augustus Ijams `_. Internal Changes ~~~~~~~~~~~~~~~~ - Run CI on the first & last python versions supported only; currently 3.7 & 3.9. (:pull:`5433`) By `Maximilian Roos `_. - Publish test results & timings on each PR. (:pull:`5537`) By `Maximilian Roos `_. - Explicit indexes refactor: add a ``xarray.Index.query()`` method in which one may eventually provide a custom implementation of label-based data selection (not ready yet for public use). Also refactor the internal, pandas-specific implementation into ``PandasIndex.query()`` and ``PandasMultiIndex.query()`` (:pull:`5322`). By `Benoit Bovy `_. .. _whats-new.0.18.2: v0.18.2 (19 May 2021) --------------------- This release reverts a regression in xarray's unstacking of dask-backed arrays. .. _whats-new.0.18.1: v0.18.1 (18 May 2021) --------------------- This release is intended as a small patch release to be compatible with the new 2021.5.0 ``dask.distributed`` release. It also includes a new ``drop_duplicates`` method, some documentation improvements, the beginnings of our internal Index refactoring, and some bug fixes. Thank you to all 16 contributors! Anderson Banihirwe, Andrew, Benoit Bovy, Brewster Malevich, Giacomo Caria, Illviljan, James Bourbeau, Keewis, Maximilian Roos, Ravin Kumar, Stephan Hoyer, Thomas Nicholas, Tom Nicholas, Zachary Moon. New Features ~~~~~~~~~~~~ - Implement :py:meth:`DataArray.drop_duplicates` to remove duplicate dimension values (:pull:`5239`). By `Andrew Huang `_. 
- Allow passing ``combine_attrs`` strategy names to the ``keep_attrs`` parameter of :py:func:`apply_ufunc` (:pull:`5041`) By `Justus Magin `_. - :py:meth:`Dataset.interp` now allows interpolation with non-numerical datatypes, such as booleans, instead of dropping them. (:issue:`4761` :pull:`5008`). By `Jimmy Westling `_. - Raise more informative error when decoding time variables with invalid reference dates. (:issue:`5199`, :pull:`5288`). By `Giacomo Caria `_. Bug fixes ~~~~~~~~~ - Opening netCDF files from a path that doesn't end in ``.nc`` without supplying an explicit ``engine`` works again (:issue:`5295`), fixing a bug introduced in 0.18.0. By `Stephan Hoyer `_ Documentation ~~~~~~~~~~~~~ - Clean up and enhance docstrings for the :py:class:`DataArray.plot` and ``Dataset.plot.*`` families of methods (:pull:`5285`). By `Zach Moon `_. - Explanation of deprecation cycles and how to implement them added to contributors guide. (:pull:`5289`) By `Tom Nicholas `_. Internal Changes ~~~~~~~~~~~~~~~~ - Explicit indexes refactor: add an ``xarray.Index`` base class and ``Dataset.xindexes`` / ``DataArray.xindexes`` properties. Also rename ``PandasIndexAdapter`` to ``PandasIndex``, which now inherits from ``xarray.Index`` (:pull:`5102`). By `Benoit Bovy `_. - Replace ``SortedKeysDict`` with python's ``dict``, given dicts are now ordered. By `Maximilian Roos `_. - Updated the release guide for developers. Now accounts for actions that are automated via github actions. (:pull:`5274`). By `Tom Nicholas `_. .. _whats-new.0.18.0: v0.18.0 (6 May 2021) -------------------- This release brings a few important performance improvements, a wide range of usability upgrades, lots of bug fixes, and some new features. These include a plugin API to add backend engines, a new theme for the documentation, curve fitting methods, and several new plotting functions. Many thanks to the 38 contributors to this release: Aaron Spring, Alessandro Amici, Alex Marandon, Alistair Miles, Ana Paula Krelling, Anderson Banihirwe, Aureliana Barghini, Baudouin Raoult, Benoit Bovy, Blair Bonnett, David TrΓ©mouilles, Deepak Cherian, Gabriel Medeiros AbrahΓ£o, Giacomo Caria, Hauke Schulz, Illviljan, Mathias Hauser, Matthias Bussonnier, Mattia Almansi, Maximilian Roos, Ray Bell, Richard Kleijn, Ryan Abernathey, Sam Levang, Spencer Clark, Spencer Jones, Tammas Loughran, Tobias KΓΆlling, Todd, Tom Nicholas, Tom White, Victor NegΓrneac, Xianxiang Li, Zeb Nicholls, crusaderky, dschwoerer, johnomotani, keewis New Features ~~~~~~~~~~~~ - apply ``combine_attrs`` on data variables and coordinate variables when concatenating and merging datasets and dataarrays (:pull:`4902`). By `Justus Magin `_. - Add :py:meth:`Dataset.to_pandas` (:pull:`5247`) By `Giacomo Caria `_. - Add :py:meth:`DataArray.plot.surface` which wraps matplotlib's ``plot_surface`` to make surface plots (:issue:`2235` :issue:`5084` :pull:`5101`). By `John Omotani `_. - Allow passing multiple arrays to :py:meth:`Dataset.__setitem__` (:pull:`5216`). By `Giacomo Caria `_. - Add 'cumulative' option to :py:meth:`Dataset.integrate` and :py:meth:`DataArray.integrate` so that result is a cumulative integral, like :py:func:`scipy.integrate.cumulative_trapezoidal` (:pull:`5153`). By `John Omotani `_. - Add ``safe_chunks`` option to :py:meth:`Dataset.to_zarr` which allows overriding checks made to ensure Dask and Zarr chunk compatibility (:issue:`5056`). 
By `Ryan Abernathey `_ - Add :py:meth:`Dataset.query` and :py:meth:`DataArray.query` which enable indexing of datasets and data arrays by evaluating query expressions against the values of the data variables (:pull:`4984`). By `Alistair Miles `_. - Allow passing ``combine_attrs`` to :py:meth:`Dataset.merge` (:pull:`4895`). By `Justus Magin `_. - Support for `dask.graph_manipulation `_ (requires dask >=2021.3) By `Guido Imperiale `_ - Add :py:meth:`Dataset.plot.streamplot` for streamplot plots with :py:class:`Dataset` variables (:pull:`5003`). By `John Omotani `_. - Many of the arguments for the :py:attr:`DataArray.str` methods now support providing an array-like input. In this case, the array provided to the arguments is broadcast against the original array and applied elementwise. - :py:attr:`DataArray.str` now supports ``+``, ``*``, and ``%`` operators. These behave the same as they do for :py:class:`str`, except that they follow array broadcasting rules. - A large number of new :py:attr:`DataArray.str` methods were implemented, :py:meth:`DataArray.str.casefold`, :py:meth:`DataArray.str.cat`, :py:meth:`DataArray.str.extract`, :py:meth:`DataArray.str.extractall`, :py:meth:`DataArray.str.findall`, :py:meth:`DataArray.str.format`, :py:meth:`DataArray.str.get_dummies`, :py:meth:`DataArray.str.islower`, :py:meth:`DataArray.str.join`, :py:meth:`DataArray.str.normalize`, :py:meth:`DataArray.str.partition`, :py:meth:`DataArray.str.rpartition`, :py:meth:`DataArray.str.rsplit`, and :py:meth:`DataArray.str.split`. A number of these methods allow for splitting or joining the strings in an array. (:issue:`4622`) By `Todd Jennings `_ - Thanks to the new pluggable backend infrastructure external packages may now use the ``xarray.backends`` entry point to register additional engines to be used in :py:func:`open_dataset`, see the documentation in :ref:`add_a_backend` (:issue:`4309`, :issue:`4803`, :pull:`4989`, :pull:`4810` and many others). The backend refactor has been sponsored with the "Essential Open Source Software for Science" grant from the `Chan Zuckerberg Initiative `_ and developed by `B-Open `_. By `Aureliana Barghini `_ and `Alessandro Amici `_. - :py:attr:`~core.accessor_dt.DatetimeAccessor.date` added (:issue:`4983`, :pull:`4994`). By `Hauke Schulz `_. - Implement ``__getitem__`` for both :py:class:`~core.groupby.DatasetGroupBy` and :py:class:`~core.groupby.DataArrayGroupBy`, inspired by pandas' :py:meth:`~pandas.core.groupby.GroupBy.get_group`. By `Deepak Cherian `_. - Switch the tutorial functions to use `pooch `_ (which is now a optional dependency) and add :py:func:`tutorial.open_rasterio` as a way to open example rasterio files (:issue:`3986`, :pull:`4102`, :pull:`5074`). By `Justus Magin `_. - Add typing information to unary and binary arithmetic operators operating on :py:class:`Dataset`, :py:class:`DataArray`, :py:class:`Variable`, :py:class:`~core.groupby.DatasetGroupBy` or :py:class:`~core.groupby.DataArrayGroupBy` (:pull:`4904`). By `Richard Kleijn `_. - Add a ``combine_attrs`` parameter to :py:func:`open_mfdataset` (:pull:`4971`). By `Justus Magin `_. - Enable passing arrays with a subset of dimensions to :py:meth:`DataArray.clip` & :py:meth:`Dataset.clip`; these methods now use :py:func:`xarray.apply_ufunc`; (:pull:`5184`). By `Maximilian Roos `_. - Disable the ``cfgrib`` backend if the ``eccodes`` library is not installed (:pull:`5083`). By `Baudouin Raoult `_. - Added :py:meth:`DataArray.curvefit` and :py:meth:`Dataset.curvefit` for general curve fitting applications. 
(:issue:`4300`, :pull:`4849`) By `Sam Levang `_. - Add options to control expand/collapse of sections in display of Dataset and DataArray. The function :py:func:`set_options` now takes keyword arguments ``display_expand_attrs``, ``display_expand_coords``, ``display_expand_data``, ``display_expand_data_vars``, all of which can be one of ``True`` to always expand, ``False`` to always collapse, or ``default`` to expand unless over a pre-defined limit (:pull:`5126`). By `Tom White `_. - Significant speedups in :py:meth:`Dataset.interp` and :py:meth:`DataArray.interp`. (:issue:`4739`, :pull:`4740`). By `Deepak Cherian `_. - Prevent passing ``concat_dim`` to :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified, which should never have been possible (as :py:func:`xarray.combine_by_coords` has no ``concat_dim`` argument to pass to). Also removes unneeded internal reordering of datasets in :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified. Fixes (:issue:`5230`). By `Tom Nicholas `_. - Implement ``__setitem__`` for ``xarray.core.indexing.DaskIndexingAdapter`` if dask version supports item assignment. (:issue:`5171`, :pull:`5174`) By `Tammas Loughran `_. Breaking changes ~~~~~~~~~~~~~~~~ - The minimum versions of some dependencies were changed: ============ ====== ==== Package Old New ============ ====== ==== boto3 1.12 1.13 cftime 1.0 1.1 dask 2.11 2.15 distributed 2.11 2.15 matplotlib 3.1 3.2 numba 0.48 0.49 ============ ====== ==== - :py:func:`open_dataset` and :py:func:`open_dataarray` now accept only the first argument as positional, all others need to be passed are keyword arguments. This is part of the refactor to support external backends (:issue:`4309`, :pull:`4989`). By `Alessandro Amici `_. - Functions that are identities for 0d data return the unchanged data if axis is empty. This ensures that Datasets where some variables do not have the averaged dimensions are not accidentally changed (:issue:`4885`, :pull:`5207`). By `David SchwΓΆrer `_. - :py:attr:`DataArray.coarsen` and :py:attr:`Dataset.coarsen` no longer support passing ``keep_attrs`` via its constructor. Pass ``keep_attrs`` via the applied function, i.e. use ``ds.coarsen(...).mean(keep_attrs=False)`` instead of ``ds.coarsen(..., keep_attrs=False).mean()``. Further, coarsen now keeps attributes per default (:pull:`5227`). By `Mathias Hauser `_. - switch the default of the :py:func:`merge` ``combine_attrs`` parameter to ``"override"``. This will keep the current behavior for merging the ``attrs`` of variables but stop dropping the ``attrs`` of the main objects (:pull:`4902`). By `Justus Magin `_. Deprecations ~~~~~~~~~~~~ - Warn when passing ``concat_dim`` to :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified, which should never have been possible (as :py:func:`xarray.combine_by_coords` has no ``concat_dim`` argument to pass to). Also removes unneeded internal reordering of datasets in :py:func:`xarray.open_mfdataset` when ``combine='by_coords'`` is specified. Fixes (:issue:`5230`), via (:pull:`5231`, :pull:`5255`). By `Tom Nicholas `_. - The ``lock`` keyword argument to :py:func:`open_dataset` and :py:func:`open_dataarray` is now a backend specific option. It will give a warning if passed to a backend that doesn't support it instead of being silently ignored. From the next version it will raise an error. This is part of the refactor to support external backends (:issue:`5073`). By `Tom Nicholas `_ and `Alessandro Amici `_. 
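As a quick illustration of the repr-display options added under "New Features" above, a minimal sketch (the option
values chosen here are arbitrary examples):

.. code-block:: python

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(1000), dims="x")

    # Collapse the data section of the repr; the setting reverts when the
    # context manager exits.
    with xr.set_options(display_expand_data=False):
        print(da)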
Bug fixes ~~~~~~~~~ - Properly support :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill` along chunked dimensions. (:issue:`2699`). By `Deepak Cherian `_. - Fix 2d plot failure for certain combinations of dimensions when ``x`` is 1d and ``y`` is 2d (:issue:`5097`, :pull:`5099`). By `John Omotani `_. - Ensure standard calendar times encoded with large values (i.e. greater than approximately 292 years), can be decoded correctly without silently overflowing (:pull:`5050`). This was a regression in xarray 0.17.0. By `Zeb Nicholls `_. - Added support for ``numpy.bool_`` attributes in roundtrips using ``h5netcdf`` engine with ``invalid_netcdf=True`` [which casts ``bool`` s to ``numpy.bool_``] (:issue:`4981`, :pull:`4986`). By `Victor NegΓrneac `_. - Don't allow passing ``axis`` to :py:meth:`Dataset.reduce` methods (:issue:`3510`, :pull:`4940`). By `Justus Magin `_. - Decode values as signed if attribute ``_Unsigned = "false"`` (:issue:`4954`) By `Tobias KΓΆlling `_. - Keep coords attributes when interpolating when the indexer is not a Variable. (:issue:`4239`, :issue:`4839` :pull:`5031`) By `Jimmy Westling `_. - Ensure standard calendar dates encoded with a calendar attribute with some or all uppercase letters can be decoded or encoded to or from ``np.datetime64[ns]`` dates with or without ``cftime`` installed (:issue:`5093`, :pull:`5180`). By `Spencer Clark `_. - Warn on passing ``keep_attrs`` to ``resample`` and ``rolling_exp`` as they are ignored, pass ``keep_attrs`` to the applied function instead (:pull:`5265`). By `Mathias Hauser `_. Documentation ~~~~~~~~~~~~~ - New section on :ref:`add_a_backend` in the "Internals" chapter aimed to backend developers (:issue:`4803`, :pull:`4810`). By `Aureliana Barghini `_. - Add :py:meth:`Dataset.polyfit` and :py:meth:`DataArray.polyfit` under "See also" in the docstrings of :py:meth:`Dataset.polyfit` and :py:meth:`DataArray.polyfit` (:issue:`5016`, :pull:`5020`). By `Aaron Spring `_. - New sphinx theme & rearrangement of the docs (:pull:`4835`). By `Anderson Banihirwe `_. Internal Changes ~~~~~~~~~~~~~~~~ - Enable displaying mypy error codes and ignore only specific error codes using ``# type: ignore[error-code]`` (:pull:`5096`). By `Mathias Hauser `_. - Replace uses of ``raises_regex`` with the more standard ``pytest.raises(Exception, match="foo")``; (:pull:`5188`), (:pull:`5191`). By `Maximilian Roos `_. .. _whats-new.0.17.0: v0.17.0 (24 Feb 2021) --------------------- This release brings a few important performance improvements, a wide range of usability upgrades, lots of bug fixes, and some new features. These include better ``cftime`` support, a new quiver plot, better ``unstack`` performance, more efficient memory use in rolling operations, and some python packaging improvements. We also have a few documentation improvements (and more planned!). 
Many thanks to the 36 contributors to this release: Alessandro Amici, Anderson Banihirwe, Aureliana Barghini, Ayrton Bourn, Benjamin Bean, Blair Bonnett, Chun Ho Chow, DWesl, Daniel Mesejo-LeΓ³n, Deepak Cherian, Eric Keenan, Illviljan, Jens Hedegaard Nielsen, Jody Klymak, Julien Seguinot, Julius Busecke, Kai MΓΌhlbauer, Leif Denby, Martin Durant, Mathias Hauser, Maximilian Roos, Michael Mann, Ray Bell, RichardScottOZ, Spencer Clark, Tim Gates, Tom Nicholas, Yunus Sevinchan, alexamici, aurghs, crusaderky, dcherian, ghislainp, keewis, rhkleijn Breaking changes ~~~~~~~~~~~~~~~~ - xarray no longer supports python 3.6 The minimum version policy was changed to also apply to projects with irregular releases. As a result, the minimum versions of some dependencies have changed: ============ ====== ==== Package Old New ============ ====== ==== Python 3.6 3.7 setuptools 38.4 40.4 numpy 1.15 1.17 pandas 0.25 1.0 dask 2.9 2.11 distributed 2.9 2.11 bottleneck 1.2 1.3 h5netcdf 0.7 0.8 iris 2.2 2.4 netcdf4 1.4 1.5 pseudonetcdf 3.0 3.1 rasterio 1.0 1.1 scipy 1.3 1.4 seaborn 0.9 0.10 zarr 2.3 2.4 ============ ====== ==== (:issue:`4688`, :pull:`4720`, :pull:`4907`, :pull:`4942`) - As a result of :pull:`4684` the default units encoding for datetime-like values (``np.datetime64[ns]`` or ``cftime.datetime``) will now always be set such that ``int64`` values can be used. In the past, no units finer than "seconds" were chosen, which would sometimes mean that ``float64`` values were required, which would lead to inaccurate I/O round-trips. - Variables referred to in attributes like ``bounds`` and ``grid_mapping`` can be set as coordinate variables. These attributes are moved to :py:attr:`DataArray.encoding` from :py:attr:`DataArray.attrs`. This behaviour is controlled by the ``decode_coords`` kwarg to :py:func:`open_dataset` and :py:func:`open_mfdataset`. The full list of decoded attributes is in :ref:`weather-climate` (:pull:`2844`, :issue:`3689`) - As a result of :pull:`4911` the output from calling :py:meth:`DataArray.sum` or :py:meth:`DataArray.prod` on an integer array with ``skipna=True`` and a non-None value for ``min_count`` will now be a float array rather than an integer array. Deprecations ~~~~~~~~~~~~ - ``dim`` argument to :py:meth:`DataArray.integrate` is being deprecated in favour of a ``coord`` argument, for consistency with :py:meth:`Dataset.integrate`. For now using ``dim`` issues a ``FutureWarning``. It will be removed in version 0.19.0 (:pull:`3993`). By `Tom Nicholas `_. - Deprecated ``autoclose`` kwargs from :py:func:`open_dataset` are removed (:pull:`4725`). By `Aureliana Barghini `_. - the return value of :py:meth:`Dataset.update` is being deprecated to make it work more like :py:meth:`dict.update`. It will be removed in version 0.19.0 (:pull:`4932`). By `Justus Magin `_. New Features ~~~~~~~~~~~~ - :py:meth:`~xarray.cftime_range` and :py:meth:`DataArray.resample` now support millisecond (``"L"`` or ``"ms"``) and microsecond (``"U"`` or ``"us"``) frequencies for ``cftime.datetime`` coordinates (:issue:`4097`, :pull:`4758`). By `Spencer Clark `_. - Significantly higher ``unstack`` performance on numpy-backed arrays which contain missing values; 8x faster than previous versions in our benchmark, and now 2x faster than pandas (:pull:`4746`). By `Maximilian Roos `_. - Add :py:meth:`Dataset.plot.quiver` for quiver plots with :py:class:`Dataset` variables. By `Deepak Cherian `_. - Add ``"drop_conflicts"`` to the strategies supported by the ``combine_attrs`` kwarg (:issue:`4749`, :pull:`4827`). 
By `Justus Magin `_. - Allow installing from git archives (:pull:`4897`). By `Justus Magin `_. - :py:class:`~computation.rolling.DataArrayCoarsen` and :py:class:`~computation.rolling.DatasetCoarsen` now implement a ``reduce`` method, enabling coarsening operations with custom reduction functions (:issue:`3741`, :pull:`4939`). By `Spencer Clark `_. - Most rolling operations use significantly less memory. (:issue:`4325`). By `Deepak Cherian `_. - Add :py:meth:`Dataset.drop_isel` and :py:meth:`DataArray.drop_isel` (:issue:`4658`, :pull:`4819`). By `Daniel Mesejo `_. - Xarray now leverages updates as of cftime version 1.4.1, which enable exact I/O roundtripping of ``cftime.datetime`` objects (:pull:`4758`). By `Spencer Clark `_. - :py:func:`open_dataset` and :py:func:`open_mfdataset` now accept ``fsspec`` URLs (including globs for the latter) for ``engine="zarr"``, and so allow reading from many remote and other file systems (:pull:`4461`) By `Martin Durant `_ - :py:meth:`DataArray.swap_dims` & :py:meth:`Dataset.swap_dims` now accept dims in the form of kwargs as well as a dict, like most similar methods. By `Maximilian Roos `_. Bug fixes ~~~~~~~~~ - Use specific type checks in ``xarray.core.variable.as_compatible_data`` instead of blanket access to ``values`` attribute (:issue:`2097`) By `Yunus Sevinchan `_. - :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` do not trigger computations anymore if :py:meth:`Dataset.weighted` or :py:meth:`DataArray.weighted` are applied (:issue:`4625`, :pull:`4668`). By `Julius Busecke `_. - :py:func:`merge` with ``combine_attrs='override'`` makes a copy of the attrs (:issue:`4627`). - By default, when possible, xarray will now always use values of type ``int64`` when encoding and decoding ``numpy.datetime64[ns]`` datetimes. This ensures that maximum precision and accuracy are maintained in the round-tripping process (:issue:`4045`, :pull:`4684`). It also enables encoding and decoding standard calendar dates with time units of nanoseconds (:pull:`4400`). By `Spencer Clark `_ and `Mark Harfouche `_. - :py:meth:`DataArray.astype`, :py:meth:`Dataset.astype` and :py:meth:`Variable.astype` support the ``order`` and ``subok`` parameters again. This fixes a regression introduced in version 0.16.1 (:issue:`4644`, :pull:`4683`). By `Richard Kleijn `_ . - Remove dictionary unpacking when using ``.loc`` to avoid collision with ``.sel`` parameters (:pull:`4695`). By `Anderson Banihirwe `_. - Fix the legend created by :py:meth:`Dataset.plot.scatter` (:issue:`4641`, :pull:`4723`). By `Justus Magin `_. - Fix a crash in orthogonal indexing on geographic coordinates with ``engine='cfgrib'`` (:issue:`4733` :pull:`4737`). By `Alessandro Amici `_. - Coordinates with dtype ``str`` or ``bytes`` now retain their dtype on many operations, e.g. ``reindex``, ``align``, ``concat``, ``assign``, previously they were cast to an object dtype (:issue:`2658` and :issue:`4543`). By `Mathias Hauser `_. - Limit number of data rows when printing large datasets. (:issue:`4736`, :pull:`4750`). By `Jimmy Westling `_. - Add ``missing_dims`` parameter to transpose (:issue:`4647`, :pull:`4767`). By `Daniel Mesejo `_. - Resolve intervals before appending other metadata to labels when plotting (:issue:`4322`, :pull:`4794`). By `Justus Magin `_. - Fix regression when decoding a variable with a ``scale_factor`` and ``add_offset`` given as a list of length one (:issue:`4631`). By `Mathias Hauser `_. - Expand user directory paths (e.g. 
``~/``) in :py:func:`open_mfdataset` and :py:meth:`Dataset.to_zarr` (:issue:`4783`, :pull:`4795`). By `Julien Seguinot `_. - Raise DeprecationWarning when trying to typecast a tuple containing a :py:class:`DataArray`. User now prompted to first call ``.data`` on it (:issue:`4483`). By `Chun Ho Chow `_. - Ensure that :py:meth:`Dataset.interp` raises ``ValueError`` when interpolating outside coordinate range and ``bounds_error=True`` (:issue:`4854`, :pull:`4855`). By `Leif Denby `_. - Fix time encoding bug associated with using cftime versions greater than 1.4.0 with xarray (:issue:`4870`, :pull:`4871`). By `Spencer Clark `_. - Stop :py:meth:`DataArray.sum` and :py:meth:`DataArray.prod` computing lazy arrays when called with a ``min_count`` parameter (:issue:`4898`, :pull:`4911`). By `Blair Bonnett `_. - Fix bug preventing the ``min_count`` parameter to :py:meth:`DataArray.sum` and :py:meth:`DataArray.prod` working correctly when calculating over all axes of a float64 array (:issue:`4898`, :pull:`4911`). By `Blair Bonnett `_. - Fix decoding of vlen strings using h5py versions greater than 3.0.0 with h5netcdf backend (:issue:`4570`, :pull:`4893`). By `Kai MΓΌhlbauer `_. - Allow converting :py:class:`Dataset` or :py:class:`DataArray` objects with a ``MultiIndex`` and at least one other dimension to a ``pandas`` object (:issue:`3008`, :pull:`4442`). By `ghislainp `_. Documentation ~~~~~~~~~~~~~ - Add information about requirements for accessor classes (:issue:`2788`, :pull:`4657`). By `Justus Magin `_. - Start a list of external I/O integrating with ``xarray`` (:issue:`683`, :pull:`4566`). By `Justus Magin `_. - Add concat examples and improve combining documentation (:issue:`4620`, :pull:`4645`). By `Ray Bell `_ and `Justus Magin `_. - explicitly mention that :py:meth:`Dataset.update` updates inplace (:issue:`2951`, :pull:`4932`). By `Justus Magin `_. - Added docs on vectorized indexing (:pull:`4711`). By `Eric Keenan `_. Internal Changes ~~~~~~~~~~~~~~~~ - Speed up of the continuous integration tests on azure. - Switched to mamba and use matplotlib-base for a faster installation of all dependencies (:pull:`4672`). - Use ``pytest.mark.skip`` instead of ``pytest.mark.xfail`` for some tests that can currently not succeed (:pull:`4685`). - Run the tests in parallel using pytest-xdist (:pull:`4694`). By `Justus Magin `_ and `Mathias Hauser `_. - Use ``pyproject.toml`` instead of the ``setup_requires`` option for ``setuptools`` (:pull:`4897`). By `Justus Magin `_. - Replace all usages of ``assert x.identical(y)`` with ``assert_identical(x, y)`` for clearer error messages (:pull:`4752`). By `Maximilian Roos `_. - Speed up attribute style access (e.g. ``ds.somevar`` instead of ``ds["somevar"]``) and tab completion in IPython (:issue:`4741`, :pull:`4742`). By `Richard Kleijn `_. - Added the ``set_close`` method to ``Dataset`` and ``DataArray`` for backends to specify how to voluntary release all resources. (:pull:`#4809`) By `Alessandro Amici `_. - Update type hints to work with numpy v1.20 (:pull:`4878`). By `Mathias Hauser `_. - Ensure warnings cannot be turned into exceptions in :py:func:`testing.assert_equal` and the other ``assert_*`` functions (:pull:`4864`). By `Mathias Hauser `_. - Performance improvement when constructing DataArrays. Significantly speeds up repr for Datasets with large number of variables. By `Deepak Cherian `_. .. 
_whats-new.0.16.2: v0.16.2 (30 Nov 2020) --------------------- This release brings the ability to write to limited regions of ``zarr`` files, open zarr files with :py:func:`open_dataset` and :py:func:`open_mfdataset`, increased support for propagating ``attrs`` using the ``keep_attrs`` flag, as well as numerous bugfixes and documentation improvements. Many thanks to the 31 contributors who contributed to this release: Aaron Spring, Akio Taniguchi, Aleksandar Jelenak, alexamici, Alexandre Poux, Anderson Banihirwe, Andrew Pauling, Ashwin Vishnu, aurghs, Brian Ward, Caleb, crusaderky, Dan Nowacki, darikg, David Brochart, David Huard, Deepak Cherian, Dion HΓ€fner, Gerardo Rivera, Gerrit Holl, Illviljan, inakleinbottle, Jacob Tomlinson, James A. Bednar, jenssss, Joe Hamman, johnomotani, Joris Van den Bossche, Julia Kent, Julius Busecke, Kai MΓΌhlbauer, keewis, Keisuke Fujii, Kyle Cranmer, Luke Volpatti, Mathias Hauser, Maximilian Roos, MichaΓ«l Defferrard, Michal Baumgartner, Nick R. Papior, Pascal Bourgault, Peter Hausamann, PGijsbers, Ray Bell, Romain Martinez, rpgoldman, Russell Manser, Sahid Velji, Samnan Rahee, Sander, Spencer Clark, Stephan Hoyer, Thomas Zilio, Tobias KΓΆlling, Tom Augspurger, Wei Ji, Yash Saboo, Zeb Nicholls, Deprecations ~~~~~~~~~~~~ - :py:attr:`~core.accessor_dt.DatetimeAccessor.weekofyear` and :py:attr:`~core.accessor_dt.DatetimeAccessor.week` have been deprecated. Use ``DataArray.dt.isocalendar().week`` instead (:pull:`4534`). By `Mathias Hauser `_. `Maximilian Roos `_, and `Spencer Clark `_. - :py:attr:`DataArray.rolling` and :py:attr:`Dataset.rolling` no longer support passing ``keep_attrs`` via its constructor. Pass ``keep_attrs`` via the applied function, i.e. use ``ds.rolling(...).mean(keep_attrs=False)`` instead of ``ds.rolling(..., keep_attrs=False).mean()`` Rolling operations now keep their attributes per default (:pull:`4510`). By `Mathias Hauser `_. New Features ~~~~~~~~~~~~ - :py:func:`open_dataset` and :py:func:`open_mfdataset` now works with ``engine="zarr"`` (:issue:`3668`, :pull:`4003`, :pull:`4187`). By `Miguel Jimenez `_ and `Wei Ji Leong `_. - Unary & binary operations follow the ``keep_attrs`` flag (:issue:`3490`, :issue:`4065`, :issue:`3433`, :issue:`3595`, :pull:`4195`). By `Deepak Cherian `_. - Added :py:meth:`~core.accessor_dt.DatetimeAccessor.isocalendar()` that returns a Dataset with year, week, and weekday calculated according to the ISO 8601 calendar. Requires pandas version 1.1.0 or greater (:pull:`4534`). By `Mathias Hauser `_, `Maximilian Roos `_, and `Spencer Clark `_. - :py:meth:`Dataset.to_zarr` now supports a ``region`` keyword for writing to limited regions of existing Zarr stores (:pull:`4035`). See :ref:`io.zarr.appending` for full details. By `Stephan Hoyer `_. - Added typehints in :py:func:`align` to reflect that the same type received in ``objects`` arg will be returned (:pull:`4522`). By `Michal Baumgartner `_. - :py:meth:`Dataset.weighted` and :py:meth:`DataArray.weighted` are now executing value checks lazily if weights are provided as dask arrays (:issue:`4541`, :pull:`4559`). By `Julius Busecke `_. - Added the ``keep_attrs`` keyword to ``rolling_exp.mean()``; it now keeps attributes per default. By `Mathias Hauser `_ (:pull:`4592`). - Added ``freq`` as property to :py:class:`CFTimeIndex` and into the ``CFTimeIndex.repr``. (:issue:`2416`, :pull:`4597`) By `Aaron Spring `_. Bug fixes ~~~~~~~~~ - Fix bug where reference times without padded years (e.g. 
``since 1-1-1``) would lose their units when being passed by ``encode_cf_datetime`` (:issue:`4422`, :pull:`4506`). Such units are ambiguous about which digit represents the years (is it YMD or DMY?). Now, if such formatting is encountered, it is assumed that the first digit is the years, they are padded appropriately (to e.g. ``since 0001-1-1``) and a warning that this assumption is being made is issued. Previously, without ``cftime``, such times would be silently parsed incorrectly (at least based on the CF conventions) e.g. "since 1-1-1" would be parsed (via ``pandas`` and ``dateutil``) to ``since 2001-1-1``. By `Zeb Nicholls `_. - Fix :py:meth:`DataArray.plot.step`. By `Deepak Cherian `_. - Fix bug where reading a scalar value from a NetCDF file opened with the ``h5netcdf`` backend would raise a ``ValueError`` when ``decode_cf=True`` (:issue:`4471`, :pull:`4485`). By `Gerrit Holl `_. - Fix bug where datetime64 times are silently changed to incorrect values if they are outside the valid date range for ns precision when provided in some other units (:issue:`4427`, :pull:`4454`). By `Andrew Pauling `_ - Fix silently overwriting the ``engine`` key when passing :py:func:`open_dataset` a file object to an incompatible netCDF (:issue:`4457`). Now incompatible combinations of files and engines raise an exception instead. By `Alessandro Amici `_. - The ``min_count`` argument to :py:meth:`DataArray.sum()` and :py:meth:`DataArray.prod()` is now ignored when not applicable, i.e. when ``skipna=False`` or when ``skipna=None`` and the dtype does not have a missing value (:issue:`4352`). By `Mathias Hauser `_. - :py:func:`combine_by_coords` now raises an informative error when passing coordinates with differing calendars (:issue:`4495`). By `Mathias Hauser `_. - :py:attr:`DataArray.rolling` and :py:attr:`Dataset.rolling` now also keep the attributes and names of of (wrapped) ``DataArray`` objects, previously only the global attributes were retained (:issue:`4497`, :pull:`4510`). By `Mathias Hauser `_. - Improve performance where reading small slices from huge dimensions was slower than necessary (:pull:`4560`). By `Dion HΓ€fner `_. - Fix bug where ``dask_gufunc_kwargs`` was silently changed in :py:func:`apply_ufunc` (:pull:`4576`). By `Kai MΓΌhlbauer `_. Documentation ~~~~~~~~~~~~~ - document the API not supported with duck arrays (:pull:`4530`). By `Justus Magin `_. - Mention the possibility to pass functions to :py:meth:`Dataset.where` or :py:meth:`DataArray.where` in the parameter documentation (:issue:`4223`, :pull:`4613`). By `Justus Magin `_. - Update the docstring of :py:class:`DataArray` and :py:class:`Dataset`. (:pull:`4532`); By `Jimmy Westling `_. - Raise a more informative error when :py:meth:`DataArray.to_dataframe` is is called on a scalar, (:issue:`4228`); By `Pieter Gijsbers `_. - Fix grammar and typos in the :ref:`contributing` guide (:pull:`4545`). By `Sahid Velji `_. - Fix grammar and typos in the :doc:`user-guide/io` guide (:pull:`4553`). By `Sahid Velji `_. - Update link to NumPy docstring standard in the :ref:`contributing` guide (:pull:`4558`). By `Sahid Velji `_. - Add docstrings to ``isnull`` and ``notnull``, and fix the displayed signature (:issue:`2760`, :pull:`4618`). By `Justus Magin `_. Internal Changes ~~~~~~~~~~~~~~~~ - Optional dependencies can be installed along with xarray by specifying extras as ``pip install "xarray[extra]"`` where ``extra`` can be one of ``io``, ``accel``, ``parallel``, ``viz`` and ``complete``. 
See docs for updated :ref:`installation instructions `. (:issue:`2888`, :pull:`4480`). By `Ashwin Vishnu `_, `Justus Magin `_ and `Mathias Hauser `_. - Removed stray spaces that stem from black removing new lines (:pull:`4504`). By `Mathias Hauser `_. - Ensure tests are not skipped in the ``py38-all-but-dask`` test environment (:issue:`4509`). By `Mathias Hauser `_. - Ignore select numpy warnings around missing values, where xarray handles the values appropriately, (:pull:`4536`); By `Maximilian Roos `_. - Replace the internal use of ``pd.Index.__or__`` and ``pd.Index.__and__`` with ``pd.Index.union`` and ``pd.Index.intersection`` as they will stop working as set operations in the future (:issue:`4565`). By `Mathias Hauser `_. - Add GitHub action for running nightly tests against upstream dependencies (:pull:`4583`). By `Anderson Banihirwe `_. - Ensure all figures are closed properly in plot tests (:pull:`4600`). By `Yash Saboo `_, `Nirupam K N `_ and `Mathias Hauser `_. .. _whats-new.0.16.1: v0.16.1 (2020-09-20) --------------------- This patch release fixes an incompatibility with a recent pandas change, which was causing an issue indexing with a ``datetime64``. It also includes improvements to ``rolling``, ``to_dataframe``, ``cov`` & ``corr`` methods and bug fixes. Our documentation has a number of improvements, including fixing all doctests and confirming their accuracy on every commit. Many thanks to the 36 contributors who contributed to this release: Aaron Spring, Akio Taniguchi, Aleksandar Jelenak, Alexandre Poux, Caleb, Dan Nowacki, Deepak Cherian, Gerardo Rivera, Jacob Tomlinson, James A. Bednar, Joe Hamman, Julia Kent, Kai MΓΌhlbauer, Keisuke Fujii, Mathias Hauser, Maximilian Roos, Nick R. Papior, Pascal Bourgault, Peter Hausamann, Romain Martinez, Russell Manser, Samnan Rahee, Sander, Spencer Clark, Stephan Hoyer, Thomas Zilio, Tobias KΓΆlling, Tom Augspurger, alexamici, crusaderky, darikg, inakleinbottle, jenssss, johnomotani, keewis, and rpgoldman. Breaking changes ~~~~~~~~~~~~~~~~ - :py:meth:`DataArray.astype` and :py:meth:`Dataset.astype` now preserve attributes. Keep the old behavior by passing ``keep_attrs=False`` (:issue:`2049`, :pull:`4314`). By `Dan Nowacki `_ and `Gabriel Joel Mitchell `_. New Features ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.rolling` and :py:meth:`~xarray.Dataset.rolling` now accept more than 1 dimension. (:pull:`4219`) By `Keisuke Fujii `_. - :py:meth:`~xarray.DataArray.to_dataframe` and :py:meth:`~xarray.Dataset.to_dataframe` now accept a ``dim_order`` parameter allowing to specify the resulting dataframe's dimensions order (:issue:`4331`, :pull:`4333`). By `Thomas Zilio `_. - Support multiple outputs in :py:func:`xarray.apply_ufunc` when using ``dask='parallelized'``. (:issue:`1815`, :pull:`4060`). By `Kai MΓΌhlbauer `_. - ``min_count`` can be supplied to reductions such as ``.sum`` when specifying multiple dimension to reduce over; (:pull:`4356`). By `Maximilian Roos `_. - :py:func:`xarray.cov` and :py:func:`xarray.corr` now handle missing values; (:pull:`4351`). By `Maximilian Roos `_. - Add support for parsing datetime strings formatted following the default string representation of cftime objects, i.e. YYYY-MM-DD hh:mm:ss, in partial datetime string indexing, as well as :py:meth:`~xarray.cftime_range` (:issue:`4337`). By `Spencer Clark `_. - Build ``CFTimeIndex.__repr__`` explicitly as :py:class:`pandas.Index`. 
Add ``calendar`` as a new property for :py:class:`CFTimeIndex` and show ``calendar`` and ``length`` in ``CFTimeIndex.__repr__`` (:issue:`2416`, :pull:`4092`) By `Aaron Spring `_. - Use a wrapped array's ``_repr_inline_`` method to construct the collapsed ``repr`` of :py:class:`DataArray` and :py:class:`Dataset` objects and document the new method in :doc:`internals/index`. (:pull:`4248`). By `Justus Magin `_. - Allow per-variable fill values in most functions. (:pull:`4237`). By `Justus Magin `_. - Expose ``use_cftime`` option in :py:func:`~xarray.open_zarr` (:issue:`2886`, :pull:`3229`) By `Samnan Rahee `_ and `Anderson Banihirwe `_. Bug fixes ~~~~~~~~~ - Fix indexing with datetime64 scalars with pandas 1.1 (:issue:`4283`). By `Stephan Hoyer `_ and `Justus Magin `_. - Variables which are chunked using dask only along some dimensions can be chunked while storing with zarr along previously unchunked dimensions (:pull:`4312`) By `Tobias KΓΆlling `_. - Fixed a bug in backend caused by basic installation of Dask (:issue:`4164`, :pull:`4318`) `Sam Morley `_. - Fixed a few bugs with :py:meth:`Dataset.polyfit` when encountering deficient matrix ranks (:issue:`4190`, :pull:`4193`). By `Pascal Bourgault `_. - Fixed inconsistencies between docstring and functionality for :py:meth:`DataArray.str.get` and :py:meth:`DataArray.str.wrap` (:issue:`4334`). By `Mathias Hauser `_. - Fixed overflow issue causing incorrect results in computing means of :py:class:`cftime.datetime` arrays (:issue:`4341`). By `Spencer Clark `_. - Fixed :py:meth:`Dataset.coarsen`, :py:meth:`DataArray.coarsen` dropping attributes on original object (:issue:`4120`, :pull:`4360`). By `Julia Kent `_. - fix the signature of the plot methods. (:pull:`4359`) By `Justus Magin `_. - Fix :py:func:`xarray.apply_ufunc` with ``vectorize=True`` and ``exclude_dims`` (:issue:`3890`). By `Mathias Hauser `_. - Fix ``KeyError`` when doing linear interpolation to an nd ``DataArray`` that contains NaNs (:pull:`4233`). By `Jens Svensmark `_ - Fix incorrect legend labels for :py:meth:`Dataset.plot.scatter` (:issue:`4126`). By `Peter Hausamann `_. - Fix ``dask.optimize`` on ``DataArray`` producing an invalid Dask task graph (:issue:`3698`) By `Tom Augspurger `_ - Fix ``pip install .`` when no ``.git`` directory exists; namely when the xarray source directory has been rsync'ed by PyCharm Professional for a remote deployment over SSH. By `Guido Imperiale `_ - Preserve dimension and coordinate order during :py:func:`xarray.concat` (:issue:`2811`, :issue:`4072`, :pull:`4419`). By `Kai MΓΌhlbauer `_. - Avoid relying on :py:class:`set` objects for the ordering of the coordinates (:pull:`4409`) By `Justus Magin `_. Documentation ~~~~~~~~~~~~~ - Update the docstring of :py:meth:`DataArray.copy` to remove incorrect mention of 'dataset' (:issue:`3606`) By `Sander van Rijn `_. - Removed skipna argument from :py:meth:`DataArray.count`, :py:meth:`DataArray.any`, :py:meth:`DataArray.all`. (:issue:`755`) By `Sander van Rijn `_ - Update the contributing guide to use merges instead of rebasing and state that we squash-merge. (:pull:`4355`). By `Justus Magin `_. - Make sure the examples from the docstrings actually work (:pull:`4408`). By `Justus Magin `_. - Updated Vectorized Indexing to a clearer example. By `Maximilian Roos `_ Internal Changes ~~~~~~~~~~~~~~~~ - Fixed all doctests and enabled their running in CI. By `Justus Magin `_. 
- Relaxed the :ref:`mindeps_policy` to support: - all versions of setuptools released in the last 42 months (but no older than 38.4) - all versions of dask and dask.distributed released in the last 12 months (but no older than 2.9) - all versions of other packages released in the last 12 months All are up from 6 months (:issue:`4295`) `Guido Imperiale `_. - Use :py:func:`dask.array.apply_gufunc ` instead of :py:func:`dask.array.blockwise` in :py:func:`xarray.apply_ufunc` when using ``dask='parallelized'``. (:pull:`4060`, :pull:`4391`, :pull:`4392`) By `Kai MΓΌhlbauer `_. - Align ``mypy`` versions to ``0.782`` across ``requirements`` and ``.pre-commit-config.yml`` files. (:pull:`4390`) By `Maximilian Roos `_ - Only load resource files when running inside a Jupyter Notebook (:issue:`4294`) By `Guido Imperiale `_ - Silenced most ``numpy`` warnings such as ``Mean of empty slice``. (:pull:`4369`) By `Maximilian Roos `_ - Enable type checking for :py:func:`concat` (:issue:`4238`) By `Mathias Hauser `_. - Updated plot functions for matplotlib version 3.3 and silenced warnings in the plot tests (:pull:`4365`). By `Mathias Hauser `_. - Versions in ``pre-commit.yaml`` are now pinned, to reduce the chances of conflicting versions. (:pull:`4388`) By `Maximilian Roos `_ .. _whats-new.0.16.0: v0.16.0 (2020-07-11) --------------------- This release adds ``xarray.cov`` & ``xarray.corr`` for covariance & correlation respectively; the ``idxmax`` & ``idxmin`` methods, the ``polyfit`` method & ``xarray.polyval`` for fitting polynomials, as well as a number of documentation improvements, other features, and bug fixes. Many thanks to all 44 contributors who contributed to this release: Akio Taniguchi, Andrew Williams, AurΓ©lien Ponte, Benoit Bovy, Dave Cole, David Brochart, Deepak Cherian, Elliott Sales de Andrade, Etienne Combrisson, Hossein Madadi, Huite, Joe Hamman, Kai MΓΌhlbauer, Keisuke Fujii, Maik Riechert, Marek Jacob, Mathias Hauser, Matthieu Ancellin, Maximilian Roos, Noah D Brenowitz, Oriol Abril, Pascal Bourgault, Phillip Butcher, Prajjwal Nijhara, Ray Bell, Ryan Abernathey, Ryan May, Spencer Clark, Spencer Hill, Srijan Saurav, Stephan Hoyer, Taher Chegini, Todd, Tom Nicholas, Yohai Bar Sinai, Yunus Sevinchan, arabidopsis, aurghs, clausmichele, dmey, johnomotani, keewis, raphael dussin, risebell Breaking changes ~~~~~~~~~~~~~~~~ - Minimum supported versions for the following packages have changed: ``dask >=2.9``, ``distributed>=2.9``. By `Deepak Cherian `_ - ``groupby`` operations will restore coord dimension order. Pass ``restore_coord_dims=False`` to revert to previous behavior. - :meth:`DataArray.transpose` will now transpose coordinates by default. Pass ``transpose_coords=False`` to revert to previous behaviour. By `Maximilian Roos `_ - Alternate draw styles for :py:meth:`plot.step` must be passed using the ``drawstyle`` (or ``ds``) keyword argument, instead of the ``linestyle`` (or ``ls``) keyword argument, in line with the `upstream change in Matplotlib `_. (:pull:`3274`) By `Elliott Sales de Andrade `_ - The old ``auto_combine`` function has now been removed in favour of the :py:func:`combine_by_coords` and :py:func:`combine_nested` functions. This also means that the default behaviour of :py:func:`open_mfdataset` has changed to use ``combine='by_coords'`` as the default argument value. (:issue:`2616`, :pull:`3926`) By `Tom Nicholas `_. - The ``DataArray`` and ``Variable`` HTML reprs now expand the data section by default (:issue:`4176`) By `Stephan Hoyer `_. 
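Since ``auto_combine`` is gone (see the breaking changes above), multi-file opens now state the combine strategy
explicitly. A minimal sketch, with a purely illustrative file pattern:

.. code-block:: python

    import xarray as xr

    # Equivalent to the new default behaviour of open_mfdataset:
    ds = xr.open_mfdataset("data/*.nc", combine="by_coords")

    # When the nesting order of the files is known ahead of time, use
    # combine="nested" together with concat_dim instead, e.g.
    # ds = xr.open_mfdataset(files, combine="nested", concat_dim="time")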
New Features ~~~~~~~~~~~~ - :py:meth:`DataArray.argmin` and :py:meth:`DataArray.argmax` now support sequences of 'dim' arguments, and if a sequence is passed return a dict (which can be passed to :py:meth:`DataArray.isel` to get the value of the minimum) of the indices for each dimension of the minimum or maximum of a DataArray. (:pull:`3936`) By `John Omotani `_, thanks to `Keisuke Fujii `_ for work in :pull:`1469`. - Added :py:func:`xarray.cov` and :py:func:`xarray.corr` (:issue:`3784`, :pull:`3550`, :pull:`4089`). By `Andrew Williams `_ and `Robin Beer `_. - Implement :py:meth:`DataArray.idxmax`, :py:meth:`DataArray.idxmin`, :py:meth:`Dataset.idxmax`, :py:meth:`Dataset.idxmin`. (:issue:`60`, :pull:`3871`) By `Todd Jennings `_ - Added :py:meth:`DataArray.polyfit` and :py:func:`xarray.polyval` for fitting polynomials. (:issue:`3349`, :pull:`3733`, :pull:`4099`) By `Pascal Bourgault `_. - Added :py:meth:`xarray.infer_freq` for extending frequency inferring to CFTime indexes and data (:pull:`4033`). By `Pascal Bourgault `_. - ``chunks='auto'`` is now supported in the ``chunks`` argument of :py:meth:`Dataset.chunk`. (:issue:`4055`) By `Andrew Williams `_ - Control over attributes of result in :py:func:`merge`, :py:func:`concat`, :py:func:`combine_by_coords` and :py:func:`combine_nested` using combine_attrs keyword argument. (:issue:`3865`, :pull:`3877`) By `John Omotani `_ - ``missing_dims`` argument to :py:meth:`Dataset.isel`, :py:meth:`DataArray.isel` and :py:meth:`Variable.isel` to allow replacing the exception when a dimension passed to ``isel`` is not present with a warning, or just ignore the dimension. (:issue:`3866`, :pull:`3923`) By `John Omotani `_ - Support dask handling for :py:meth:`DataArray.idxmax`, :py:meth:`DataArray.idxmin`, :py:meth:`Dataset.idxmax`, :py:meth:`Dataset.idxmin`. (:pull:`3922`, :pull:`4135`) By `Kai MΓΌhlbauer `_ and `Pascal Bourgault `_. - More support for unit aware arrays with pint (:pull:`3643`, :pull:`3975`, :pull:`4163`) By `Justus Magin `_. - Support overriding existing variables in ``to_zarr()`` with ``mode='a'`` even without ``append_dim``, as long as dimension sizes do not change. By `Stephan Hoyer `_. - Allow plotting of boolean arrays. (:pull:`3766`) By `Marek Jacob `_ - Enable using MultiIndex levels as coordinates in 1D and 2D plots (:issue:`3927`). By `Mathias Hauser `_. - A ``days_in_month`` accessor for :py:class:`xarray.CFTimeIndex`, analogous to the ``days_in_month`` accessor for a :py:class:`pandas.DatetimeIndex`, which returns the days in the month each datetime in the index. Now days in month weights for both standard and non-standard calendars can be obtained using the :py:class:`~core.accessor_dt.DatetimeAccessor` (:pull:`3935`). This feature requires cftime version 1.1.0 or greater. By `Spencer Clark `_. - For the netCDF3 backend, added dtype coercions for unsigned integer types. (:issue:`4014`, :pull:`4018`) By `Yunus Sevinchan `_ - :py:meth:`map_blocks` now accepts a ``template`` kwarg. This allows use cases where the result of a computation could not be inferred automatically. By `Deepak Cherian `_ - :py:meth:`map_blocks` can now handle dask-backed xarray objects in ``args``. 
(:pull:`3818`) By `Deepak Cherian `_ - Add keyword ``decode_timedelta`` to :py:func:`xarray.open_dataset`, (:py:func:`xarray.open_dataarray`, :py:func:`xarray.open_dataarray`, :py:func:`xarray.decode_cf`) that allows to disable/enable the decoding of timedeltas independently of time decoding (:issue:`1621`) `Aureliana Barghini `_ Enhancements ~~~~~~~~~~~~ - Performance improvement of :py:meth:`DataArray.interp` and :py:func:`Dataset.interp` We performs independent interpolation sequentially rather than interpolating in one large multidimensional space. (:issue:`2223`) By `Keisuke Fujii `_. - :py:meth:`DataArray.interp` now support interpolations over chunked dimensions (:pull:`4155`). By `Alexandre Poux `_. - Major performance improvement for :py:meth:`Dataset.from_dataframe` when the dataframe has a MultiIndex (:pull:`4184`). By `Stephan Hoyer `_. - :py:meth:`DataArray.reset_index` and :py:meth:`Dataset.reset_index` now keep coordinate attributes (:pull:`4103`). By `Oriol Abril `_. - Axes kwargs such as ``facecolor`` can now be passed to :py:meth:`DataArray.plot` in ``subplot_kws``. This works for both single axes plots and FacetGrid plots. By `Raphael Dussin `_. - Array items with long string reprs are now limited to a reasonable width (:pull:`3900`) By `Maximilian Roos `_ - Large arrays whose numpy reprs would have greater than 40 lines are now limited to a reasonable length. (:pull:`3905`) By `Maximilian Roos `_ Bug fixes ~~~~~~~~~ - Fix errors combining attrs in :py:func:`open_mfdataset` (:issue:`4009`, :pull:`4173`) By `John Omotani `_ - If groupby receives a ``DataArray`` with name=None, assign a default name (:issue:`158`) By `Phil Butcher `_. - Support dark mode in VS code (:issue:`4024`) By `Keisuke Fujii `_. - Fix bug when converting multiindexed pandas objects to sparse xarray objects. (:issue:`4019`) By `Deepak Cherian `_. - ``ValueError`` is raised when ``fill_value`` is not a scalar in :py:meth:`full_like`. (:issue:`3977`) By `Huite Bootsma `_. - Fix wrong order in converting a ``pd.Series`` with a MultiIndex to ``DataArray``. (:issue:`3951`, :issue:`4186`) By `Keisuke Fujii `_ and `Stephan Hoyer `_. - Fix renaming of coords when one or more stacked coords is not in sorted order during stack+groupby+apply operations. (:issue:`3287`, :pull:`3906`) By `Spencer Hill `_ - Fix a regression where deleting a coordinate from a copied :py:class:`DataArray` can affect the original :py:class:`DataArray`. (:issue:`3899`, :pull:`3871`) By `Todd Jennings `_ - Fix :py:class:`~xarray.plot.FacetGrid` plots with a single contour. (:issue:`3569`, :pull:`3915`). By `Deepak Cherian `_ - Use divergent colormap if ``levels`` spans 0. (:issue:`3524`) By `Deepak Cherian `_ - Fix :py:class:`~xarray.plot.FacetGrid` when ``vmin == vmax``. (:issue:`3734`) By `Deepak Cherian `_ - Fix plotting when ``levels`` is a scalar and ``norm`` is provided. (:issue:`3735`) By `Deepak Cherian `_ - Fix bug where plotting line plots with 2D coordinates depended on dimension order. (:issue:`3933`) By `Tom Nicholas `_. - Fix ``RasterioDeprecationWarning`` when using a ``vrt`` in ``open_rasterio``. (:issue:`3964`) By `Taher Chegini `_. - Fix ``AttributeError`` on displaying a :py:class:`Variable` in a notebook context. (:issue:`3972`, :pull:`3973`) By `Ian Castleden `_. - Fix bug causing :py:meth:`DataArray.interpolate_na` to always drop attributes, and added ``keep_attrs`` argument. (:issue:`3968`) By `Tom Nicholas `_. - Fix bug in time parsing failing to fall back to cftime. 
This was causing time variables with a time unit of ``'msecs'`` to fail to parse. (:pull:`3998`) By `Ryan May `_. - Fix weighted mean when passing boolean weights (:issue:`4074`). By `Mathias Hauser `_. - Fix html repr in untrusted notebooks: fallback to plain text repr. (:pull:`4053`) By `Benoit Bovy `_. - Fix :py:meth:`DataArray.to_unstacked_dataset` for single-dimension variables. (:issue:`4049`) By `Deepak Cherian `_ - Fix :py:func:`open_rasterio` for ``WarpedVRT`` with specified ``src_crs``. (:pull:`4104`) By `Dave Cole `_. Documentation ~~~~~~~~~~~~~ - update the docstring of :py:meth:`DataArray.assign_coords` : clarify how to add a new coordinate to an existing dimension and illustrative example (:issue:`3952`, :pull:`3958`) By `Etienne Combrisson `_. - update the docstring of :py:meth:`Dataset.diff` and :py:meth:`DataArray.diff` so it does document the ``dim`` parameter as required. (:issue:`1040`, :pull:`3909`) By `Justus Magin `_. - Updated :doc:`Calculating Seasonal Averages from Timeseries of Monthly Means ` example notebook to take advantage of the new ``days_in_month`` accessor for :py:class:`xarray.CFTimeIndex` (:pull:`3935`). By `Spencer Clark `_. - Updated the list of current core developers. (:issue:`3892`) By `Tom Nicholas `_. - Add example for multi-dimensional extrapolation and note different behavior of ``kwargs`` in :py:meth:`Dataset.interp` and :py:meth:`DataArray.interp` for 1-d and n-d interpolation (:pull:`3956`). By `Matthias Riße `_. - Apply ``black`` to all the code in the documentation (:pull:`4012`) By `Justus Magin `_. - Narrative documentation now describes :py:meth:`map_blocks`: :ref:`dask.automatic-parallelization`. By `Deepak Cherian `_. - Document ``.plot``, ``.dt``, ``.str`` accessors the way they are called. (:issue:`3625`, :pull:`3988`) By `Justus Magin `_. - Add documentation for the parameters and return values of :py:meth:`DataArray.sel`. By `Justus Magin `_. Internal Changes ~~~~~~~~~~~~~~~~ - Raise more informative error messages for chunk size conflicts when writing to zarr files. By `Deepak Cherian `_. - Run the ``isort`` pre-commit hook only on python source files and update the ``flake8`` version. (:issue:`3750`, :pull:`3711`) By `Justus Magin `_. - Add `blackdoc `_ to the list of checkers for development. (:pull:`4177`) By `Justus Magin `_. - Add a CI job that runs the tests with every optional dependency except ``dask``. (:issue:`3794`, :pull:`3919`) By `Justus Magin `_. - Use ``async`` / ``await`` for the asynchronous distributed tests. (:issue:`3987`, :pull:`3989`) By `Justus Magin `_. - Various internal code clean-ups (:pull:`4026`, :pull:`4038`). By `Prajjwal Nijhara `_. .. _whats-new.0.15.1: v0.15.1 (23 Mar 2020) --------------------- This release brings many new features such as :py:meth:`Dataset.weighted` methods for weighted array reductions, a new jupyter repr by default, and the start of units integration with pint. There's also the usual batch of usability improvements, documentation additions, and bug fixes. Breaking changes ~~~~~~~~~~~~~~~~ - Raise an error when assigning to the ``.values`` or ``.data`` attribute of dimension coordinates i.e. ``IndexVariable`` objects. This has been broken since v0.12.0. Please use :py:meth:`DataArray.assign_coords` or :py:meth:`Dataset.assign_coords` instead. (:issue:`3470`, :pull:`3862`) By `Deepak Cherian `_ New Features ~~~~~~~~~~~~ - Weighted array reductions are now supported via the new :py:meth:`DataArray.weighted` and :py:meth:`Dataset.weighted` methods. See :ref:`compute.weighted`. 
(:issue:`422`, :pull:`2922`). By `Mathias Hauser `_. - The new jupyter notebook repr (``Dataset._repr_html_`` and ``DataArray._repr_html_``) (introduced in 0.14.1) is now on by default. To disable, use ``xarray.set_options(display_style="text")``. By `Julia Signell `_. - Added support for :py:class:`pandas.DatetimeIndex`-style rounding of ``cftime.datetime`` objects directly via a :py:class:`CFTimeIndex` or via the :py:class:`~core.accessor_dt.DatetimeAccessor`. By `Spencer Clark `_ - Support new h5netcdf backend keyword ``phony_dims`` (available from h5netcdf v0.8.0 for :py:class:`~xarray.backends.H5NetCDFStore`. By `Kai MΓΌhlbauer `_. - Add partial support for unit aware arrays with pint. (:pull:`3706`, :pull:`3611`) By `Justus Magin `_. - :py:meth:`Dataset.groupby` and :py:meth:`DataArray.groupby` now raise a ``TypeError`` on multiple string arguments. Receiving multiple string arguments often means a user is attempting to pass multiple dimensions as separate arguments and should instead pass a single list of dimensions. (:pull:`3802`) By `Maximilian Roos `_ - :py:func:`map_blocks` can now apply functions that add new unindexed dimensions. By `Deepak Cherian `_ - An ellipsis (``...``) is now supported in the ``dims`` argument of :py:meth:`Dataset.stack` and :py:meth:`DataArray.stack`, meaning all unlisted dimensions, similar to its meaning in :py:meth:`DataArray.transpose`. (:pull:`3826`) By `Maximilian Roos `_ - :py:meth:`Dataset.where` and :py:meth:`DataArray.where` accept a lambda as a first argument, which is then called on the input; replicating pandas' behavior. By `Maximilian Roos `_. - ``skipna`` is available in :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile`, :py:meth:`core.groupby.DatasetGroupBy.quantile`, :py:meth:`core.groupby.DataArrayGroupBy.quantile` (:issue:`3843`, :pull:`3844`) By `Aaron Spring `_. - Add a diff summary for ``testing.assert_allclose``. (:issue:`3617`, :pull:`3847`) By `Justus Magin `_. Bug fixes ~~~~~~~~~ - Fix :py:meth:`Dataset.interp` when indexing array shares coordinates with the indexed variable (:issue:`3252`). By `David Huard `_. - Fix recombination of groups in :py:meth:`Dataset.groupby` and :py:meth:`DataArray.groupby` when performing an operation that changes the size of the groups along the grouped dimension. By `Eric Jansen `_. - Fix use of multi-index with categorical values (:issue:`3674`). By `Matthieu Ancellin `_. - Fix alignment with ``join="override"`` when some dimensions are unindexed. (:issue:`3681`). By `Deepak Cherian `_. - Fix :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` producing index with name reflecting the previous dimension name instead of the new one (:issue:`3748`, :pull:`3752`). By `Joseph K Aicher `_. - Use ``dask_array_type`` instead of ``dask_array.Array`` for type checking. (:issue:`3779`, :pull:`3787`) By `Justus Magin `_. - :py:func:`concat` can now handle coordinate variables only present in one of the objects to be concatenated when ``coords="different"``. By `Deepak Cherian `_. - xarray now respects the over, under and bad colors if set on a provided colormap. (:issue:`3590`, :pull:`3601`) By `johnomotani `_. - ``coarsen`` and ``rolling`` now respect ``xr.set_options(keep_attrs=True)`` to preserve attributes. :py:meth:`Dataset.coarsen` accepts a keyword argument ``keep_attrs`` to change this setting. (:issue:`3376`, :pull:`3801`) By `Andrew Thomas `_. - Delete associated indexes when deleting coordinate variables. (:issue:`3746`). By `Deepak Cherian `_. 
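  For example, removing a dimension coordinate now also removes its index (a minimal, illustrative sketch; names are hypothetical)::

      import xarray as xr

      ds = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [10, 20]})
      del ds["x"]  # deleting the coordinate also deletes the associated index
      assert "x" not in ds.indexes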
- Fix :py:meth:`Dataset.to_zarr` when using ``append_dim`` and ``group`` simultaneously. (:issue:`3170`). By `Matthias Meyer `_. - Fix html repr on :py:class:`Dataset` with non-string keys (:pull:`3807`). By `Maximilian Roos `_. Documentation ~~~~~~~~~~~~~ - Fix documentation of :py:class:`DataArray` removing the deprecated mention that when omitted, ``dims`` are inferred from a ``coords``-dict. (:pull:`3821`) By `Sander van Rijn `_. - Improve the :py:func:`where` docstring. By `Maximilian Roos `_ - Update the installation instructions: only explicitly list recommended dependencies (:issue:`3756`). By `Mathias Hauser `_. Internal Changes ~~~~~~~~~~~~~~~~ - Remove the internal ``import_seaborn`` function which handled the deprecation of the ``seaborn.apionly`` entry point (:issue:`3747`). By `Mathias Hauser `_. - Don't test pint integration in combination with datetime objects. (:issue:`3778`, :pull:`3788`) By `Justus Magin `_. - Change test_open_mfdataset_list_attr to only run with dask installed (:issue:`3777`, :pull:`3780`). By `Bruno Pagani `_. - Preserve the ability to index with ``method="nearest"`` with a :py:class:`CFTimeIndex` with pandas versions greater than 1.0.1 (:issue:`3751`). By `Spencer Clark `_. - Greater flexibility and improved test coverage of subtracting various types of objects from a :py:class:`CFTimeIndex`. By `Spencer Clark `_. - Update Azure CI MacOS image, given pending removal. By `Maximilian Roos `_ - Remove xfails for scipy 1.0.1 for tests that append to netCDF files (:pull:`3805`). By `Mathias Hauser `_. - Remove conversion to ``pandas.Panel``, given its removal in pandas in favor of xarray's objects. By `Maximilian Roos `_ .. _whats-new.0.15.0: v0.15.0 (30 Jan 2020) --------------------- This release brings many improvements to xarray's documentation: our examples are now binderized notebooks (`click here `_) and we have new example notebooks from our SciPy 2019 sprint (many thanks to our contributors!). This release also features many API improvements such as a new :py:class:`~core.accessor_dt.TimedeltaAccessor` and support for :py:class:`CFTimeIndex` in :py:meth:`~DataArray.interpolate_na`); as well as many bug fixes. Breaking changes ~~~~~~~~~~~~~~~~ - Bumped minimum tested versions for dependencies: - numpy 1.15 - pandas 0.25 - dask 2.2 - distributed 2.2 - scipy 1.3 - Remove ``compat`` and ``encoding`` kwargs from ``DataArray``, which have been deprecated since 0.12. (:pull:`3650`). Instead, specify the ``encoding`` kwarg when writing to disk or set the :py:attr:`DataArray.encoding` attribute directly. By `Maximilian Roos `_. - :py:func:`xarray.dot`, :py:meth:`DataArray.dot`, and the ``@`` operator now use ``align="inner"`` (except when ``xarray.set_options(arithmetic_join="exact")``; :issue:`3694`) by `Mathias Hauser `_. New Features ~~~~~~~~~~~~ - Implement :py:meth:`DataArray.pad` and :py:meth:`Dataset.pad`. (:issue:`2605`, :pull:`3596`). By `Mark Boer `_. - :py:meth:`DataArray.sel` and :py:meth:`Dataset.sel` now support :py:class:`pandas.CategoricalIndex`. (:issue:`3669`) By `Keisuke Fujii `_. - Support using an existing, opened h5netcdf ``File`` with :py:class:`~xarray.backends.H5NetCDFStore`. This permits creating an :py:class:`~xarray.Dataset` from a h5netcdf ``File`` that has been opened using other means (:issue:`3618`). By `Kai MΓΌhlbauer `_. - Implement ``median`` and ``nanmedian`` for dask arrays. This works by rechunking to a single chunk along all reduction axes. (:issue:`2999`). By `Deepak Cherian `_. 
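  For example, a lazy median on a dask-backed array (an illustrative sketch, assuming dask is installed)::

      import numpy as np
      import xarray as xr

      da = xr.DataArray(np.random.rand(1000, 10), dims=("x", "y")).chunk({"x": 100})
      # internally rechunks to a single chunk along "x" before reducing
      da.median(dim="x").compute()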
- :py:func:`~xarray.concat` now preserves attributes from the first Variable. (:issue:`2575`, :issue:`2060`, :issue:`1614`) By `Deepak Cherian `_. - :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile` and ``GroupBy.quantile`` now work with dask Variables. By `Deepak Cherian `_. - Added the ``count`` reduction method to both :py:class:`~computation.rolling.DatasetCoarsen` and :py:class:`~computation.rolling.DataArrayCoarsen` objects. (:pull:`3500`) By `Deepak Cherian `_ - Add ``meta`` kwarg to :py:func:`~xarray.apply_ufunc`; this is passed on to :py:func:`dask.array.blockwise`. (:pull:`3660`) By `Deepak Cherian `_. - Add ``attrs_file`` option in :py:func:`~xarray.open_mfdataset` to choose the source file for global attributes in a multi-file dataset (:issue:`2382`, :pull:`3498`). By `Julien Seguinot `_. - :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` now allow swapping to dimension names that don't exist yet. (:pull:`3636`) By `Justus Magin `_. - Extend :py:class:`~core.accessor_dt.DatetimeAccessor` properties and support ``.dt`` accessor for timedeltas via :py:class:`~core.accessor_dt.TimedeltaAccessor` (:pull:`3612`) By `Anderson Banihirwe `_. - Improvements to interpolating along time axes (:issue:`3641`, :pull:`3631`). By `David Huard `_. - Support :py:class:`CFTimeIndex` in :py:meth:`DataArray.interpolate_na` - define 1970-01-01 as the default offset for the interpolation index for both :py:class:`pandas.DatetimeIndex` and :py:class:`CFTimeIndex`, - use microseconds in the conversion from timedelta objects to floats to avoid overflow errors. Bug fixes ~~~~~~~~~ - Applying a user-defined function that adds new dimensions using :py:func:`apply_ufunc` and ``vectorize=True`` now works with ``dask > 2.0``. (:issue:`3574`, :pull:`3660`). By `Deepak Cherian `_. - Fix :py:meth:`~xarray.combine_by_coords` to allow for combining incomplete hypercubes of Datasets (:issue:`3648`). By `Ian Bolliger `_. - Fix :py:func:`~xarray.combine_by_coords` when combining cftime coordinates which span long time intervals (:issue:`3535`). By `Spencer Clark `_. - Fix plotting with transposed 2D non-dimensional coordinates. (:issue:`3138`, :pull:`3441`) By `Deepak Cherian `_. - :py:meth:`plot.FacetGrid.set_titles` can now replace existing row titles of a :py:class:`~xarray.plot.FacetGrid` plot. In addition :py:class:`~xarray.plot.FacetGrid` gained two new attributes: :py:attr:`~xarray.plot.FacetGrid.col_labels` and :py:attr:`~xarray.plot.FacetGrid.row_labels` contain :py:class:`matplotlib.text.Text` handles for both column and row labels. These can be used to manually change the labels. By `Deepak Cherian `_. - Fix issue with Dask-backed datasets raising a ``KeyError`` on some computations involving :py:func:`map_blocks` (:pull:`3598`). By `Tom Augspurger `_. - Ensure :py:meth:`Dataset.quantile`, :py:meth:`DataArray.quantile` issue the correct error when ``q`` is out of bounds (:issue:`3634`) by `Mathias Hauser `_. - Fix regression in xarray 0.14.1 that prevented encoding times with certain ``dtype``, ``_FillValue``, and ``missing_value`` encodings (:issue:`3624`). By `Spencer Clark `_ - Raise an error when trying to use :py:meth:`Dataset.rename_dims` to rename to an existing name (:issue:`3438`, :pull:`3645`) By `Justus Magin `_. - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename` now check for conflicts with MultiIndex level names. - :py:meth:`Dataset.merge` no longer fails when passed a :py:class:`DataArray` instead of a :py:class:`Dataset`. By `Tom Nicholas `_. 
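  For example (a minimal sketch; this previously raised an error)::

      import xarray as xr

      ds = xr.Dataset({"a": ("x", [1, 2, 3])})
      da = xr.DataArray([4, 5, 6], dims="x", name="b")
      ds.merge(da)  # "b" is added as a data variable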
- Fix a regression in :py:meth:`Dataset.drop`: allow passing any iterable when dropping variables (:issue:`3552`, :pull:`3693`) By `Justus Magin `_. - Fixed errors emitted by ``mypy --strict`` in modules that import xarray. (:issue:`3695`) by `Guido Imperiale `_. - Allow plotting of binned coordinates on the y axis in :py:meth:`plot.line` and :py:meth:`plot.step` plots (:issue:`3571`, :pull:`3685`) by `Julien Seguinot `_. - setuptools is now marked as a dependency of xarray (:pull:`3628`) by `Richard HΓΆchenberger `_. Documentation ~~~~~~~~~~~~~ - Switch doc examples to use `nbsphinx `_ and replace ``sphinx_gallery`` scripts with Jupyter notebooks. (:pull:`3105`, :pull:`3106`, :pull:`3121`) By `Ryan Abernathey `_. - Added :doc:`example notebook ` demonstrating use of xarray with Regional Ocean Modeling System (ROMS) ocean hydrodynamic model output. (:pull:`3116`) By `Robert Hetland `_. - Added :doc:`example notebook ` demonstrating the visualization of ERA5 GRIB data. (:pull:`3199`) By `Zach Bruick `_ and `Stephan Siemen `_. - Added examples for :py:meth:`DataArray.quantile`, :py:meth:`Dataset.quantile` and ``GroupBy.quantile``. (:pull:`3576`) By `Justus Magin `_. - Add new :doc:`example notebook ` example notebook demonstrating vectorization of a 1D function using :py:func:`apply_ufunc` , dask and numba. By `Deepak Cherian `_. - Added example for :py:func:`~xarray.map_blocks`. (:pull:`3667`) By `Riley X. Brady `_. Internal Changes ~~~~~~~~~~~~~~~~ - Make sure dask names change when rechunking by different chunk sizes. Conversely, make sure they stay the same when rechunking by the same chunk size. (:issue:`3350`) By `Deepak Cherian `_. - 2x to 5x speed boost (on small arrays) for :py:meth:`Dataset.isel`, :py:meth:`DataArray.isel`, and :py:meth:`DataArray.__getitem__` when indexing by int, slice, list of int, scalar ndarray, or 1-dimensional ndarray. (:pull:`3533`) by `Guido Imperiale `_. - Removed internal method ``Dataset._from_vars_and_coord_names``, which was dominated by ``Dataset._construct_direct``. (:pull:`3565`) By `Maximilian Roos `_. - Replaced versioneer with setuptools-scm. Moved contents of setup.py to setup.cfg. Removed pytest-runner from setup.py, as per deprecation notice on the pytest-runner project. (:pull:`3714`) by `Guido Imperiale `_. - Use of isort is now enforced by CI. (:pull:`3721`) by `Guido Imperiale `_ .. _whats-new.0.14.1: v0.14.1 (19 Nov 2019) --------------------- Breaking changes ~~~~~~~~~~~~~~~~ - Broken compatibility with ``cftime < 1.0.3`` . By `Deepak Cherian `_. .. warning:: cftime version 1.0.4 is broken (`cftime/126 `_); please use version 1.0.4.2 instead. - All leftover support for dates from non-standard calendars through ``netcdftime``, the module included in versions of netCDF4 prior to 1.4 that eventually became the `cftime `_ package, has been removed in favor of relying solely on the standalone ``cftime`` package (:pull:`3450`). By `Spencer Clark `_. New Features ~~~~~~~~~~~~ - Added the ``sparse`` option to :py:meth:`~xarray.DataArray.unstack`, :py:meth:`~xarray.Dataset.unstack`, :py:meth:`~xarray.DataArray.reindex`, :py:meth:`~xarray.Dataset.reindex` (:issue:`3518`). By `Keisuke Fujii `_. - Added the ``fill_value`` option to :py:meth:`DataArray.unstack` and :py:meth:`Dataset.unstack` (:issue:`3518`, :pull:`3541`). By `Keisuke Fujii `_. - Added the ``max_gap`` kwarg to :py:meth:`~xarray.DataArray.interpolate_na` and :py:meth:`~xarray.Dataset.interpolate_na`. This controls the maximum size of the data gap that will be filled by interpolation. 
By `Deepak Cherian `_. - Added :py:meth:`Dataset.drop_sel` & :py:meth:`DataArray.drop_sel` for dropping labels. :py:meth:`Dataset.drop_vars` & :py:meth:`DataArray.drop_vars` have been added for dropping variables (including coordinates). The existing :py:meth:`Dataset.drop` & :py:meth:`DataArray.drop` methods remain as a backward compatible option for dropping either labels or variables, but using the more specific methods is encouraged. (:pull:`3475`) By `Maximilian Roos `_ - Added :py:meth:`Dataset.map` & ``GroupBy.map`` & ``Resample.map`` for mapping / applying a function over each item in the collection, reflecting the widely used and least surprising name for this operation. The existing ``apply`` methods remain for backward compatibility, though using the ``map`` methods is encouraged. (:pull:`3459`) By `Maximilian Roos `_ - :py:meth:`Dataset.transpose` and :py:meth:`DataArray.transpose` now support an ellipsis (``...``) to represent all 'other' dimensions. For example, to move one dimension to the front, use ``.transpose('x', ...)``. (:pull:`3421`) By `Maximilian Roos `_ - Changed ``xr.ALL_DIMS`` to equal python's ``Ellipsis`` (``...``), and changed internal usages to use ``...`` directly. As before, you can use this to instruct a ``groupby`` operation to reduce over all dimensions. While we have no plans to remove ``xr.ALL_DIMS``, we suggest using ``...``. (:pull:`3418`) By `Maximilian Roos `_ - :py:func:`xarray.dot`, and :py:meth:`DataArray.dot` now support the ``dims=...`` option to sum over the union of dimensions of all input arrays (:issue:`3423`) by `Mathias Hauser `_. - Added new ``Dataset._repr_html_`` and ``DataArray._repr_html_`` to improve representation of objects in Jupyter. By default this feature is turned off for now. Enable it with ``xarray.set_options(display_style="html")``. (:pull:`3425`) by `Benoit Bovy `_ and `Julia Signell `_. - Implement `dask deterministic hashing `_ for xarray objects. Note that xarray objects with a dask.array backend already used deterministic hashing in previous releases; this change implements it when whole xarray objects are embedded in a dask graph, e.g. when :py:meth:`DataArray.map_blocks` is invoked. (:issue:`3378`, :pull:`3446`, :pull:`3515`) By `Deepak Cherian `_ and `Guido Imperiale `_. - Add the documented-but-missing :py:meth:`~core.groupby.DatasetGroupBy.quantile`. - xarray now respects the ``DataArray.encoding["coordinates"]`` attribute when writing to disk. See :ref:`io.coordinates` for more. (:issue:`3351`, :pull:`3487`) By `Deepak Cherian `_. - Add the documented-but-missing :py:meth:`~core.groupby.DatasetGroupBy.quantile`. (:issue:`3525`, :pull:`3527`). By `Justus Magin `_. Bug fixes ~~~~~~~~~ - Ensure an index of type ``CFTimeIndex`` is not converted to a ``DatetimeIndex`` when calling :py:meth:`Dataset.rename`, :py:meth:`Dataset.rename_dims` and :py:meth:`Dataset.rename_vars`. By `Mathias Hauser `_. (:issue:`3522`). - Fix a bug in :py:meth:`DataArray.set_index` in case that an existing dimension becomes a level variable of MultiIndex. (:pull:`3520`). By `Keisuke Fujii `_. - Harmonize ``_FillValue``, ``missing_value`` during encoding and decoding steps. (:pull:`3502`) By `Anderson Banihirwe `_. - Fix regression introduced in v0.14.0 that would cause a crash if dask is installed but cloudpickle isn't (:issue:`3401`) by `Rhys Doyle `_ - Fix grouping over variables with NaNs. (:issue:`2383`, :pull:`3406`). By `Deepak Cherian `_. 
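  For example, grouping by a variable that contains NaNs now works as expected (an illustrative sketch)::

      import numpy as np
      import xarray as xr

      da = xr.DataArray(
          [1.0, 2.0, 3.0],
          dims="x",
          coords={"label": ("x", [1.0, np.nan, 1.0])},
      )
      da.groupby("label").mean()  # NaN labels are skipped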
- Make alignment and concatenation significantly more efficient by using dask names to compare dask objects prior to comparing values after computation. This change makes it more convenient to carry around large non-dimensional coordinate variables backed by dask arrays. Existing workarounds involving ``reset_coords(drop=True)`` should now be unnecessary in most cases. (:issue:`3068`, :issue:`3311`, :issue:`3454`, :pull:`3453`). By `Deepak Cherian `_. - Add support for cftime>=1.0.4. By `Anderson Banihirwe `_. - Rolling reduction operations no longer compute dask arrays by default. (:issue:`3161`). In addition, the ``allow_lazy`` kwarg to ``reduce`` is deprecated. By `Deepak Cherian `_. - Fix ``GroupBy.reduce`` when reducing over multiple dimensions. (:issue:`3402`). By `Deepak Cherian `_ - Allow appending datetime and bool data variables to zarr stores. (:issue:`3480`). By `Akihiro Matsukawa `_. - Add support for numpy >=1.18 (); bugfix mean() on datetime64 arrays on dask backend (:issue:`3409`, :pull:`3537`). By `Guido Imperiale `_. - Add support for pandas >=0.26 (:issue:`3440`). By `Deepak Cherian `_. - Add support for pseudonetcdf >=3.1 (:pull:`3485`). By `Barron Henderson `_. Documentation ~~~~~~~~~~~~~ - Fix leap year condition in `monthly means example `_. By `MickaΓ«l Lalande `_. - Fix the documentation of :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample`, explicitly stating that a datetime-like dimension is required. (:pull:`3400`) By `Justus Magin `_. - Update the :ref:`terminology` page to address multidimensional coordinates. (:pull:`3410`) By `Jon Thielen `_. - Fix the documentation of :py:meth:`Dataset.integrate` and :py:meth:`DataArray.integrate` and add an example to :py:meth:`Dataset.integrate`. (:pull:`3469`) By `Justus Magin `_. Internal Changes ~~~~~~~~~~~~~~~~ - Added integration tests against `pint `_. (:pull:`3238`, :pull:`3447`, :pull:`3493`, :pull:`3508`) by `Justus Magin `_. .. note:: At the moment of writing, these tests *as well as the ability to use pint in general* require `a highly experimental version of pint `_ (install with ``pip install git+https://github.com/andrewgsavage/pint.git@refs/pull/6/head)``. Even with it, interaction with non-numpy array libraries, e.g. dask or sparse, is broken. - Use Python 3.6 idioms throughout the codebase. (:pull:`3419`) By `Maximilian Roos `_ - Run basic CI tests on Python 3.8. (:pull:`3477`) By `Maximilian Roos `_ - Enable type checking on default sentinel values (:pull:`3472`) By `Maximilian Roos `_ - Add ``Variable._replace`` for simpler replacing of a subset of attributes (:pull:`3472`) By `Maximilian Roos `_ .. _whats-new.0.14.0: v0.14.0 (14 Oct 2019) --------------------- Breaking changes ~~~~~~~~~~~~~~~~ - This release introduces a rolling policy for minimum dependency versions: :ref:`mindeps_policy`. Several minimum versions have been increased: ============ ================== ==== Package Old New ============ ================== ==== Python 3.5.3 3.6 numpy 1.12 1.14 pandas 0.19.2 0.24 dask 0.16 (tested: 2.4) 1.2 bottleneck 1.1 (tested: 1.2) 1.2 matplotlib 1.5 (tested: 3.1) 3.1 ============ ================== ==== Obsolete patch versions (x.y.Z) are not tested anymore. The oldest supported versions of all optional dependencies are now covered by automated tests (before, only the very latest versions were tested). (:issue:`3222`, :issue:`3293`, :issue:`3340`, :issue:`3346`, :issue:`3358`). By `Guido Imperiale `_. - Dropped the ``drop=False`` optional parameter from :py:meth:`Variable.isel`. 
It was unused and doesn't make sense for a Variable. (:pull:`3375`). By `Guido Imperiale `_. - Remove internal usage of :py:class:`collections.OrderedDict`. After dropping support for Python <=3.5, most uses of ``OrderedDict`` in xarray were no longer necessary. We have removed the internal use of the ``OrderedDict`` in favor of Python's builtin ``dict`` object which is now ordered itself. This change will be most obvious when interacting with the ``attrs`` property on Dataset and DataArray objects. (:issue:`3380`, :pull:`3389`). By `Joe Hamman `_. New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - Added :py:func:`~xarray.map_blocks`, modeled after :py:func:`dask.array.map_blocks`. Also added :py:meth:`Dataset.unify_chunks`, :py:meth:`DataArray.unify_chunks` and :py:meth:`testing.assert_chunks_equal`. (:pull:`3276`). By `Deepak Cherian `_ and `Guido Imperiale `_. Enhancements ~~~~~~~~~~~~ - ``core.groupby.GroupBy`` enhancements. By `Deepak Cherian `_. - Added a repr (:pull:`3344`). Example:: >>> da.groupby("time.season") DataArrayGroupBy, grouped over 'season' 4 groups with labels 'DJF', 'JJA', 'MAM', 'SON' - Added a ``GroupBy.dims`` property that mirrors the dimensions of each group (:issue:`3344`). - Speed up :py:meth:`Dataset.isel` up to 33% and :py:meth:`DataArray.isel` up to 25% for small arrays (:issue:`2799`, :pull:`3375`). By `Guido Imperiale `_. Bug fixes ~~~~~~~~~ - Reintroduce support for :mod:`weakref` (broken in v0.13.0). Support has been reinstated for :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects only. Internal xarray objects remain unaddressable by weakref in order to save memory (:issue:`3317`). By `Guido Imperiale `_. - Line plots with the ``x`` or ``y`` argument set to a 1D non-dimensional coord now plot the correct data for 2D DataArrays (:issue:`3334`). By `Tom Nicholas `_. - Make :py:func:`~xarray.concat` more robust when merging variables present in some datasets but not others (:issue:`508`). By `Deepak Cherian `_. - The default behaviour of reducing across all dimensions for :py:class:`~xarray.core.groupby.DataArrayGroupBy` objects has now been properly removed as was done for :py:class:`~xarray.core.groupby.DatasetGroupBy` in 0.13.0 (:issue:`3337`). Use ``xarray.ALL_DIMS`` if you need to replicate previous behaviour. Also raise nicer error message when no groups are created (:issue:`1764`). By `Deepak Cherian `_. - Fix error in concatenating unlabeled dimensions (:pull:`3362`). By `Deepak Cherian `_. - Warn if the ``dim`` kwarg is passed to rolling operations. This is redundant since a dimension is specified when the :py:class:`~computation.rolling.DatasetRolling` or :py:class:`~computation.rolling.DataArrayRolling` object is created. (:pull:`3362`). By `Deepak Cherian `_. Documentation ~~~~~~~~~~~~~ - Created a glossary of important xarray terms (:issue:`2410`, :pull:`3352`). By `Gregory Gundersen `_. - Created a "How do I..." section (:ref:`howdoi`) for solutions to common questions. (:pull:`3357`). By `Deepak Cherian `_. - Add examples for :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims` (:pull:`3331`, :pull:`3331`). By `Justus Magin `_. - Add examples for :py:meth:`align`, :py:meth:`merge`, :py:meth:`combine_by_coords`, :py:meth:`full_like`, :py:meth:`zeros_like`, :py:meth:`ones_like`, :py:meth:`Dataset.pipe`, :py:meth:`Dataset.assign`, :py:meth:`Dataset.reindex`, :py:meth:`Dataset.fillna` (:pull:`3328`). By `Anderson Banihirwe `_. 
- Fixed documentation to clean up an unwanted file created in ``ipython`` example (:pull:`3353`). By `Gregory Gundersen `_. .. _whats-new.0.13.0: v0.13.0 (17 Sep 2019) --------------------- This release includes many exciting changes: wrapping of `NEP18 `_ compliant numpy-like arrays; new :py:meth:`~Dataset.plot.scatter` plotting method that can scatter two ``DataArrays`` in a ``Dataset`` against each other; support for converting pandas DataFrames to xarray objects that wrap ``pydata/sparse``; and more! Breaking changes ~~~~~~~~~~~~~~~~ - This release increases the minimum required Python version from 3.5.0 to 3.5.3 (:issue:`3089`). By `Guido Imperiale `_. - The ``isel_points`` and ``sel_points`` methods are removed, having been deprecated since v0.10.0. These are redundant with the ``isel`` / ``sel`` methods. See :ref:`vectorized_indexing` for the details By `Maximilian Roos `_ - The ``inplace`` kwarg for public methods now raises an error, having been deprecated since v0.11.0. By `Maximilian Roos `_ - :py:func:`~xarray.concat` now requires the ``dim`` argument. Its ``indexers``, ``mode`` and ``concat_over`` kwargs have now been removed. By `Deepak Cherian `_ - Passing a list of colors in ``cmap`` will now raise an error, having been deprecated since v0.6.1. - Most xarray objects now define ``__slots__``. This reduces overall RAM usage by ~22% (not counting the underlying numpy buffers); on CPython 3.7/x64, a trivial DataArray has gone down from 1.9kB to 1.5kB. Caveats: - Pickle streams produced by older versions of xarray can't be loaded using this release, and vice versa. - Any user code that was accessing the ``__dict__`` attribute of xarray objects will break. The best practice to attach custom metadata to xarray objects is to use the ``attrs`` dictionary. - Any user code that defines custom subclasses of xarray classes must now explicitly define ``__slots__`` itself. Subclasses that don't add any attributes must state so by defining ``__slots__ = ()`` right after the class header. Omitting ``__slots__`` will now cause a ``FutureWarning`` to be logged, and will raise an error in a later release. (:issue:`3250`) by `Guido Imperiale `_. - The default dimension for :py:meth:`Dataset.groupby`, :py:meth:`Dataset.resample`, :py:meth:`DataArray.groupby` and :py:meth:`DataArray.resample` reductions is now the grouping or resampling dimension. - :py:meth:`DataArray.to_dataset` requires ``name`` to be passed as a kwarg (previously ambiguous positional arguments were deprecated) - Reindexing with variables of a different dimension now raise an error (previously deprecated) - ``xarray.broadcast_array`` is removed (previously deprecated in favor of :py:func:`~xarray.broadcast`) - ``Variable.expand_dims`` is removed (previously deprecated in favor of :py:meth:`Variable.set_dims`) New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - xarray can now wrap around any `NEP18 `_ compliant numpy-like library (important: read notes about ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION`` in the above link). Added explicit test coverage for `sparse `_. (:issue:`3117`, :issue:`3202`). This requires ``sparse>=0.8.0``. By `Nezar Abdennur `_ and `Guido Imperiale `_. - :py:meth:`~Dataset.from_dataframe` and :py:meth:`~DataArray.from_series` now support ``sparse=True`` for converting pandas objects into xarray objects wrapping sparse arrays. This is particularly useful with sparsely populated hierarchical indexes. (:issue:`3206`) By `Stephan Hoyer `_. 
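  For example (an illustrative sketch, assuming the ``sparse`` package is installed)::

      import pandas as pd
      import xarray as xr

      index = pd.MultiIndex.from_product([list("ab"), range(3)], names=["letter", "num"])
      series = pd.Series(range(6), index=index)
      arr = xr.DataArray.from_series(series, sparse=True)  # wraps a sparse array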
- The xarray package is now discoverable by mypy (although typing hints coverage is not complete yet). mypy type checking is now enforced by CI. Libraries that depend on xarray and use mypy can now remove from their setup.cfg the lines:: [mypy-xarray] ignore_missing_imports = True (:issue:`2877`, :issue:`3088`, :issue:`3090`, :issue:`3112`, :issue:`3117`, :issue:`3207`) By `Guido Imperiale `_ and `Maximilian Roos `_. - Added :py:meth:`DataArray.broadcast_like` and :py:meth:`Dataset.broadcast_like`. By `Deepak Cherian `_ and `David Mertz `_. - Dataset plotting API for visualizing dependencies between two DataArrays! Currently only :py:meth:`Dataset.plot.scatter` is implemented. By `Yohai Bar Sinai `_ and `Deepak Cherian `_ - Added :py:meth:`DataArray.head`, :py:meth:`DataArray.tail` and :py:meth:`DataArray.thin`; as well as :py:meth:`Dataset.head`, :py:meth:`Dataset.tail` and :py:meth:`Dataset.thin` methods. (:issue:`319`) By `Gerardo Rivera `_. Enhancements ~~~~~~~~~~~~ - Multiple enhancements to :py:func:`~xarray.concat` and :py:func:`~xarray.open_mfdataset`. By `Deepak Cherian `_ - Added ``compat='override'``. When merging, this option picks the variable from the first dataset and skips all comparisons. - Added ``join='override'``. When aligning, this only checks that index sizes are equal among objects and skips checking indexes for equality. - :py:func:`~xarray.concat` and :py:func:`~xarray.open_mfdataset` now support the ``join`` kwarg. It is passed down to :py:func:`~xarray.align`. - :py:func:`~xarray.concat` now calls :py:func:`~xarray.merge` on variables that are not concatenated (i.e. variables without ``concat_dim`` when ``data_vars`` or ``coords`` are ``"minimal"``). :py:func:`~xarray.concat` passes its new ``compat`` kwarg down to :py:func:`~xarray.merge`. (:issue:`2064`) Users can avoid a common bottleneck when using :py:func:`~xarray.open_mfdataset` on a large number of files with variables that are known to be aligned and some of which need not be concatenated. Slow equality comparisons can now be avoided, for e.g.:: data = xr.open_mfdataset(files, concat_dim='time', data_vars='minimal', coords='minimal', compat='override', join='override') - In :py:meth:`~xarray.Dataset.to_zarr`, passing ``mode`` is not mandatory if ``append_dim`` is set, as it will automatically be set to ``'a'`` internally. By `David Brochart `_. - Added the ability to initialize an empty or full DataArray with a single value. (:issue:`277`) By `Gerardo Rivera `_. - :py:func:`~xarray.Dataset.to_netcdf()` now supports the ``invalid_netcdf`` kwarg when used with ``engine="h5netcdf"``. It is passed to ``h5netcdf.File``. By `Ulrich Herter `_. - ``xarray.Dataset.drop`` now supports keyword arguments; dropping index labels by using both ``dim`` and ``labels`` or using a :py:class:`~core.coordinates.DataArrayCoordinates` object are deprecated (:issue:`2910`). By `Gregory Gundersen `_. - Added examples of :py:meth:`Dataset.set_index` and :py:meth:`DataArray.set_index`, as well are more specific error messages when the user passes invalid arguments (:issue:`3176`). By `Gregory Gundersen `_. - :py:meth:`Dataset.filter_by_attrs` now filters the coordinates as well as the variables. By `Spencer Jones `_. Bug fixes ~~~~~~~~~ - Improve "missing dimensions" error message for :py:func:`~xarray.apply_ufunc` (:issue:`2078`). By `Rick Russotto `_. - :py:meth:`~xarray.DataArray.assign_coords` now supports dictionary arguments (:issue:`3231`). By `Gregory Gundersen `_. 
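  For example (a minimal sketch)::

      import xarray as xr

      da = xr.DataArray([1, 2, 3], dims="x")
      da = da.assign_coords({"x": [10, 20, 30]})  # same as da.assign_coords(x=[10, 20, 30])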
- Fix regression introduced in v0.12.2 where ``copy(deep=True)`` would convert unicode indices to dtype=object (:issue:`3094`). By `Guido Imperiale `_. - Improved error handling and documentation for ``.expand_dims()`` read-only view. - Fix tests for big-endian systems (:issue:`3125`). By `Graham Inggs `_. - XFAIL several tests which are expected to fail on ARM systems due to a ``datetime`` issue in NumPy (:issue:`2334`). By `Graham Inggs `_. - Fix KeyError that arises when using .sel method with float values different from coords float type (:issue:`3137`). By `Hasan Ahmad `_. - Fixed bug in ``combine_by_coords()`` causing a ``ValueError`` if the input had an unused dimension with coordinates which were not monotonic (:issue:`3150`). By `Tom Nicholas `_. - Fixed crash when applying ``distributed.Client.compute()`` to a DataArray (:issue:`3171`). By `Guido Imperiale `_. - Better error message when using groupby on an empty DataArray (:issue:`3037`). By `Hasan Ahmad `_. - Fix error that arises when using open_mfdataset on a series of netcdf files having differing values for a variable attribute of type list. (:issue:`3034`) By `Hasan Ahmad `_. - Prevent :py:meth:`~xarray.DataArray.argmax` and :py:meth:`~xarray.DataArray.argmin` from calling dask compute (:issue:`3237`). By `Ulrich Herter `_. - Plots in 2 dimensions (pcolormesh, contour) now allow to specify levels as numpy array (:issue:`3284`). By `Mathias Hauser `_. - Fixed bug in :meth:`DataArray.quantile` failing to keep attributes when ``keep_attrs`` was True (:issue:`3304`). By `David Huard `_. Documentation ~~~~~~~~~~~~~ - Created a `PR checklist `_ as a quick reference for tasks before creating a new PR or pushing new commits. By `Gregory Gundersen `_. - Fixed documentation to clean up unwanted files created in ``ipython`` examples (:issue:`3227`). By `Gregory Gundersen `_. .. _whats-new.0.12.3: v0.12.3 (10 July 2019) ---------------------- New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - New methods :py:meth:`Dataset.to_stacked_array` and :py:meth:`DataArray.to_unstacked_dataset` for reshaping Datasets of variables with different dimensions (:issue:`1317`). This is useful for feeding data from xarray into machine learning models, as described in :ref:`reshape.stacking_different`. By `Noah Brenowitz `_. Enhancements ~~~~~~~~~~~~ - Support for renaming ``Dataset`` variables and dimensions independently with :py:meth:`~Dataset.rename_vars` and :py:meth:`~Dataset.rename_dims` (:issue:`3026`). By `Julia Kent `_. - Add ``scales``, ``offsets``, ``units`` and ``descriptions`` attributes to :py:class:`~xarray.DataArray` returned by :py:func:`~xarray.open_rasterio`. (:issue:`3013`) By `Erle Carrara `_. Bug fixes ~~~~~~~~~ - Resolved deprecation warnings from newer versions of matplotlib and dask. - Compatibility fixes for the upcoming pandas 0.25 and NumPy 1.17 releases. By `Stephan Hoyer `_. - Fix summaries for multiindex coordinates (:issue:`3079`). By `Jonas HΓΆrsch `_. - Fix HDF5 error that could arise when reading multiple groups from a file at once (:issue:`2954`). By `Stephan Hoyer `_. .. _whats-new.0.12.2: v0.12.2 (29 June 2019) ---------------------- New functions/methods ~~~~~~~~~~~~~~~~~~~~~ - Two new functions, :py:func:`~xarray.combine_nested` and :py:func:`~xarray.combine_by_coords`, allow for combining datasets along any number of dimensions, instead of the one-dimensional list of datasets supported by :py:func:`~xarray.concat`. 
The new ``combine_nested`` will accept the datasets as a nested list-of-lists, and combine by applying a series of concat and merge operations. The new ``combine_by_coords`` instead uses the dimension coordinates of datasets to order them. :py:func:`~xarray.open_mfdataset` can use either ``combine_nested`` or ``combine_by_coords`` to combine datasets along multiple dimensions, by specifying the argument ``combine='nested'`` or ``combine='by_coords'``. The older function ``auto_combine`` has been deprecated, because its functionality has been subsumed by the new functions. To avoid FutureWarnings switch to using ``combine_nested`` or ``combine_by_coords``, (or set the ``combine`` argument in ``open_mfdataset``). (:issue:`2159`) By `Tom Nicholas `_. - :py:meth:`~xarray.DataArray.rolling_exp` and :py:meth:`~xarray.Dataset.rolling_exp` added, similar to pandas' ``pd.DataFrame.ewm`` method. Calling ``.mean`` on the resulting object will return an exponentially weighted moving average. By `Maximilian Roos `_. - New :py:func:`DataArray.str ` for string related manipulations, based on ``pandas.Series.str``. By `0x0L `_. - Added ``strftime`` method to ``.dt`` accessor, making it simpler to hand a datetime ``DataArray`` to other code expecting formatted dates and times. (:issue:`2090`). :py:meth:`~xarray.CFTimeIndex.strftime` is also now available on :py:class:`CFTimeIndex`. By `Alan Brammer `_ and `Ryan May `_. - ``GroupBy.quantile`` is now a method of ``GroupBy`` objects (:issue:`3018`). By `David Huard `_. - Argument and return types are added to most methods on ``DataArray`` and ``Dataset``, allowing static type checking both within xarray and external libraries. Type checking with `mypy `_ is enabled in CI (though not required yet). By `Guido Imperiale `_ and `Maximilian Roos `_. Enhancements to existing functionality ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Add ``keepdims`` argument for reduce operations (:issue:`2170`) By `Scott Wales `_. - Enable ``@`` operator for DataArray. This is equivalent to :py:meth:`DataArray.dot` By `Maximilian Roos `_. - Add ``fill_value`` argument for reindex, align, and merge operations to enable custom fill values. (:issue:`2876`) By `Zach Griffith `_. - :py:meth:`DataArray.transpose` now accepts a keyword argument ``transpose_coords`` which enables transposition of coordinates in the same way as :py:meth:`Dataset.transpose`. :py:meth:`DataArray.groupby` :py:meth:`DataArray.groupby_bins`, and :py:meth:`DataArray.resample` now accept a keyword argument ``restore_coord_dims`` which keeps the order of the dimensions of multi-dimensional coordinates intact (:issue:`1856`). By `Peter Hausamann `_. - Clean up Python 2 compatibility in code (:issue:`2950`) By `Guido Imperiale `_. - Better warning message when supplying invalid objects to ``xr.merge`` (:issue:`2948`). By `Mathias Hauser `_. - Add ``errors`` keyword argument to ``Dataset.drop`` and :py:meth:`Dataset.drop_dims` that allows ignoring errors if a passed label or dimension is not in the dataset (:issue:`2994`). By `Andrew Ross `_. IO related enhancements ~~~~~~~~~~~~~~~~~~~~~~~ - Implement :py:func:`~xarray.load_dataset` and :py:func:`~xarray.load_dataarray` as alternatives to :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_dataarray` to open, load into memory, and close files, returning the Dataset or DataArray. These functions are helpful for avoiding file-lock errors when trying to write to files opened using ``open_dataset()`` or ``open_dataarray()``. (:issue:`2887`) By `Dan Nowacki `_. 
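  For example (an illustrative sketch; ``example.nc`` is a hypothetical file)::

      import xarray as xr

      ds = xr.load_dataset("example.nc")  # read into memory, then close the file
      # ... modify ds ...
      ds.to_netcdf("example.nc")  # overwriting is safe, no file-lock error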
- It is now possible to extend existing :ref:`io.zarr` datasets, by using ``mode='a'`` and the new ``append_dim`` argument in :py:meth:`~xarray.Dataset.to_zarr`. By `Jendrik JΓΆrdening `_, `David Brochart `_, `Ryan Abernathey `_ and `Shikhar Goenka `_. - ``xr.open_zarr`` now accepts manually specified chunks with the ``chunks=`` parameter. ``auto_chunk=True`` is equivalent to ``chunks='auto'`` for backwards compatibility. The ``overwrite_encoded_chunks`` parameter is added to remove the original zarr chunk encoding. By `Lily Wang `_. - netCDF chunksizes are now only dropped when original_shape is different, not when it isn't found. (:issue:`2207`) By `Karel van de Plassche `_. - Character arrays' character dimension name decoding and encoding handled by ``var.encoding['char_dim_name']`` (:issue:`2895`) By `James McCreight `_. - open_rasterio() now supports rasterio.vrt.WarpedVRT with custom transform, width and height (:issue:`2864`). By `Julien Michel `_. Bug fixes ~~~~~~~~~ - Rolling operations on xarray objects containing dask arrays could silently compute the incorrect result or use large amounts of memory (:issue:`2940`). By `Stephan Hoyer `_. - Don't set encoding attributes on bounds variables when writing to netCDF. (:issue:`2921`) By `Deepak Cherian `_. - NetCDF4 output: variables with unlimited dimensions must be chunked (not contiguous) on output. (:issue:`1849`) By `James McCreight `_. - indexing with an empty list creates an object with zero-length axis (:issue:`2882`) By `Mayeul d'Avezac `_. - Return correct count for scalar datetime64 arrays (:issue:`2770`) By `Dan Nowacki `_. - Fixed max, min exception when applied to a multiIndex (:issue:`2923`) By `Ian Castleden `_ - A deep copy deep-copies the coords (:issue:`1463`) By `Martin Pletcher `_. - Increased support for ``missing_value`` (:issue:`2871`) By `Deepak Cherian `_. - Removed usages of ``pytest.config``, which is deprecated (:issue:`2988`) By `Maximilian Roos `_. - Fixed performance issues with cftime installed (:issue:`3000`) By `0x0L `_. - Replace incorrect usages of ``message`` in pytest assertions with ``match`` (:issue:`3011`) By `Maximilian Roos `_. - Add explicit pytest markers, now required by pytest (:issue:`3032`). By `Maximilian Roos `_. - Test suite fixes for newer versions of pytest (:issue:`3011`, :issue:`3032`). By `Maximilian Roos `_ and `Stephan Hoyer `_. .. _whats-new.0.12.1: v0.12.1 (4 April 2019) ---------------------- Enhancements ~~~~~~~~~~~~ - Allow ``expand_dims`` method to support inserting/broadcasting dimensions with size > 1. (:issue:`2710`) By `Martin Pletcher `_. Bug fixes ~~~~~~~~~ - Dataset.copy(deep=True) now creates a deep copy of the attrs (:issue:`2835`). By `Andras Gefferth `_. - Fix incorrect ``indexes`` resulting from various ``Dataset`` operations (e.g., ``swap_dims``, ``isel``, ``reindex``, ``[]``) (:issue:`2842`, :issue:`2856`). By `Stephan Hoyer `_. .. _whats-new.0.12.0: v0.12.0 (15 March 2019) ----------------------- Highlights include: - Removed support for Python 2. This is the first version of xarray that is Python 3 only! - New :py:meth:`~xarray.DataArray.coarsen` and :py:meth:`~xarray.DataArray.integrate` methods. See :ref:`compute.coarsen` and :ref:`compute.using_coordinates` for details. - Many improvements to cftime support. See below for details. Deprecations ~~~~~~~~~~~~ - The ``compat`` argument to ``Dataset`` and the ``encoding`` argument to ``DataArray`` are deprecated and will be removed in a future release. (:issue:`1188`) By `Maximilian Roos `_. 
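  For example, rather than passing ``encoding`` to the ``DataArray`` constructor, it can be set via the ``encoding`` attribute or supplied when writing to disk (an illustrative sketch; the encoding key shown is hypothetical)::

      import xarray as xr

      da = xr.DataArray([1, 2, 3], dims="x", name="a")
      da.encoding["dtype"] = "int16"  # applied when writing, e.g. da.to_netcdf(...)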
cftime related enhancements ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Resampling of standard and non-standard calendars indexed by :py:class:`~xarray.CFTimeIndex` is now possible. (:issue:`2191`). By `Jwen Fai Low `_ and `Spencer Clark `_. - Taking the mean of arrays of :py:class:`cftime.datetime` objects, and by extension, use of :py:meth:`~xarray.DataArray.coarsen` with :py:class:`cftime.datetime` coordinates is now possible. By `Spencer Clark `_. - Internal plotting now supports ``cftime.datetime`` objects as time series. (:issue:`2164`) By `Julius Busecke `_ and `Spencer Clark `_. - :py:meth:`~xarray.cftime_range` now supports QuarterBegin and QuarterEnd offsets (:issue:`2663`). By `Jwen Fai Low `_ - :py:meth:`~xarray.open_dataset` now accepts a ``use_cftime`` argument, which can be used to require that ``cftime.datetime`` objects are always used, or never used when decoding dates encoded with a standard calendar. This can be used to ensure consistent date types are returned when using :py:meth:`~xarray.open_mfdataset` (:issue:`1263`) and/or to silence serialization warnings raised if dates from a standard calendar are found to be outside the :py:class:`pandas.Timestamp`-valid range (:issue:`2754`). By `Spencer Clark `_. - :py:meth:`pandas.Series.dropna` is now supported for a :py:class:`pandas.Series` indexed by a :py:class:`~xarray.CFTimeIndex` (:issue:`2688`). By `Spencer Clark `_. Other enhancements ~~~~~~~~~~~~~~~~~~ - Added ability to open netcdf4/hdf5 file-like objects with ``open_dataset``. Requires (h5netcdf>0.7 and h5py>2.9.0). (:issue:`2781`) By `Scott Henderson `_ - Add ``data=False`` option to ``to_dict()`` methods. (:issue:`2656`) By `Ryan Abernathey `_ - :py:meth:`DataArray.coarsen` and :py:meth:`Dataset.coarsen` are newly added. See :ref:`compute.coarsen` for details. (:issue:`2525`) By `Keisuke Fujii `_. - Upsampling an array via interpolation with resample is now dask-compatible, as long as the array is not chunked along the resampling dimension. By `Spencer Clark `_. - :py:func:`xarray.testing.assert_equal` and :py:func:`xarray.testing.assert_identical` now provide a more detailed report showing what exactly differs between the two objects (dimensions / coordinates / variables / attributes) (:issue:`1507`). By `Benoit Bovy `_. - Add ``tolerance`` option to ``resample()`` methods ``bfill``, ``pad``, ``nearest``. (:issue:`2695`) By `Hauke Schulz `_. - :py:meth:`DataArray.integrate` and :py:meth:`Dataset.integrate` are newly added. See :ref:`compute.using_coordinates` for the detail. (:issue:`1332`) By `Keisuke Fujii `_. - Added :py:meth:`~xarray.Dataset.drop_dims` (:issue:`1949`). By `Kevin Squire `_. Bug fixes ~~~~~~~~~ - Silenced warnings that appear when using pandas 0.24. By `Stephan Hoyer `_ - Interpolating via resample now internally specifies ``bounds_error=False`` as an argument to ``scipy.interpolate.interp1d``, allowing for interpolation from higher frequencies to lower frequencies. Datapoints outside the bounds of the original time coordinate are now filled with NaN (:issue:`2197`). By `Spencer Clark `_. - Line plots with the ``x`` argument set to a non-dimensional coord now plot the correct data for 1D DataArrays. (:issue:`2725`). By `Tom Nicholas `_. - Subtracting a scalar ``cftime.datetime`` object from a :py:class:`CFTimeIndex` now results in a :py:class:`pandas.TimedeltaIndex` instead of raising a ``TypeError`` (:issue:`2671`). By `Spencer Clark `_. 
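  For example (an illustrative sketch, assuming cftime is installed)::

      import cftime
      import xarray as xr

      times = xr.cftime_range("2000-01-01", periods=3, calendar="noleap")
      times - cftime.DatetimeNoLeap(2000, 1, 1)  # now a pandas.TimedeltaIndex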
- backend_kwargs are no longer ignored when using open_dataset with the pynio engine (:issue:`2380`) By `Jonathan Joyce `_. - Fix ``open_rasterio`` creating a WKT CRS instead of PROJ.4 with ``rasterio`` 1.0.14+ (:issue:`2715`). By `David Hoese `_. - Masking data arrays with :py:meth:`xarray.DataArray.where` now returns an array with the name of the original masked array (:issue:`2748` and :issue:`2457`). By `Yohai Bar-Sinai `_. - Fixed error when trying to reduce a DataArray using a function which does not require an axis argument. (:issue:`2768`) By `Tom Nicholas `_. - Concatenating a sequence of :py:class:`~xarray.DataArray` with varying names sets the name of the output array to ``None``, instead of the name of the first input array. If the names are all the same, the output keeps that name rather than taking the name of the first DataArray in the list as it did before. (:issue:`2775`). By `Tom Nicholas `_. - Per the `CF conventions section on calendars `_, specifying ``'standard'`` as the calendar type in :py:meth:`~xarray.cftime_range` now correctly refers to the ``'gregorian'`` calendar instead of the ``'proleptic_gregorian'`` calendar (:issue:`2761`). .. _whats-new.0.11.3: v0.11.3 (26 January 2019) ------------------------- Bug fixes ~~~~~~~~~ - Saving files with times encoded with reference dates with timezones (e.g. '2000-01-01T00:00:00-05:00') no longer raises an error (:issue:`2649`). By `Spencer Clark `_. - Fixed performance regression with ``open_mfdataset`` (:issue:`2662`). By `Tom Nicholas `_. - Fixed supplying an explicit dimension in the ``concat_dim`` argument to ``open_mfdataset`` (:issue:`2647`). By `Ben Root `_. .. _whats-new.0.11.2: v0.11.2 (2 January 2019) ------------------------ Removes inadvertently introduced setup dependency on pytest-runner (:issue:`2641`). Otherwise, this release is exactly equivalent to 0.11.1. .. warning:: This is the last xarray release that will support Python 2.7. Future releases will be Python 3 only, but older versions of xarray will always be available for Python 2.7 users. For more details, see: - :issue:`Xarray Github issue discussing dropping Python 2 <1829>` - `Python 3 Statement `__ - `Tips on porting to Python 3 `__ .. _whats-new.0.11.1: v0.11.1 (29 December 2018) -------------------------- This minor release includes a number of enhancements and bug fixes, and two (slightly) breaking changes. Breaking changes ~~~~~~~~~~~~~~~~ - Minimum rasterio version increased from 0.36 to 1.0 (for ``open_rasterio``) - Time bounds variables are now also decoded according to CF conventions (:issue:`2565`). The previous behavior was to decode them only if they had specific time attributes; now these attributes are copied automatically from the corresponding time coordinate. This might break downstream code that was relying on these variables not being decoded. By `Fabien Maussion `_. Enhancements ~~~~~~~~~~~~ - Ability to read and write consolidated metadata in zarr stores (:issue:`2558`). By `Ryan Abernathey `_. - :py:class:`CFTimeIndex` uses slicing for string indexing when possible (like :py:class:`pandas.DatetimeIndex`), which avoids unnecessary copies. By `Stephan Hoyer `_ - Enable passing ``rasterio.io.DatasetReader`` or ``rasterio.vrt.WarpedVRT`` to ``open_rasterio`` instead of a file path string. Allows for in-memory reprojection, see (:issue:`2588`). By `Scott Henderson `_.
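  For example (an illustrative sketch; ``image.tif`` is a hypothetical file and rasterio is required)::

      import rasterio
      from rasterio.vrt import WarpedVRT
      import xarray as xr

      with rasterio.open("image.tif") as src, WarpedVRT(src, crs="EPSG:4326") as vrt:
          da = xr.open_rasterio(vrt)  # in-memory reprojection, no temporary file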
- Like :py:class:`pandas.DatetimeIndex`, :py:class:`CFTimeIndex` now supports "dayofyear" and "dayofweek" accessors (:issue:`2597`). Note this requires a version of cftime greater than 1.0.2. By `Spencer Clark `_. - The option ``'warn_for_unclosed_files'`` (False by default) has been added to allow users to enable a warning when files opened by xarray are deallocated but were not explicitly closed. This is mostly useful for debugging; we recommend enabling it in your test suites if you use xarray for IO. By `Stephan Hoyer `_ - Support Dask ``HighLevelGraphs`` by `Matthew Rocklin `_. - :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` now supports the ``loffset`` kwarg just like pandas. By `Deepak Cherian `_ - Datasets are now guaranteed to have a ``'source'`` encoding, so the source file name is always stored (:issue:`2550`). By `Tom Nicholas `_. - The ``apply`` methods for ``DatasetGroupBy``, ``DataArrayGroupBy``, ``DatasetResample`` and ``DataArrayResample`` now support passing positional arguments to the applied function as a tuple to the ``args`` argument. By `Matti Eskelinen `_. - 0d slices of ndarrays are now obtained directly through indexing, rather than extracting and wrapping a scalar, avoiding unnecessary copying. By `Daniel Wennberg `_. - Added support for ``fill_value`` with :py:meth:`~xarray.DataArray.shift` and :py:meth:`~xarray.Dataset.shift` By `Maximilian Roos `_ Bug fixes ~~~~~~~~~ - Ensure files are automatically closed, if possible, when no longer referenced by a Python variable (:issue:`2560`). By `Stephan Hoyer `_ - Fixed possible race conditions when reading/writing to disk in parallel (:issue:`2595`). By `Stephan Hoyer `_ - Fix h5netcdf saving scalars with filters or chunks (:issue:`2563`). By `Martin Raspaud `_. - Fix parsing of ``_Unsigned`` attribute set by OPENDAP servers. (:issue:`2583`). By `Deepak Cherian `_ - Fix failure in time encoding when exporting to netCDF with versions of pandas less than 0.21.1 (:issue:`2623`). By `Spencer Clark `_. - Fix MultiIndex selection to update label and level (:issue:`2619`). By `Keisuke Fujii `_. .. _whats-new.0.11.0: v0.11.0 (7 November 2018) ------------------------- Breaking changes ~~~~~~~~~~~~~~~~ - Finished deprecations (changed behavior with this release): - ``Dataset.T`` has been removed as a shortcut for :py:meth:`Dataset.transpose`. Call :py:meth:`Dataset.transpose` directly instead. - Iterating over a ``Dataset`` now includes only data variables, not coordinates. Similarly, calling ``len`` and ``bool`` on a ``Dataset`` now includes only data variables. - ``DataArray.__contains__`` (used by Python's ``in`` operator) now checks array data, not coordinates. - The old resample syntax from before xarray 0.10, e.g., ``data.resample('1D', dim='time', how='mean')``, is no longer supported will raise an error in most cases. You need to use the new resample syntax instead, e.g., ``data.resample(time='1D').mean()`` or ``data.resample({'time': '1D'}).mean()``. - New deprecations (behavior will be changed in xarray 0.12): - Reduction of :py:meth:`DataArray.groupby` and :py:meth:`DataArray.resample` without dimension argument will change in the next release. Now we warn a FutureWarning. By `Keisuke Fujii `_. - The ``inplace`` kwarg of a number of ``DataArray`` and ``Dataset`` methods is being deprecated and will be removed in the next release. By `Deepak Cherian `_. 
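  For example (a minimal sketch)::

      import xarray as xr

      da = xr.DataArray([1, 2, 3], dims="x")
      da.shift(x=1, fill_value=0)  # -> [0, 1, 2] instead of [nan, 1, 2]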
- Refactored storage backends: - Xarray's storage backends now automatically open and close files when necessary, rather than requiring opening a file with ``autoclose=True``. A global least-recently-used cache is used to store open files; the default limit of 128 open files should suffice in most cases, but can be adjusted if necessary with ``xarray.set_options(file_cache_maxsize=...)``. The ``autoclose`` argument to ``open_dataset`` and related functions has been deprecated and is now a no-op. This change, along with an internal refactor of xarray's storage backends, should significantly improve performance when reading and writing netCDF files with Dask, especially when working with many files or using Dask Distributed. By `Stephan Hoyer `_ - Support for non-standard calendars used in climate science: - Xarray will now always use :py:class:`cftime.datetime` objects, rather than by default trying to coerce them into ``np.datetime64[ns]`` objects. A :py:class:`~xarray.CFTimeIndex` will be used for indexing along time coordinates in these cases. - A new method :py:meth:`~xarray.CFTimeIndex.to_datetimeindex` has been added to aid in converting from a :py:class:`~xarray.CFTimeIndex` to a :py:class:`pandas.DatetimeIndex` for the remaining use-cases where using a :py:class:`~xarray.CFTimeIndex` is still a limitation (e.g. for resample or plotting). - Setting the ``enable_cftimeindex`` option is now a no-op and emits a ``FutureWarning``. Enhancements ~~~~~~~~~~~~ - :py:meth:`xarray.DataArray.plot.line` can now accept multidimensional coordinate variables as input. ``hue`` must be a dimension name in this case. (:issue:`2407`) By `Deepak Cherian `_. - Added support for Python 3.7. (:issue:`2271`). By `Joe Hamman `_. - Added support for plotting data with ``pandas.Interval`` coordinates, such as those created by :py:meth:`~xarray.DataArray.groupby_bins` By `Maximilian Maahn `_. - Added :py:meth:`~xarray.CFTimeIndex.shift` for shifting the values of a CFTimeIndex by a specified frequency. (:issue:`2244`). By `Spencer Clark `_. - Added support for using ``cftime.datetime`` coordinates with :py:meth:`~xarray.DataArray.differentiate`, :py:meth:`~xarray.Dataset.differentiate`, :py:meth:`~xarray.DataArray.interp`, and :py:meth:`~xarray.Dataset.interp`. By `Spencer Clark `_ - There is now a global option to either always keep or always discard dataset and dataarray attrs upon operations. The option is set with ``xarray.set_options(keep_attrs=True)``, and the default is to use the old behaviour. By `Tom Nicholas `_. - Added a new backend for the GRIB file format based on ECMWF *cfgrib* python driver and *ecCodes* C-library. (:issue:`2475`) By `Alessandro Amici `_, sponsored by `ECMWF `_. - Resample now supports a dictionary mapping from dimension to frequency as its first argument, e.g., ``data.resample({'time': '1D'}).mean()``. This is consistent with other xarray functions that accept either dictionaries or keyword arguments. By `Stephan Hoyer `_. - The preferred way to access tutorial data is now to load it lazily with :py:meth:`xarray.tutorial.open_dataset`. :py:meth:`xarray.tutorial.load_dataset` calls ``Dataset.load()`` prior to returning (and is now deprecated). This was changed in order to facilitate using tutorial datasets with dask. By `Joe Hamman `_. - ``DataArray`` can now use ``xr.set_option(keep_attrs=True)`` and retain attributes in binary operations, such as (``+, -, * ,/``). Default behaviour is unchanged (*Attributes will be dismissed*). 
By `Michael Blaschek `_ Bug fixes ~~~~~~~~~ - ``FacetGrid`` now properly uses the ``cbar_kwargs`` keyword argument. (:issue:`1504`, :issue:`1717`) By `Deepak Cherian `_. - Addition and subtraction operators used with a CFTimeIndex now preserve the index's type. (:issue:`2244`). By `Spencer Clark `_. - We now properly handle arrays of ``datetime.datetime`` and ``datetime.timedelta`` provided as coordinates. (:issue:`2512`) By `Deepak Cherian `_. - ``xarray.DataArray.roll`` correctly handles multidimensional arrays. (:issue:`2445`) By `Keisuke Fujii `_. - ``xarray.plot()`` now properly accepts a ``norm`` argument and does not override the norm's ``vmin`` and ``vmax``. (:issue:`2381`) By `Deepak Cherian `_. - ``xarray.DataArray.std()`` now correctly accepts ``ddof`` keyword argument. (:issue:`2240`) By `Keisuke Fujii `_. - Restore matplotlib's default of plotting dashed negative contours when a single color is passed to ``DataArray.contour()`` e.g. ``colors='k'``. By `Deepak Cherian `_. - Fix a bug that caused some indexing operations on arrays opened with ``open_rasterio`` to error (:issue:`2454`). By `Stephan Hoyer `_. - Subtracting one CFTimeIndex from another now returns a ``pandas.TimedeltaIndex``, analogous to the behavior for DatetimeIndexes (:issue:`2484`). By `Spencer Clark `_. - Adding a TimedeltaIndex to, or subtracting a TimedeltaIndex from a CFTimeIndex is now allowed (:issue:`2484`). By `Spencer Clark `_. - Avoid use of Dask's deprecated ``get=`` parameter in tests by `Matthew Rocklin `_. - An ``OverflowError`` is now accurately raised and caught during the encoding process if a reference date is used that is so distant that the dates must be encoded using cftime rather than NumPy (:issue:`2272`). By `Spencer Clark `_. - Chunked datasets can now roundtrip to Zarr storage continually with ``to_zarr`` and ``open_zarr`` (:issue:`2300`). By `Lily Wang `_. .. _whats-new.0.10.9: v0.10.9 (21 September 2018) --------------------------- This minor release contains a number of backwards compatible enhancements. Announcements of note: - Xarray is now a NumFOCUS fiscally sponsored project! Read `the announcement `_ for more details. - We have a new :doc:`roadmap` that outlines our future development plans. - ``Dataset.apply`` now properly documents the way ``func`` is called. By `Matti Eskelinen `_. Enhancements ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.differentiate` and :py:meth:`~xarray.Dataset.differentiate` are newly added. (:issue:`1332`) By `Keisuke Fujii `_. - Default colormap for sequential and divergent data can now be set via :py:func:`~xarray.set_options()` (:issue:`2394`) By `Julius Busecke `_. - min_count option is newly supported in :py:meth:`~xarray.DataArray.sum`, :py:meth:`~xarray.DataArray.prod` and :py:meth:`~xarray.Dataset.sum`, and :py:meth:`~xarray.Dataset.prod`. (:issue:`2230`) By `Keisuke Fujii `_. - :py:func:`~plot.plot()` now accepts the kwargs ``xscale, yscale, xlim, ylim, xticks, yticks`` just like pandas. Also ``xincrease=False, yincrease=False`` now use matplotlib's axis inverting methods instead of setting limits. By `Deepak Cherian `_. (:issue:`2224`) - DataArray coordinates and Dataset coordinates and data variables are now displayed as ``a b ... y z`` rather than ``a b c d ...``. (:issue:`1186`) By `Seth P `_. - A new CFTimeIndex-enabled :py:func:`cftime_range` function for use in generating dates from standard or non-standard calendars. By `Spencer Clark `_. 
- When interpolating over a ``datetime64`` axis, you can now provide a datetime string instead of a ``datetime64`` object. E.g. ``da.interp(time='1991-02-01')`` (:issue:`2284`) By `Deepak Cherian `_. - A clear error message is now displayed if a ``set`` or ``dict`` is passed in place of an array (:issue:`2331`) By `Maximilian Roos `_. - Applying ``unstack`` to a large DataArray or Dataset is now much faster if the MultiIndex has not been modified after stacking the indices. (:issue:`1560`) By `Maximilian Maahn `_. - You can now control whether or not to offset the coordinates when using the ``roll`` method; the current behavior (coordinates rolled by default) raises a deprecation warning unless the keyword argument is set explicitly. (:issue:`1875`) By `Andrew Huang `_. - You can now call ``unstack`` without arguments to unstack every MultiIndex in a DataArray or Dataset. By `Julia Signell `_. - Added the ability to pass a data kwarg to ``copy`` to create a new object with the same metadata as the original object but using new values. By `Julia Signell `_. Bug fixes ~~~~~~~~~ - ``xarray.plot.imshow()`` correctly uses the ``origin`` argument. (:issue:`2379`) By `Deepak Cherian `_. - Fixed ``DataArray.to_iris()`` failure while creating ``DimCoord`` by falling back to creating ``AuxCoord``. Fixed dependency on ``var_name`` attribute being set. (:issue:`2201`) By `Thomas Voigt `_. - Fixed a bug in ``zarr`` backend which prevented use with datasets with invalid chunk size encoding after reading from an existing store (:issue:`2278`). By `Joe Hamman `_. - Tests can be run in parallel with pytest-xdist. By `Tony Tung `_. - Follow up the renamings in dask; from dask.ghost to dask.overlap. By `Keisuke Fujii `_. - Now raises a ValueError when there is a conflict between dimension names and level names of MultiIndex. (:issue:`2299`) By `Keisuke Fujii `_. - Now :py:func:`~xarray.apply_ufunc` raises a ValueError when the size of ``input_core_dims`` is inconsistent with the number of arguments. (:issue:`2341`) By `Keisuke Fujii `_. - Fixed ``Dataset.filter_by_attrs()`` behavior not matching ``netCDF4.Dataset.get_variables_by_attributes()``. When more than one ``key=value`` is passed into ``Dataset.filter_by_attrs()`` it will now return a Dataset with variables which pass all the filters. (:issue:`2315`) By `Andrew Barna `_. .. _whats-new.0.10.8: v0.10.8 (18 July 2018) ---------------------- Breaking changes ~~~~~~~~~~~~~~~~ - Xarray no longer supports python 3.4. Additionally, the minimum supported versions of the following dependencies have been updated and/or clarified: - pandas: 0.18 -> 0.19 - NumPy: 1.11 -> 1.12 - Dask: 0.9 -> 0.16 - Matplotlib: unspecified -> 1.5 (:issue:`2204`). By `Joe Hamman `_. Enhancements ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.interp_like` and :py:meth:`~xarray.Dataset.interp_like` methods are newly added. (:issue:`2218`) By `Keisuke Fujii `_. - Added support for curvilinear and unstructured generic grids to :py:meth:`~xarray.DataArray.to_cdms2` and :py:meth:`~xarray.DataArray.from_cdms2` (:issue:`2262`). By `Stephane Raynaud `_. Bug fixes ~~~~~~~~~ - Fixed a bug in ``zarr`` backend which prevented use with datasets with incomplete chunks in multiple dimensions (:issue:`2225`). By `Joe Hamman `_. - Fixed a bug in :py:meth:`~Dataset.to_netcdf` which prevented writing datasets when the arrays had different chunk sizes (:issue:`2254`). By `Mike Neish `_.
- Fixed masking during the conversion to cdms2 objects by :py:meth:`~xarray.DataArray.to_cdms2` (:issue:`2262`). By `Stephane Raynaud `_. - Fixed a bug in 2D plots which incorrectly raised an error when 2D coordinates weren't monotonic (:issue:`2250`). By `Fabien Maussion `_. - Fixed warning raised in :py:meth:`~Dataset.to_netcdf` due to deprecation of ``effective_get`` in dask (:issue:`2238`). By `Joe Hamman `_. .. _whats-new.0.10.7: v0.10.7 (7 June 2018) --------------------- Enhancements ~~~~~~~~~~~~ - Plot labels now make use of metadata that follow CF conventions (:issue:`2135`). By `Deepak Cherian `_ and `Ryan Abernathey `_. - Line plots now support facetting with ``row`` and ``col`` arguments (:issue:`2107`). By `Yohai Bar Sinai `_. - :py:meth:`~xarray.DataArray.interp` and :py:meth:`~xarray.Dataset.interp` methods are newly added. See :ref:`interp` for the detail. (:issue:`2079`) By `Keisuke Fujii `_. Bug fixes ~~~~~~~~~ - Fixed a bug in ``rasterio`` backend which prevented use with ``distributed``. The ``rasterio`` backend now returns pickleable objects (:issue:`2021`). By `Joe Hamman `_. .. _whats-new.0.10.6: v0.10.6 (31 May 2018) --------------------- The minor release includes a number of bug-fixes and backwards compatible enhancements. Enhancements ~~~~~~~~~~~~ - New PseudoNetCDF backend for many Atmospheric data formats including GEOS-Chem, CAMx, NOAA arlpacked bit and many others. See ``io.PseudoNetCDF`` for more details. By `Barron Henderson `_. - The :py:class:`Dataset` constructor now aligns :py:class:`DataArray` arguments in ``data_vars`` to indexes set explicitly in ``coords``, where previously an error would be raised. (:issue:`674`) By `Maximilian Roos `_. - :py:meth:`~DataArray.sel`, :py:meth:`~DataArray.isel` & :py:meth:`~DataArray.reindex`, (and their :py:class:`Dataset` counterparts) now support supplying a ``dict`` as a first argument, as an alternative to the existing approach of supplying ``kwargs``. This allows for more robust behavior of dimension names which conflict with other keyword names, or are not strings. By `Maximilian Roos `_. - :py:meth:`~DataArray.rename` now supports supplying ``**kwargs``, as an alternative to the existing approach of supplying a ``dict`` as the first argument. By `Maximilian Roos `_. - :py:meth:`~DataArray.cumsum` and :py:meth:`~DataArray.cumprod` now support aggregation over multiple dimensions at the same time. This is the default behavior when dimensions are not specified (previously this raised an error). By `Stephan Hoyer `_ - :py:meth:`DataArray.dot` and :py:func:`dot` are partly supported with older dask<0.17.4. (related to :issue:`2203`) By `Keisuke Fujii `_. - Xarray now uses `Versioneer `__ to manage its version strings. (:issue:`1300`). By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - Fixed a regression in 0.10.4, where explicitly specifying ``dtype='S1'`` or ``dtype=str`` in ``encoding`` with ``to_netcdf()`` raised an error (:issue:`2149`). `Stephan Hoyer `_ - :py:func:`apply_ufunc` now directly validates output variables (:issue:`1931`). By `Stephan Hoyer `_. - Fixed a bug where ``to_netcdf(..., unlimited_dims='bar')`` yielded NetCDF files with spurious 0-length dimensions (i.e. ``b``, ``a``, and ``r``) (:issue:`2134`). By `Joe Hamman `_. - Removed spurious warnings with ``Dataset.update(Dataset)`` (:issue:`2161`) and ``array.equals(array)`` when ``array`` contains ``NaT`` (:issue:`2162`). By `Stephan Hoyer `_. 
- Aggregations with :py:meth:`Dataset.reduce` (including ``mean``, ``sum``, etc) no longer drop unrelated coordinates (:issue:`1470`). Also fixed a bug where non-scalar data-variables that did not include the aggregation dimension were improperly skipped. By `Stephan Hoyer `_ - Fix :meth:`~DataArray.stack` with non-unique coordinates on pandas 0.23 (:issue:`2160`). By `Stephan Hoyer `_ - Selecting data indexed by a length-1 ``CFTimeIndex`` with a slice of strings now behaves as it does when using a length-1 ``DatetimeIndex`` (i.e. it no longer falsely returns an empty array when the slice includes the value in the index) (:issue:`2165`). By `Spencer Clark `_. - Fix ``DataArray.groupby().reduce()`` mutating coordinates on the input array when grouping over dimension coordinates with duplicated entries (:issue:`2153`). By `Stephan Hoyer `_ - Fix ``Dataset.to_netcdf()`` cannot create group with ``engine="h5netcdf"`` (:issue:`2177`). By `Stephan Hoyer `_ .. _whats-new.0.10.4: v0.10.4 (16 May 2018) ---------------------- The minor release includes a number of bug-fixes and backwards compatible enhancements. A highlight is ``CFTimeIndex``, which offers support for non-standard calendars used in climate modeling. Documentation ~~~~~~~~~~~~~ - New FAQ entry, :ref:`ecosystem`. By `Deepak Cherian `_. - :ref:`assigning_values` now includes examples on how to select and assign values to a :py:class:`~xarray.DataArray` with ``.loc``. By `Chiara Lepore `_. Enhancements ~~~~~~~~~~~~ - Add an option for using a ``CFTimeIndex`` for indexing times with non-standard calendars and/or outside the Timestamp-valid range; this index enables a subset of the functionality of a standard ``pandas.DatetimeIndex``. See :ref:`CFTimeIndex` for full details. (:issue:`789`, :issue:`1084`, :issue:`1252`) By `Spencer Clark `_ with help from `Stephan Hoyer `_. - Allow for serialization of ``cftime.datetime`` objects (:issue:`789`, :issue:`1084`, :issue:`2008`, :issue:`1252`) using the standalone ``cftime`` library. By `Spencer Clark `_. - Support writing lists of strings as netCDF attributes (:issue:`2044`). By `Dan Nowacki `_. - :py:meth:`~xarray.Dataset.to_netcdf` with ``engine='h5netcdf'`` now accepts h5py encoding settings ``compression`` and ``compression_opts``, along with the NetCDF4-Python style settings ``gzip=True`` and ``complevel``. This allows using any compression plugin installed in hdf5, e.g. LZF (:issue:`1536`). By `Guido Imperiale `_. - :py:meth:`~xarray.dot` on dask-backed data will now call :func:`dask.array.einsum`. This greatly boosts speed and allows chunking on the core dims. The function now requires dask >= 0.17.3 to work on dask-backed data (:issue:`2074`). By `Guido Imperiale `_. - ``plot.line()`` learned new kwargs: ``xincrease``, ``yincrease`` that change the direction of the respective axes. By `Deepak Cherian `_. - Added the ``parallel`` option to :py:func:`open_mfdataset`. This option uses ``dask.delayed`` to parallelize the open and preprocessing steps within ``open_mfdataset``. This is expected to provide performance improvements when opening many files, particularly when used in conjunction with dask's multiprocessing or distributed schedulers (:issue:`1981`). By `Joe Hamman `_. - New ``compute`` option in :py:meth:`~xarray.Dataset.to_netcdf`, :py:meth:`~xarray.Dataset.to_zarr`, and :py:func:`~xarray.save_mfdataset` to allow for the lazy computation of netCDF and zarr stores. This feature is currently only supported by the netCDF4 and zarr backends. (:issue:`1784`). By `Joe Hamman `_. 
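For example, the new ``compute`` option can be used to set up a write lazily and trigger it later. The following is a minimal sketch, assuming dask and a netCDF backend are installed; the dataset contents, chunking and file name are placeholders for illustration only:

.. code:: python

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"temperature": (("x",), np.random.randn(1000))}).chunk({"x": 100})

    # compute=False returns a dask delayed object instead of writing immediately
    delayed_write = ds.to_netcdf("example_output.nc", compute=False)

    # trigger the actual write later, e.g. together with other delayed work
    delayed_write.compute()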
Bug fixes ~~~~~~~~~ - ``ValueError`` is raised when coordinates with the wrong size are assigned to a :py:class:`DataArray`. (:issue:`2112`) By `Keisuke Fujii `_. - Fixed a bug in :py:meth:`~xarray.DataArray.rolling` with bottleneck. Also, fixed a bug in rolling an integer dask array. (:issue:`2113`) By `Keisuke Fujii `_. - Fixed a bug where ``keep_attrs=True`` flag was neglected if :py:func:`apply_ufunc` was used with :py:class:`Variable`. (:issue:`2114`) By `Keisuke Fujii `_. - When assigning a :py:class:`DataArray` to :py:class:`Dataset`, any conflicted non-dimensional coordinates of the DataArray are now dropped. (:issue:`2068`) By `Keisuke Fujii `_. - Better error handling in ``open_mfdataset`` (:issue:`2077`). By `Stephan Hoyer `_. - ``plot.line()`` does not call ``autofmt_xdate()`` anymore. Instead it changes the rotation and horizontal alignment of labels without removing the x-axes of any other subplots in the figure (if any). By `Deepak Cherian `_. - Colorbar limits are now determined by excluding ±Infs too. By `Deepak Cherian `_. By `Joe Hamman `_. - Fixed ``to_iris`` to maintain lazy dask array after conversion (:issue:`2046`). By `Alex Hilson `_ and `Stephan Hoyer `_. .. _whats-new.0.10.3: v0.10.3 (13 April 2018) ------------------------ The minor release includes a number of bug-fixes and backwards compatible enhancements. Enhancements ~~~~~~~~~~~~ - :py:meth:`~xarray.DataArray.isin` and :py:meth:`~xarray.Dataset.isin` methods, which test each value in the array for whether it is contained in the supplied list, returning a bool array. See :ref:`selecting values with isin` for full details. Similar to the ``np.isin`` function. By `Maximilian Roos `_. - Some speed improvement to construct :py:class:`~xarray.computation.rolling.DataArrayRolling` object (:issue:`1993`) By `Keisuke Fujii `_. - Handle variables with different values for ``missing_value`` and ``_FillValue`` by masking values for both attributes; previously this resulted in a ``ValueError``. (:issue:`2016`) By `Ryan May `_. Bug fixes ~~~~~~~~~ - Fixed ``decode_cf`` function to operate lazily on dask arrays (:issue:`1372`). By `Ryan Abernathey `_. - Fixed labeled indexing with slice bounds given by xarray objects with datetime64 or timedelta64 dtypes (:issue:`1240`). By `Stephan Hoyer `_. - Attempting to convert an xarray.Dataset into a numpy array now raises an informative error message. By `Stephan Hoyer `_. - Fixed a bug in decode_cf_datetime where ``int32`` arrays weren't parsed correctly (:issue:`2002`). By `Fabien Maussion `_. - When calling ``xr.auto_combine()`` or ``xr.open_mfdataset()`` with a ``concat_dim``, the resulting dataset will have that one-element dimension (it was silently dropped, previously) (:issue:`1988`). By `Ben Root `_. .. _whats-new.0.10.2: v0.10.2 (13 March 2018) ----------------------- The minor release includes a number of bug-fixes and enhancements, along with one possibly **backwards incompatible change**. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The addition of ``__array_ufunc__`` for xarray objects (see below) means that NumPy `ufunc methods`_ (e.g., ``np.add.reduce``) that previously worked on ``xarray.DataArray`` objects by converting them into NumPy arrays will now raise ``NotImplementedError`` instead. In all cases, the work-around is simple: convert your objects explicitly into NumPy arrays before calling the ufunc (e.g., with ``.values``). ..
_ufunc methods: https://numpy.org/doc/stable/reference/ufuncs.html#methods Enhancements ~~~~~~~~~~~~ - Added :py:func:`~xarray.dot`, equivalent to :py:func:`numpy.einsum`. Also, :py:func:`~xarray.DataArray.dot` now supports ``dims`` option, which specifies the dimensions to sum over. (:issue:`1951`) By `Keisuke Fujii `_. - Support for writing xarray datasets to netCDF files (netcdf4 backend only) when using the `dask.distributed `_ scheduler (:issue:`1464`). By `Joe Hamman `_. - Support lazy vectorized-indexing. After this change, flexible indexing such as orthogonal/vectorized indexing, becomes possible for all the backend arrays. Also, lazy ``transpose`` is now also supported. (:issue:`1897`) By `Keisuke Fujii `_. - Implemented NumPy's ``__array_ufunc__`` protocol for all xarray objects (:issue:`1617`). This enables using NumPy ufuncs directly on ``xarray.Dataset`` objects with recent versions of NumPy (v1.13 and newer): .. code:: python ds = xr.Dataset({"a": 1}) np.sin(ds) This obliviates the need for the ``xarray.ufuncs`` module, which will be deprecated in the future when xarray drops support for older versions of NumPy. By `Stephan Hoyer `_. - Improve :py:func:`~xarray.DataArray.rolling` logic. :py:func:`~xarray.computation.rolling.DataArrayRolling` object now supports :py:func:`~xarray.computation.rolling.DataArrayRolling.construct` method that returns a view of the DataArray / Dataset object with the rolling-window dimension added to the last axis. This enables more flexible operation, such as strided rolling, windowed rolling, ND-rolling, short-time FFT and convolution. (:issue:`1831`, :issue:`1142`, :issue:`819`) By `Keisuke Fujii `_. - :py:func:`~plot.line()` learned to make plots with data on x-axis if so specified. (:issue:`575`) By `Deepak Cherian `_. Bug fixes ~~~~~~~~~ - Raise an informative error message when using ``apply_ufunc`` with numpy v1.11 (:issue:`1956`). By `Stephan Hoyer `_. - Fix the precision drop after indexing datetime64 arrays (:issue:`1932`). By `Keisuke Fujii `_. - Silenced irrelevant warnings issued by ``open_rasterio`` (:issue:`1964`). By `Stephan Hoyer `_. - Fix kwarg ``colors`` clashing with auto-inferred ``cmap`` (:issue:`1461`) By `Deepak Cherian `_. - Fix :py:func:`~xarray.plot.imshow` error when passed an RGB array with size one in a spatial dimension. By `Zac Hatfield-Dodds `_. .. _whats-new.0.10.1: v0.10.1 (25 February 2018) -------------------------- The minor release includes a number of bug-fixes and backwards compatible enhancements. Documentation ~~~~~~~~~~~~~ - Added a new guide on :ref:`contributing` (:issue:`640`) By `Joe Hamman `_. - Added apply_ufunc example to :ref:`/examples/weather-data.ipynb#Toy-weather-data` (:issue:`1844`). By `Liam Brannigan `_. - New entry ``Why don’t aggregations return Python scalars?`` in the :ref:`faq` (:issue:`1726`). By `0x0L `_. Enhancements ~~~~~~~~~~~~ **New functions and methods**: - Added :py:meth:`DataArray.to_iris` and :py:meth:`DataArray.from_iris` for converting data arrays to and from Iris_ Cubes with the same data and coordinates (:issue:`621` and :issue:`37`). By `Neil Parley `_ and `Duncan Watson-Parris `_. - Experimental support for using `Zarr`_ as storage layer for xarray (:issue:`1223`). By `Ryan Abernathey `_ and `Joe Hamman `_. - New :py:meth:`~xarray.DataArray.rank` on arrays and datasets. Requires bottleneck (:issue:`1731`). By `0x0L `_. - ``.dt`` accessor can now ceil, floor and round timestamps to specified frequency. By `Deepak Cherian `_. 
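As an illustration of the timestamp rounding described above, here is a minimal sketch; the example times and the frequency strings are placeholders:

.. code:: python

    import pandas as pd
    import xarray as xr

    times = xr.DataArray(
        pd.date_range("2000-01-01 06:17", periods=3, freq="H"), dims="time"
    )

    # round each timestamp down, up, or to the nearest multiple of the frequency
    times.dt.floor("D")
    times.dt.ceil("D")
    times.dt.round("H")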
**Plotting enhancements**: - :func:`xarray.plot.imshow` now handles RGB and RGBA images. Saturation can be adjusted with ``vmin`` and ``vmax``, or with ``robust=True``. By `Zac Hatfield-Dodds `_. - :py:func:`~plot.contourf()` learned to contour 2D variables that have both a 1D coordinate (e.g. time) and a 2D coordinate (e.g. depth as a function of time) (:issue:`1737`). By `Deepak Cherian `_. - :py:func:`~plot.plot()` rotates x-axis ticks if x-axis is time. By `Deepak Cherian `_. - :py:func:`~plot.line()` can draw multiple lines if provided with a 2D variable. By `Deepak Cherian `_. **Other enhancements**: - Reduce methods such as :py:func:`DataArray.sum()` now handle object-type arrays. .. code:: python da = xr.DataArray(np.array([True, False, np.nan], dtype=object), dims="x") da.sum() (:issue:`1866`) By `Keisuke Fujii `_. - Reduce methods such as :py:func:`DataArray.sum()` now accept ``dtype`` arguments. (:issue:`1838`) By `Keisuke Fujii `_. - Added nodatavals attribute to DataArray when using :py:func:`~xarray.open_rasterio`. (:issue:`1736`). By `Alan Snow `_. - Use ``pandas.Grouper`` class in xarray resample methods rather than the deprecated ``pandas.TimeGrouper`` class (:issue:`1766`). By `Joe Hamman `_. - Experimental support for parsing ENVI metadata to coordinates and attributes in :py:func:`xarray.open_rasterio`. By `Matti Eskelinen `_. - Reduce memory usage when decoding a variable with a scale_factor, by converting 8-bit and 16-bit integers to float32 instead of float64 (:pull:`1840`), and keeping float16 and float32 as float32 (:issue:`1842`). Correspondingly, encoded variables may also be saved with a smaller dtype. By `Zac Hatfield-Dodds `_. - Speed of reindexing/alignment with dask array is orders of magnitude faster when inserting missing values (:issue:`1847`). By `Stephan Hoyer `_. - Fix ``axis`` keyword ignored when applying ``np.squeeze`` to ``DataArray`` (:issue:`1487`). By `Florian Pinault `_. - ``netcdf4-python`` has moved its time handling in the ``netcdftime`` module to a standalone package (`netcdftime`_). As such, xarray now considers `netcdftime`_ an optional dependency. One benefit of this change is that it allows for encoding/decoding of datetimes with non-standard calendars without the ``netcdf4-python`` dependency (:issue:`1084`). By `Joe Hamman `_. .. _Zarr: http://zarr.readthedocs.io/ .. _Iris: http://scitools-iris.readthedocs.io .. _netcdftime: https://unidata.github.io/netcdftime Bug fixes ~~~~~~~~~ - Rolling aggregation with ``center=True`` option now gives the same result with pandas including the last element (:issue:`1046`). By `Keisuke Fujii `_. - Support indexing with a 0d-np.ndarray (:issue:`1921`). By `Keisuke Fujii `_. - Added warning in api.py of a netCDF4 bug that occurs when the filepath has 88 characters (:issue:`1745`). By `Liam Brannigan `_. - Fixed encoding of multi-dimensional coordinates in :py:meth:`~Dataset.to_netcdf` (:issue:`1763`). By `Mike Neish `_. - Fixed chunking with non-file-based rasterio datasets (:issue:`1816`) and refactored rasterio test suite. By `Ryan Abernathey `_ - Bug fix in open_dataset(engine='pydap') (:issue:`1775`). By `Keisuke Fujii `_. - Bug fix in vectorized assignment (:issue:`1743`, :issue:`1744`).
Now item assignment to :py:meth:`DataArray.__setitem__` checks coordinates of target, destination and keys. If there are any conflict among these coordinates, ``IndexError`` will be raised. By `Keisuke Fujii `_. - Properly point ``DataArray.__dask_scheduler__`` to ``dask.threaded.get``. By `Matthew Rocklin `_. - Bug fixes in :py:meth:`DataArray.plot.imshow`: all-NaN arrays and arrays with size one in some dimension can now be plotted, which is good for exploring satellite imagery (:issue:`1780`). By `Zac Hatfield-Dodds `_. - Fixed ``UnboundLocalError`` when opening netCDF file (:issue:`1781`). By `Stephan Hoyer `_. - The ``variables``, ``attrs``, and ``dimensions`` properties have been deprecated as part of a bug fix addressing an issue where backends were unintentionally loading the datastores data and attributes repeatedly during writes (:issue:`1798`). By `Joe Hamman `_. - Compatibility fixes to plotting module for NumPy 1.14 and pandas 0.22 (:issue:`1813`). By `Joe Hamman `_. - Bug fix in encoding coordinates with ``{'_FillValue': None}`` in netCDF metadata (:issue:`1865`). By `Chris Roth `_. - Fix indexing with lists for arrays loaded from netCDF files with ``engine='h5netcdf`` (:issue:`1864`). By `Stephan Hoyer `_. - Corrected a bug with incorrect coordinates for non-georeferenced geotiff files (:issue:`1686`). Internally, we now use the rasterio coordinate transform tool instead of doing the computations ourselves. A ``parse_coordinates`` kwarg has been added to :py:func:`~open_rasterio` (set to ``True`` per default). By `Fabien Maussion `_. - The colors of discrete colormaps are now the same regardless if ``seaborn`` is installed or not (:issue:`1896`). By `Fabien Maussion `_. - Fixed dtype promotion rules in :py:func:`where` and :py:func:`concat` to match pandas (:issue:`1847`). A combination of strings/numbers or unicode/bytes now promote to object dtype, instead of strings or unicode. By `Stephan Hoyer `_. - Fixed bug where :py:meth:`~xarray.DataArray.isnull` was loading data stored as dask arrays (:issue:`1937`). By `Joe Hamman `_. .. _whats-new.0.10.0: v0.10.0 (20 November 2017) -------------------------- This is a major release that includes bug fixes, new features and a few backwards incompatible changes. Highlights include: - Indexing now supports broadcasting over dimensions, similar to NumPy's vectorized indexing (but better!). - :py:meth:`~DataArray.resample` has a new groupby-like API like pandas. - :py:func:`~xarray.apply_ufunc` facilitates wrapping and parallelizing functions written for NumPy arrays. - Performance improvements, particularly for dask and :py:func:`open_mfdataset`. Breaking changes ~~~~~~~~~~~~~~~~ - xarray now supports a form of vectorized indexing with broadcasting, where the result of indexing depends on dimensions of indexers, e.g., ``array.sel(x=ind)`` with ``ind.dims == ('y',)``. Alignment between coordinates on indexed and indexing objects is also now enforced. Due to these changes, existing uses of xarray objects to index other xarray objects will break in some cases. The new indexing API is much more powerful, supporting outer, diagonal and vectorized indexing in a single interface. The ``isel_points`` and ``sel_points`` methods are deprecated, since they are now redundant with the ``isel`` / ``sel`` methods. See :ref:`vectorized_indexing` for the details (:issue:`1444`, :issue:`1436`). By `Keisuke Fujii `_ and `Stephan Hoyer `_. 
- A new resampling interface to match pandas' groupby-like API was added to :py:meth:`Dataset.resample` and :py:meth:`DataArray.resample` (:issue:`1272`). :ref:`Timeseries resampling ` is fully supported for data with arbitrary dimensions as is both downsampling and upsampling (including linear, quadratic, cubic, and spline interpolation). Old syntax: .. jupyter-input:: ds.resample("24H", dim="time", how="max") New syntax: .. jupyter-input:: ds.resample(time="24H").max() Note that both versions are currently supported, but using the old syntax will produce a warning encouraging users to adopt the new syntax. By `Daniel Rothenberg `_. - Calling ``repr()`` or printing xarray objects at the command line or in a Jupyter Notebook will no longer automatically compute dask variables or load data on arrays lazily loaded from disk (:issue:`1522`). By `Guido Imperiale `_. - Supplying ``coords`` as a dictionary to the ``DataArray`` constructor without also supplying an explicit ``dims`` argument is no longer supported. This behavior was deprecated in version 0.9 but will now raise an error (:issue:`727`). - Several existing features have been deprecated and will change to new behavior in xarray v0.11. If you use any of them with xarray v0.10, you should see a ``FutureWarning`` that describes how to update your code: - ``Dataset.T`` has been deprecated as an alias for ``Dataset.transpose()`` (:issue:`1232`). In the next major version of xarray, it will provide shortcut lookup for variables or attributes with name ``'T'``. - ``DataArray.__contains__`` (e.g., ``key in data_array``) currently checks for membership in ``DataArray.coords``. In the next major version of xarray, it will check membership in the array data found in ``DataArray.values`` instead (:issue:`1267`). - Direct iteration over and counting a ``Dataset`` (e.g., ``[k for k in ds]``, ``ds.keys()``, ``ds.values()``, ``len(ds)`` and ``if ds``) currently includes all variables, both data and coordinates. For improved usability and consistency with pandas, in the next major version of xarray these will change to only include data variables (:issue:`884`). Use ``ds.variables``, ``ds.data_vars`` or ``ds.coords`` as alternatives. - Changes to minimum versions of dependencies: - Old numpy < 1.11 and pandas < 0.18 are no longer supported (:issue:`1512`). By `Keisuke Fujii `_. - The minimum supported version of bottleneck has increased to 1.1 (:issue:`1279`). By `Joe Hamman `_. Enhancements ~~~~~~~~~~~~ **New functions/methods** - New helper function :py:func:`~xarray.apply_ufunc` for wrapping functions written to work on NumPy arrays to support labels on xarray objects (:issue:`770`). ``apply_ufunc`` also supports automatic parallelization for many functions with dask. See :ref:`compute.wrapping-custom` and :ref:`dask.automatic-parallelization` for details. By `Stephan Hoyer `_. - Added new method :py:meth:`Dataset.to_dask_dataframe`, which converts a dataset into a dask dataframe. This allows lazy loading of data from a dataset containing dask arrays (:issue:`1462`). By `James Munroe `_. - New function :py:func:`~xarray.where` for conditionally switching between values in xarray objects, like :py:func:`numpy.where`: .. jupyter-input:: import xarray as xr arr = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=("x", "y")) xr.where(arr % 2, "even", "odd") .. jupyter-output:: array([['even', 'odd', 'even'], ['odd', 'even', 'odd']], dtype='`_. - Added :py:func:`~xarray.show_versions` function to aid in debugging (:issue:`1485`). By `Joe Hamman `_.
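To illustrate wrapping a plain NumPy function with ``apply_ufunc``, here is a minimal sketch; the ``demean`` helper, the random data and the dimension names are made up for this example:

.. code:: python

    import numpy as np
    import xarray as xr


    def demean(a, axis=-1):
        # NumPy-only function: remove the mean along the given axis
        return a - a.mean(axis=axis, keepdims=True)


    arr = xr.DataArray(np.random.randn(4, 3), dims=("x", "y"))

    # "y" is declared as the core dimension, so it is moved to the last axis
    # before demean is applied and restored on the result
    xr.apply_ufunc(demean, arr, input_core_dims=[["y"]], output_core_dims=[["y"]])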
**Performance improvements** - :py:func:`~xarray.concat` was computing variables that aren't in memory (e.g. dask-based) multiple times; :py:func:`~xarray.open_mfdataset` was loading them multiple times from disk. Now, both functions will instead load them at most once and, if they do, store them in memory in the concatenated array/dataset (:issue:`1521`). By `Guido Imperiale `_. - Speed-up (x 100) of ``xarray.conventions.decode_cf_datetime``. By `Christian Chwala `_. **IO related improvements** - Unicode strings (``str`` on Python 3) are now round-tripped successfully even when written as character arrays (e.g., as netCDF3 files or when using ``engine='scipy'``) (:issue:`1638`). This is controlled by the ``_Encoding`` attribute convention, which is also understood directly by the netCDF4-Python interface. See :ref:`io.string-encoding` for full details. By `Stephan Hoyer `_. - Support for ``data_vars`` and ``coords`` keywords from :py:func:`~xarray.concat` added to :py:func:`~xarray.open_mfdataset` (:issue:`438`). Using these keyword arguments can significantly reduce memory usage and increase speed. By `Oleksandr Huziy `_. - Support for :py:class:`pathlib.Path` objects added to :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_mfdataset`, ``xarray.to_netcdf``, and :py:func:`~xarray.save_mfdataset` (:issue:`799`): .. jupyter-input:: from pathlib import Path # In Python 2, use pathlib2! data_dir = Path("data/") one_file = data_dir / "dta_for_month_01.nc" xr.open_dataset(one_file) By `Willi Rath `_. - You can now explicitly disable any default ``_FillValue`` (``NaN`` for floating point values) by passing the encoding ``{'_FillValue': None}`` (:issue:`1598`). By `Stephan Hoyer `_. - More attributes available in :py:attr:`~xarray.Dataset.attrs` dictionary when raster files are opened with :py:func:`~xarray.open_rasterio`. By `Greg Brener `_. - Support for NetCDF files using an ``_Unsigned`` attribute to indicate that a a signed integer data type should be interpreted as unsigned bytes (:issue:`1444`). By `Eric Bruning `_. - Support using an existing, opened netCDF4 ``Dataset`` with :py:class:`~xarray.backends.NetCDF4DataStore`. This permits creating an :py:class:`~xarray.Dataset` from a netCDF4 ``Dataset`` that has been opened using other means (:issue:`1459`). By `Ryan May `_. - Changed :py:class:`~xarray.backends.PydapDataStore` to take a Pydap dataset. This permits opening Opendap datasets that require authentication, by instantiating a Pydap dataset with a session object. Also added :py:meth:`xarray.backends.PydapDataStore.open` which takes a url and session object (:issue:`1068`). By `Philip Graae `_. - Support reading and writing unlimited dimensions with h5netcdf (:issue:`1636`). By `Joe Hamman `_. **Other improvements** - Added ``_ipython_key_completions_`` to xarray objects, to enable autocompletion for dictionary-like access in IPython, e.g., ``ds['tem`` + tab -> ``ds['temperature']`` (:issue:`1628`). By `Keisuke Fujii `_. - Support passing keyword arguments to ``load``, ``compute``, and ``persist`` methods. Any keyword arguments supplied to these methods are passed on to the corresponding dask function (:issue:`1523`). By `Joe Hamman `_. - Encoding attributes are now preserved when xarray objects are concatenated. The encoding is copied from the first object (:issue:`1297`). By `Joe Hamman `_ and `Gerrit Holl `_. - Support applying rolling window operations using bottleneck's moving window functions on data stored as dask arrays (:issue:`1279`). By `Joe Hamman `_. 
- Experimental support for the Dask collection interface (:issue:`1674`). By `Matthew Rocklin `_. Bug fixes ~~~~~~~~~ - Suppress ``RuntimeWarning`` issued by ``numpy`` for "invalid value comparisons" (e.g. ``NaN``). Xarray now behaves similarly to pandas in its treatment of binary and unary operations on objects with NaNs (:issue:`1657`). By `Joe Hamman `_. - Unsigned int support for reduce methods with ``skipna=True`` (:issue:`1562`). By `Keisuke Fujii `_. - Fixes to ensure xarray works properly with pandas 0.21: - Fix :py:meth:`~xarray.DataArray.isnull` method (:issue:`1549`). - :py:meth:`~xarray.DataArray.to_series` and :py:meth:`~xarray.Dataset.to_dataframe` should not return a ``pandas.MultiIndex`` for 1D data (:issue:`1548`). - Fix plotting with datetime64 axis labels (:issue:`1661`). By `Stephan Hoyer `_. - :py:func:`~xarray.open_rasterio` method now shifts the rasterio coordinates so that they are centered in each pixel (:issue:`1468`). By `Greg Brener `_. - :py:meth:`~xarray.Dataset.rename` method now doesn't throw errors if some ``Variable`` is renamed to the same name as another ``Variable`` as long as that other ``Variable`` is also renamed (:issue:`1477`). This method now does throw when two ``Variables`` would end up with the same name after the rename (since one of them would get overwritten in this case). By `Prakhar Goel `_. - Fix :py:func:`xarray.testing.assert_allclose` to actually use ``atol`` and ``rtol`` arguments when called on ``DataArray`` objects (:issue:`1488`). By `Stephan Hoyer `_. - xarray ``quantile`` methods now properly raise a ``TypeError`` when applied to objects with data stored as ``dask`` arrays (:issue:`1529`). By `Joe Hamman `_. - Fix positional indexing to allow the use of unsigned integers (:issue:`1405`). By `Joe Hamman `_ and `Gerrit Holl `_. - Creating a :py:class:`Dataset` now raises ``MergeError`` if a coordinate shares a name with a dimension but is comprised of arbitrary dimensions (:issue:`1120`). By `Joe Hamman `_. - :py:func:`~xarray.open_rasterio` method now skips rasterio's ``crs`` attribute if its value is ``None`` (:issue:`1520`). By `Leevi Annala `_. - Fix :py:func:`xarray.DataArray.to_netcdf` to return bytes when no path is provided (:issue:`1410`). By `Joe Hamman `_. - Fix :py:func:`xarray.save_mfdataset` to properly raise an informative error when objects other than ``Dataset`` are provided (:issue:`1555`). By `Joe Hamman `_. - :py:func:`xarray.Dataset.copy` would not preserve the encoding property (:issue:`1586`). By `Guido Imperiale `_. - :py:func:`xarray.concat` would eagerly load dask variables into memory if the first argument was a numpy variable (:issue:`1588`). By `Guido Imperiale `_. - Fix bug in :py:meth:`~xarray.Dataset.to_netcdf` when writing in append mode (:issue:`1215`). By `Joe Hamman `_. - Fix ``netCDF4`` backend to properly roundtrip the ``shuffle`` encoding option (:issue:`1606`). By `Joe Hamman `_. - Fix bug when using ``pytest`` class decorators to skipping certain unittests. The previous behavior unintentionally causing additional tests to be skipped (:issue:`1531`). By `Joe Hamman `_. - Fix pynio backend for upcoming release of pynio with Python 3 support (:issue:`1611`). By `Ben Hillman `_. - Fix ``seaborn`` import warning for Seaborn versions 0.8 and newer when the ``apionly`` module was deprecated. (:issue:`1633`). By `Joe Hamman `_. - Fix COMPAT: MultiIndex checking is fragile (:issue:`1833`). By `Florian Pinault `_. - Fix ``rasterio`` backend for Rasterio versions 1.0alpha10 and newer. (:issue:`1641`). 
By `Chris Holden `_. Bug fixes after rc1 ~~~~~~~~~~~~~~~~~~~ - Suppress warning in IPython autocompletion, related to the deprecation of ``.T`` attributes (:issue:`1675`). By `Keisuke Fujii `_. - Fix a bug in lazily-indexing netCDF array. (:issue:`1688`) By `Keisuke Fujii `_. - (Internal bug) MemoryCachedArray now supports the orthogonal indexing. Also made some internal cleanups around array wrappers (:issue:`1429`). By `Keisuke Fujii `_. - (Internal bug) MemoryCachedArray now always wraps ``np.ndarray`` by ``NumpyIndexingAdapter``. (:issue:`1694`) By `Keisuke Fujii `_. - Fix importing xarray when running Python with ``-OO`` (:issue:`1706`). By `Stephan Hoyer `_. - Saving a netCDF file with a coordinates with a spaces in its names now raises an appropriate warning (:issue:`1689`). By `Stephan Hoyer `_. - Fix two bugs that were preventing dask arrays from being specified as coordinates in the DataArray constructor (:issue:`1684`). By `Joe Hamman `_. - Fixed ``apply_ufunc`` with ``dask='parallelized'`` for scalar arguments (:issue:`1697`). By `Stephan Hoyer `_. - Fix "Chunksize cannot exceed dimension size" error when writing netCDF4 files loaded from disk (:issue:`1225`). By `Stephan Hoyer `_. - Validate the shape of coordinates with names matching dimensions in the DataArray constructor (:issue:`1709`). By `Stephan Hoyer `_. - Raise ``NotImplementedError`` when attempting to save a MultiIndex to a netCDF file (:issue:`1547`). By `Stephan Hoyer `_. - Remove netCDF dependency from rasterio backend tests. By `Matti Eskelinen `_ Bug fixes after rc2 ~~~~~~~~~~~~~~~~~~~ - Fixed unexpected behavior in ``Dataset.set_index()`` and ``DataArray.set_index()`` introduced by pandas 0.21.0. Setting a new index with a single variable resulted in 1-level ``pandas.MultiIndex`` instead of a simple ``pandas.Index`` (:issue:`1722`). By `Benoit Bovy `_. - Fixed unexpected memory loading of backend arrays after ``print``. (:issue:`1720`). By `Keisuke Fujii `_. .. _whats-new.0.9.6: v0.9.6 (8 June 2017) -------------------- This release includes a number of backwards compatible enhancements and bug fixes. Enhancements ~~~~~~~~~~~~ - New :py:meth:`~xarray.Dataset.sortby` method to ``Dataset`` and ``DataArray`` that enable sorting along dimensions (:issue:`967`). See :ref:`the docs ` for examples. By `Chun-Wei Yuan `_ and `Kyle Heuton `_. - Add ``.dt`` accessor to DataArrays for computing datetime-like properties for the values they contain, similar to ``pandas.Series`` (:issue:`358`). By `Daniel Rothenberg `_. - Renamed internal dask arrays created by ``open_dataset`` to match new dask conventions (:issue:`1343`). By `Ryan Abernathey `_. - :py:meth:`~xarray.as_variable` is now part of the public API (:issue:`1303`). By `Benoit Bovy `_. - :py:func:`~xarray.align` now supports ``join='exact'``, which raises an error instead of aligning when indexes to be aligned are not equal. By `Stephan Hoyer `_. - New function :py:func:`~xarray.open_rasterio` for opening raster files with the `rasterio `_ library. See :ref:`the docs ` for details. By `Joe Hamman `_, `Nic Wayand `_ and `Fabien Maussion `_ Bug fixes ~~~~~~~~~ - Fix error from repeated indexing of datasets loaded from disk (:issue:`1374`). By `Stephan Hoyer `_. - Fix a bug where ``.isel_points`` wrongly assigns unselected coordinate to ``data_vars``. By `Keisuke Fujii `_. - Tutorial datasets are now checked against a reference MD5 sum to confirm successful download (:issue:`1392`). By `Matthew Gidden `_. 
- ``DataArray.chunk()`` now accepts dask specific kwargs like ``Dataset.chunk()`` does. By `Fabien Maussion `_. - Support for ``engine='pydap'`` with recent releases of Pydap (3.2.2+), including on Python 3 (:issue:`1174`). Documentation ~~~~~~~~~~~~~ - A new `gallery `_ allows adding interactive examples to the documentation. By `Fabien Maussion `_. Testing ~~~~~~~ - Fix test suite failure caused by changes to ``pandas.cut`` function (:issue:`1386`). By `Ryan Abernathey `_. - Enhanced the test suite by use of the ``@network`` decorator, which is controlled via the ``--run-network-tests`` command line argument to ``py.test`` (:issue:`1393`). By `Matthew Gidden `_. .. _whats-new.0.9.5: v0.9.5 (17 April, 2017) ----------------------- Remove an inadvertently introduced print statement. .. _whats-new.0.9.3: v0.9.3 (16 April, 2017) ----------------------- This minor release includes bug-fixes and backwards compatible enhancements. Enhancements ~~~~~~~~~~~~ - New :py:meth:`~xarray.DataArray.persist` method to Datasets and DataArrays to enable persisting data in distributed memory when using Dask (:issue:`1344`). By `Matthew Rocklin `_. - New :py:meth:`~xarray.DataArray.expand_dims` method for ``DataArray`` and ``Dataset`` (:issue:`1326`). By `Keisuke Fujii `_. Bug fixes ~~~~~~~~~ - Fix ``.where()`` with ``drop=True`` when arguments do not have indexes (:issue:`1350`). This bug, introduced in v0.9, resulted in xarray producing incorrect results in some cases. By `Stephan Hoyer `_. - Fixed writing to file-like objects with :py:meth:`~xarray.Dataset.to_netcdf` (:issue:`1320`). `Stephan Hoyer `_. - Fixed explicitly setting ``engine='scipy'`` with ``to_netcdf`` when not providing a path (:issue:`1321`). `Stephan Hoyer `_. - Fixed ``open_dataarray`` not properly passing its parameters to ``open_dataset`` (:issue:`1359`). `Stephan Hoyer `_. - Ensure test suite works when run from an installed version of xarray (:issue:`1336`). Use ``@pytest.mark.slow`` instead of a custom flag to mark slow tests. By `Stephan Hoyer `_ .. _whats-new.0.9.2: v0.9.2 (2 April 2017) --------------------- The minor release includes bug-fixes and backwards compatible enhancements. Enhancements ~~~~~~~~~~~~ - ``.rolling()`` on Dataset is now supported (:issue:`859`). By `Keisuke Fujii `_. - When bottleneck version 1.1 or later is installed, use bottleneck for rolling ``var``, ``argmin``, ``argmax``, and ``rank`` computations. Also, rolling median now accepts a ``min_periods`` argument (:issue:`1276`). By `Joe Hamman `_. - When ``.plot()`` is called on a 2D DataArray and only one dimension is specified with ``x=`` or ``y=``, the other dimension is now guessed (:issue:`1291`). By `Vincent Noel `_. - Added new method :py:meth:`~Dataset.assign_attrs` to ``DataArray`` and ``Dataset``, a chained-method compatible implementation of the ``dict.update`` method on attrs (:issue:`1281`). By `Henry S. Harrison `_. - Added new ``autoclose=True`` argument to :py:func:`~xarray.open_mfdataset` to explicitly close opened files when not in use to prevent occurrence of an OS Error related to too many open files (:issue:`1198`). Note, the default is ``autoclose=False``, which is consistent with previous xarray behavior. By `Phillip J. Wolfram `_. - The ``repr()`` of ``Dataset`` and ``DataArray`` attributes uses a similar format to coordinates and variables, with vertically aligned entries truncated to fit on a single line (:issue:`1319`).
Hopefully this will stop people writing ``data.attrs = {}`` and discarding metadata in notebooks for the sake of cleaner output. The full metadata is still available as ``data.attrs``. By `Zac Hatfield-Dodds `_. - Enhanced tests suite by use of ``@slow`` and ``@flaky`` decorators, which are controlled via ``--run-flaky`` and ``--skip-slow`` command line arguments to ``py.test`` (:issue:`1336`). By `Stephan Hoyer `_ and `Phillip J. Wolfram `_. - New aggregation on rolling objects :py:meth:`~computation.rolling.DataArrayRolling.count` which providing a rolling count of valid values (:issue:`1138`). Bug fixes ~~~~~~~~~ - Rolling operations now keep preserve original dimension order (:issue:`1125`). By `Keisuke Fujii `_. - Fixed ``sel`` with ``method='nearest'`` on Python 2.7 and 64-bit Windows (:issue:`1140`). `Stephan Hoyer `_. - Fixed ``where`` with ``drop='True'`` for empty masks (:issue:`1341`). By `Stephan Hoyer `_ and `Phillip J. Wolfram `_. .. _whats-new.0.9.1: v0.9.1 (30 January 2017) ------------------------ Renamed the "Unindexed dimensions" section in the ``Dataset`` and ``DataArray`` repr (added in v0.9.0) to "Dimensions without coordinates" (:issue:`1199`). .. _whats-new.0.9.0: v0.9.0 (25 January 2017) ------------------------ This major release includes five months worth of enhancements and bug fixes from 24 contributors, including some significant changes that are not fully backwards compatible. Highlights include: - Coordinates are now *optional* in the xarray data model, even for dimensions. - Changes to caching, lazy loading and pickling to improve xarray's experience for parallel computing. - Improvements for accessing and manipulating ``pandas.MultiIndex`` levels. - Many new methods and functions, including :py:meth:`~DataArray.quantile`, :py:meth:`~DataArray.cumsum`, :py:meth:`~DataArray.cumprod` :py:attr:`~DataArray.combine_first` :py:meth:`~DataArray.set_index`, :py:meth:`~DataArray.reset_index`, :py:meth:`~DataArray.reorder_levels`, :py:func:`~xarray.full_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.ones_like` :py:func:`~xarray.open_dataarray`, :py:meth:`~DataArray.compute`, :py:meth:`Dataset.info`, :py:func:`testing.assert_equal`, :py:func:`testing.assert_identical`, and :py:func:`testing.assert_allclose`. Breaking changes ~~~~~~~~~~~~~~~~ - Index coordinates for each dimensions are now optional, and no longer created by default :issue:`1017`. You can identify such dimensions without coordinates by their appearance in list of "Dimensions without coordinates" in the ``Dataset`` or ``DataArray`` repr: .. jupyter-input:: xr.Dataset({"foo": (("x", "y"), [[1, 2]])}) .. jupyter-output:: Dimensions: (x: 1, y: 2) Dimensions without coordinates: x, y Data variables: foo (x, y) int64 1 2 This has a number of implications: - :py:func:`~align` and :py:meth:`~Dataset.reindex` can now error, if dimensions labels are missing and dimensions have different sizes. - Because pandas does not support missing indexes, methods such as ``to_dataframe``/``from_dataframe`` and ``stack``/``unstack`` no longer roundtrip faithfully on all inputs. Use :py:meth:`~Dataset.reset_index` to remove undesired indexes. - ``Dataset.__delitem__`` and :py:meth:`~Dataset.drop` no longer delete/drop variables that have dimensions matching a deleted/dropped variable. - ``DataArray.coords.__delitem__`` is now allowed on variables matching dimension names. - ``.sel`` and ``.loc`` now handle indexing along a dimension without coordinate labels by doing integer based indexing. 
See :ref:`indexing.missing_coordinates` for an example. - :py:attr:`~Dataset.indexes` is no longer guaranteed to include all dimensions names as keys. The new method :py:meth:`~Dataset.get_index` has been added to get an index for a dimension guaranteed, falling back to produce a default ``RangeIndex`` if necessary. - The default behavior of ``merge`` is now ``compat='no_conflicts'``, so some merges will now succeed in cases that previously raised ``xarray.MergeError``. Set ``compat='broadcast_equals'`` to restore the previous default. See :ref:`combining.no_conflicts` for more details. - Reading :py:attr:`~DataArray.values` no longer always caches values in a NumPy array :issue:`1128`. Caching of ``.values`` on variables read from netCDF files on disk is still the default when :py:func:`open_dataset` is called with ``cache=True``. By `Guido Imperiale `_ and `Stephan Hoyer `_. - Pickling a ``Dataset`` or ``DataArray`` linked to a file on disk no longer caches its values into memory before pickling (:issue:`1128`). Instead, pickle stores file paths and restores objects by reopening file references. This enables preliminary, experimental use of xarray for opening files with `dask.distributed `_. By `Stephan Hoyer `_. - Coordinates used to index a dimension are now loaded eagerly into :py:class:`pandas.Index` objects, instead of loading the values lazily. By `Guido Imperiale `_. - Automatic levels for 2d plots are now guaranteed to land on ``vmin`` and ``vmax`` when these kwargs are explicitly provided (:issue:`1191`). The automated level selection logic also slightly changed. By `Fabien Maussion `_. - ``DataArray.rename()`` behavior changed to strictly change the ``DataArray.name`` if called with string argument, or strictly change coordinate names if called with dict-like argument. By `Markus Gonser `_. - By default ``to_netcdf()`` add a ``_FillValue = NaN`` attributes to float types. By `Frederic Laliberte `_. - ``repr`` on ``DataArray`` objects uses an shortened display for NumPy array data that is less likely to overflow onto multiple pages (:issue:`1207`). By `Stephan Hoyer `_. - xarray no longer supports python 3.3, versions of dask prior to v0.9.0, or versions of bottleneck prior to v1.0. Deprecations ~~~~~~~~~~~~ - Renamed the ``Coordinate`` class from xarray's low level API to :py:class:`~xarray.IndexVariable`. ``Variable.to_variable`` and ``Variable.to_coord`` have been renamed to :py:meth:`~xarray.Variable.to_base_variable` and :py:meth:`~xarray.Variable.to_index_variable`. - Deprecated supplying ``coords`` as a dictionary to the ``DataArray`` constructor without also supplying an explicit ``dims`` argument. The old behavior encouraged relying on the iteration order of dictionaries, which is a bad practice (:issue:`727`). - Removed a number of methods deprecated since v0.7.0 or earlier: ``load_data``, ``vars``, ``drop_vars``, ``dump``, ``dumps`` and the ``variables`` keyword argument to ``Dataset``. - Removed the dummy module that enabled ``import xray``. Enhancements ~~~~~~~~~~~~ - Added new method :py:meth:`~DataArray.combine_first` to ``DataArray`` and ``Dataset``, based on the pandas method of the same name (see :ref:`combine`). By `Chun-Wei Yuan `_. - Added the ability to change default automatic alignment (arithmetic_join="inner") for binary operations via :py:func:`~xarray.set_options()` (see :ref:`math automatic alignment`). By `Chun-Wei Yuan `_. - Add checking of ``attr`` names and values when saving to netCDF, raising useful error messages if they are invalid. 
(:issue:`911`). By `Robin Wilson `_. - Added ability to save ``DataArray`` objects directly to netCDF files using :py:meth:`~xarray.DataArray.to_netcdf`, and to load directly from netCDF files using :py:func:`~xarray.open_dataarray` (:issue:`915`). These remove the need to convert a ``DataArray`` to a ``Dataset`` before saving as a netCDF file, and deals with names to ensure a perfect 'roundtrip' capability. By `Robin Wilson `_. - Multi-index levels are now accessible as "virtual" coordinate variables, e.g., ``ds['time']`` can pull out the ``'time'`` level of a multi-index (see :ref:`coordinates`). ``sel`` also accepts providing multi-index levels as keyword arguments, e.g., ``ds.sel(time='2000-01')`` (see :ref:`multi-level indexing`). By `Benoit Bovy `_. - Added ``set_index``, ``reset_index`` and ``reorder_levels`` methods to easily create and manipulate (multi-)indexes (see :ref:`reshape.set_index`). By `Benoit Bovy `_. - Added the ``compat`` option ``'no_conflicts'`` to ``merge``, allowing the combination of xarray objects with disjoint (:issue:`742`) or overlapping (:issue:`835`) coordinates as long as all present data agrees. By `Johnnie Gray `_. See :ref:`combining.no_conflicts` for more details. - It is now possible to set ``concat_dim=None`` explicitly in :py:func:`~xarray.open_mfdataset` to disable inferring a dimension along which to concatenate. By `Stephan Hoyer `_. - Added methods :py:meth:`DataArray.compute`, :py:meth:`Dataset.compute`, and :py:meth:`Variable.compute` as a non-mutating alternative to :py:meth:`~DataArray.load`. By `Guido Imperiale `_. - Adds DataArray and Dataset methods :py:meth:`~xarray.DataArray.cumsum` and :py:meth:`~xarray.DataArray.cumprod`. By `Phillip J. Wolfram `_. - New properties :py:attr:`Dataset.sizes` and :py:attr:`DataArray.sizes` for providing consistent access to dimension length on both ``Dataset`` and ``DataArray`` (:issue:`921`). By `Stephan Hoyer `_. - New keyword argument ``drop=True`` for :py:meth:`~DataArray.sel`, :py:meth:`~DataArray.isel` and :py:meth:`~DataArray.squeeze` for dropping scalar coordinates that arise from indexing. ``DataArray`` (:issue:`242`). By `Stephan Hoyer `_. - New top-level functions :py:func:`~xarray.full_like`, :py:func:`~xarray.zeros_like`, and :py:func:`~xarray.ones_like` By `Guido Imperiale `_. - Overriding a preexisting attribute with :py:func:`~xarray.register_dataset_accessor` or :py:func:`~xarray.register_dataarray_accessor` now issues a warning instead of raising an error (:issue:`1082`). By `Stephan Hoyer `_. - Options for axes sharing between subplots are exposed to :py:class:`~xarray.plot.FacetGrid` and :py:func:`~xarray.plot.plot`, so axes sharing can be disabled for polar plots. By `Bas Hoonhout `_. - New utility functions :py:func:`~xarray.testing.assert_equal`, :py:func:`~xarray.testing.assert_identical`, and :py:func:`~xarray.testing.assert_allclose` for asserting relationships between xarray objects, designed for use in a pytest test suite. - ``figsize``, ``size`` and ``aspect`` plot arguments are now supported for all plots (:issue:`897`). See :ref:`plotting.figsize` for more details. By `Stephan Hoyer `_ and `Fabien Maussion `_. - New :py:meth:`~Dataset.info` method to summarize ``Dataset`` variables and attributes. The method prints to a buffer (e.g. ``stdout``) with output similar to what the command line utility ``ncdump -h`` produces (:issue:`1150`). By `Joe Hamman `_. 
- Added the ability write unlimited netCDF dimensions with the ``scipy`` and ``netcdf4`` backends via the new ``xray.Dataset.encoding`` attribute or via the ``unlimited_dims`` argument to ``xray.Dataset.to_netcdf``. By `Joe Hamman `_. - New :py:meth:`~DataArray.quantile` method to calculate quantiles from DataArray objects (:issue:`1187`). By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - ``groupby_bins`` now restores empty bins by default (:issue:`1019`). By `Ryan Abernathey `_. - Fix issues for dates outside the valid range of pandas timestamps (:issue:`975`). By `Mathias Hauser `_. - Unstacking produced flipped array after stacking decreasing coordinate values (:issue:`980`). By `Stephan Hoyer `_. - Setting ``dtype`` via the ``encoding`` parameter of ``to_netcdf`` failed if the encoded dtype was the same as the dtype of the original array (:issue:`873`). By `Stephan Hoyer `_. - Fix issues with variables where both attributes ``_FillValue`` and ``missing_value`` are set to ``NaN`` (:issue:`997`). By `Marco ZΓΌhlke `_. - ``.where()`` and ``.fillna()`` now preserve attributes (:issue:`1009`). By `Fabien Maussion `_. - Applying :py:func:`broadcast()` to an xarray object based on the dask backend won't accidentally convert the array from dask to numpy anymore (:issue:`978`). By `Guido Imperiale `_. - ``Dataset.concat()`` now preserves variables order (:issue:`1027`). By `Fabien Maussion `_. - Fixed an issue with pcolormesh (:issue:`781`). A new ``infer_intervals`` keyword gives control on whether the cell intervals should be computed or not. By `Fabien Maussion `_. - Grouping over an dimension with non-unique values with ``groupby`` gives correct groups. By `Stephan Hoyer `_. - Fixed accessing coordinate variables with non-string names from ``.coords``. By `Stephan Hoyer `_. - :py:meth:`~xarray.DataArray.rename` now simultaneously renames the array and any coordinate with the same name, when supplied via a :py:class:`dict` (:issue:`1116`). By `Yves Delley `_. - Fixed sub-optimal performance in certain operations with object arrays (:issue:`1121`). By `Yves Delley `_. - Fix ``.groupby(group)`` when ``group`` has datetime dtype (:issue:`1132`). By `Jonas SΓΈlvsteen `_. - Fixed a bug with facetgrid (the ``norm`` keyword was ignored, :issue:`1159`). By `Fabien Maussion `_. - Resolved a concurrency bug that could cause Python to crash when simultaneously reading and writing netCDF4 files with dask (:issue:`1172`). By `Stephan Hoyer `_. - Fix to make ``.copy()`` actually copy dask arrays, which will be relevant for future releases of dask in which dask arrays will be mutable (:issue:`1180`). By `Stephan Hoyer `_. - Fix opening NetCDF files with multi-dimensional time variables (:issue:`1229`). By `Stephan Hoyer `_. Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - ``xarray.Dataset.isel_points`` and ``xarray.Dataset.sel_points`` now use vectorised indexing in numpy and dask (:issue:`1161`), which can result in several orders of magnitude speedup. By `Jonathan Chambers `_. .. _whats-new.0.8.2: v0.8.2 (18 August 2016) ----------------------- This release includes a number of bug fixes and minor enhancements. Breaking changes ~~~~~~~~~~~~~~~~ - :py:func:`~xarray.broadcast` and :py:func:`~xarray.concat` now auto-align inputs, using ``join=outer``. Previously, these functions raised ``ValueError`` for non-aligned inputs. By `Guido Imperiale `_. Enhancements ~~~~~~~~~~~~ - New documentation on :ref:`panel transition`. By `Maximilian Roos `_. 
- New ``Dataset`` and ``DataArray`` methods :py:meth:`~xarray.Dataset.to_dict` and :py:meth:`~xarray.Dataset.from_dict` to allow easy conversion between dictionaries and xarray objects (:issue:`432`). See :ref:`dictionary IO` for more details. By `Julia Signell `_. - Added ``exclude`` and ``indexes`` optional parameters to :py:func:`~xarray.align`, and ``exclude`` optional parameter to :py:func:`~xarray.broadcast`. By `Guido Imperiale `_. - Better error message when assigning variables without dimensions (:issue:`971`). By `Stephan Hoyer `_. - Better error message when reindex/align fails due to duplicate index values (:issue:`956`). By `Stephan Hoyer `_. Bug fixes ~~~~~~~~~ - Ensure xarray works with h5netcdf v0.3.0 for arrays with ``dtype=str`` (:issue:`953`). By `Stephan Hoyer `_. - ``Dataset.__dir__()`` (i.e. the method python calls to get autocomplete options) failed if one of the dataset's keys was not a string (:issue:`852`). By `Maximilian Roos `_. - ``Dataset`` constructor can now take arbitrary objects as values (:issue:`647`). By `Maximilian Roos `_. - Clarified ``copy`` argument for :py:meth:`~xarray.DataArray.reindex` and :py:func:`~xarray.align`, which now consistently always return new xarray objects (:issue:`927`). - Fix ``open_mfdataset`` with ``engine='pynio'`` (:issue:`936`). By `Stephan Hoyer `_. - ``groupby_bins`` sorted bin labels as strings (:issue:`952`). By `Stephan Hoyer `_. - Fix bug introduced by v0.8.0 that broke assignment to datasets when both the left and right side have the same non-unique index values (:issue:`956`). .. _whats-new.0.8.1: v0.8.1 (5 August 2016) ---------------------- Bug fixes ~~~~~~~~~ - Fix bug in v0.8.0 that broke assignment to Datasets with non-unique indexes (:issue:`943`). By `Stephan Hoyer `_. .. _whats-new.0.8.0: v0.8.0 (2 August 2016) ---------------------- This release includes four months of new features and bug fixes, including several breaking changes. .. _v0.8.0.breaking: Breaking changes ~~~~~~~~~~~~~~~~ - Dropped support for Python 2.6 (:issue:`855`). - Indexing on multi-index now drop levels, which is consistent with pandas. It also changes the name of the dimension / coordinate when the multi-index is reduced to a single index (:issue:`802`). - Contour plots no longer add a colorbar per default (:issue:`866`). Filled contour plots are unchanged. - ``DataArray.values`` and ``.data`` now always returns an NumPy array-like object, even for 0-dimensional arrays with object dtype (:issue:`867`). Previously, ``.values`` returned native Python objects in such cases. To convert the values of scalar arrays to Python objects, use the ``.item()`` method. Enhancements ~~~~~~~~~~~~ - Groupby operations now support grouping over multidimensional variables. A new method called :py:meth:`~xarray.Dataset.groupby_bins` has also been added to allow users to specify bins for grouping. The new features are described in :ref:`groupby.multidim` and :ref:`/examples/multidimensional-coords.ipynb`. By `Ryan Abernathey `_. - DataArray and Dataset method :py:meth:`where` now supports a ``drop=True`` option that clips coordinate elements that are fully masked. By `Phillip J. Wolfram `_. - New top level :py:func:`merge` function allows for combining variables from any number of ``Dataset`` and/or ``DataArray`` variables. See :ref:`merge` for more details. By `Stephan Hoyer `_. 
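  A minimal sketch of combining a named ``DataArray`` with a ``Dataset`` (the variable names here are invented for the example):

  .. code:: python

      import xarray as xr

      da = xr.DataArray([1, 2, 3], dims="x", name="foo")
      ds = xr.Dataset({"bar": ("x", [10, 20, 30])})
      merged = xr.merge([da, ds])  # a single Dataset containing both "foo" and "bar"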
- :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` now support the ``keep_attrs=False`` option that determines whether variable and dataset attributes are retained in the resampled object. By `Jeremy McGibbon `_. - Better multi-index support in :py:meth:`DataArray.sel`, :py:meth:`DataArray.loc`, :py:meth:`Dataset.sel` and :py:meth:`Dataset.loc`, which now behave more closely to pandas and which also accept dictionaries for indexing based on given level names and labels (see :ref:`multi-level indexing`). By `Benoit Bovy `_. - New (experimental) decorators :py:func:`~xarray.register_dataset_accessor` and :py:func:`~xarray.register_dataarray_accessor` for registering custom xarray extensions without subclassing. They are described in the new documentation page on :ref:`internals`. By `Stephan Hoyer `_. - Round trip boolean datatypes. Previously, writing boolean datatypes to netCDF formats would raise an error since netCDF does not have a ``bool`` datatype. This feature reads/writes a ``dtype`` attribute to boolean variables in netCDF files. By `Joe Hamman `_. - 2D plotting methods now have two new keywords (``cbar_ax`` and ``cbar_kwargs``), allowing more control on the colorbar (:issue:`872`). By `Fabien Maussion `_. - New Dataset method :py:meth:`Dataset.filter_by_attrs`, akin to ``netCDF4.Dataset.get_variables_by_attributes``, to easily filter data variables using their attributes. By `Filipe Fernandes `_. Bug fixes ~~~~~~~~~ - Attributes were being retained by default for some resampling operations when they should not. With the ``keep_attrs=False`` option, they will no longer be retained by default. This may be backwards-incompatible with some scripts, but the attributes may be kept by adding the ``keep_attrs=True`` option. By `Jeremy McGibbon `_. - Concatenating xarray objects along an axis with a MultiIndex or PeriodIndex preserves the nature of the index (:issue:`875`). By `Stephan Hoyer `_. - Fixed a bug in arithmetic operations on DataArray objects whose dimensions are numpy structured arrays or recarrays (:issue:`861`, :issue:`837`). By `Maciek Swat `_. - ``decode_cf_timedelta`` now accepts arrays with ``ndim`` >1 (:issue:`842`). This fixes issue :issue:`665`. By `Filipe Fernandes `_. - Fix a bug where ``xarray.ufuncs`` that take two arguments would incorrectly use numpy functions instead of dask.array functions (:issue:`876`). By `Stephan Hoyer `_. - Support for pickling functions from ``xarray.ufuncs`` (:issue:`901`). By `Stephan Hoyer `_. - ``Variable.copy(deep=True)`` no longer converts MultiIndex into a base Index (:issue:`769`). By `Benoit Bovy `_. - Fixes for groupby on dimensions with a multi-index (:issue:`867`). By `Stephan Hoyer `_. - Fix printing datasets with unicode attributes on Python 2 (:issue:`892`). By `Stephan Hoyer `_. - Fixed incorrect test for dask version (:issue:`891`). By `Stephan Hoyer `_. - Fixed ``dim`` argument for ``isel_points``/``sel_points`` when a ``pandas.Index`` is passed. By `Stephan Hoyer `_. - :py:func:`~xarray.plot.contour` now plots the correct number of contours (:issue:`866`). By `Fabien Maussion `_. .. _whats-new.0.7.2: v0.7.2 (13 March 2016) ---------------------- This release includes two new, entirely backwards compatible features and several bug fixes. Enhancements ~~~~~~~~~~~~ - New DataArray method :py:meth:`DataArray.dot` for calculating the dot product of two DataArrays along shared dimensions. By `Dean Pospisil `_.
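  For example, a small sketch (not from the original release notes) contracting over the shared ``y`` dimension:

  .. code:: python

      import numpy as np
      import xarray as xr

      a = xr.DataArray(np.arange(6).reshape(2, 3), dims=("x", "y"))
      b = xr.DataArray(np.arange(3), dims="y")
      a.dot(b)  # sums the products over the shared "y" dimension, leaving only "x"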
- Rolling window operations on DataArray objects are now supported via a new :py:meth:`DataArray.rolling` method. For example: .. jupyter-input:: import xarray as xr import numpy as np arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y")) arr .. jupyter-output:: array([[ 0. , 0.5, 1. , 1.5, 2. ], [ 2.5, 3. , 3.5, 4. , 4.5], [ 5. , 5.5, 6. , 6.5, 7. ]]) Coordinates: * x (x) int64 0 1 2 * y (y) int64 0 1 2 3 4 .. jupyter-input:: arr.rolling(y=3, min_periods=2).mean() .. jupyter-output:: array([[ nan, 0.25, 0.5 , 1. , 1.5 ], [ nan, 2.75, 3. , 3.5 , 4. ], [ nan, 5.25, 5.5 , 6. , 6.5 ]]) Coordinates: * x (x) int64 0 1 2 * y (y) int64 0 1 2 3 4 See :ref:`compute.rolling` for more details. By `Joe Hamman `_. Bug fixes ~~~~~~~~~ - Fixed an issue where plots using pcolormesh and Cartopy axes were being distorted by the inference of the axis interval breaks. This change chooses not to modify the coordinate variables when the axes have the attribute ``projection``, allowing Cartopy to handle the extent of pcolormesh plots (:issue:`781`). By `Joe Hamman `_. - 2D plots now better handle additional coordinates which are not ``DataArray`` dimensions (:issue:`788`). By `Fabien Maussion `_. .. _whats-new.0.7.1: v0.7.1 (16 February 2016) ------------------------- This is a bug fix release that includes two small, backwards compatible enhancements. We recommend that all users upgrade. Enhancements ~~~~~~~~~~~~ - Numerical operations now return empty objects on no overlapping labels rather than raising ``ValueError`` (:issue:`739`). - :py:class:`~pandas.Series` is now supported as valid input to the ``Dataset`` constructor (:issue:`740`). Bug fixes ~~~~~~~~~ - Restore checks for shape consistency between data and coordinates in the DataArray constructor (:issue:`758`). - Single dimension variables no longer transpose as part of a broader ``.transpose``. This behavior was causing ``pandas.PeriodIndex`` dimensions to lose their type (:issue:`749`) - :py:class:`~xarray.Dataset` labels remain as their native type on ``.to_dataset``. Previously they were coerced to strings (:issue:`745`) - Fixed a bug where replacing a ``DataArray`` index coordinate would improperly align the coordinate (:issue:`725`). - ``DataArray.reindex_like`` now maintains the dtype of complex numbers when reindexing leads to NaN values (:issue:`738`). - ``Dataset.rename`` and ``DataArray.rename`` support the old and new names being the same (:issue:`724`). - Fix :py:meth:`~xarray.Dataset.from_dataframe` for DataFrames with Categorical column and a MultiIndex index (:issue:`737`). - Fixes to ensure xarray works properly after the upcoming pandas v0.18 and NumPy v1.11 releases. Acknowledgments ~~~~~~~~~~~~~~~ The following individuals contributed to this release: - Edward Richards - Maximilian Roos - Rafael Guedes - Spencer Hill - Stephan Hoyer .. _whats-new.0.7.0: v0.7.0 (21 January 2016) ------------------------ This major release includes redesign of :py:class:`~xarray.DataArray` internals, as well as new methods for reshaping, rolling and shifting data. It includes preliminary support for :py:class:`pandas.MultiIndex`, as well as a number of other features and bug fixes, several of which offer improved compatibility with pandas. New name ~~~~~~~~ The project formerly known as "xray" is now "xarray", pronounced "x-array"! This avoids a namespace conflict with the entire field of x-ray science. 
Renaming our project seemed like the right thing to do, especially because some scientists who work with actual x-rays are interested in using this project in their work. Thanks for your understanding and patience in this transition. You can now find our documentation and code repository at new URLs: - https://docs.xarray.dev - https://github.com/pydata/xarray/ To ease the transition, we have simultaneously released v0.7.0 of both ``xray`` and ``xarray`` on the Python Package Index. These packages are identical. For now, ``import xray`` still works, except it issues a deprecation warning. This will be the last xray release. Going forward, we recommend switching your import statements to ``import xarray as xr``. .. _v0.7.0.breaking: Breaking changes ~~~~~~~~~~~~~~~~ - The internal data model used by ``xray.DataArray`` has been rewritten to fix several outstanding issues (:issue:`367`, :issue:`634`, `this stackoverflow report`_). Internally, ``DataArray`` is now implemented in terms of ``._variable`` and ``._coords`` attributes instead of holding variables in a ``Dataset`` object. This refactor ensures that if a DataArray has the same name as one of its coordinates, the array and the coordinate no longer share the same data. In practice, this means that creating a DataArray with the same ``name`` as one of its dimensions no longer automatically uses that array to label the corresponding coordinate. You will now need to provide coordinate labels explicitly. Here's the old behavior: .. jupyter-input:: xray.DataArray([4, 5, 6], dims="x", name="x") .. jupyter-output:: array([4, 5, 6]) Coordinates: * x (x) int64 4 5 6 and the new behavior (compare the values of the ``x`` coordinate): .. jupyter-input:: xray.DataArray([4, 5, 6], dims="x", name="x") .. jupyter-output:: array([4, 5, 6]) Coordinates: * x (x) int64 0 1 2 - It is no longer possible to convert a DataArray to a Dataset with ``xray.DataArray.to_dataset`` if it is unnamed. This will now raise ``ValueError``. If the array is unnamed, you need to supply the ``name`` argument. .. _this stackoverflow report: http://stackoverflow.com/questions/33158558/python-xray-extract-first-and-last-time-value-within-each-month-of-a-timeseries Enhancements ~~~~~~~~~~~~ - Basic support for :py:class:`~pandas.MultiIndex` coordinates on xray objects, including indexing, :py:meth:`~DataArray.stack` and :py:meth:`~DataArray.unstack`: .. jupyter-input:: df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]}) s = df.set_index(["x", "y"])["foo"] arr = xray.DataArray(s, dims="z") arr .. jupyter-output:: array([0, 1, 2]) Coordinates: * z (z) object ('a', 0) ('b', 0) ('b', 1) .. jupyter-input:: arr.indexes["z"] .. jupyter-output:: MultiIndex(levels=[[u'a', u'b'], [0, 1]], labels=[[0, 1, 1], [0, 0, 1]], names=[u'x', u'y']) .. jupyter-input:: arr.unstack("z") .. jupyter-output:: array([[ 0., nan], [ 1., 2.]]) Coordinates: * x (x) object 'a' 'b' * y (y) int64 0 1 .. jupyter-input:: arr.unstack("z").stack(z=("x", "y")) .. jupyter-output:: array([ 0., nan, 1., 2.]) Coordinates: * z (z) object ('a', 0) ('a', 1) ('b', 0) ('b', 1) See :ref:`reshape.stack` for more details. .. warning:: xray's MultiIndex support is still experimental, and we have a long to- do list of desired additions (:issue:`719`), including better display of multi-index levels when printing a ``Dataset``, and support for saving datasets with a MultiIndex to a netCDF file. User contributions in this area would be greatly appreciated. 
- Support for reading GRIB, HDF4 and other file formats via PyNIO_. - Better error message when a variable is supplied with the same name as one of its dimensions. - Plotting: more control on colormap parameters (:issue:`642`). ``vmin`` and ``vmax`` will not be silently ignored anymore. Setting ``center=False`` prevents automatic selection of a divergent colormap. - New ``xray.Dataset.shift`` and ``xray.Dataset.roll`` methods for shifting/rotating datasets or arrays along a dimension: .. code:: python array = xray.DataArray([5, 6, 7, 8], dims="x") array.shift(x=2) array.roll(x=2) Notice that ``shift`` moves data independently of coordinates, but ``roll`` moves both data and coordinates. - Assigning a ``pandas`` object directly as a ``Dataset`` variable is now permitted. Its index names correspond to the ``dims`` of the ``Dataset``, and its data is aligned. - Passing a :py:class:`pandas.DataFrame` or ``pandas.Panel`` to a Dataset constructor is now permitted. - New function ``xray.broadcast`` for explicitly broadcasting ``DataArray`` and ``Dataset`` objects against each other. For example: .. code:: python a = xray.DataArray([1, 2, 3], dims="x") b = xray.DataArray([5, 6], dims="y") a b a2, b2 = xray.broadcast(a, b) a2 b2 .. _PyNIO: https://www.pyngl.ucar.edu/Nio.shtml Bug fixes ~~~~~~~~~ - Fixes for several issues found on ``DataArray`` objects with the same name as one of their coordinates (see :ref:`v0.7.0.breaking` for more details). - ``DataArray.to_masked_array`` always returns masked array with mask being an array (not a scalar value) (:issue:`684`) - Allows for (imperfect) repr of Coords when underlying index is PeriodIndex (:issue:`645`). - Fixes for several issues found on ``DataArray`` objects with the same name as one of their coordinates (see :ref:`v0.7.0.breaking` for more details). - Attempting to assign a ``Dataset`` or ``DataArray`` variable/attribute using attribute-style syntax (e.g., ``ds.foo = 42``) now raises an error rather than silently failing (:issue:`656`, :issue:`714`). - You can now pass pandas objects with non-numpy dtypes (e.g., ``categorical`` or ``datetime64`` with a timezone) into xray without an error (:issue:`716`). Acknowledgments ~~~~~~~~~~~~~~~ The following individuals contributed to this release: - Antony Lee - Fabien Maussion - Joe Hamman - Maximilian Roos - Stephan Hoyer - Takeshi Kanmae - femtotrader v0.6.1 (21 October 2015) ------------------------ This release contains a number of bug and compatibility fixes, as well as enhancements to plotting, indexing and writing files to disk. Note that the minimum required version of dask for use with xray is now version 0.6. API Changes ~~~~~~~~~~~ - The handling of colormaps and discrete color lists for 2D plots in ``xray.DataArray.plot`` was changed to provide more compatibility with matplotlib's ``contour`` and ``contourf`` functions (:issue:`538`). Now discrete lists of colors should be specified using ``colors`` keyword, rather than ``cmap``. Enhancements ~~~~~~~~~~~~ - Faceted plotting through ``xray.plot.FacetGrid`` and the ``xray.plot.plot`` method. See :ref:`plotting.faceting` for more details and examples. - ``xray.Dataset.sel`` and ``xray.Dataset.reindex`` now support the ``tolerance`` argument for controlling nearest-neighbor selection (:issue:`629`): .. jupyter-input:: array = xray.DataArray([1, 2, 3], dims="x") array.reindex(x=[0.9, 1.5], method="nearest", tolerance=0.2) .. jupyter-output:: array([ 2., nan]) Coordinates: * x (x) float64 0.9 1.5 This feature requires pandas v0.17 or newer. 
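  A corresponding sketch for label-based selection with ``sel`` (assuming the same ``array`` as above; the labels are chosen to fall within the tolerance):

  .. code:: python

      array.sel(x=[0.9, 2.1], method="nearest", tolerance=0.2)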
- New ``encoding`` argument in ``xray.Dataset.to_netcdf`` for writing netCDF files with compression, as described in the new documentation section on :ref:`io.netcdf.writing_encoded`. - Add ``xray.Dataset.real`` and ``xray.Dataset.imag`` attributes to Dataset and DataArray (:issue:`553`). - More informative error message with ``xray.Dataset.from_dataframe`` if the frame has duplicate columns. - xray now uses deterministic names for dask arrays it creates or opens from disk. This allows xray users to take advantage of dask's nascent support for caching intermediate computation results. See :issue:`555` for an example. Bug fixes ~~~~~~~~~ - Forwards compatibility with the latest pandas release (v0.17.0). We were using some internal pandas routines for datetime conversion, which unfortunately have now changed upstream (:issue:`569`). - Aggregation functions now correctly skip ``NaN`` for data for ``complex128`` dtype (:issue:`554`). - Fixed indexing 0d arrays with unicode dtype (:issue:`568`). - ``xray.DataArray.name`` and Dataset keys must be a string or None to be written to netCDF (:issue:`533`). - ``xray.DataArray.where`` now uses dask instead of numpy if either the array or ``other`` is a dask array. Previously, if ``other`` was a numpy array the method was evaluated eagerly. - Global attributes are now handled more consistently when loading remote datasets using ``engine='pydap'`` (:issue:`574`). - It is now possible to assign to the ``.data`` attribute of DataArray objects. - ``coordinates`` attribute is now kept in the encoding dictionary after decoding (:issue:`610`). - Compatibility with numpy 1.10 (:issue:`617`). Acknowledgments ~~~~~~~~~~~~~~~ The following individuals contributed to this release: - Ryan Abernathey - Pete Cable - Clark Fitzgerald - Joe Hamman - Stephan Hoyer - Scott Sinclair v0.6.0 (21 August 2015) ----------------------- This release includes numerous bug fixes and enhancements. Highlights include the introduction of a plotting module and the new Dataset and DataArray methods ``xray.Dataset.isel_points``, ``xray.Dataset.sel_points``, ``xray.Dataset.where`` and ``xray.Dataset.diff``. There are no breaking changes from v0.5.2. Enhancements ~~~~~~~~~~~~ - Plotting methods have been implemented on DataArray objects ``xray.DataArray.plot`` through integration with matplotlib (:issue:`185`). For an introduction, see :ref:`plotting`. - Variables in netCDF files with multiple missing values are now decoded as NaN after issuing a warning if open_dataset is called with mask_and_scale=True. - We clarified our rules for when the result from an xray operation is a copy vs. a view (see :ref:`copies_vs_views` for more details). - Dataset variables are now written to netCDF files in order of appearance when using the netcdf4 backend (:issue:`479`). - Added ``xray.Dataset.isel_points`` and ``xray.Dataset.sel_points`` to support pointwise indexing of Datasets and DataArrays (:issue:`475`). .. jupyter-input:: da = xray.DataArray( ...: np.arange(56).reshape((7, 8)), ...: coords={"x": list("abcdefg"), "y": 10 * np.arange(8)}, ...: dims=["x", "y"], ...: ) da .. jupyter-output:: array([[ 0, 1, 2, 3, 4, 5, 6, 7], [ 8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29, 30, 31], [32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47], [48, 49, 50, 51, 52, 53, 54, 55]]) Coordinates: * y (y) int64 0 10 20 30 40 50 60 70 * x (x) |S1 'a' 'b' 'c' 'd' 'e' 'f' 'g' .. 
jupyter-input:: # we can index by position along each dimension da.isel_points(x=[0, 1, 6], y=[0, 1, 0], dim="points") .. jupyter-output:: array([ 0, 9, 48]) Coordinates: y (points) int64 0 10 0 x (points) |S1 'a' 'b' 'g' * points (points) int64 0 1 2 .. jupyter-input:: # or equivalently by label da.sel_points(x=["a", "b", "g"], y=[0, 10, 0], dim="points") .. jupyter-output:: array([ 0, 9, 48]) Coordinates: y (points) int64 0 10 0 x (points) |S1 'a' 'b' 'g' * points (points) int64 0 1 2 - New ``xray.Dataset.where`` method for masking xray objects according to some criteria. This works particularly well with multi-dimensional data: .. code:: python ds = xray.Dataset(coords={"x": range(100), "y": range(100)}) ds["distance"] = np.sqrt(ds.x**2 + ds.y**2) ds.distance.where(ds.distance < 100).plot() - Added new methods ``xray.DataArray.diff`` and ``xray.Dataset.diff`` for finite difference calculations along a given axis. - New ``xray.DataArray.to_masked_array`` convenience method for returning a numpy.ma.MaskedArray. .. code:: python da = xray.DataArray(np.random.random_sample(size=(5, 4))) da.where(da < 0.5) da.where(da < 0.5).to_masked_array(copy=True) - Added new flag "drop_variables" to ``xray.open_dataset`` for excluding variables from being parsed. This may be useful to drop variables with problems or inconsistent values. Bug fixes ~~~~~~~~~ - Fixed aggregation functions (e.g., sum and mean) on big-endian arrays when bottleneck is installed (:issue:`489`). - Dataset aggregation functions dropped variables with unsigned integer dtype (:issue:`505`). - ``.any()`` and ``.all()`` were not lazy when used on xray objects containing dask arrays. - Fixed an error when attempting to saving datetime64 variables to netCDF files when the first element is ``NaT`` (:issue:`528`). - Fix pickle on DataArray objects (:issue:`515`). - Fixed unnecessary coercion of float64 to float32 when using netcdf3 and netcdf4_classic formats (:issue:`526`). v0.5.2 (16 July 2015) --------------------- This release contains bug fixes, several additional options for opening and saving netCDF files, and a backwards incompatible rewrite of the advanced options for ``xray.concat``. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The optional arguments ``concat_over`` and ``mode`` in ``xray.concat`` have been removed and replaced by ``data_vars`` and ``coords``. The new arguments are both more easily understood and more robustly implemented, and allowed us to fix a bug where ``concat`` accidentally loaded data into memory. If you set values for these optional arguments manually, you will need to update your code. The default behavior should be unchanged. Enhancements ~~~~~~~~~~~~ - ``xray.open_mfdataset`` now supports a ``preprocess`` argument for preprocessing datasets prior to concatenaton. This is useful if datasets cannot be otherwise merged automatically, e.g., if the original datasets have conflicting index coordinates (:issue:`443`). - ``xray.open_dataset`` and ``xray.open_mfdataset`` now use a global thread lock by default for reading from netCDF files with dask. This avoids possible segmentation faults for reading from netCDF4 files when HDF5 is not configured properly for concurrent access (:issue:`444`). - Added support for serializing arrays of complex numbers with ``engine='h5netcdf'``. - The new ``xray.save_mfdataset`` function allows for saving multiple datasets to disk simultaneously. This is useful when processing large datasets with dask.array. 
For example, to save a dataset too big to fit into memory to one file per year, we could write: .. jupyter-input:: years, datasets = zip(*ds.groupby("time.year")) paths = ["%s.nc" % y for y in years] xray.save_mfdataset(datasets, paths) Bug fixes ~~~~~~~~~ - Fixed ``min``, ``max``, ``argmin`` and ``argmax`` for arrays with string or unicode types (:issue:`453`). - ``xray.open_dataset`` and ``xray.open_mfdataset`` support supplying chunks as a single integer. - Fixed a bug in serializing scalar datetime variable to netCDF. - Fixed a bug that could occur in serialization of 0-dimensional integer arrays. - Fixed a bug where concatenating DataArrays was not always lazy (:issue:`464`). - When reading datasets with h5netcdf, bytes attributes are decoded to strings. This allows conventions decoding to work properly on Python 3 (:issue:`451`). v0.5.1 (15 June 2015) --------------------- This minor release fixes a few bugs and an inconsistency with pandas. It also adds the ``pipe`` method, copied from pandas. Enhancements ~~~~~~~~~~~~ - Added ``xray.Dataset.pipe``, replicating the `new pandas method`_ in version 0.16.2. See :ref:`transforming datasets` for more details. - ``xray.Dataset.assign`` and ``xray.Dataset.assign_coords`` now assign new variables in sorted (alphabetical) order, mirroring the behavior in pandas. Previously, the order was arbitrary. .. _new pandas method: http://pandas.pydata.org/pandas-docs/version/0.16.2/whatsnew.html#pipe Bug fixes ~~~~~~~~~ - ``xray.concat`` fails in an edge case involving identical coordinate variables (:issue:`425`) - We now decode variables loaded from netCDF3 files with the scipy engine using native endianness (:issue:`416`). This resolves an issue when aggregating these arrays with bottleneck installed. v0.5 (1 June 2015) ------------------ Highlights ~~~~~~~~~~ The headline feature in this release is experimental support for out-of-core computing (data that doesn't fit into memory) with :doc:`user-guide/dask`. This includes a new top-level function ``xray.open_mfdataset`` that makes it easy to open a collection of netCDF (using dask) as a single ``xray.Dataset`` object. For more on dask, read the `blog post introducing xray + dask`_ and the new documentation section :doc:`user-guide/dask`. .. _blog post introducing xray + dask: https://www.anaconda.com/blog/developer-blog/xray-dask-out-core-labeled-arrays-python/ Dask makes it possible to harness parallelism and manipulate gigantic datasets with xray. It is currently an optional dependency, but it may become required in the future. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The logic used for choosing which variables are concatenated with ``xray.concat`` has changed. Previously, by default any variables which were equal across a dimension were not concatenated. This lead to some surprising behavior, where the behavior of groupby and concat operations could depend on runtime values (:issue:`268`). For example: .. jupyter-input:: ds = xray.Dataset({"x": 0}) xray.concat([ds, ds], dim="y") .. jupyter-output:: Dimensions: () Coordinates: *empty* Data variables: x int64 0 Now, the default always concatenates data variables: .. code:: python In [1]: ds = xray.Dataset({"x": 0}) In [2]: xray.concat([ds, ds], dim="y") Out[2]: Size: 16B Dimensions: (y: 2) Dimensions without coordinates: y Data variables: x (y) int64 16B 0 0 .. code:: python xray.concat([ds, ds], dim="y") To obtain the old behavior, supply the argument ``concat_over=[]``. 
Enhancements ~~~~~~~~~~~~ - New ``xray.Dataset.to_dataarray`` and enhanced ``xray.DataArray.to_dataset`` methods make it easy to switch back and forth between arrays and datasets: .. code:: python ds = xray.Dataset( {"a": 1, "b": ("x", [1, 2, 3])}, coords={"c": 42}, attrs={"Conventions": "None"}, ) ds.to_dataarray() ds.to_dataarray().to_dataset(dim="variable") - New ``xray.Dataset.fillna`` method to fill missing values, modeled off the pandas method of the same name: .. code:: python array = xray.DataArray([np.nan, 1, np.nan, 3], dims="x") array.fillna(0) ``fillna`` works on both ``Dataset`` and ``DataArray`` objects, and uses index based alignment and broadcasting like standard binary operations. It also can be applied by group, as illustrated in :ref:`/examples/weather-data.ipynb#Fill-missing-values-with-climatology`. - New ``xray.Dataset.assign`` and ``xray.Dataset.assign_coords`` methods patterned off the new :py:meth:`DataFrame.assign ` method in pandas: .. code:: python ds = xray.Dataset({"y": ("x", [1, 2, 3])}) ds.assign(z=lambda ds: ds.y**2) ds.assign_coords(z=("x", ["a", "b", "c"])) These methods return a new Dataset (or DataArray) with updated data or coordinate variables. - ``xray.Dataset.sel`` now supports the ``method`` parameter, which works like the parameter of the same name on ``xray.Dataset.reindex``. It provides a simple interface for doing nearest-neighbor interpolation: .. use verbatim because I can't seem to install pandas 0.16.1 on RTD :( .. jupyter-input:: ds.sel(x=1.1, method="nearest") .. jupyter-output:: Dimensions: () Coordinates: x int64 1 Data variables: y int64 2 .. jupyter-input:: ds.sel(x=[1.1, 2.1], method="pad") .. jupyter-output:: Dimensions: (x: 2) Coordinates: * x (x) int64 1 2 Data variables: y (x) int64 2 3 See :ref:`nearest neighbor lookups` for more details. - You can now control the underlying backend used for accessing remote datasets (via OPeNDAP) by specifying ``engine='netcdf4'`` or ``engine='pydap'``. - xray now provides experimental support for reading and writing netCDF4 files directly via `h5py`_ with the `h5netcdf`_ package, avoiding the netCDF4-Python package. You will need to install h5netcdf and specify ``engine='h5netcdf'`` to try this feature. - Accessing data from remote datasets now has retrying logic (with exponential backoff) that should make it robust to occasional bad responses from DAP servers. - You can control the width of the Dataset repr with ``xray.set_options``. It can be used either as a context manager, in which case the default is restored outside the context: .. code:: python ds = xray.Dataset({"x": np.arange(1000)}) with xray.set_options(display_width=40): print(ds) Or to set a global option: .. jupyter-input:: xray.set_options(display_width=80) The default value for the ``display_width`` option is 80. .. _h5py: http://www.h5py.org/ .. _h5netcdf: https://github.com/shoyer/h5netcdf Deprecations ~~~~~~~~~~~~ - The method ``load_data()`` has been renamed to the more succinct ``xray.Dataset.load``. v0.4.1 (18 March 2015) ---------------------- The release contains bug fixes and several new features. All changes should be fully backwards compatible. Enhancements ~~~~~~~~~~~~ - New documentation sections on :ref:`time-series` and :ref:`combining multiple files`. - ``xray.Dataset.resample`` lets you resample a dataset or data array to a new temporal resolution. The syntax is the `same as pandas`_, except you need to supply the time dimension explicitly: .. 
code:: python time = pd.date_range("2000-01-01", freq="6H", periods=10) array = xray.DataArray(np.arange(10), [("time", time)]) array.resample("1D", dim="time") You can specify how to do the resampling with the ``how`` argument and other options such as ``closed`` and ``label`` let you control labeling: .. code:: python array.resample("1D", dim="time", how="sum", label="right") If the desired temporal resolution is higher than the original data (upsampling), xray will insert missing values: .. code:: python array.resample("3H", "time") - ``first`` and ``last`` methods on groupby objects let you take the first or last examples from each group along the grouped axis: .. code:: python array.groupby("time.day").first() These methods combine well with ``resample``: .. code:: python array.resample("1D", dim="time", how="first") - ``xray.Dataset.swap_dims`` allows for easily swapping one dimension out for another: .. code:: python ds = xray.Dataset({"x": range(3), "y": ("x", list("abc"))}) ds.swap_dims({"x": "y"}) This was possible in earlier versions of xray, but required some contortions. - ``xray.open_dataset`` and ``xray.Dataset.to_netcdf`` now accept an ``engine`` argument to explicitly select which underlying library (netcdf4 or scipy) is used for reading/writing a netCDF file. .. _same as pandas: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling Bug fixes ~~~~~~~~~ - Fixed a bug where data netCDF variables read from disk with ``engine='scipy'`` could still be associated with the file on disk, even after closing the file (:issue:`341`). This manifested itself in warnings about mmapped arrays and segmentation faults (if the data was accessed). - Silenced spurious warnings about all-NaN slices when using nan-aware aggregation methods (:issue:`344`). - Dataset aggregations with ``keep_attrs=True`` now preserve attributes on data variables, not just the dataset itself. - Tests for xray now pass when run on Windows (:issue:`360`). - Fixed a regression in v0.4 where saving to netCDF could fail with the error ``ValueError: could not automatically determine time units``. v0.4 (2 March, 2015) -------------------- This is one of the biggest releases yet for xray: it includes some major changes that may break existing code, along with the usual collection of minor enhancements and bug fixes. On the plus side, this release includes all hitherto planned breaking changes, so the upgrade path for xray should be smoother going forward. Breaking changes ~~~~~~~~~~~~~~~~ - We now automatically align index labels in arithmetic, dataset construction, merging and updating. This means the need for manually invoking methods like ``xray.align`` and ``xray.Dataset.reindex_like`` should be vastly reduced. :ref:`For arithmetic`, we align based on the **intersection** of labels: .. code:: python lhs = xray.DataArray([1, 2, 3], [("x", [0, 1, 2])]) rhs = xray.DataArray([2, 3, 4], [("x", [1, 2, 3])]) lhs + rhs :ref:`For dataset construction and merging`, we align based on the **union** of labels: .. code:: python xray.Dataset({"foo": lhs, "bar": rhs}) :ref:`For update and __setitem__`, we align based on the **original** object: .. code:: python lhs.coords["rhs"] = rhs lhs - Aggregations like ``mean`` or ``median`` now skip missing values by default: .. code:: python xray.DataArray([1, 2, np.nan, 3]).mean() You can turn this behavior off by supplying the keyword argument ``skipna=False``. 
These operations are lightning fast thanks to integration with bottleneck_, which is a new optional dependency for xray (numpy is used if bottleneck is not installed). - Scalar coordinates no longer conflict with constant arrays with the same value (e.g., in arithmetic, merging datasets and concat), even if they have different shape (:issue:`243`). For example, the coordinate ``c`` here persists through arithmetic, even though it has different shapes on each DataArray: .. code:: python a = xray.DataArray([1, 2], coords={"c": 0}, dims="x") b = xray.DataArray([1, 2], coords={"c": ("x", [0, 0])}, dims="x") (a + b).coords This functionality can be controlled through the ``compat`` option, which has also been added to the ``xray.Dataset`` constructor. - Datetime shortcuts such as ``'time.month'`` now return a ``DataArray`` with the name ``'month'``, not ``'time.month'`` (:issue:`345`). This makes it easier to index the resulting arrays when they are used with ``groupby``: .. code:: python time = xray.DataArray( pd.date_range("2000-01-01", periods=365), dims="time", name="time" ) counts = time.groupby("time.month").count() counts.sel(month=2) Previously, you would need to use something like ``counts.sel(**{'time.month': 2}})``, which is much more awkward. - The ``season`` datetime shortcut now returns an array of string labels such ``'DJF'``: .. code-block:: ipython In[92]: ds = xray.Dataset({"t": pd.date_range("2000-01-01", periods=12, freq="M")}) In[93]: ds["t.season"] Out[93]: array(['DJF', 'DJF', 'MAM', ..., 'SON', 'SON', 'DJF'], dtype='`_. - Use functions that return generic ndarrays with DataArray.groupby.apply and Dataset.apply (:issue:`327` and :issue:`329`). Thanks Jeff Gerard! - Consolidated the functionality of ``dumps`` (writing a dataset to a netCDF3 bytestring) into ``xray.Dataset.to_netcdf`` (:issue:`333`). - ``xray.Dataset.to_netcdf`` now supports writing to groups in netCDF4 files (:issue:`333`). It also finally has a full docstring -- you should read it! - ``xray.open_dataset`` and ``xray.Dataset.to_netcdf`` now work on netCDF3 files when netcdf4-python is not installed as long as scipy is available (:issue:`333`). - The new ``xray.Dataset.drop`` and ``xray.DataArray.drop`` methods makes it easy to drop explicitly listed variables or index labels: .. code:: python # drop variables ds = xray.Dataset({"x": 0, "y": 1}) ds.drop("x") # drop index labels arr = xray.DataArray([1, 2, 3], coords=[("x", list("abc"))]) arr.drop(["a", "c"], dim="x") - ``xray.Dataset.broadcast_equals`` has been added to correspond to the new ``compat`` option. - Long attributes are now truncated at 500 characters when printing a dataset (:issue:`338`). This should make things more convenient for working with datasets interactively. - Added a new documentation example, :ref:`/examples/monthly-means.ipynb`. Thanks Joe Hamman! Bug fixes ~~~~~~~~~ - Several bug fixes related to decoding time units from netCDF files (:issue:`316`, :issue:`330`). Thanks Stefan Pfenninger! - xray no longer requires ``decode_coords=False`` when reading datasets with unparsable coordinate attributes (:issue:`308`). - Fixed ``DataArray.loc`` indexing with ``...`` (:issue:`318`). - Fixed an edge case that resulting in an error when reindexing multi-dimensional variables (:issue:`315`). - Slicing with negative step sizes (:issue:`312`). - Invalid conversion of string arrays to numeric dtype (:issue:`305`). - Fixed ``repr()`` on dataset objects with non-standard dates (:issue:`347`). 
Deprecations ~~~~~~~~~~~~ - ``dump`` and ``dumps`` have been deprecated in favor of ``xray.Dataset.to_netcdf``. - ``drop_vars`` has been deprecated in favor of ``xray.Dataset.drop``. Future plans ~~~~~~~~~~~~ The biggest feature I'm excited about working toward in the immediate future is supporting out-of-core operations in xray using Dask_, a part of the Blaze_ project. For a preview of using Dask with weather data, read `this blog post`_ by Matthew Rocklin. See :issue:`328` for more details. .. _Dask: https://dask.org .. _Blaze: https://blaze.pydata.org .. _this blog post: https://matthewrocklin.com/blog/work/2015/02/13/Towards-OOC-Slicing-and-Stacking v0.3.2 (23 December, 2014) -------------------------- This release focused on bug-fixes, speedups and resolving some niggling inconsistencies. There are a few cases where the behavior of xray differs from the previous version. However, I expect that in almost all cases your code will continue to run unmodified. .. warning:: xray now requires pandas v0.15.0 or later. This was necessary for supporting TimedeltaIndex without too many painful hacks. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Arrays of :py:class:`datetime.datetime` objects are now automatically cast to ``datetime64[ns]`` arrays when stored in an xray object, using machinery borrowed from pandas: .. code:: python from datetime import datetime xray.Dataset({"t": [datetime(2000, 1, 1)]}) - xray now has support (including serialization to netCDF) for :py:class:`~pandas.TimedeltaIndex`. :py:class:`datetime.timedelta` objects are thus accordingly cast to ``timedelta64[ns]`` objects when appropriate. - Masked arrays are now properly coerced to use ``NaN`` as a sentinel value (:issue:`259`). Enhancements ~~~~~~~~~~~~ - Due to popular demand, we have added experimental attribute style access as a shortcut for dataset variables, coordinates and attributes: .. code:: python ds = xray.Dataset({"tmin": ([], 25, {"units": "celsius"})}) ds.tmin.units Tab-completion for these variables should work in editors such as IPython. However, setting variables or attributes in this fashion is not yet supported because there are some unresolved ambiguities (:issue:`300`). - You can now use a dictionary for indexing with labeled dimensions. This provides a safe way to do assignment with labeled dimensions: .. code:: python array = xray.DataArray(np.zeros(5), dims=["x"]) array[dict(x=slice(3))] = 1 array - Non-index coordinates can now be faithfully written to and restored from netCDF files. This is done according to CF conventions when possible by using the ``coordinates`` attribute on a data variable. When not possible, xray defines a global ``coordinates`` attribute. - Preliminary support for converting ``xray.DataArray`` objects to and from CDAT_ ``cdms2`` variables. - We sped up any operation that involves creating a new Dataset or DataArray (e.g., indexing, aggregation, arithmetic) by a factor of 30 to 50%. The full speed up requires cyordereddict_ to be installed. .. _CDAT: http://uvcdat.llnl.gov/ .. 
_cyordereddict: https://github.com/shoyer/cyordereddict Bug fixes ~~~~~~~~~ - Fix for ``to_dataframe()`` with 0d string/object coordinates (:issue:`287`) - Fix for ``to_netcdf`` with 0d string variable (:issue:`284`) - Fix writing datetime64 arrays to netcdf if NaT is present (:issue:`270`) - Fix align silently upcasts data arrays when NaNs are inserted (:issue:`264`) Future plans ~~~~~~~~~~~~ - I am contemplating switching to the terms "coordinate variables" and "data variables" instead of the (currently used) "coordinates" and "variables", following their use in `CF Conventions`_ (:issue:`293`). This would mostly have implications for the documentation, but I would also change the ``Dataset`` attribute ``vars`` to ``data``. - I no longer certain that automatic label alignment for arithmetic would be a good idea for xray -- it is a feature from pandas that I have not missed (:issue:`186`). - The main API breakage that I *do* anticipate in the next release is finally making all aggregation operations skip missing values by default (:issue:`130`). I'm pretty sick of writing ``ds.reduce(np.nanmean, 'time')``. - The next version of xray (0.4) will remove deprecated features and aliases whose use currently raises a warning. If you have opinions about any of these anticipated changes, I would love to hear them -- please add a note to any of the referenced GitHub issues. .. _CF Conventions: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.6/build/cf-conventions.html v0.3.1 (22 October, 2014) ------------------------- This is mostly a bug-fix release to make xray compatible with the latest release of pandas (v0.15). We added several features to better support working with missing values and exporting xray objects to pandas. We also reorganized the internal API for serializing and deserializing datasets, but this change should be almost entirely transparent to users. Other than breaking the experimental DataStore API, there should be no backwards incompatible changes. New features ~~~~~~~~~~~~ - Added ``xray.Dataset.count`` and ``xray.Dataset.dropna`` methods, copied from pandas, for working with missing values (:issue:`247`, :issue:`58`). - Added ``xray.DataArray.to_pandas`` for converting a data array into the pandas object with the same dimensionality (1D to Series, 2D to DataFrame, etc.) (:issue:`255`). - Support for reading gzipped netCDF3 files (:issue:`239`). - Reduced memory usage when writing netCDF files (:issue:`251`). - 'missing_value' is now supported as an alias for the '_FillValue' attribute on netCDF variables (:issue:`245`). - Trivial indexes, equivalent to ``range(n)`` where ``n`` is the length of the dimension, are no longer written to disk (:issue:`245`). Bug fixes ~~~~~~~~~ - Compatibility fixes for pandas v0.15 (:issue:`262`). - Fixes for display and indexing of ``NaT`` (not-a-time) (:issue:`238`, :issue:`240`) - Fix slicing by label was an argument is a data array (:issue:`250`). - Test data is now shipped with the source distribution (:issue:`253`). - Ensure order does not matter when doing arithmetic with scalar data arrays (:issue:`254`). - Order of dimensions preserved with ``DataArray.to_dataframe`` (:issue:`260`). v0.3 (21 September 2014) ------------------------ New features ~~~~~~~~~~~~ - **Revamped coordinates**: "coordinates" now refer to all arrays that are not used to index a dimension. Coordinates are intended to allow for keeping track of arrays of metadata that describe the grid on which the points in "variable" arrays lie. 
They are preserved (when unambiguous) even through mathematical operations. - **Dataset math**: ``xray.Dataset`` objects now support all arithmetic operations directly. Dataset-array operations map across all dataset variables; dataset-dataset operations act on each pair of variables with the same name. - **GroupBy math**: grouped ``Dataset`` and ``DataArray`` objects now support arithmetic with ungrouped xray objects, which provides a convenient shortcut for normalizing by the average value of a group. - The dataset ``__repr__`` method has been entirely overhauled; dataset objects now show their values when printed. - You can now index a dataset with a list of variables to return a new dataset: ``ds[['foo', 'bar']]``. Backwards incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ``Dataset.__eq__`` and ``Dataset.__ne__`` are now element-wise operations instead of comparing all values to obtain a single boolean. Use the method ``xray.Dataset.equals`` instead. Deprecations ~~~~~~~~~~~~ - ``Dataset.noncoords`` is deprecated: use ``Dataset.vars`` instead. - ``Dataset.select_vars`` is deprecated: index a ``Dataset`` with a list of variable names instead. - ``DataArray.select_vars`` and ``DataArray.drop_vars`` are deprecated: use ``xray.DataArray.reset_coords`` instead. v0.2 (14 August 2014) --------------------- This is a major release that includes some new features and quite a few bug fixes. Here are the highlights: - There is now a direct constructor for ``DataArray`` objects, which makes it possible to create a DataArray without using a Dataset. This is highlighted in the refreshed ``tutorial``. - You can perform aggregation operations like ``mean`` directly on ``xray.Dataset`` objects, thanks to Joe Hamman. These aggregation methods also work on grouped datasets. - xray now works on Python 2.6, thanks to Anna Kuznetsova. - A number of methods and attributes were given more sensible (usually shorter) names: ``labeled`` -> ``sel``, ``indexed`` -> ``isel``, ``select`` -> ``select_vars``, ``unselect`` -> ``drop_vars``, ``dimensions`` -> ``dims``, ``coordinates`` -> ``coords``, ``attributes`` -> ``attrs``. - New ``xray.Dataset.load_data`` and ``xray.Dataset.close`` methods for datasets facilitate a lower level of control of data loaded from disk. v0.1.1 (20 May 2014) -------------------- xray 0.1.1 is a bug-fix release that includes changes that should be almost entirely backwards compatible with v0.1: - Python 3 support (:issue:`53`) - Required numpy version relaxed to 1.7 (:issue:`129`) - Return numpy.datetime64 arrays for non-standard calendars (:issue:`126`) - Support for opening datasets associated with NetCDF4 groups (:issue:`127`) - Bug-fixes for concatenating datetime arrays (:issue:`134`) Special thanks to new contributors Thomas Kluyver, Joe Hamman and Alistair Miles. v0.1 (2 May 2014) ----------------- Initial release.
xarray-2025.09.0/licenses/ANYTREE_LICENSE

Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

xarray-2025.09.0/licenses/DASK_LICENSE

Copyright (c) 2014-2018, Anaconda, Inc. and contributors All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of Anaconda nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ���������������������������������������������������xarray-2025.09.0/licenses/ICOMOON_LICENSE�����������������������������������������������������������0000664�0000000�0000000�00000044340�15056206164�0017346�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Attribution 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. 
Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. f. 
Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. h. Licensor means the individual(s) or entity(ies) granting rights under this Public License. i. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. j. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. k. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. reproduce and Share the Licensed Material, in whole or in part; and b. produce, reproduce, and Share Adapted Material. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. 
Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. 
UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. 
Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the β€œLicensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org. ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/licenses/NUMPY_LICENSE�������������������������������������������������������������0000664�0000000�0000000�00000003007�15056206164�0017146�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Copyright (c) 2005-2011, NumPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the NumPy Developers nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/licenses/PANDAS_LICENSE������������������������������������������������������������0000664�0000000�0000000�00000003216�15056206164�0017206�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������pandas license ============== Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team All rights reserved. Copyright (c) 2008-2011 AQR Capital Management, LLC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/licenses/PYTHON_LICENSE������������������������������������������������������������0000664�0000000�0000000�00000030731�15056206164�0017263�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������A. HISTORY OF THE SOFTWARE ========================== Python was created in the early 1990s by Guido van Rossum at Stichting Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands as a successor of a language called ABC. Guido remains Python's principal author, although it includes many contributions from others. In 1995, Guido continued his work on Python at the Corporation for National Research Initiatives (CNRI, see http://www.cnri.reston.va.us) in Reston, Virginia where he released several versions of the software. In May 2000, Guido and the Python core development team moved to BeOpen.com to form the BeOpen PythonLabs team. In October of the same year, the PythonLabs team moved to Digital Creations (now Zope Corporation, see http://www.zope.com). In 2001, the Python Software Foundation (PSF, see http://www.python.org/psf/) was formed, a non-profit organization created specifically to own Python-related Intellectual Property. Zope Corporation is a sponsoring member of the PSF. All Python releases are Open Source (see http://www.opensource.org for the Open Source Definition). Historically, most, but not all, Python releases have also been GPL-compatible; the table below summarizes the various releases. Release Derived Year Owner GPL- from compatible? (1) 0.9.0 thru 1.2 1991-1995 CWI yes 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes 1.6 1.5.2 2000 CNRI no 2.0 1.6 2000 BeOpen.com no 1.6.1 1.6 2001 CNRI yes (2) 2.1 2.0+1.6.1 2001 PSF no 2.0.1 2.0+1.6.1 2001 PSF yes 2.1.1 2.1+2.0.1 2001 PSF yes 2.1.2 2.1.1 2002 PSF yes 2.1.3 2.1.2 2002 PSF yes 2.2 and above 2.1.1 2001-now PSF yes Footnotes: (1) GPL-compatible doesn't mean that we're distributing Python under the GPL. All Python licenses, unlike the GPL, let you distribute a modified version without making your changes open source. The GPL-compatible licenses make it possible to combine Python with other software that is released under the GPL; the others don't. (2) According to Richard Stallman, 1.6.1 is not GPL-compatible, because its license has a choice of law clause. According to CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 is "not incompatible" with the GPL. Thanks to the many outside volunteers who have worked under Guido's direction to make these releases possible. B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON =============================================================== PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -------------------------------------------- 1. 
This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 ------------------------------------------- BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the Individual or Organization ("Licensee") accessing and otherwise using this software in source or binary form and its associated documentation ("the Software"). 2. Subject to the terms and conditions of this BeOpen Python License Agreement, BeOpen hereby grants Licensee a non-exclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use the Software alone or in any derivative version, provided, however, that the BeOpen Python License is retained in the Software, alone or in any derivative version prepared by Licensee. 3. BeOpen is making the Software available to Licensee on an "AS IS" basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 5. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 6. This License Agreement shall be governed by and interpreted in all respects by the law of the State of California, excluding conflict of law provisions. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between BeOpen and Licensee. This License Agreement does not grant permission to use BeOpen trademarks or trade names in a trademark sense to endorse or promote products or services of Licensee, or any third party. As an exception, the "BeOpen Python" logos available at http://www.pythonlabs.com/logos.html may be used according to the permissions granted on that web page. 7. By copying, installing or otherwise using the software, Licensee agrees to be bound by the terms and conditions of this License Agreement. CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 --------------------------------------- 1. This LICENSE AGREEMENT is between the Corporation for National Research Initiatives, having an office at 1895 Preston White Drive, Reston, VA 20191 ("CNRI"), and the Individual or Organization ("Licensee") accessing and otherwise using Python 1.6.1 software in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, CNRI hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python 1.6.1 alone or in any derivative version, provided, however, that CNRI's License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee. Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): "Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement. This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013. This Agreement may also be obtained from a proxy server on the Internet using the following URL: http://hdl.handle.net/1895.22/1013". 3. In the event Licensee prepares a derivative work that is based on or incorporates Python 1.6.1 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python 1.6.1. 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. This License Agreement shall be governed by the federal intellectual property law of the United States, including without limitation the federal copyright law, and, to the extent such U.S. federal law does not apply, by the law of the Commonwealth of Virginia, excluding Virginia's conflict of law provisions. Notwithstanding the foregoing, with regard to derivative works based on Python 1.6.1 that incorporate non-separable material that was previously distributed under the GNU General Public License (GPL), the law of the Commonwealth of Virginia shall govern this License Agreement only as to issues arising under or with respect to Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between CNRI and Licensee. This License Agreement does not grant permission to use CNRI trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By clicking on the "ACCEPT" button where indicated, or by copying, installing or otherwise using Python 1.6.1, Licensee agrees to be bound by the terms and conditions of this License Agreement. ACCEPT CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 -------------------------------------------------- Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Stichting Mathematisch Centrum or CWI not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
���������������������������������������xarray-2025.09.0/licenses/SCIKIT_LEARN_LICENSE������������������������������������������������������0000664�0000000�0000000�00000002773�15056206164�0020156�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������BSD 3-Clause License Copyright (c) 2007-2021 The scikit-learn developers. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE �����xarray-2025.09.0/licenses/SEABORN_LICENSE�����������������������������������������������������������0000664�0000000�0000000�00000002732�15056206164�0017333�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Copyright (c) 2012-2013, Michael L. Waskom All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the {organization} nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

xarray-2025.09.0/properties/README.md

# Property-based tests using Hypothesis

This directory contains property-based tests using a library called
[Hypothesis](https://github.com/HypothesisWorks/hypothesis-python).

The property tests for xarray are a work in progress - more are always welcome.
They are stored in a separate directory because they tend to run more examples
and thus take longer, and so that local development can run a test suite
without needing to `pip install hypothesis`.

## Hang on, "property-based" tests?

Instead of making assertions about operations on a particular piece of data,
you use Hypothesis to describe a _kind_ of data, then make assertions that
should hold for _any_ example of this kind.

For example: "given a 2d ndarray of dtype uint8 `arr`,
`xr.DataArray(arr).plot.imshow()` never raises an exception".

Hypothesis will then try many random examples, and report a minimised failing
input for each error it finds.

[See the docs for more info.](https://hypothesis.readthedocs.io/en/master/)
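As a concrete sketch of the kind of test described above (illustrative only: this particular property, test name, and array shape are made up for the example and are not part of this suite), such a test might look like:

```python
# A minimal, hypothetical example of a property-based test in this style.
import hypothesis.extra.numpy as npst
from hypothesis import given

import xarray as xr


@given(npst.arrays(dtype="uint8", shape=npst.array_shapes(min_dims=2, max_dims=2)))
def test_transpose_roundtrip(arr) -> None:
    # For *any* 2d uint8 array, transposing the DataArray twice should give
    # back something identical to the original.
    da = xr.DataArray(arr, dims=("x", "y"))
    xr.testing.assert_identical(da, da.transpose().transpose())
```

When run under `pytest`, Hypothesis generates many such arrays and shrinks any failure to a minimal reproducing input.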
xarray-2025.09.0/properties/__init__.py

xarray-2025.09.0/properties/conftest.py

import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--run-slow-hypothesis",
        action="store_true",
        default=False,
        help="run slow hypothesis tests",
    )


def pytest_collection_modifyitems(config, items):
    if config.getoption("--run-slow-hypothesis"):
        return
    skip_slow_hyp = pytest.mark.skip(reason="need --run-slow-hypothesis option to run")
    for item in items:
        if "slow_hypothesis" in item.keywords:
            item.add_marker(skip_slow_hyp)


try:
    from hypothesis import settings
except ImportError:
    pass
else:
    # Run for a while - arrays are a bigger search space than usual
    settings.register_profile("ci", deadline=None, print_blob=True)
    settings.load_profile("ci")

xarray-2025.09.0/properties/test_encode_decode.py

"""
Property-based tests for encoding/decoding methods.

These ones pass, just as you'd hope!
""" import warnings import pytest pytest.importorskip("hypothesis") # isort: split import hypothesis.extra.numpy as npst import hypothesis.strategies as st import numpy as np from hypothesis import given import xarray as xr from xarray.coding.times import _parse_iso8601 from xarray.testing.strategies import CFTimeStrategyISO8601, variables from xarray.tests import requires_cftime @pytest.mark.slow @given(original=variables()) def test_CFMask_coder_roundtrip(original) -> None: coder = xr.coding.variables.CFMaskCoder() roundtripped = coder.decode(coder.encode(original)) xr.testing.assert_identical(original, roundtripped) @pytest.mark.xfail @pytest.mark.slow @given(var=variables(dtype=npst.floating_dtypes())) def test_CFMask_coder_decode(var) -> None: var[0] = -99 var.attrs["_FillValue"] = -99 coder = xr.coding.variables.CFMaskCoder() decoded = coder.decode(var) assert np.isnan(decoded[0]) @pytest.mark.slow @given(original=variables()) def test_CFScaleOffset_coder_roundtrip(original) -> None: coder = xr.coding.variables.CFScaleOffsetCoder() roundtripped = coder.decode(coder.encode(original)) xr.testing.assert_identical(original, roundtripped) @requires_cftime @given(dt=st.datetimes() | CFTimeStrategyISO8601()) def test_iso8601_decode(dt): iso = dt.isoformat() with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=".*date/calendar/year zero.*") parsed, _ = _parse_iso8601(type(dt), iso) assert dt == parsed ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/properties/test_index_manipulation.py����������������������������������������������0000664�0000000�0000000�00000022475�15056206164�0023052�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import itertools import warnings import numpy as np import pytest import xarray as xr from xarray import Dataset from xarray.testing import _assert_internal_invariants pytest.importorskip("hypothesis") pytestmark = pytest.mark.slow_hypothesis import hypothesis.extra.numpy as npst import hypothesis.strategies as st from hypothesis import note, settings from hypothesis.stateful import ( RuleBasedStateMachine, initialize, invariant, precondition, rule, ) import xarray.testing.strategies as xrst @st.composite def unique(draw, strategy): # https://stackoverflow.com/questions/73737073/create-hypothesis-strategy-that-returns-unique-values seen = draw(st.shared(st.builds(set), key="key-for-unique-elems")) return draw( strategy.filter(lambda x: x not in seen).map(lambda x: seen.add(x) or x) ) # Share to ensure we get unique names on each draw, # so we don't try to add two variables with the same name # or stack to a dimension with a name that already exists in the Dataset. 
UNIQUE_NAME = unique(strategy=xrst.names()) DIM_NAME = xrst.dimension_names(name_strategy=UNIQUE_NAME, min_dims=1, max_dims=1) index_variables = st.builds( xr.Variable, data=npst.arrays( dtype=xrst.pandas_index_dtypes(), shape=npst.array_shapes(min_dims=1, max_dims=1), elements=dict(allow_nan=False, allow_infinity=False, allow_subnormal=False), unique=True, ), dims=DIM_NAME, attrs=xrst.attrs(), ) def add_dim_coord_and_data_var(ds, var): (name,) = var.dims # dim coord ds[name] = var # non-dim coord of same size; this allows renaming ds[name + "_"] = var class DatasetStateMachine(RuleBasedStateMachine): # Can't use bundles because we'd need pre-conditions on consumes(bundle) # indexed_dims = Bundle("indexed_dims") # multi_indexed_dims = Bundle("multi_indexed_dims") def __init__(self): super().__init__() self.dataset = Dataset() self.check_default_indexes = True # We track these separately as lists so we can guarantee order of iteration over them. # Order of iteration over Dataset.dims is not guaranteed self.indexed_dims = [] self.multi_indexed_dims = [] @initialize(var=index_variables) def init_ds(self, var): """Initialize the Dataset so that at least one rule will always fire.""" (name,) = var.dims add_dim_coord_and_data_var(self.dataset, var) self.indexed_dims.append(name) # TODO: stacking with a timedelta64 index and unstacking converts it to object @rule(var=index_variables) def add_dim_coord(self, var): (name,) = var.dims note(f"adding dimension coordinate {name}") add_dim_coord_and_data_var(self.dataset, var) self.indexed_dims.append(name) @rule(var=index_variables) def assign_coords(self, var): (name,) = var.dims note(f"assign_coords: {name}") self.dataset = self.dataset.assign_coords({name: var}) self.indexed_dims.append(name) @property def has_indexed_dims(self) -> bool: return bool(self.indexed_dims + self.multi_indexed_dims) @rule(data=st.data()) @precondition(lambda self: self.has_indexed_dims) def reset_index(self, data): dim = data.draw(st.sampled_from(self.indexed_dims + self.multi_indexed_dims)) self.check_default_indexes = False note(f"> resetting {dim}") self.dataset = self.dataset.reset_index(dim) if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @rule(newname=UNIQUE_NAME, data=st.data(), create_index=st.booleans()) @precondition(lambda self: bool(self.indexed_dims)) def stack(self, newname, data, create_index): oldnames = data.draw( st.lists( st.sampled_from(self.indexed_dims), min_size=1, max_size=3 if create_index else None, unique=True, ) ) note(f"> stacking {oldnames} as {newname}") self.dataset = self.dataset.stack( {newname: oldnames}, create_index=create_index ) if create_index: self.multi_indexed_dims += [newname] # if create_index is False, then we just drop these for dim in oldnames: del self.indexed_dims[self.indexed_dims.index(dim)] @rule(data=st.data()) @precondition(lambda self: bool(self.multi_indexed_dims)) def unstack(self, data): # TODO: add None dim = data.draw(st.sampled_from(self.multi_indexed_dims)) note(f"> unstacking {dim}") if dim is not None: pd_index = self.dataset.xindexes[dim].index self.dataset = self.dataset.unstack(dim) del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] if dim is not None: self.indexed_dims.extend(pd_index.names) else: # TODO: fix this pass @rule(newname=UNIQUE_NAME, data=st.data()) @precondition(lambda self: bool(self.dataset.variables)) def rename_vars(self, newname, data): dim = 
data.draw(st.sampled_from(sorted(self.dataset.variables))) # benbovy: "skip the default indexes invariant test when the name of an # existing dimension coordinate is passed as input kwarg or dict key # to .rename_vars()." self.check_default_indexes = False note(f"> renaming {dim} to {newname}") self.dataset = self.dataset.rename_vars({dim: newname}) if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @precondition(lambda self: bool(self.dataset.dims)) @rule(data=st.data()) def drop_dims(self, data): dims = data.draw( st.lists( st.sampled_from(sorted(self.dataset.dims)), min_size=1, unique=True, ) ) note(f"> drop_dims: {dims}") # TODO: dropping a multi-index dimension raises a DeprecationWarning with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DeprecationWarning) self.dataset = self.dataset.drop_dims(dims) for dim in dims: if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @precondition(lambda self: bool(self.indexed_dims)) @rule(data=st.data()) def drop_indexes(self, data): self.check_default_indexes = False dims = data.draw( st.lists(st.sampled_from(self.indexed_dims), min_size=1, unique=True) ) note(f"> drop_indexes: {dims}") self.dataset = self.dataset.drop_indexes(dims) for dim in dims: if dim in self.indexed_dims: del self.indexed_dims[self.indexed_dims.index(dim)] elif dim in self.multi_indexed_dims: del self.multi_indexed_dims[self.multi_indexed_dims.index(dim)] @property def swappable_dims(self): ds = self.dataset options = [] for dim in self.indexed_dims: choices = [ name for name, var in ds._variables.items() if var.dims == (dim,) # TODO: Avoid swapping a dimension to itself and name != dim ] options.extend( (a, b) for a, b in itertools.zip_longest((dim,), choices, fillvalue=dim) ) return options @rule(data=st.data()) # TODO: swap_dims is basically all broken if a multiindex is present # TODO: Avoid swapping from Index to a MultiIndex level # TODO: Avoid swapping from MultiIndex to a level of the same MultiIndex # TODO: Avoid swapping when a MultiIndex is present @precondition(lambda self: not bool(self.multi_indexed_dims)) @precondition(lambda self: bool(self.swappable_dims)) def swap_dims(self, data): ds = self.dataset options = self.swappable_dims dim, to = data.draw(st.sampled_from(options)) note( f"> swapping {dim} to {to}, found swappable dims: {options}, all_dims: {tuple(self.dataset.dims)}" ) self.dataset = ds.swap_dims({dim: to}) del self.indexed_dims[self.indexed_dims.index(dim)] self.indexed_dims += [to] @invariant() def assert_invariants(self): # note(f"> ===\n\n {self.dataset!r} \n===\n\n") _assert_internal_invariants(self.dataset, self.check_default_indexes) DatasetStateMachine.TestCase.settings = settings(max_examples=300, deadline=None) DatasetTest = DatasetStateMachine.TestCase @pytest.mark.skip(reason="failure detected by hypothesis") def test_unstack_object(): ds = xr.Dataset() ds["0"] = np.array(["", "\x000"], dtype=object) ds.stack({"1": ["0"]}).unstack() @pytest.mark.skip(reason="failure detected by hypothesis") def test_unstack_timedelta_index(): ds = xr.Dataset() ds["0"] = np.array([0, 1, 2, 3], dtype="timedelta64[ns]") ds.stack({"1": ["0"]}).unstack() 
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/properties/test_pandas_roundtrip.py������������������������������������������������0000664�0000000�0000000�00000013365�15056206164�0022535�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Property-based tests for roundtripping between xarray and pandas objects. """ from functools import partial import numpy as np import pandas as pd import pytest import xarray as xr pytest.importorskip("hypothesis") import hypothesis.extra.numpy as npst # isort:skip import hypothesis.extra.pandas as pdst # isort:skip import hypothesis.strategies as st # isort:skip from hypothesis import given # isort:skip from xarray.tests import has_pyarrow numeric_dtypes = st.one_of( npst.unsigned_integer_dtypes(endianness="="), npst.integer_dtypes(endianness="="), npst.floating_dtypes(endianness="="), ) numeric_series = numeric_dtypes.flatmap(lambda dt: pdst.series(dtype=dt)) @st.composite def dataframe_strategy(draw): tz = draw(st.timezones()) dtype = pd.DatetimeTZDtype(unit="ns", tz=tz) datetimes = st.datetimes( min_value=pd.Timestamp("1677-09-21T00:12:43.145224193"), max_value=pd.Timestamp("2262-04-11T23:47:16.854775807"), timezones=st.just(tz), ) df = pdst.data_frames( [ pdst.column("datetime_col", elements=datetimes), pdst.column("other_col", elements=st.integers()), ], index=pdst.range_indexes(min_size=1, max_size=10), ) return draw(df).astype({"datetime_col": dtype}) an_array = npst.arrays( dtype=numeric_dtypes, shape=npst.array_shapes(max_dims=2), # can only convert 1D/2D to pandas ) @st.composite def datasets_1d_vars(draw) -> xr.Dataset: """Generate datasets with only 1D variables Suitable for converting to pandas dataframes. """ # Generate an index for the dataset idx = draw(pdst.indexes(dtype="u8", min_size=0, max_size=100)) # Generate 1-3 variables, 1D with the same length as the index vars_strategy = st.dictionaries( keys=st.text(), values=npst.arrays(dtype=numeric_dtypes, shape=len(idx)).map( partial(xr.Variable, ("rows",)) ), min_size=1, max_size=3, ) return xr.Dataset(draw(vars_strategy), coords={"rows": idx}) @given(st.data(), an_array) def test_roundtrip_dataarray(data, arr) -> None: names = data.draw( st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim, unique=True).map( tuple ) ) coords = {name: np.arange(n) for (name, n) in zip(names, arr.shape, strict=True)} original = xr.DataArray(arr, dims=names, coords=coords) roundtripped = xr.DataArray(original.to_pandas()) xr.testing.assert_identical(original, roundtripped) @given(datasets_1d_vars()) def test_roundtrip_dataset(dataset) -> None: df = dataset.to_dataframe() assert isinstance(df, pd.DataFrame) roundtripped = xr.Dataset(df) xr.testing.assert_identical(dataset, roundtripped) @given(numeric_series, st.text()) def test_roundtrip_pandas_series(ser, ix_name) -> None: # Need to name the index, otherwise Xarray calls it 'dim_0'. 
ser.index.name = ix_name arr = xr.DataArray(ser) roundtripped = arr.to_pandas() pd.testing.assert_series_equal(ser, roundtripped) xr.testing.assert_identical(arr, roundtripped.to_xarray()) # Dataframes with columns of all the same dtype - for roundtrip to DataArray numeric_homogeneous_dataframe = numeric_dtypes.flatmap( lambda dt: pdst.data_frames(columns=pdst.columns(["a", "b", "c"], dtype=dt)) ) @pytest.mark.xfail @given(numeric_homogeneous_dataframe) def test_roundtrip_pandas_dataframe(df) -> None: # Need to name the indexes, otherwise Xarray names them 'dim_0', 'dim_1'. df.index.name = "rows" df.columns.name = "cols" arr = xr.DataArray(df) roundtripped = arr.to_pandas() pd.testing.assert_frame_equal(df, roundtripped) xr.testing.assert_identical(arr, roundtripped.to_xarray()) @given(df=dataframe_strategy()) def test_roundtrip_pandas_dataframe_datetime(df) -> None: # Need to name the indexes, otherwise Xarray names them 'dim_0', 'dim_1'. df.index.name = "rows" df.columns.name = "cols" dataset = xr.Dataset.from_dataframe(df) roundtripped = dataset.to_dataframe() roundtripped.columns.name = "cols" # why? pd.testing.assert_frame_equal(df, roundtripped) xr.testing.assert_identical(dataset, roundtripped.to_xarray()) @pytest.mark.parametrize( "extension_array", [ pd.Categorical(["a", "b", "c"]), pd.array(["a", "b", "c"], dtype="string"), pd.arrays.IntervalArray( [pd.Interval(0, 1), pd.Interval(1, 5), pd.Interval(2, 6)] ), pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(["1h", "2h", "3h"])), pd.arrays.DatetimeArray._from_sequence( pd.DatetimeIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D") ), np.array([1, 2, 3], dtype="int64"), ] + ([pd.array([1, 2, 3], dtype="int64[pyarrow]")] if has_pyarrow else []), ids=["cat", "string", "interval", "timedelta", "datetime", "numpy"] + (["pyarrow"] if has_pyarrow else []), ) @pytest.mark.parametrize("is_index", [True, False]) def test_roundtrip_1d_pandas_extension_array(extension_array, is_index) -> None: df = pd.DataFrame({"arr": extension_array}) if is_index: df = df.set_index("arr") arr = xr.Dataset.from_dataframe(df)["arr"] roundtripped = arr.to_pandas() df_arr_to_test = df.index if is_index else df["arr"] assert (df_arr_to_test == roundtripped).all() # `NumpyExtensionArray` types are not roundtripped, including `StringArray` which subtypes. 
if isinstance(extension_array, pd.arrays.NumpyExtensionArray): # type: ignore[attr-defined] assert isinstance(arr.data, np.ndarray) else: assert ( df_arr_to_test.dtype == (roundtripped.index if is_index else roundtripped).dtype ) xr.testing.assert_identical(arr, roundtripped.to_xarray()) ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/properties/test_properties.py������������������������������������������������������0000664�0000000�0000000�00000003721�15056206164�0021350�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import itertools import pytest pytest.importorskip("hypothesis") import hypothesis.strategies as st from hypothesis import given, note import xarray as xr import xarray.testing.strategies as xrst from xarray.groupers import find_independent_seasons, season_to_month_tuple @given(attrs=xrst.simple_attrs) def test_assert_identical(attrs): v = xr.Variable(dims=(), data=0, attrs=attrs) xr.testing.assert_identical(v, v.copy(deep=True)) ds = xr.Dataset(attrs=attrs) xr.testing.assert_identical(ds, ds.copy(deep=True)) @given( roll=st.integers(min_value=0, max_value=12), breaks=st.lists( st.integers(min_value=0, max_value=11), min_size=1, max_size=12, unique=True ), ) def test_property_season_month_tuple(roll, breaks): chars = list("JFMAMJJASOND") months = tuple(range(1, 13)) rolled_chars = chars[roll:] + chars[:roll] rolled_months = months[roll:] + months[:roll] breaks = sorted(breaks) if breaks[0] != 0: breaks = [0] + breaks if breaks[-1] != 12: breaks = breaks + [12] seasons = tuple( "".join(rolled_chars[start:stop]) for start, stop in itertools.pairwise(breaks) ) actual = season_to_month_tuple(seasons) expected = tuple( rolled_months[start:stop] for start, stop in itertools.pairwise(breaks) ) assert expected == actual @given(data=st.data(), nmonths=st.integers(min_value=1, max_value=11)) def test_property_find_independent_seasons(data, nmonths): chars = "JFMAMJJASOND" # if stride > nmonths, then we can't infer season order stride = data.draw(st.integers(min_value=1, max_value=nmonths)) chars = chars + chars[:nmonths] seasons = [list(chars[i : i + nmonths]) for i in range(0, 12, stride)] note(seasons) groups = find_independent_seasons(seasons) for group in groups: inds = tuple(itertools.chain(*group.inds)) assert len(inds) == len(set(inds)) assert len(group.codes) == len(set(group.codes)) �����������������������������������������������xarray-2025.09.0/pyproject.toml���������������������������������������������������������������������0000664�0000000�0000000�00000031426�15056206164�0016266�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������[project] authors = [{ name = "xarray Developers", email = "xarray@googlegroups.com" }] classifiers = [ 
"Development Status :: 5 - Production/Stable", "Operating System :: OS Independent", "Intended Audience :: Science/Research", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering", ] description = "N-D labeled arrays and datasets in Python" dynamic = ["version"] license = "Apache-2.0" name = "xarray" readme = "README.md" requires-python = ">=3.11" dependencies = ["numpy>=1.26", "packaging>=24.1", "pandas>=2.2"] # We don't encode minimum requirements here (though if we can write a script to # generate the text from `min_deps_check.py`, that's welcome...). We do add # `numba>=0.54` here because of https://github.com/astral-sh/uv/issues/7881; # note that it's not a direct dependency of xarray. [project.optional-dependencies] accel = [ "scipy>=1.13", "bottleneck", "numbagg>=0.8", "numba>=0.59", "flox>=0.9", "opt_einsum", "numpy<2.3", # numba has not updated yet: https://github.com/numba/numba/issues/10105 ] complete = ["xarray[accel,etc,io,parallel,viz]"] io = [ "netCDF4>=1.6.0", "h5netcdf", "pydap", "scipy>=1.13", "zarr>=2.18", "fsspec", "cftime", "pooch", ] etc = ["sparse>=0.15"] parallel = ["dask[complete]"] viz = ["cartopy>=0.23", "matplotlib", "nc-time-axis", "seaborn"] types = [ "pandas-stubs", "scipy-stubs", "types-PyYAML", "types-Pygments", "types-colorama", "types-decorator", "types-defusedxml", "types-docutils", "types-networkx", "types-pexpect", "types-psutil", "types-pycurl", "types-openpyxl", "types-python-dateutil", "types-pytz", "types-setuptools", ] [dependency-groups] dev = [ "hypothesis", "jinja2", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-env", "pytest-mypy-plugins", "pytest-timeout", "pytest-xdist", "pytest-asyncio", "ruff>=0.8.0", "sphinx", "sphinx_autosummary_accessors", "xarray[complete,types]", ] [project.urls] Documentation = "https://docs.xarray.dev" SciPy2015-talk = "https://www.youtube.com/watch?v=X0pAhJgySxk" homepage = "https://xarray.dev/" issue-tracker = "https://github.com/pydata/xarray/issues" source-code = "https://github.com/pydata/xarray" [project.entry-points."xarray.chunkmanagers"] dask = "xarray.namedarray.daskmanager:DaskManager" [build-system] build-backend = "setuptools.build_meta" requires = ["setuptools>=77.0.3", "setuptools-scm>=8"] [tool.setuptools.packages.find] include = ["xarray*"] [tool.setuptools_scm] fallback_version = "9999" [tool.coverage.run] omit = [ "*/xarray/tests/*", "*/xarray/compat/dask_array_compat.py", "*/xarray/compat/npcompat.py", "*/xarray/compat/pdcompat.py", "*/xarray/namedarray/pycompat.py", "*/xarray/core/types.py", ] source = ["xarray"] [tool.coverage.report] exclude_lines = ["pragma: no cover", "if TYPE_CHECKING"] [tool.mypy] enable_error_code = ["ignore-without-code", "redundant-self", "redundant-expr"] exclude = ['build', 'xarray/util/generate_.*\.py'] files = "xarray" show_error_context = true warn_redundant_casts = true warn_unused_configs = true warn_unused_ignores = true # Much of the numerical computing stack doesn't have type annotations yet. 
[[tool.mypy.overrides]] ignore_missing_imports = true module = [ "affine.*", "bottleneck.*", "cartopy.*", "cf_units.*", "cfgrib.*", "cftime.*", "cloudpickle.*", "cubed.*", "cupy.*", "fsspec.*", "h5netcdf.*", "h5py.*", "iris.*", "mpl_toolkits.*", "nc_time_axis.*", "netCDF4.*", "netcdftime.*", "numcodecs.*", "opt_einsum.*", "pint.*", "pooch.*", "pyarrow.*", "pydap.*", "scipy.*", "seaborn.*", "setuptools", "sparse.*", "toolz.*", "zarr.*", "numpy.exceptions.*", # remove once support for `numpy<2.0` has been dropped "array_api_strict.*", ] # Gradually we want to add more modules to this list, ratcheting up our total # coverage. Once a module is here, functions are checked by mypy regardless of # whether they have type annotations. It would be especially useful to have test # files listed here, because without them being checked, we don't have a great # way of testing our annotations. [[tool.mypy.overrides]] check_untyped_defs = true module = [ "xarray.core.accessor_dt", "xarray.core.accessor_str", "xarray.structure.alignment", "xarray.computation.*", "xarray.indexes.*", "xarray.tests.*", ] # This then excludes some modules from the above list. (So ideally we remove # from here in time...) [[tool.mypy.overrides]] check_untyped_defs = false module = [ "xarray.tests.test_coarsen", "xarray.tests.test_coding_times", "xarray.tests.test_combine", "xarray.tests.test_computation", "xarray.tests.test_concat", "xarray.tests.test_coordinates", "xarray.tests.test_dask", "xarray.tests.test_dataarray", "xarray.tests.test_duck_array_ops", "xarray.tests.test_indexing", "xarray.tests.test_merge", "xarray.tests.test_sparse", "xarray.tests.test_ufuncs", "xarray.tests.test_units", "xarray.tests.test_variable", "xarray.tests.test_weighted", ] # Use strict = true whenever namedarray has become standalone. In the meantime # don't forget to add all new files related to namedarray here: # ref: https://mypy.readthedocs.io/en/stable/existing_code.html#introduce-stricter-options [[tool.mypy.overrides]] # Start off with these warn_unused_ignores = true # Getting these passing should be easy strict_concatenate = true strict_equality = true # Strongly recommend enabling this one as soon as you can check_untyped_defs = true # These shouldn't be too much additional work, but may be tricky to # get passing if you use a lot of untyped libraries disallow_any_generics = true disallow_subclassing_any = true disallow_untyped_decorators = true # These next few are various gradations of forcing use of type annotations disallow_incomplete_defs = true disallow_untyped_calls = true disallow_untyped_defs = true # This one isn't too hard to get passing, but return on investment is lower no_implicit_reexport = true # This one can be tricky to get passing if you use a lot of untyped libraries warn_return_any = true module = ["xarray.namedarray.*", "xarray.tests.test_namedarray"] # We disable pyright here for now, since including it means that all errors show # up in devs' VS Code, which then makes it more difficult to work with actual # errors. It overrides local VS Code settings so isn't escapable. 
# [tool.pyright] # defineConstant = {DEBUG = true} # # Enabling this means that developers who have disabled the warning locally β€” # # because not all dependencies are installable β€” are overridden # # reportMissingImports = true # reportMissingTypeStubs = false [tool.ruff] extend-exclude = ["doc", "_typed_ops.pyi"] [tool.ruff.lint] extend-select = [ "YTT", # flake8-2020 "B", # flake8-bugbear "C4", # flake8-comprehensions "ISC", # flake8-implicit-str-concat "PIE", # flake8-pie "TID", # flake8-tidy-imports (absolute imports) "PYI", # flake8-pyi "SIM", # flake8-simplify "FLY", # flynt "I", # isort "PERF", # Perflint "W", # pycodestyle warnings "PGH", # pygrep-hooks "PLC", # Pylint Convention "PLE", # Pylint Errors "PLR", # Pylint Refactor "PLW", # Pylint Warnings "UP", # pyupgrade "FURB", # refurb "RUF", ] extend-safe-fixes = [ "TID252", # absolute imports ] ignore = [ "C40", # unnecessary generator, comprehension, or literal "PIE790", # unnecessary pass statement "PYI019", # use `Self` instead of custom TypeVar "PYI041", # use `float` instead of `int | float` "SIM102", # use a single `if` statement instead of nested `if` statements "SIM108", # use ternary operator instead of `if`-`else`-block "SIM117", # use a single `with` statement instead of nested `with` statements "SIM118", # use `key in dict` instead of `key in dict.keys()` "SIM300", # yoda condition detected "PERF203", # try-except within a loop incurs performance overhead "E402", # module level import not at top of file "E731", # do not assign a lambda expression, use a def "PLC0415", # `import` should be at the top-level of a file "PLC0206", # extracting value from dictionary without calling `.items()` "PLR091", # too many arguments / branches / statements "PLR2004", # magic value used in comparison "PLW0603", # using the global statement to update is discouraged "PLW0642", # reassigned `self` variable in instance method "PLW1641", # object does not implement `__hash__` method "PLW2901", # `for` loop variable overwritten by assignment target "UP007", # use X | Y for type annotations "FURB105", # unnecessary empty string passed to `print` "RUF001", # string contains ambiguous unicode character "RUF002", # docstring contains ambiguous acute accent unicode character "RUF003", # comment contains ambiguous no-break space unicode character "RUF005", # consider unpacking operator instead of concatenation "RUF012", # mutable class attributes ] [tool.ruff.lint.per-file-ignores] # don't enforce absolute imports "asv_bench/**" = ["TID252"] # comparison with itself in tests "xarray/tests/**" = ["PLR0124"] # looks like ruff bugs "xarray/core/_typed_ops.py" = ["PYI034"] "xarray/namedarray/_typing.py" = ["PYI018", "PYI046"] [tool.ruff.lint.isort] known-first-party = ["xarray"] [tool.ruff.lint.flake8-tidy-imports] # Disallow all relative imports. ban-relative-imports = "all" [tool.ruff.lint.flake8-tidy-imports.banned-api] "pandas.api.types.is_extension_array_dtype".msg = "Use xarray.core.utils.is_allowed_extension_array{_dtype} instead. Only use the banend API if the incoming data has already been sanitized by xarray" [tool.pytest.ini_options] addopts = [ "--strict-config", "--strict-markers", "--mypy-only-local-stub", "--mypy-pyproject-toml-file=pyproject.toml", ] # We want to forbid warnings from within xarray in our tests β€” instead we should # fix our own code, or mark the test itself as expecting a warning. 
So this: # - Converts any warning from xarray into an error # - Allows some warnings ("default") which the test suite currently raises, # since it wasn't practical to fix them all before merging this config. The # warnings are reported in CI (since it uses `default`, not `ignore`). # # Over time, we can remove these rules allowing warnings. A valued contribution # is removing a line, seeing what breaks, and then fixing the library code or # tests so that it doesn't raise warnings. # # There are some instance where we'll want to add to these rules: # - While we only raise errors on warnings from within xarray, a dependency can # raise a warning with a stacklevel such that it's interpreted to be raised # from xarray and this will mistakenly convert it to an error. If that # happens, please feel free to add a rule switching it to `default` here, and # disabling the error. # - If these settings get in the way of making progress, it's also acceptable to # temporarily add additional `default` rules. # - But we should only add `ignore` rules if we're confident that we'll never # need to address a warning. filterwarnings = [ "error:::xarray.*", # Zarr 2 V3 implementation "default:Zarr-Python is not in alignment with the final V3 specification", # TODO: this is raised for vlen-utf8, consolidated metadata, U1 dtype "default:is currently not part .* the Zarr version 3 specification.", # TODO: remove once we know how to deal with a changed signature in protocols "default:::xarray.tests.test_strategies", ] log_cli_level = "INFO" markers = [ "flaky: flaky tests", "mypy: type annotation tests", "network: tests requiring a network connection", "slow: slow tests", "slow_hypothesis: slow hypothesis tests", ] minversion = "7" python_files = ["test_*.py"] testpaths = ["xarray/tests", "properties"] [tool.aliases] test = "pytest" [tool.repo-review] ignore = [ "PP308", # This option creates a large amount of log lines. 
] [tool.typos] [tool.typos.default] extend-ignore-identifiers-re = [ # Variable names "nd_.*", ".*_nd", "ba_.*", ".*_ba", "ser_.*", ".*_ser", # Function/class names "NDArray.*", ".*NDArray.*", ] [tool.typos.default.extend-words] # NumPy function names arange = "arange" ond = "ond" aso = "aso" # Technical terms nd = "nd" nin = "nin" # Variable names ba = "ba" ser = "ser" fo = "fo" iy = "iy" vart = "vart" ede = "ede" # Organization/Institution names Stichting = "Stichting" Mathematisch = "Mathematisch" # People's names Soler = "Soler" Bruning = "Bruning" Tung = "Tung" Claus = "Claus" Celles = "Celles" slowy = "slowy" Commun = "Commun" # Tests Ome = "Ome" SUR = "SUR" Tio = "Tio" Ono = "Ono" abl = "abl" # Technical terms splitted = "splitted" childs = "childs" cutted = "cutted" LOCA = "LOCA" [tool.typos.type.jupyter] extend-ignore-re = ["\"id\": \".*\""] ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/setup.py���������������������������������������������������������������������������0000775�0000000�0000000�00000000150�15056206164�0015055�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/usr/bin/env python from setuptools import setup setup(use_scm_version={"fallback_version": "9999"}) ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/����������������������������������������������������������������������������0000775�0000000�0000000�00000000000�15056206164�0014652�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/__init__.py�����������������������������������������������������������������0000664�0000000�0000000�00000007321�15056206164�0016766�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from importlib.metadata import version as _version from xarray import coders, groupers, indexes, testing, tutorial, ufuncs from xarray.backends.api import ( load_dataarray, load_dataset, load_datatree, open_dataarray, open_dataset, open_datatree, open_groups, open_mfdataset, save_mfdataset, ) from xarray.backends.zarr import open_zarr from xarray.coding.cftime_offsets import cftime_range, date_range, 
date_range_like from xarray.coding.cftimeindex import CFTimeIndex from xarray.coding.frequencies import infer_freq from xarray.computation.apply_ufunc import ( apply_ufunc, ) from xarray.computation.computation import ( corr, cov, cross, dot, polyval, where, ) from xarray.conventions import SerializationWarning, decode_cf from xarray.core.common import ALL_DIMS, full_like, ones_like, zeros_like from xarray.core.coordinates import Coordinates, CoordinateValidationError from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.datatree_mapping import map_over_datasets from xarray.core.extensions import ( register_dataarray_accessor, register_dataset_accessor, register_datatree_accessor, ) from xarray.core.indexes import Index from xarray.core.indexing import IndexSelResult from xarray.core.options import get_options, set_options from xarray.core.parallel import map_blocks from xarray.core.treenode import ( InvalidTreeError, NotFoundInTreeError, TreeIsomorphismError, group_subtrees, ) from xarray.core.variable import IndexVariable, Variable, as_variable from xarray.namedarray.core import NamedArray from xarray.structure.alignment import AlignmentError, align, broadcast from xarray.structure.chunks import unify_chunks from xarray.structure.combine import combine_by_coords, combine_nested from xarray.structure.concat import concat from xarray.structure.merge import Context, MergeError, merge from xarray.util.print_versions import show_versions try: __version__ = _version("xarray") except Exception: # Local copy or not installed with setuptools. # Disable minimum version checks on downstream libraries. __version__ = "9999" # A hardcoded __all__ variable is necessary to appease # `mypy --strict` running in projects that import xarray. 
__all__ = ( # noqa: RUF022 # Sub-packages "coders", "groupers", "indexes", "testing", "tutorial", "ufuncs", # Top-level functions "align", "apply_ufunc", "as_variable", "broadcast", "cftime_range", "combine_by_coords", "combine_nested", "concat", "corr", "cov", "cross", "date_range", "date_range_like", "decode_cf", "dot", "full_like", "get_options", "group_subtrees", "infer_freq", "load_dataarray", "load_dataset", "load_datatree", "map_blocks", "map_over_datasets", "merge", "ones_like", "open_dataarray", "open_dataset", "open_datatree", "open_groups", "open_mfdataset", "open_zarr", "polyval", "register_dataarray_accessor", "register_dataset_accessor", "register_datatree_accessor", "save_mfdataset", "set_options", "show_versions", "unify_chunks", "where", "zeros_like", # Classes "CFTimeIndex", "Context", "Coordinates", "DataArray", "DataTree", "Dataset", "Index", "IndexSelResult", "IndexVariable", "NamedArray", "Variable", # Exceptions "AlignmentError", "CoordinateValidationError", "InvalidTreeError", "MergeError", "NotFoundInTreeError", "SerializationWarning", "TreeIsomorphismError", # Constants "ALL_DIMS", "__version__", ) ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/�������������������������������������������������������������������0000775�0000000�0000000�00000000000�15056206164�0016424�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/__init__.py��������������������������������������������������������0000664�0000000�0000000�00000002673�15056206164�0020545�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Backend objects for saving and loading data DataStores provide a uniform interface for saving and loading data in different formats. They should not be used directly, but rather through Dataset objects. 
""" from xarray.backends.common import AbstractDataStore, BackendArray, BackendEntrypoint from xarray.backends.file_manager import ( CachingFileManager, DummyFileManager, FileManager, ) from xarray.backends.h5netcdf_ import H5netcdfBackendEntrypoint, H5NetCDFStore from xarray.backends.memory import InMemoryDataStore from xarray.backends.netCDF4_ import NetCDF4BackendEntrypoint, NetCDF4DataStore from xarray.backends.plugins import list_engines, refresh_engines from xarray.backends.pydap_ import PydapBackendEntrypoint, PydapDataStore from xarray.backends.scipy_ import ScipyBackendEntrypoint, ScipyDataStore from xarray.backends.store import StoreBackendEntrypoint from xarray.backends.zarr import ZarrBackendEntrypoint, ZarrStore __all__ = [ "AbstractDataStore", "BackendArray", "BackendEntrypoint", "CachingFileManager", "DummyFileManager", "FileManager", "H5NetCDFStore", "H5netcdfBackendEntrypoint", "InMemoryDataStore", "NetCDF4BackendEntrypoint", "NetCDF4DataStore", "PydapBackendEntrypoint", "PydapDataStore", "ScipyBackendEntrypoint", "ScipyDataStore", "StoreBackendEntrypoint", "ZarrBackendEntrypoint", "ZarrStore", "list_engines", "refresh_engines", ] ���������������������������������������������������������������������xarray-2025.09.0/xarray/backends/api.py�������������������������������������������������������������0000664�0000000�0000000�00000301601�15056206164�0017550�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import importlib.util import os from collections.abc import ( Callable, Hashable, Iterable, Mapping, MutableMapping, Sequence, ) from functools import partial from io import IOBase from itertools import starmap from numbers import Number from typing import ( TYPE_CHECKING, Any, Literal, TypeVar, Union, cast, overload, ) import numpy as np from xarray import backends, conventions from xarray.backends import plugins from xarray.backends.common import ( AbstractDataStore, AbstractWritableDataStore, ArrayWriter, BytesIOProxy, T_PathFileOrDataStore, _find_absolute_paths, _normalize_path, ) from xarray.backends.locks import get_dask_scheduler from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.core import dtypes, indexing from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.indexes import Index from xarray.core.treenode import group_subtrees from xarray.core.types import NetcdfWriteModes, ReadBuffer, ZarrWriteModes from xarray.core.utils import emit_user_level_warning, is_remote_uri from xarray.namedarray.daskmanager import DaskManager from xarray.namedarray.parallelcompat import guess_chunkmanager from xarray.structure.chunks import _get_chunk, _maybe_chunk from xarray.structure.combine import ( _infer_concat_order_from_positions, _nested_combine, combine_by_coords, ) from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _COORDS_DEFAULT, _DATA_VARS_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: try: from dask.delayed import Delayed except ImportError: Delayed = None # type: ignore[assignment, misc] from xarray.backends.common import BackendEntrypoint from 
xarray.core.types import ( CombineAttrsOptions, CompatOptions, ErrorOptionsWithWarn, JoinOptions, NestedSequence, ReadBuffer, T_Chunks, ZarrStoreLike, ) T_NetcdfEngine = Literal["netcdf4", "scipy", "h5netcdf"] T_Engine = Union[ T_NetcdfEngine, Literal["pydap", "zarr"], # noqa: PYI051 type[BackendEntrypoint], str, # no nice typing support for custom backends None, ] T_NetcdfTypes = Literal[ "NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", "NETCDF3_CLASSIC" ] DATAARRAY_NAME = "__xarray_dataarray_name__" DATAARRAY_VARIABLE = "__xarray_dataarray_variable__" def get_default_netcdf_write_engine( format: T_NetcdfTypes | None, to_fileobject_or_memoryview: bool, ) -> Literal["netcdf4", "h5netcdf", "scipy"]: """Return the default netCDF library to use for writing a netCDF file.""" module_names = { "netcdf4": "netCDF4", "scipy": "scipy", "h5netcdf": "h5netcdf", } candidates = list(plugins.NETCDF_BACKENDS_ORDER) if format is not None: if format.upper().startswith("NETCDF3"): candidates.remove("h5netcdf") elif format.upper().startswith("NETCDF4"): candidates.remove("scipy") else: raise ValueError(f"unexpected {format=}") if to_fileobject_or_memoryview: candidates.remove("netcdf4") for engine in candidates: module_name = module_names[engine] if importlib.util.find_spec(module_name) is not None: return cast(Literal["netcdf4", "h5netcdf", "scipy"], engine) format_str = f" with {format=}" if format is not None else "" libraries = ", ".join(module_names[c] for c in candidates) raise ValueError( f"cannot write NetCDF files{format_str} because none of the suitable " f"backend libraries ({libraries}) are installed" ) def _validate_dataset_names(dataset: Dataset) -> None: """DataArray.name and Dataset keys must be a string or None""" def check_name(name: Hashable): if isinstance(name, str): if not name: raise ValueError( f"Invalid name {name!r} for DataArray or Dataset key: " "string must be length 1 or greater for " "serialization to netCDF or zarr files" ) elif name is not None: raise TypeError( f"Invalid name {name!r} for DataArray or Dataset key: " "must be either a string or None for serialization to netCDF " "or zarr files" ) for k in dataset.variables: check_name(k) def _validate_attrs(dataset, engine, invalid_netcdf=False): """`attrs` must have a string key and a value which is either: a number, a string, an ndarray, a list/tuple of numbers/strings, or a numpy.bool_. Notes ----- A numpy.bool_ is only allowed when using the h5netcdf engine with `invalid_netcdf=True`. """ valid_types = (str, Number, np.ndarray, np.number, list, tuple, bytes) if invalid_netcdf and engine == "h5netcdf": valid_types += (np.bool_,) def check_attr(name, value, valid_types): if isinstance(name, str): if not name: raise ValueError( f"Invalid name for attr {name!r}: string must be " "length 1 or greater for serialization to " "netCDF files" ) else: raise TypeError( f"Invalid name for attr: {name!r} must be a string for " "serialization to netCDF files" ) if not isinstance(value, valid_types): raise TypeError( f"Invalid value for attr {name!r}: {value!r}. For serialization to " "netCDF files, its value must be of one of the following types: " f"{', '.join([vtype.__name__ for vtype in valid_types])}" ) if isinstance(value, bytes) and engine == "h5netcdf": try: value.decode("utf-8") except UnicodeDecodeError as e: raise ValueError( f"Invalid value provided for attribute '{name!r}': {value!r}. " "Only binary data derived from UTF-8 encoded strings is allowed " f"for the '{engine}' engine. Consider using the 'netcdf4' engine." 
) from e if b"\x00" in value: raise ValueError( f"Invalid value provided for attribute '{name!r}': {value!r}. " f"Null characters are not permitted for the '{engine}' engine. " "Consider using the 'netcdf4' engine." ) # Check attrs on the dataset itself for k, v in dataset.attrs.items(): check_attr(k, v, valid_types) # Check attrs on each variable within the dataset for variable in dataset.variables.values(): for k, v in variable.attrs.items(): check_attr(k, v, valid_types) def _sanitize_unlimited_dims(dataset, unlimited_dims): msg_origin = "unlimited_dims-kwarg" if unlimited_dims is None: unlimited_dims = dataset.encoding.get("unlimited_dims", None) msg_origin = "dataset.encoding" if unlimited_dims is not None: if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable): unlimited_dims = [unlimited_dims] else: unlimited_dims = list(unlimited_dims) dataset_dims = set(dataset.dims) unlimited_dims = set(unlimited_dims) if undeclared_dims := (unlimited_dims - dataset_dims): msg = ( f"Unlimited dimension(s) {undeclared_dims!r} declared in {msg_origin!r}, " f"but not part of current dataset dimensions. " f"Consider removing {undeclared_dims!r} from {msg_origin!r}." ) if msg_origin == "unlimited_dims-kwarg": raise ValueError(msg) else: emit_user_level_warning(msg) return unlimited_dims def _resolve_decoders_kwargs(decode_cf, open_backend_dataset_parameters, **decoders): for d in list(decoders): if decode_cf is False and d in open_backend_dataset_parameters: decoders[d] = False if decoders[d] is None: decoders.pop(d) return decoders def _get_mtime(filename_or_obj): # if passed an actual file path, augment the token with # the file modification time mtime = None try: path = os.fspath(filename_or_obj) except TypeError: path = None if path and not is_remote_uri(path): mtime = os.path.getmtime(os.path.expanduser(filename_or_obj)) return mtime def _protect_dataset_variables_inplace(dataset: Dataset, cache: bool) -> None: for name, variable in dataset.variables.items(): if name not in dataset._indexes: # no need to protect IndexVariable objects data: indexing.ExplicitlyIndexedNDArrayMixin data = indexing.CopyOnWriteArray(variable._data) if cache: data = indexing.MemoryCachedArray(data) variable.data = data def _protect_datatree_variables_inplace(tree: DataTree, cache: bool) -> None: for node in tree.subtree: _protect_dataset_variables_inplace(node.dataset, cache) def _finalize_store(writes, store): """Finalize this store by explicitly syncing and closing""" del writes # ensure writing is done first store.close() def delayed_close_after_writes(writes, store): import dask return dask.delayed(_finalize_store)(writes, store) def _multi_file_closer(closers): for closer in closers: closer() def load_dataset(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> Dataset: """Open, load into memory, and close a Dataset from a file or file-like object. This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs from `open_dataset` in that it loads the Dataset into memory, closes the file, and returns the Dataset. In contrast, `open_dataset` keeps the file handle open and lazy loads its contents. All parameters are passed directly to `open_dataset`. See that documentation for further details. Returns ------- dataset : Dataset The newly created Dataset. 
See Also -------- open_dataset """ if "cache" in kwargs: raise TypeError("cache has no effect in this context") with open_dataset(filename_or_obj, **kwargs) as ds: return ds.load() def load_dataarray(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataArray: """Open, load into memory, and close a DataArray from a file or file-like object containing a single data variable. This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs from `open_dataarray` in that it loads the Dataset into memory, closes the file, and returns the Dataset. In contrast, `open_dataarray` keeps the file handle open and lazy loads its contents. All parameters are passed directly to `open_dataarray`. See that documentation for further details. Returns ------- datarray : DataArray The newly created DataArray. See Also -------- open_dataarray """ if "cache" in kwargs: raise TypeError("cache has no effect in this context") with open_dataarray(filename_or_obj, **kwargs) as da: return da.load() def load_datatree(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataTree: """Open, load into memory, and close a DataTree from a file or file-like object. This is a thin wrapper around :py:meth:`~xarray.open_datatree`. It differs from `open_datatree` in that it loads the DataTree into memory, closes the file, and returns the DataTree. In contrast, `open_datatree` keeps the file handle open and lazy loads its contents. All parameters are passed directly to `open_datatree`. See that documentation for further details. Returns ------- datatree : DataTree The newly created DataTree. See Also -------- open_datatree """ if "cache" in kwargs: raise TypeError("cache has no effect in this context") with open_datatree(filename_or_obj, **kwargs) as dt: return dt.load() def _chunk_ds( backend_ds, filename_or_obj, engine, chunks, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, **extra_tokens, ): chunkmanager = guess_chunkmanager(chunked_array_type) # TODO refactor to move this dask-specific logic inside the DaskManager class if isinstance(chunkmanager, DaskManager): from dask.base import tokenize mtime = _get_mtime(filename_or_obj) token = tokenize(filename_or_obj, mtime, engine, chunks, **extra_tokens) name_prefix = "open_dataset-" else: # not used token = (None,) name_prefix = None variables = {} for name, var in backend_ds.variables.items(): var_chunks = _get_chunk(var, chunks, chunkmanager) variables[name] = _maybe_chunk( name, var, var_chunks, overwrite_encoded_chunks=overwrite_encoded_chunks, name_prefix=name_prefix, token=token, inline_array=inline_array, chunked_array_type=chunkmanager, from_array_kwargs=from_array_kwargs.copy(), ) return backend_ds._replace(variables) def _maybe_create_default_indexes(ds): to_index = { name: coord.variable for name, coord in ds.coords.items() if coord.dims == (name,) and name not in ds.xindexes } return ds.assign_coords(Coordinates(to_index)) def _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, create_default_indexes, **extra_tokens, ): if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}: raise ValueError( f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}." 
) _protect_dataset_variables_inplace(backend_ds, cache) if create_default_indexes: ds = _maybe_create_default_indexes(backend_ds) else: ds = backend_ds if chunks is not None: ds = _chunk_ds( ds, filename_or_obj, engine, chunks, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, **extra_tokens, ) ds.set_close(backend_ds._close) # Ensure source filename always stored in dataset object if "source" not in ds.encoding: path = getattr(filename_or_obj, "path", filename_or_obj) if isinstance(path, str | os.PathLike): ds.encoding["source"] = _normalize_path(path) return ds def _datatree_from_backend_datatree( backend_tree, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, create_default_indexes, **extra_tokens, ): if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}: raise ValueError( f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}." ) _protect_datatree_variables_inplace(backend_tree, cache) if create_default_indexes: tree = backend_tree.map_over_datasets(_maybe_create_default_indexes) else: tree = backend_tree if chunks is not None: tree = DataTree.from_dict( { path: _chunk_ds( node.dataset, filename_or_obj, engine, chunks, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, node=path, **extra_tokens, ) for path, [node] in group_subtrees(tree) }, name=tree.name, ) if create_default_indexes or chunks is not None: for path, [node] in group_subtrees(backend_tree): tree[path].set_close(node._close) # Ensure source filename always stored in dataset object if "source" not in tree.encoding: path = getattr(filename_or_obj, "path", filename_or_obj) if isinstance(path, str | os.PathLike): tree.encoding["source"] = _normalize_path(path) return tree def open_dataset( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | Mapping[str, bool] | None = None, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, use_cftime: bool | Mapping[str, bool] | None = None, concat_characters: bool | Mapping[str, bool] | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> Dataset: """Open and decode a dataset from a file or file-like object. Parameters ---------- filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with scipy.io.netcdf (only netCDF3 supported). Bytes, memoryview and file-like objects are opened by scipy.io.netcdf (netCDF3) or h5netcdf (netCDF4). engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\ , installed backend \ or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4". 
A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays or when large arrays are sliced before computation. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using the engine's preferred chunk size, generally identical to the format's chunk size. If not available, a single chunk for all arrays. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool or dict-like, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool, CFTimedeltaCoder, or dict-like, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. use_cftime: bool or dict-like, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. 
If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool or dict-like, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. Only existing variables can be set as coordinates. Missing variables will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. inline_array: bool, default: False How to include the array in the dask task graph. By default(``inline_array=False``) the array is included in a task by itself, and each chunk refers to that task by its key. With ``inline_array=True``, Dask will instead inline the array directly in the values of the task graph. See :py:func:`dask.array.from_array`. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the netCDF4 group in the given file to open given as a str,supported by "netcdf4", "h5netcdf", "zarr". - 'lock': resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by "netcdf4", "h5netcdf", "scipy". 
See engine open function for kwargs accepted by each specific engine. Returns ------- dataset : Dataset The newly created dataset. Notes ----- ``open_dataset`` opens the file with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- open_mfdataset """ if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj) if from_array_kwargs is None: from_array_kwargs = {} backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=backend.open_dataset_parameters, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None) backend_ds = backend.open_dataset( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) ds = _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, drop_variables=drop_variables, create_default_indexes=create_default_indexes, **decoders, **kwargs, ) return ds def open_dataarray( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | None = None, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None = None, decode_timedelta: bool | CFTimedeltaCoder | None = None, use_cftime: bool | None = None, concat_characters: bool | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> DataArray: """Open an DataArray from a file or file-like object containing a single data variable. This is designed to read netCDF files with only one data variable. If multiple variables are present then a ValueError is raised. Parameters ---------- filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with scipy.io.netcdf (only netCDF3 supported). Bytes, memoryview and file-like objects are opened by scipy.io.netcdf (netCDF3) or h5netcdf (netCDF4). engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\ , installed backend \ or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4". chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. 
This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using engine preferred chunks if exposed by the backend, otherwise with a single chunk for all arrays. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. This keyword may not be supported by all the backends. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. This keyword may not be supported by all the backends. 
decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. Only existing variables can be set as coordinates. Missing variables will be silently ignored. drop_variables: str or iterable of str, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. inline_array: bool, default: False How to include the array in the dask task graph. By default(``inline_array=False``) the array is included in a task by itself, and each chunk refers to that task by its key. With ``inline_array=True``, Dask will instead inline the array directly in the values of the task graph. See :py:func:`dask.array.from_array`. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. backend_kwargs: dict Additional keyword arguments passed on to the engine open function, equivalent to `**kwargs`. **kwargs: dict Additional keyword arguments passed on to the engine open function. For example: - 'group': path to the netCDF4 group in the given file to open given as a str,supported by "netcdf4", "h5netcdf", "zarr". - 'lock': resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. Supported by "netcdf4", "h5netcdf", "scipy". See engine open function for kwargs accepted by each specific engine. Notes ----- This is designed to be fully compatible with `DataArray.to_netcdf`. Saving using `DataArray.to_netcdf` and then loading with this function will produce an identical result. All parameters are passed directly to `xarray.open_dataset`. See that documentation for further details. 
See also -------- open_dataset """ dataset = open_dataset( filename_or_obj, decode_cf=decode_cf, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, engine=engine, chunks=chunks, cache=cache, drop_variables=drop_variables, create_default_indexes=create_default_indexes, inline_array=inline_array, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, backend_kwargs=backend_kwargs, use_cftime=use_cftime, decode_timedelta=decode_timedelta, **kwargs, ) if len(dataset.data_vars) != 1: if len(dataset.data_vars) == 0: msg = "Given file dataset contains no data variables." else: msg = ( "Given file dataset contains more than one data " "variable. Please read with xarray.open_dataset and " "then select the variable you want." ) raise ValueError(msg) else: (data_array,) = dataset.data_vars.values() data_array.set_close(dataset._close) # Reset names if they were changed during saving # to ensure that we can 'roundtrip' perfectly if DATAARRAY_NAME in dataset.attrs: data_array.name = dataset.attrs[DATAARRAY_NAME] del dataset.attrs[DATAARRAY_NAME] if data_array.name == DATAARRAY_VARIABLE: data_array.name = None return data_array def open_datatree( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | Mapping[str, bool] | None = None, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, use_cftime: bool | Mapping[str, bool] | None = None, concat_characters: bool | Mapping[str, bool] | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> DataTree: """ Open and decode a DataTree from a file or file-like object, creating one tree node for each group in the file. Parameters ---------- filename_or_obj : str, Path, file-like, bytes or DataStore Strings and Path objects are interpreted as a path to a netCDF file or Zarr store. Bytes and memoryview objects are interpreted as file contents. engine : {"netcdf4", "h5netcdf", "zarr", None}, \ installed backend or xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using the engine's preferred chunk size, generally identical to the format's chunk size. If not available, a single chunk for all arrays. See dask chunking for more details. 
cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool or dict-like, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool or dict-like, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. use_cftime: bool or dict-like, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool or dict-like, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. 
decode_coords : bool or {"coordinates", "all"}, optional
    Controls which variables are set as coordinate variables:

    - "coordinates" or True: Set variables referred to in the
      ``'coordinates'`` attribute of the datasets or individual variables
      as coordinate variables.
    - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
      other attributes as coordinate variables.

    Only existing variables can be set as coordinates. Missing variables
    will be silently ignored.
drop_variables: str or iterable of str, optional
    A variable or list of variables to exclude from being parsed from the
    dataset. This may be useful to drop variables with problems or
    inconsistent values.
create_default_indexes : bool, default: True
    If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
    which loads the coordinate data into memory. Set it to False if you want to
    avoid loading data into memory.

    Note that backends can still choose to create other indexes. If you want to
    control that, please refer to the backend's documentation.
inline_array: bool, default: False
    How to include the array in the dask task graph.
    By default (``inline_array=False``) the array is included in a task by
    itself, and each chunk refers to that task by its key. With
    ``inline_array=True``, Dask will instead inline the array directly
    in the values of the task graph. See :py:func:`dask.array.from_array`.
chunked_array_type: str, optional
    Which chunked array type to coerce this dataset's arrays to.
    Defaults to 'dask' if installed, else whatever is registered via the
    `ChunkManagerEntrypoint` system.
    Experimental API that should not be relied upon.
from_array_kwargs: dict
    Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array`
    method used to create chunked arrays, via whichever chunk manager is
    specified through the `chunked_array_type` kwarg.
    For example if :py:func:`dask.array.Array` objects are used for chunking,
    additional kwargs will be passed to :py:func:`dask.array.from_array`.
    Experimental API that should not be relied upon.
backend_kwargs: dict
    Additional keyword arguments passed on to the engine open function,
    equivalent to `**kwargs`.
**kwargs: dict
    Additional keyword arguments passed on to the engine open function.
    For example:

    - 'group': path to the group in the given file to open as the root group
      as a str.
    - 'lock': resource lock to use when reading data from disk. Only
      relevant when using dask or another form of parallelism. By default,
      appropriate locks are chosen to safely read and write files with the
      currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
      "scipy".

    See engine open function for kwargs accepted by each specific engine.

Returns
-------
tree : DataTree
    The newly created datatree.

Notes
-----
``open_datatree`` opens the file with read-only access. When you modify
values of a DataTree, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
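For example, assuming a hypothetical netCDF file ``"grouped.nc"`` that contains
a root group and a ``"/fine"`` subgroup, each group becomes one node of the
returned tree:

>>> tree = xr.open_datatree("grouped.nc")  # doctest: +SKIP
>>> tree["fine"].ds  # doctest: +SKIP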
See Also -------- xarray.open_groups xarray.open_dataset """ if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj) if from_array_kwargs is None: from_array_kwargs = {} backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=backend.open_dataset_parameters, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None) backend_tree = backend.open_datatree( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) tree = _datatree_from_backend_datatree( backend_tree, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, drop_variables=drop_variables, create_default_indexes=create_default_indexes, **decoders, **kwargs, ) return tree def open_groups( filename_or_obj: T_PathFileOrDataStore, *, engine: T_Engine = None, chunks: T_Chunks = None, cache: bool | None = None, decode_cf: bool | None = None, mask_and_scale: bool | Mapping[str, bool] | None = None, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, use_cftime: bool | Mapping[str, bool] | None = None, concat_characters: bool | Mapping[str, bool] | None = None, decode_coords: Literal["coordinates", "all"] | bool | None = None, drop_variables: str | Iterable[str] | None = None, create_default_indexes: bool = True, inline_array: bool = False, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, backend_kwargs: dict[str, Any] | None = None, **kwargs, ) -> dict[str, Dataset]: """ Open and decode a file or file-like object, creating a dictionary containing one xarray Dataset for each group in the file. Useful for an HDF file ("netcdf4" or "h5netcdf") containing many groups that are not alignable with their parents and cannot be opened directly with ``open_datatree``. It is encouraged to use this function to inspect your data, then make the necessary changes to make the structure coercible to a `DataTree` object before calling `DataTree.from_dict()` and proceeding with your analysis. Parameters ---------- filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore Strings and Path objects are interpreted as a path to a netCDF file or Zarr store. Bytes and memoryview objects are interpreted as file contents. engine : {"netcdf4", "h5netcdf", "zarr", None}, \ installed backend or xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``) can also be used. chunks : int, dict, 'auto' or None, default: None If provided, used to load the data into dask arrays. - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. 
- ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using the engine's preferred chunk size, generally identical to the format's chunk size. If not available, a single chunk for all arrays. See dask chunking for more details. cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool or dict-like, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_times : bool, CFDatetimeCoder or dict-like, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, use :py:class:`coders.CFDatetimeCoder` or leave them encoded as numbers. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. decode_timedelta : bool or dict-like, optional If True, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of ``decode_times``; if ``decode_times`` is a :py:class:`coders.CFDatetimeCoder` instance, this takes the form of a :py:class:`coders.CFTimedeltaCoder` instance with a matching ``time_unit``. This keyword may not be supported by all the backends. use_cftime: bool or dict-like, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. Pass a mapping, e.g. ``{"my_variable": False}``, to toggle this feature per-variable individually. This keyword may not be supported by all the backends. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. concat_characters : bool or dict-like, optional If True, concatenate along the last dimension of character arrays to form string arrays. 
    Dimensions will only be concatenated over (and removed) if they have no
    corresponding variable and if they are only used as the last dimension of
    character arrays.
    Pass a mapping, e.g. ``{"my_variable": False}``,
    to toggle this feature per-variable individually.
    This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
    Controls which variables are set as coordinate variables:

    - "coordinates" or True: Set variables referred to in the
      ``'coordinates'`` attribute of the datasets or individual variables
      as coordinate variables.
    - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
      other attributes as coordinate variables.

    Only existing variables can be set as coordinates. Missing variables
    will be silently ignored.
drop_variables: str or iterable of str, optional
    A variable or list of variables to exclude from being parsed from the
    dataset. This may be useful to drop variables with problems or
    inconsistent values.
create_default_indexes : bool, default: True
    If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
    which loads the coordinate data into memory. Set it to False if you want to
    avoid loading data into memory.

    Note that backends can still choose to create other indexes. If you want to
    control that, please refer to the backend's documentation.
inline_array: bool, default: False
    How to include the array in the dask task graph.
    By default (``inline_array=False``) the array is included in a task by
    itself, and each chunk refers to that task by its key. With
    ``inline_array=True``, Dask will instead inline the array directly
    in the values of the task graph. See :py:func:`dask.array.from_array`.
chunked_array_type: str, optional
    Which chunked array type to coerce this dataset's arrays to.
    Defaults to 'dask' if installed, else whatever is registered via the
    `ChunkManagerEntrypoint` system.
    Experimental API that should not be relied upon.
from_array_kwargs: dict
    Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array`
    method used to create chunked arrays, via whichever chunk manager is
    specified through the `chunked_array_type` kwarg.
    For example if :py:func:`dask.array.Array` objects are used for chunking,
    additional kwargs will be passed to :py:func:`dask.array.from_array`.
    Experimental API that should not be relied upon.
backend_kwargs: dict
    Additional keyword arguments passed on to the engine open function,
    equivalent to `**kwargs`.
**kwargs: dict
    Additional keyword arguments passed on to the engine open function.
    For example:

    - 'group': path to the group in the given file to open as the root group
      as a str.
    - 'lock': resource lock to use when reading data from disk. Only
      relevant when using dask or another form of parallelism. By default,
      appropriate locks are chosen to safely read and write files with the
      currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
      "scipy".

    See engine open function for kwargs accepted by each specific engine.

Returns
-------
groups : dict of str to xarray.Dataset
    The groups as Dataset objects

Notes
-----
``open_groups`` opens the file with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
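For example, assuming a hypothetical file ``"unaligned.nc"`` whose groups cannot
be opened directly with ``open_datatree``, the groups can be inspected and
adjusted (here with an illustrative ``rename``) before building a tree:

>>> groups = xr.open_groups("unaligned.nc")  # doctest: +SKIP
>>> fixed = {path: ds.rename({"x": "lon"}) for path, ds in groups.items()}  # doctest: +SKIP
>>> tree = xr.DataTree.from_dict(fixed)  # doctest: +SKIP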
See Also -------- xarray.open_datatree xarray.open_dataset xarray.DataTree.from_dict """ if cache is None: cache = chunks is None if backend_kwargs is not None: kwargs.update(backend_kwargs) if engine is None: engine = plugins.guess_engine(filename_or_obj) if from_array_kwargs is None: from_array_kwargs = {} backend = plugins.get_backend(engine) decoders = _resolve_decoders_kwargs( decode_cf, open_backend_dataset_parameters=(), mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, concat_characters=concat_characters, use_cftime=use_cftime, decode_coords=decode_coords, ) overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None) backend_groups = backend.open_groups_as_dict( filename_or_obj, drop_variables=drop_variables, **decoders, **kwargs, ) groups = { name: _dataset_from_backend_dataset( backend_ds, filename_or_obj, engine, chunks, cache, overwrite_encoded_chunks, inline_array, chunked_array_type, from_array_kwargs, drop_variables=drop_variables, create_default_indexes=create_default_indexes, **decoders, **kwargs, ) for name, backend_ds in backend_groups.items() } return groups _FLike = TypeVar("_FLike", bound=Union[str, ReadBuffer]) def _remove_path( paths: NestedSequence[_FLike], paths_to_remove: set[_FLike] ) -> NestedSequence[_FLike]: # Initialize an empty list to store the result result: list[Union[_FLike, NestedSequence[_FLike]]] = [] for item in paths: if isinstance(item, list): # If the current item is a list, recursively call remove_elements on it nested_result = _remove_path(item, paths_to_remove) if nested_result: # Only add non-empty lists to avoid adding empty lists result.append(nested_result) elif item not in paths_to_remove: # Add the item to the result if it is not in the set of elements to remove result.append(item) return result def open_mfdataset( paths: str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer], chunks: T_Chunks = None, concat_dim: ( str | DataArray | Index | Sequence[str] | Sequence[DataArray] | Sequence[Index] | None ) = None, compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, preprocess: Callable[[Dataset], Dataset] | None = None, engine: T_Engine = None, data_vars: Literal["all", "minimal", "different"] | None | list[str] | CombineKwargDefault = _DATA_VARS_DEFAULT, coords=_COORDS_DEFAULT, combine: Literal["by_coords", "nested"] = "by_coords", parallel: bool = False, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, attrs_file: str | os.PathLike | None = None, combine_attrs: CombineAttrsOptions = "override", errors: ErrorOptionsWithWarn = "raise", **kwargs, ) -> Dataset: """Open multiple files as a single dataset. If combine='by_coords' then the function ``combine_by_coords`` is used to combine the datasets into one before returning the result, and if combine='nested' then ``combine_nested`` is used. The filepaths must be structured according to which combining function is used, the details of which are given in the documentation for ``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'`` will be used. Requires dask to be installed. See documentation for details on dask [1]_. Global attributes from the ``attrs_file`` are used for the combined dataset. Parameters ---------- paths : str or nested sequence of paths Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of files to open. Paths can be given as strings or as pathlib Paths. 
If concatenation along more than one dimension is desired, then ``paths`` must be a nested list-of-lists (see ``combine_nested`` for details). (A string glob will be expanded to a 1-dimensional list.) chunks : int, dict, 'auto' or None, optional Dictionary with keys given by dimension names and values given by chunk sizes. In general, these should divide the dimensions of each dataset. If int, chunk each dimension by ``chunks``. By default, chunks will be chosen to load entire input files into memory at once. This has a major impact on performance: please see the full documentation for more details [2]_. This argument is evaluated on a per-file basis, so chunk sizes that span multiple files will be ignored. concat_dim : str, DataArray, Index or a Sequence of these or None, optional Dimensions to concatenate files along. You only need to provide this argument if ``combine='nested'``, and if any of the dimensions along which you want to concatenate is not a dimension in the original datasets, e.g., if you want to stack a collection of 2D arrays along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a particular dimension. Default is None, which for a 1D list of filepaths is equivalent to opening the files separately and then merging them with ``xarray.merge``. combine : {"by_coords", "nested"}, optional Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to combine all the data. Default is to use ``xarray.combine_by_coords``. compat : {"identical", "equals", "broadcast_equals", \ "no_conflicts", "override"}, default: "no_conflicts" String indicating how to compare variables of the same name for potential conflicts when merging: * "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. * "equals": all values and dimensions must be the same. * "identical": all values, dimensions and attributes must be the same. * "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. * "override": skip comparing and pick variable from first dataset preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in ``ds.encoding["source"]``. engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\ , installed backend \ or subclass of xarray.backends.BackendEntrypoint, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4". data_vars : {"minimal", "different", "all"} or list of str, default: "all" These data variables will be concatenated together: * "minimal": Only data variables in which the dimension already appears are included. * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the "minimal" data variables. coords : {"minimal", "different", "all"} or list of str, optional These coordinate variables will be concatenated together: * "minimal": Only coordinates in which the dimension already appears are included. 
* "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, in addition the "minimal" coordinates. parallel : bool, default: False If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer" String indicating how to combine differing indexes (excluding concat_dim) in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. attrs_file : str or path-like, optional Path of the file used to read global attributes from. By default global attributes are read from the first file provided, with wildcard matches sorted by filename. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. errors : {"raise", "warn", "ignore"}, default: "raise" String indicating how to handle errors in opening dataset. - "raise": invalid dataset will raise an exception. - "warn": a warning will be issued for each invalid dataset. - "ignore": invalid dataset will be ignored. **kwargs : optional Additional arguments passed on to :py:func:`xarray.open_dataset`. For an overview of some of the possible options, see the documentation of :py:func:`xarray.open_dataset` Returns ------- xarray.Dataset Notes ----- ``open_mfdataset`` opens files with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- combine_by_coords combine_nested open_dataset Examples -------- A user might want to pass additional arguments into ``preprocess`` when applying some operation to many individual files that are being opened. One route to do this is through the use of ``functools.partial``. >>> from functools import partial >>> def _preprocess(x, lon_bnds, lat_bnds): ... return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds)) ... 
>>> lon_bnds, lat_bnds = (-110, -105), (40, 45) >>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds) >>> ds = xr.open_mfdataset( ... "file_*.nc", concat_dim="time", preprocess=partial_func ... ) # doctest: +SKIP It is also possible to use any argument to ``open_dataset`` together with ``open_mfdataset``, such as for example ``drop_variables``: >>> ds = xr.open_mfdataset( ... "file.nc", drop_variables=["varname_1", "varname_2"] # any list of vars ... ) # doctest: +SKIP References ---------- .. [1] https://docs.xarray.dev/en/stable/dask.html .. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance """ paths = _find_absolute_paths(paths, engine=engine, **kwargs) if not paths: raise OSError("no files to open") paths1d: list[str | ReadBuffer] if combine == "nested": if isinstance(concat_dim, str | DataArray) or concat_dim is None: concat_dim = [concat_dim] # type: ignore[assignment] # This creates a flat list which is easier to iterate over, whilst # encoding the originally-supplied structure as "ids". # The "ids" are not used at all if combine='by_coords`. combined_ids_paths = _infer_concat_order_from_positions(paths) ids, paths1d = ( list(combined_ids_paths.keys()), list(combined_ids_paths.values()), ) elif concat_dim is not None: raise ValueError( "When combine='by_coords', passing a value for `concat_dim` has no " "effect. To manually combine along a specific dimension you should " "instead specify combine='nested' along with a value for `concat_dim`.", ) else: paths1d = paths # type: ignore[assignment] open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs) if parallel: import dask # wrap the open_dataset, getattr, and preprocess with delayed open_ = dask.delayed(open_dataset) getattr_ = dask.delayed(getattr) if preprocess is not None: preprocess = dask.delayed(preprocess) else: open_ = open_dataset getattr_ = getattr if errors not in ("raise", "warn", "ignore"): raise ValueError( f"'errors' must be 'raise', 'warn' or 'ignore', got '{errors}'" ) datasets = [] invalid_paths = set() for p in paths1d: try: ds = open_(p, **open_kwargs) datasets.append(ds) except Exception as e: if errors == "raise": raise elif errors == "warn": emit_user_level_warning(f"Could not open {p} due to {e}. 
Ignoring.") # remove invalid paths invalid_paths.add(p) if invalid_paths: paths = _remove_path(paths, invalid_paths) if combine == "nested": # Create new ids and paths based on removed items combined_ids_paths = _infer_concat_order_from_positions(paths) ids = list(combined_ids_paths.keys()) closers = [getattr_(ds, "_close") for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] if parallel: # calling compute here will return the datasets/file_objs lists, # the underlying datasets will still be stored as dask arrays datasets, closers = dask.compute(datasets, closers) # Combine all datasets, closing them in case of a ValueError try: if combine == "nested": # Combined nested list by successive concat and merge operations # along each dimension, using structure given by "ids" combined = _nested_combine( datasets, concat_dims=concat_dim, compat=compat, data_vars=data_vars, coords=coords, ids=ids, join=join, combine_attrs=combine_attrs, fill_value=dtypes.NA, ) elif combine == "by_coords": # Redo ordering from coordinates, ignoring how they were ordered # previously combined = combine_by_coords( datasets, compat=compat, data_vars=data_vars, coords=coords, join=join, combine_attrs=combine_attrs, ) else: raise ValueError( f"{combine} is an invalid option for the keyword argument ``combine``" ) except ValueError: for ds in datasets: ds.close() raise combined.set_close(partial(_multi_file_closer, closers)) # read global attributes from the attrs_file or from the first dataset if attrs_file is not None: if isinstance(attrs_file, os.PathLike): attrs_file = cast(str, os.fspath(attrs_file)) combined.attrs = datasets[paths1d.index(attrs_file)].attrs return combined WRITEABLE_STORES: dict[T_NetcdfEngine, Callable] = { "netcdf4": backends.NetCDF4DataStore.open, "scipy": backends.ScipyDataStore, "h5netcdf": backends.H5NetCDFStore.open, } def get_writable_netcdf_store( target, engine: T_NetcdfEngine, *, format: T_NetcdfTypes | None, mode: NetcdfWriteModes, autoclose: bool, invalid_netcdf: bool, auto_complex: bool | None, ) -> AbstractWritableDataStore: """Create a store for writing to a netCDF file.""" try: store_open = WRITEABLE_STORES[engine] except KeyError as err: raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}") from err if format is not None: format = format.upper() # type: ignore[assignment] kwargs = dict(autoclose=True) if autoclose else {} if invalid_netcdf: if engine == "h5netcdf": kwargs["invalid_netcdf"] = invalid_netcdf else: raise ValueError( f"unrecognized option 'invalid_netcdf' for engine {engine}" ) if auto_complex is not None: kwargs["auto_complex"] = auto_complex return store_open(target, mode=mode, format=format, **kwargs) # multifile=True returns writer and datastore @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, *, multifile: Literal[True], invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore]: ... 
# path=None writes to bytes or memoryview, depending on store @overload def to_netcdf( dataset: Dataset, path_or_file: None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, *, compute: Literal[False], multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed: ... # default return None @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | IOBase, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: Literal[True] = True, multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None: ... # if compute cannot be evaluated at type check time # we may get back either Delayed or None @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = False, multifile: Literal[False] = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed | None: ... # if multifile cannot be evaluated at type check time # we may get back either writer and datastore or Delayed or None @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = False, multifile: bool = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None: ... # Any @overload def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | IOBase | None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = False, multifile: bool = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore] | memoryview | Delayed | None: ... 
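# Illustrative sketch (hypothetical paths): taken together, the overloads above encode how
# the return type of to_netcdf depends on its arguments:
#
#     to_netcdf(ds, "out.nc")                  # writes to disk, returns None
#     to_netcdf(ds)                            # no target: returns a memoryview of the file
#     to_netcdf(ds, "out.nc", compute=False)   # returns a dask.delayed.Delayed
#     to_netcdf(ds, "out.nc", multifile=True)  # returns (ArrayWriter, AbstractDataStore)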
def to_netcdf( dataset: Dataset, path_or_file: str | os.PathLike | IOBase | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, multifile: bool = False, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> tuple[ArrayWriter, AbstractDataStore] | memoryview | Delayed | None: """This function creates an appropriate datastore for writing a dataset to disk as a netCDF file See `Dataset.to_netcdf` for full API docs. The ``multifile`` argument is only for the private use of save_mfdataset. """ if encoding is None: encoding = {} path_or_file = _normalize_path(path_or_file) if engine is None: to_fileobject_or_memoryview = not isinstance(path_or_file, str) engine = get_default_netcdf_write_engine(format, to_fileobject_or_memoryview) # validate Dataset keys, DataArray names, and attr keys/values _validate_dataset_names(dataset) _validate_attrs(dataset, engine, invalid_netcdf) # sanitize unlimited_dims unlimited_dims = _sanitize_unlimited_dims(dataset, unlimited_dims) # handle scheduler specific logic scheduler = get_dask_scheduler() have_chunks = any(v.chunks is not None for v in dataset.variables.values()) autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"] if autoclose and engine == "scipy": raise NotImplementedError( f"Writing netCDF files with the {engine} backend " f"is not currently supported with dask's {scheduler} scheduler" ) if path_or_file is None: if not compute: raise NotImplementedError( "to_netcdf() with compute=False is not yet implemented when " "returning a memoryview" ) target = BytesIOProxy() else: target = path_or_file # type: ignore[assignment] store = get_writable_netcdf_store( target, engine, mode=mode, format=format, autoclose=autoclose, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) if group is not None: store = store.get_child_store(group) writer = ArrayWriter() # TODO: figure out how to refactor this logic (here and in save_mfdataset) # to avoid this mess of conditionals try: # TODO: allow this work (setting up the file for writing array data) # to be parallelized with dask dump_to_store( dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims ) if autoclose: store.close() if multifile: return writer, store writes = writer.sync(compute=compute) finally: if not multifile: if compute: store.close() else: store.sync() if path_or_file is None: assert isinstance(target, BytesIOProxy) # created in this function return target.getbuffer() if not compute: return delayed_close_after_writes(writes, store) return None def dump_to_store( dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None ): """Store dataset contents to a backends.*DataStore object.""" if writer is None: writer = ArrayWriter() if encoding is None: encoding = {} variables, attrs = conventions.encode_dataset_coordinates(dataset) check_encoding = set() for k, enc in encoding.items(): # no need to shallow copy the variable again; that already happened # in encode_dataset_coordinates variables[k].encoding = enc check_encoding.add(k) if encoder: variables, attrs = encoder(variables, attrs) store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims) def save_mfdataset( datasets, paths, mode="w", format=None, groups=None, engine=None, compute=True, **kwargs, ): """Write multiple 
datasets to disk as netCDF files simultaneously. This function is intended for use with datasets consisting of dask.array objects, in which case it can write the multiple datasets to disk simultaneously using a shared thread pool. When not using dask, it is no different than calling ``to_netcdf`` repeatedly. Parameters ---------- datasets : list of Dataset List of datasets to save. paths : list of str or list of path-like objects List of paths to which to save each corresponding dataset. mode : {"w", "a"}, optional Write ("w") or append ("a") mode. If mode="w", any existing file at these locations will be overwritten. format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only netCDF 3 compatible API features. * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not handle 2+ GB files very well. All formats are supported by the netCDF4-python library. scipy.io.netcdf only supports the last two formats. The default format is NETCDF4 if you are saving a file to disk and have the netCDF4-python library available. Otherwise, xarray falls back to using scipy to write netCDF files and defaults to the NETCDF3_64BIT format (scipy does not support netCDF4). groups : list of str, optional Paths to the netCDF4 group in each corresponding file to which to save datasets (only works for format="NETCDF4"). The groups will be created if necessary. engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4" if writing to a file on disk. See `Dataset.to_netcdf` for additional information. compute : bool If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. **kwargs : dict, optional Additional arguments are passed along to ``to_netcdf``. Examples -------- Save a dataset into one netCDF per year of data: >>> ds = xr.Dataset( ... {"a": ("time", np.linspace(0, 1, 48))}, ... coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)}, ... ) >>> ds Size: 768B Dimensions: (time: 48) Coordinates: * time (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31 Data variables: a (time) float64 384B 0.0 0.02128 0.04255 ... 
0.9574 0.9787 1.0 >>> years, datasets = zip(*ds.groupby("time.year")) >>> paths = [f"{y}.nc" for y in years] >>> xr.save_mfdataset(datasets, paths) """ if mode == "w" and len(set(paths)) < len(paths): raise ValueError( "cannot use mode='w' when writing multiple datasets to the same path" ) for obj in datasets: if not isinstance(obj, Dataset): raise TypeError( "save_mfdataset only supports writing Dataset " f"objects, received type {type(obj)}" ) if groups is None: groups = [None] * len(datasets) if len({len(datasets), len(paths), len(groups)}) > 1: raise ValueError( "must supply lists of the same length for the " "datasets, paths and groups arguments to " "save_mfdataset" ) writers, stores = zip( *[ to_netcdf( ds, path, mode, format, group, engine, compute=compute, multifile=True, **kwargs, ) for ds, path, group in zip(datasets, paths, groups, strict=True) ], strict=True, ) try: writes = [w.sync(compute=compute) for w in writers] finally: for store in stores: if compute: store.close() else: store.sync() if not compute: import dask return dask.delayed( list(starmap(delayed_close_after_writes, zip(writes, stores, strict=True))) ) def get_writable_zarr_store( store: ZarrStoreLike | None = None, *, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, ) -> backends.ZarrStore: """Create a store for writing to Zarr.""" from xarray.backends.zarr import _choose_default_mode, _get_mappers kwargs, mapper, chunk_mapper = _get_mappers( storage_options=storage_options, store=store, chunk_store=chunk_store ) mode = _choose_default_mode(mode=mode, append_dim=append_dim, region=region) if mode == "r+": already_consolidated = consolidated consolidate_on_close = False else: already_consolidated = False consolidate_on_close = consolidated or consolidated is None return backends.ZarrStore.open_group( store=mapper, mode=mode, synchronizer=synchronizer, group=group, consolidated=already_consolidated, consolidate_on_close=consolidate_on_close, chunk_store=chunk_mapper, append_dim=append_dim, write_region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, zarr_version=zarr_version, zarr_format=zarr_format, write_empty=write_empty_chunks, **kwargs, ) # compute=True returns ZarrStore @overload def to_zarr( dataset: Dataset, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[True] = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> backends.ZarrStore: ... 
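# Illustrative sketch (hypothetical store path "out.zarr"): the overload above covers the
# default eager case and the one below the delayed case:
#
#     to_zarr(ds, "out.zarr", mode="w")                           # returns a backends.ZarrStore
#     delayed = to_zarr(ds, "out.zarr", mode="w", compute=False)  # returns a Delayed
#
# Appends and region writes go through the same entry point, e.g.
# to_zarr(ds_new, "out.zarr", append_dim="time") or region="auto".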
# compute=False returns dask.Delayed @overload def to_zarr( dataset: Dataset, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[False], consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> Delayed: ... def to_zarr( dataset: Dataset, store: ZarrStoreLike | None = None, chunk_store: MutableMapping | str | os.PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> backends.ZarrStore | Delayed: """This function creates an appropriate datastore for writing a dataset to a zarr ztore See `Dataset.to_zarr` for full API docs. """ # validate Dataset keys, DataArray names _validate_dataset_names(dataset) # Load empty arrays to avoid bug saving zero length dimensions (Issue #5741) # TODO: delete when min dask>=2023.12.1 # https://github.com/dask/dask/pull/10506 for v in dataset.variables.values(): if v.size == 0: v.load() if encoding is None: encoding = {} zstore = get_writable_zarr_store( store, chunk_store=chunk_store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, ) dataset = zstore._validate_and_autodetect_region(dataset) zstore._validate_encoding(encoding) writer = ArrayWriter() # TODO: figure out how to properly handle unlimited_dims try: dump_to_store(dataset, zstore, writer, encoding=encoding) writes = writer.sync( compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs ) finally: if compute: zstore.close() if not compute: return delayed_close_after_writes(writes, zstore) return zstore �������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/chunks.py����������������������������������������������������������0000664�0000000�0000000�00000025645�15056206164�0020305�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import numpy as np from xarray.core.datatree import Variable def align_nd_chunks( nd_var_chunks: tuple[tuple[int, ...], ...], nd_backend_chunks: tuple[tuple[int, ...], ...], ) -> tuple[tuple[int, ...], ...]: if 
len(nd_backend_chunks) != len(nd_var_chunks): raise ValueError( "The number of dimensions on the backend and the variable must be the same." ) nd_aligned_chunks: list[tuple[int, ...]] = [] for backend_chunks, var_chunks in zip( nd_backend_chunks, nd_var_chunks, strict=True ): # Validate that they have the same number of elements if sum(backend_chunks) != sum(var_chunks): raise ValueError( "The number of elements in the backend does not " "match the number of elements in the variable. " "This inconsistency should never occur at this stage." ) # Validate if the backend_chunks satisfy the condition that all the values # excluding the borders are equal if len(set(backend_chunks[1:-1])) > 1: raise ValueError( f"This function currently supports aligning chunks " f"only when backend chunks are of uniform size, excluding borders. " f"If you encounter this error, please report itβ€”this scenario should never occur " f"unless there is an internal misuse. " f"Backend chunks: {backend_chunks}" ) # The algorithm assumes that there are always two borders on the # Backend and the Array if not, the result is going to be the same # as the input, and there is nothing to optimize if len(backend_chunks) == 1: nd_aligned_chunks.append(backend_chunks) continue if len(var_chunks) == 1: nd_aligned_chunks.append(var_chunks) continue # Size of the chunk on the backend fixed_chunk = max(backend_chunks) # The ideal size of the chunks is the maximum of the two; this would avoid # that we use more memory than expected max_chunk = max(fixed_chunk, *var_chunks) # The algorithm assumes that the chunks on this array are aligned except the last one # because it can be considered a partial one aligned_chunks: list[int] = [] # For simplicity of the algorithm, let's transform the Array chunks in such a way that # we remove the partial chunks. To achieve this, we add artificial data to the borders t_var_chunks = list(var_chunks) t_var_chunks[0] += fixed_chunk - backend_chunks[0] t_var_chunks[-1] += fixed_chunk - backend_chunks[-1] # The unfilled_size is the amount of space that has not been filled on the last # processed chunk; this is equivalent to the amount of data that would need to be # added to a partial Zarr chunk to fill it up to the fixed_chunk size unfilled_size = 0 for var_chunk in t_var_chunks: # Ideally, we should try to preserve the original Dask chunks, but this is only # possible if the last processed chunk was aligned (unfilled_size == 0) ideal_chunk = var_chunk if unfilled_size: # If that scenario is not possible, the best option is to merge the chunks ideal_chunk = var_chunk + aligned_chunks[-1] while ideal_chunk: if not unfilled_size: # If the previous chunk is filled, let's add a new chunk # of size 0 that will be used on the merging step to simplify the algorithm aligned_chunks.append(0) if ideal_chunk > max_chunk: # If the ideal_chunk is bigger than the max_chunk, # we need to increase the last chunk as much as possible # but keeping it aligned, and then add a new chunk max_increase = max_chunk - aligned_chunks[-1] max_increase = ( max_increase - (max_increase - unfilled_size) % fixed_chunk ) aligned_chunks[-1] += max_increase else: # Perfect scenario where the chunks can be merged without any split. 
aligned_chunks[-1] = ideal_chunk ideal_chunk -= aligned_chunks[-1] unfilled_size = ( fixed_chunk - aligned_chunks[-1] % fixed_chunk ) % fixed_chunk # Now we have to remove the artificial data added to the borders for order in [-1, 1]: border_size = fixed_chunk - backend_chunks[::order][0] aligned_chunks = aligned_chunks[::order] aligned_chunks[0] -= border_size t_var_chunks = t_var_chunks[::order] t_var_chunks[0] -= border_size if ( len(aligned_chunks) >= 2 and aligned_chunks[0] + aligned_chunks[1] <= max_chunk and aligned_chunks[0] != t_var_chunks[0] ): # The artificial data added to the border can introduce inefficient chunks # on the borders, for that reason, we will check if we can merge them or not # Example: # backend_chunks = [6, 6, 1] # var_chunks = [6, 7] # t_var_chunks = [6, 12] # The ideal output should preserve the same var_chunks, but the previous loop # is going to produce aligned_chunks = [6, 6, 6] # And after removing the artificial data, we will end up with aligned_chunks = [6, 6, 1] # which is not ideal and can be merged into a single chunk aligned_chunks[1] += aligned_chunks[0] aligned_chunks = aligned_chunks[1:] t_var_chunks = t_var_chunks[::order] aligned_chunks = aligned_chunks[::order] nd_aligned_chunks.append(tuple(aligned_chunks)) return tuple(nd_aligned_chunks) def build_grid_chunks( size: int, chunk_size: int, region: slice | None = None, ) -> tuple[int, ...]: if region is None: region = slice(0, size) region_start = region.start or 0 # Generate the zarr chunks inside the region of this dim chunks_on_region = [chunk_size - (region_start % chunk_size)] chunks_on_region.extend([chunk_size] * ((size - chunks_on_region[0]) // chunk_size)) if (size - chunks_on_region[0]) % chunk_size != 0: chunks_on_region.append((size - chunks_on_region[0]) % chunk_size) return tuple(chunks_on_region) def grid_rechunk( v: Variable, enc_chunks: tuple[int, ...], region: tuple[slice, ...], ) -> Variable: nd_var_chunks = v.chunks if not nd_var_chunks: return v nd_grid_chunks = tuple( build_grid_chunks( sum(var_chunks), region=interval, chunk_size=chunk_size, ) for var_chunks, chunk_size, interval in zip( nd_var_chunks, enc_chunks, region, strict=True ) ) nd_aligned_chunks = align_nd_chunks( nd_var_chunks=nd_var_chunks, nd_backend_chunks=nd_grid_chunks, ) v = v.chunk(dict(zip(v.dims, nd_aligned_chunks, strict=True))) return v def validate_grid_chunks_alignment( nd_var_chunks: tuple[tuple[int, ...], ...] | None, enc_chunks: tuple[int, ...], backend_shape: tuple[int, ...], region: tuple[slice, ...], allow_partial_chunks: bool, name: str, ): if nd_var_chunks is None: return base_error = ( "Specified Zarr chunks encoding['chunks']={enc_chunks!r} for " "variable named {name!r} would overlap multiple Dask chunks. " "Check the chunk at position {var_chunk_pos}, which has a size of " "{var_chunk_size} on dimension {dim_i}. It is unaligned with " "backend chunks of size {chunk_size} in region {region}. " "Writing this array in parallel with Dask could lead to corrupted data. " "To resolve this issue, consider one of the following options: " "- Rechunk the array using `chunk()`. " "- Modify or delete `encoding['chunks']`. " "- Set `safe_chunks=False`. " "- Enable automatic chunks alignment with `align_chunks=True`." 
) for dim_i, chunk_size, var_chunks, interval, size in zip( range(len(enc_chunks)), enc_chunks, nd_var_chunks, region, backend_shape, strict=True, ): for i, chunk in enumerate(var_chunks[1:-1]): if chunk % chunk_size: raise ValueError( base_error.format( var_chunk_pos=i + 1, var_chunk_size=chunk, name=name, dim_i=dim_i, chunk_size=chunk_size, region=interval, enc_chunks=enc_chunks, ) ) interval_start = interval.start or 0 if len(var_chunks) > 1: # The first border size is the amount of data that needs to be updated on the # first chunk taking into account the region slice. first_border_size = chunk_size if allow_partial_chunks: first_border_size = chunk_size - interval_start % chunk_size if (var_chunks[0] - first_border_size) % chunk_size: raise ValueError( base_error.format( var_chunk_pos=0, var_chunk_size=var_chunks[0], name=name, dim_i=dim_i, chunk_size=chunk_size, region=interval, enc_chunks=enc_chunks, ) ) if not allow_partial_chunks: region_stop = interval.stop or size error_on_last_chunk = base_error.format( var_chunk_pos=len(var_chunks) - 1, var_chunk_size=var_chunks[-1], name=name, dim_i=dim_i, chunk_size=chunk_size, region=interval, enc_chunks=enc_chunks, ) if interval_start % chunk_size: # The last chunk which can also be the only one is a partial chunk # if it is not aligned at the beginning raise ValueError(error_on_last_chunk) if np.ceil(region_stop / chunk_size) == np.ceil(size / chunk_size): # If the region is covering the last chunk then check # if the reminder with the default chunk size # is equal to the size of the last chunk if var_chunks[-1] % chunk_size != size % chunk_size: raise ValueError(error_on_last_chunk) elif var_chunks[-1] % chunk_size: raise ValueError(error_on_last_chunk) �������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/common.py����������������������������������������������������������0000664�0000000�0000000�00000063537�15056206164�0020304�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import logging import os import time import traceback from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from dataclasses import dataclass from glob import glob from typing import ( TYPE_CHECKING, Any, ClassVar, Self, TypeVar, Union, overload, ) import numpy as np import pandas as pd from xarray.coding import strings, variables from xarray.coding.variables import SerializationWarning from xarray.conventions import cf_encoder from xarray.core import indexing from xarray.core.datatree import DataTree, Variable from xarray.core.types import ReadBuffer from xarray.core.utils import ( FrozenDict, NdimSizeLenMixin, attempt_import, emit_user_level_warning, is_remote_uri, ) from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.namedarray.utils import is_duck_dask_array if TYPE_CHECKING: from xarray.core.dataset import Dataset from xarray.core.types import NestedSequence T_Name = Union[Hashable, None] # Create a logger object, but don't add any handlers. Leave that to user code. 
logger = logging.getLogger(__name__) NONE_VAR_NAME = "__values__" T = TypeVar("T") @overload def _normalize_path(path: str | os.PathLike) -> str: ... @overload def _normalize_path(path: T) -> T: ... def _normalize_path(path: str | os.PathLike | T) -> str | T: """ Normalize pathlikes to string. Parameters ---------- path : Path to file. Examples -------- >>> from pathlib import Path >>> directory = Path(xr.backends.common.__file__).parent >>> paths_path = Path(directory).joinpath("comm*n.py") >>> paths_str = xr.backends.common._normalize_path(paths_path) >>> print([type(p) for p in (paths_str,)]) [<class 'str'>] """ if isinstance(path, os.PathLike): path = os.fspath(path) if isinstance(path, str) and not is_remote_uri(path): path = os.path.abspath(os.path.expanduser(path)) return path # type:ignore [return-value] @overload def _find_absolute_paths( paths: str | os.PathLike | Sequence[str | os.PathLike], **kwargs, ) -> list[str]: ... @overload def _find_absolute_paths( paths: ReadBuffer | Sequence[ReadBuffer], **kwargs, ) -> list[ReadBuffer]: ... @overload def _find_absolute_paths( paths: NestedSequence[str | os.PathLike], **kwargs ) -> NestedSequence[str]: ... @overload def _find_absolute_paths( paths: NestedSequence[ReadBuffer], **kwargs ) -> NestedSequence[ReadBuffer]: ... @overload def _find_absolute_paths( paths: str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer], **kwargs, ) -> NestedSequence[str | ReadBuffer]: ... def _find_absolute_paths( paths: str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer], **kwargs, ) -> NestedSequence[str | ReadBuffer]: """ Find absolute paths from the pattern. Parameters ---------- paths : Path(s) to file(s). Can include wildcards like * . **kwargs : Extra kwargs. Mainly for fsspec. Examples -------- >>> from pathlib import Path >>> directory = Path(xr.backends.common.__file__).parent >>> paths = str(Path(directory).joinpath("comm*n.py")) # Find common with wildcard >>> paths = xr.backends.common._find_absolute_paths(paths) >>> [Path(p).name for p in paths] ['common.py'] """ if isinstance(paths, str): if is_remote_uri(paths) and kwargs.get("engine") == "zarr": if TYPE_CHECKING: import fsspec else: fsspec = attempt_import("fsspec") fs, _, _ = fsspec.core.get_fs_token_paths( paths, mode="rb", storage_options=kwargs.get("backend_kwargs", {}).get( "storage_options", {} ), expand=False, ) tmp_paths = fs.glob(fs._strip_protocol(paths)) # finds directories return [fs.get_mapper(path) for path in tmp_paths] elif is_remote_uri(paths): raise ValueError( "cannot do wild-card matching for paths that are remote URLs " f"unless engine='zarr' is specified. Got paths: {paths}. " "Instead, supply paths as an explicit list of strings."
) else: return sorted(glob(_normalize_path(paths))) elif isinstance(paths, os.PathLike): return [_normalize_path(paths)] elif isinstance(paths, ReadBuffer): return [paths] def _normalize_path_list( lpaths: NestedSequence[str | os.PathLike | ReadBuffer], ) -> NestedSequence[str | ReadBuffer]: paths = [] for p in lpaths: if isinstance(p, str | os.PathLike): paths.append(_normalize_path(p)) elif isinstance(p, list): paths.append(_normalize_path_list(p)) # type: ignore[arg-type] else: paths.append(p) # type: ignore[arg-type] return paths return _normalize_path_list(paths) @dataclass class BytesIOProxy: """Proxy object for a write that a memoryview.""" getvalue: Callable[[], memoryview] | None = None def getbuffer(self) -> memoryview: """Get the value of this write as bytes or memory.""" if self.getvalue is None: raise ValueError("must set getvalue before fetching value") return self.getvalue() def _open_remote_file(file, mode, storage_options=None): import fsspec fs, _, paths = fsspec.get_fs_token_paths( file, mode=mode, storage_options=storage_options ) return fs.open(paths[0], mode=mode) def _encode_variable_name(name): if name is None: name = NONE_VAR_NAME return name def _decode_variable_name(name): if name == NONE_VAR_NAME: name = None return name def _iter_nc_groups(root, parent="/"): from xarray.core.treenode import NodePath parent = NodePath(parent) yield str(parent) for path, group in root.groups.items(): gpath = parent / path yield from _iter_nc_groups(group, parent=gpath) def find_root_and_group(ds): """Find the root and group name of a netCDF4/h5netcdf dataset.""" hierarchy = () while ds.parent is not None: hierarchy = (ds.name.split("/")[-1],) + hierarchy ds = ds.parent group = "/" + "/".join(hierarchy) return ds, group def collect_ancestor_dimensions(group) -> dict[str, int]: """Returns dimensions defined in parent groups. If dimensions are defined in multiple ancestors, use the size of the closest ancestor. """ dims = {} while (group := group.parent) is not None: for k, v in group.dimensions.items(): if k not in dims: dims[k] = len(v) return dims def datatree_from_dict_with_io_cleanup(groups_dict: Mapping[str, Dataset]) -> DataTree: """DataTree.from_dict with file clean-up.""" try: tree = DataTree.from_dict(groups_dict) except Exception: for ds in groups_dict.values(): ds.close() raise for path, ds in groups_dict.items(): tree[path].set_close(ds._close) return tree def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500): """ Robustly index an array, using retry logic with exponential backoff if any of the errors ``catch`` are raised. The initial_delay is measured in ms. With the default settings, the maximum delay will be in the range of 32-64 seconds. """ assert max_retries >= 0 for n in range(max_retries + 1): try: return array[key] except catch: if n == max_retries: raise base_delay = initial_delay * 2**n next_delay = base_delay + np.random.randint(base_delay) msg = ( f"getitem failed, waiting {next_delay} ms before trying again " f"({max_retries - n} tries remaining). 
Full traceback: {traceback.format_exc()}" ) logger.debug(msg) time.sleep(1e-3 * next_delay) class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed): __slots__ = () async def async_getitem(self, key: indexing.ExplicitIndexer) -> np.typing.ArrayLike: raise NotImplementedError("Backend does not support asynchronous loading") def get_duck_array(self, dtype: np.typing.DTypeLike = None): key = indexing.BasicIndexer((slice(None),) * self.ndim) return self[key] # type: ignore[index] async def async_get_duck_array(self, dtype: np.typing.DTypeLike = None): key = indexing.BasicIndexer((slice(None),) * self.ndim) return await self.async_getitem(key) class AbstractDataStore: __slots__ = () def get_child_store(self, group: str) -> Self: # pragma: no cover """Get a store corresponding to the indicated child group.""" raise NotImplementedError() def get_dimensions(self): # pragma: no cover raise NotImplementedError() def get_parent_dimensions(self): # pragma: no cover return {} def get_attrs(self): # pragma: no cover raise NotImplementedError() def get_variables(self): # pragma: no cover raise NotImplementedError() def get_encoding(self): return {} def load(self): """ This loads the variables and attributes simultaneously. A centralized loading function makes it easier to create data stores that do automatic encoding/decoding. For example:: class SuffixAppendingDataStore(AbstractDataStore): def load(self): variables, attributes = AbstractDataStore.load(self) variables = {"%s_suffix" % k: v for k, v in variables.items()} attributes = {"%s_suffix" % k: v for k, v in attributes.items()} return variables, attributes This function will be called anytime variables or attributes are requested, so care should be taken to make sure its fast. """ variables = FrozenDict( (_decode_variable_name(k), v) for k, v in self.get_variables().items() ) attributes = FrozenDict(self.get_attrs()) return variables, attributes def close(self): pass def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.close() T_PathFileOrDataStore = ( str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore ) class ArrayWriter: __slots__ = ("lock", "regions", "sources", "targets") def __init__(self, lock=None): self.sources = [] self.targets = [] self.regions = [] self.lock = lock def add(self, source, target, region=None): if is_chunked_array(source): self.sources.append(source) self.targets.append(target) self.regions.append(region) elif region: target[region] = source else: target[...] 
= source def sync(self, compute=True, chunkmanager_store_kwargs=None): if self.sources: chunkmanager = get_chunked_array_type(*self.sources) # TODO: consider wrapping targets with dask.delayed, if this makes # for any discernible difference in performance, e.g., # targets = [dask.delayed(t) for t in self.targets] if chunkmanager_store_kwargs is None: chunkmanager_store_kwargs = {} delayed_store = chunkmanager.store( self.sources, self.targets, lock=self.lock, compute=compute, flush=True, regions=self.regions, **chunkmanager_store_kwargs, ) self.sources = [] self.targets = [] self.regions = [] return delayed_store class AbstractWritableDataStore(AbstractDataStore): __slots__ = () def encode(self, variables, attributes): """ Encode the variables and attributes in this store Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs Returns ------- variables : dict-like attributes : dict-like """ encoded_variables = {} for k, v in variables.items(): try: encoded_variables[k] = self.encode_variable(v) except Exception as e: e.add_note(f"Raised while encoding variable {k!r} with value {v!r}") raise encoded_attributes = {} for k, v in attributes.items(): try: encoded_attributes[k] = self.encode_attribute(v) except Exception as e: e.add_note(f"Raised while encoding attribute {k!r} with value {v!r}") raise return encoded_variables, encoded_attributes def encode_variable(self, v, name=None): """encode one variable""" return v def encode_attribute(self, a): """encode one attribute""" return a def prepare_variable(self, name, variable, check_encoding, unlimited_dims): raise NotImplementedError() def set_dimension(self, dim, length, is_unlimited): # pragma: no cover raise NotImplementedError() def set_attribute(self, k, v): # pragma: no cover raise NotImplementedError() def set_variable(self, k, v): # pragma: no cover raise NotImplementedError() def store_dataset(self, dataset): """ in stores, variables are all variables AND coordinates in xarray.Dataset variables are variables NOT coordinates, so here we pass the whole dataset in instead of doing dataset.variables """ self.store(dataset, dataset.attrs) def store( self, variables, attributes, check_encoding_set=frozenset(), writer=None, unlimited_dims=None, ): """ Top level method for putting data on this store, this method: - encodes variables/attributes - sets dimensions - sets variables Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if writer is None: writer = ArrayWriter() variables, attributes = self.encode(variables, attributes) self.set_attributes(attributes) self.set_dimensions(variables, unlimited_dims=unlimited_dims) self.set_variables( variables, check_encoding_set, writer, unlimited_dims=unlimited_dims ) def set_attributes(self, attributes): """ This provides a centralized method to set the dataset attributes on the data store. 
Parameters ---------- attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs """ for k, v in attributes.items(): self.set_attribute(k, v) def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None): """ This provides a centralized method to set the variables on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ for vn, v in variables.items(): name = _encode_variable_name(vn) check = vn in check_encoding_set target, source = self.prepare_variable( name, v, check, unlimited_dims=unlimited_dims ) writer.add(source, target) def set_dimensions(self, variables, unlimited_dims=None): """ This provides a centralized method to set the dimensions on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ if unlimited_dims is None: unlimited_dims = set() parent_dims = self.get_parent_dimensions() existing_dims = self.get_dimensions() dims = {} for v in unlimited_dims: # put unlimited_dims first dims[v] = None for v in variables.values(): dims |= v.sizes for dim, length in dims.items(): if dim in existing_dims and length != existing_dims[dim]: raise ValueError( "Unable to update size for existing dimension" f"{dim!r} ({length} != {existing_dims[dim]})" ) elif dim not in existing_dims and length != parent_dims.get(dim): is_unlimited = dim in unlimited_dims self.set_dimension(dim, length, is_unlimited) def sync(self): """Write all buffered data to disk.""" raise NotImplementedError() def _infer_dtype(array, name=None): """Given an object array with no missing values, infer its dtype from all elements.""" if array.dtype.kind != "O": raise TypeError("infer_type must be called on a dtype=object array") if array.size == 0: return np.dtype(float) native_dtypes = set(np.vectorize(type, otypes=[object])(array.ravel())) if len(native_dtypes) > 1 and native_dtypes != {bytes, str}: native_dtype_names = ", ".join(x.__name__ for x in native_dtypes) raise ValueError( f"unable to infer dtype on variable {name!r}; object array " f"contains mixed native types: {native_dtype_names}" ) element = array[(0,) * array.ndim] # We use the base types to avoid subclasses of bytes and str (which might # not play nice with e.g. hdf5 datatypes), such as those from numpy if isinstance(element, bytes): return strings.create_vlen_dtype(bytes) elif isinstance(element, str): return strings.create_vlen_dtype(str) dtype = np.array(element).dtype if dtype.kind != "O": return dtype raise ValueError( f"unable to infer dtype on variable {name!r}; xarray " "cannot serialize arbitrary Python objects" ) def _copy_with_dtype(data, dtype: np.typing.DTypeLike): """Create a copy of an array with the given dtype. We use this instead of np.array() to ensure that custom object dtypes end up on the resulting array. """ result = np.empty(data.shape, dtype) result[...] 
= data return result def ensure_dtype_not_object(var: Variable, name: T_Name = None) -> Variable: if var.dtype.kind == "O": dims, data, attrs, encoding = variables.unpack_for_encoding(var) # leave vlen dtypes unchanged if strings.check_vlen_dtype(data.dtype) is not None: return var if is_duck_dask_array(data): emit_user_level_warning( f"variable {name} has data in the form of a dask array with " "dtype=object, which means it is being loaded into memory " "to determine a data type that can be safely stored on disk. " "To avoid this, coerce this variable to a fixed-size dtype " "with astype() before saving it.", category=SerializationWarning, ) data = data.compute() missing = pd.isnull(data) if missing.any(): # nb. this will fail for dask.array data non_missing_values = data[~missing] inferred_dtype = _infer_dtype(non_missing_values, name) # There is no safe bit-pattern for NA in typical binary string # formats, we so can't set a fill_value. Unfortunately, this means # we can't distinguish between missing values and empty strings. fill_value: bytes | str if strings.is_bytes_dtype(inferred_dtype): fill_value = b"" elif strings.is_unicode_dtype(inferred_dtype): fill_value = "" else: # insist on using float for numeric values if not np.issubdtype(inferred_dtype, np.floating): inferred_dtype = np.dtype(float) fill_value = inferred_dtype.type(np.nan) data = _copy_with_dtype(data, dtype=inferred_dtype) data[missing] = fill_value else: data = _copy_with_dtype(data, dtype=_infer_dtype(data, name)) assert data.dtype.kind != "O" or data.dtype.metadata var = Variable(dims, data, attrs, encoding, fastpath=True) return var class WritableCFDataStore(AbstractWritableDataStore): __slots__ = () def encode(self, variables, attributes): # All NetCDF files get CF encoded by default, without this attempting # to write times, for example, would fail. variables, attributes = cf_encoder(variables, attributes) variables = { k: ensure_dtype_not_object(v, name=k) for k, v in variables.items() } return super().encode(variables, attributes) class BackendEntrypoint: """ ``BackendEntrypoint`` is a class container and it is the main interface for the backend plugins, see :ref:`RST backend_entrypoint`. It shall implement: - ``open_dataset`` method: it shall implement reading from file, variables decoding and it returns an instance of :py:class:`~xarray.Dataset`. It shall take in input at least ``filename_or_obj`` argument and ``drop_variables`` keyword argument. For more details see :ref:`RST open_dataset`. - ``guess_can_open`` method: it shall return ``True`` if the backend is able to open ``filename_or_obj``, ``False`` otherwise. The implementation of this method is not mandatory. - ``open_datatree`` method: it shall implement reading from file, variables decoding and it returns an instance of :py:class:`~datatree.DataTree`. It shall take in input at least ``filename_or_obj`` argument. The implementation of this method is not mandatory. For more details see . Attributes ---------- open_dataset_parameters : tuple, default: None A list of ``open_dataset`` method parameters. The setting of this attribute is not mandatory. description : str, default: "" A short string describing the engine. The setting of this attribute is not mandatory. url : str, default: "" A string with the URL to the backend's documentation. The setting of this attribute is not mandatory. 
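Examples
--------
A minimal, illustrative sketch of a third-party entrypoint; the class name,
the ``".my"`` extension and the reading logic below are hypothetical and only
meant to show where the required pieces go::

    class MyBackendEntrypoint(BackendEntrypoint):
        description = "Open hypothetical .my files in Xarray"

        def guess_can_open(self, filename_or_obj):
            return str(filename_or_obj).endswith(".my")

        def open_dataset(self, filename_or_obj, *, drop_variables=None):
            # Read filename_or_obj with your own I/O code and return a Dataset.
            raise NotImplementedError()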
""" open_dataset_parameters: ClassVar[tuple | None] = None description: ClassVar[str] = "" url: ClassVar[str] = "" def __repr__(self) -> str: txt = f"<{type(self).__name__}>" if self.description: txt += f"\n {self.description}" if self.url: txt += f"\n Learn more at {self.url}" return txt def open_dataset( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> Dataset: """ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`. """ raise NotImplementedError() def guess_can_open( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, ) -> bool: """ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`. """ return False def open_datatree( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> DataTree: """ Backend open_datatree method used by Xarray in :py:func:`~xarray.open_datatree`. """ raise NotImplementedError() def open_groups_as_dict( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> dict[str, Dataset]: """ Opens a dictionary mapping from group names to Datasets. Called by :py:func:`~xarray.open_groups`. This function exists to provide a universal way to open all groups in a file, before applying any additional consistency checks or requirements necessary to create a `DataTree` object (typically done using :py:meth:`~xarray.DataTree.from_dict`). """ raise NotImplementedError() # mapping of engine name to (module name, BackendEntrypoint Class) BACKEND_ENTRYPOINTS: dict[str, tuple[str | None, type[BackendEntrypoint]]] = {} �����������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/file_manager.py����������������������������������������������������0000664�0000000�0000000�00000030471�15056206164�0021414�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import atexit import contextlib import io import threading import uuid import warnings from collections.abc import Hashable from typing import Any from xarray.backends.locks import acquire from xarray.backends.lru_cache import LRUCache from xarray.core import utils from xarray.core.options import OPTIONS # Global cache for storing open files. FILE_CACHE: LRUCache[Any, io.IOBase] = LRUCache( maxsize=OPTIONS["file_cache_maxsize"], on_evict=lambda k, v: v.close() ) assert FILE_CACHE.maxsize, "file cache must be at least size one" REF_COUNTS: dict[Any, int] = {} _DEFAULT_MODE = utils.ReprObject("") class FileManager: """Manager for acquiring and closing a file object. Use FileManager subclasses (CachingFileManager in particular) on backend storage classes to automatically handle issues related to keeping track of many open files and transferring them between multiple processes. 
""" def acquire(self, needs_lock=True): """Acquire the file object from this manager.""" raise NotImplementedError() def acquire_context(self, needs_lock=True): """Context manager for acquiring a file. Yields a file object. The context manager unwinds any actions taken as part of acquisition (i.e., removes it from any cache) if an exception is raised from the context. It *does not* automatically close the file. """ raise NotImplementedError() def close(self, needs_lock=True): """Close the file object associated with this manager, if needed.""" raise NotImplementedError() class CachingFileManager(FileManager): """Wrapper for automatically opening and closing file objects. Unlike files, CachingFileManager objects can be safely pickled and passed between processes. They should be explicitly closed to release resources, but a per-process least-recently-used cache for open files ensures that you can safely create arbitrarily large numbers of FileManager objects. Don't directly close files acquired from a FileManager. Instead, call FileManager.close(), which ensures that closed files are removed from the cache as well. Example usage:: manager = FileManager(open, "example.txt", mode="w") f = manager.acquire() f.write(...) manager.close() # ensures file is closed Note that as long as previous files are still cached, acquiring a file multiple times from the same FileManager is essentially free:: f1 = manager.acquire() f2 = manager.acquire() assert f1 is f2 """ def __init__( self, opener, *args, mode=_DEFAULT_MODE, kwargs=None, lock=None, cache=None, manager_id: Hashable | None = None, ref_counts=None, ): """Initialize a CachingFileManager. The cache, manager_id and ref_counts arguments exist solely to facilitate dependency injection, and should only be set for tests. Parameters ---------- opener : callable Function that when called like ``opener(*args, **kwargs)`` returns an open file object. The file object must implement a ``close()`` method. *args Positional arguments for opener. A ``mode`` argument should be provided as a keyword argument (see below). All arguments must be hashable. mode : optional If provided, passed as a keyword argument to ``opener`` along with ``**kwargs``. ``mode='w' `` has special treatment: after the first call it is replaced by ``mode='a'`` in all subsequent function to avoid overriding the newly created file. kwargs : dict, optional Keyword arguments for opener, excluding ``mode``. All values must be hashable. lock : duck-compatible threading.Lock, optional Lock to use when modifying the cache inside acquire() and close(). By default, uses a new threading.Lock() object. If set, this object should be pickleable. cache : MutableMapping, optional Mapping to use as a cache for open files. By default, uses xarray's global LRU file cache. Because ``cache`` typically points to a global variable and contains non-picklable file objects, an unpickled FileManager objects will be restored with the default cache. manager_id : hashable, optional Identifier for this CachingFileManager. ref_counts : dict, optional Optional dict to use for keeping track the number of references to the same file. """ self._opener = opener self._args = args self._mode = mode self._kwargs = {} if kwargs is None else dict(kwargs) self._use_default_lock = lock is None or lock is False self._lock = threading.Lock() if self._use_default_lock else lock # cache[self._key] stores the file associated with this object. 
if cache is None: cache = FILE_CACHE self._cache = cache if manager_id is None: # Each call to CachingFileManager should separately open files. manager_id = str(uuid.uuid4()) self._manager_id = manager_id self._key = self._make_key() # ref_counts[self._key] stores the number of CachingFileManager objects # in memory referencing this same file. We use this to know if we can # close a file when the manager is deallocated. if ref_counts is None: ref_counts = REF_COUNTS self._ref_counter = _RefCounter(ref_counts) self._ref_counter.increment(self._key) def _make_key(self): """Make a key for caching files in the LRU cache.""" value = ( self._opener, self._args, "a" if self._mode == "w" else self._mode, tuple(sorted(self._kwargs.items())), self._manager_id, ) return _HashedSequence(value) @contextlib.contextmanager def _optional_lock(self, needs_lock): """Context manager for optionally acquiring a lock.""" if needs_lock: with self._lock: yield else: yield def acquire(self, needs_lock=True): """Acquire a file object from the manager. A new file is only opened if it has expired from the least-recently-used cache. This method uses a lock, which ensures that it is thread-safe. You can safely acquire a file in multiple threads at the same time, as long as the underlying file object is thread-safe. Returns ------- file-like An open file object, as returned by ``opener(*args, **kwargs)``. """ file, _ = self._acquire_with_cache_info(needs_lock) return file @contextlib.contextmanager def acquire_context(self, needs_lock=True): """Context manager for acquiring a file.""" file, cached = self._acquire_with_cache_info(needs_lock) try: yield file except Exception: if not cached: self.close(needs_lock) raise def _acquire_with_cache_info(self, needs_lock=True): """Acquire a file, returning the file and whether it was cached.""" with self._optional_lock(needs_lock): try: file = self._cache[self._key] except KeyError: kwargs = self._kwargs if self._mode is not _DEFAULT_MODE: kwargs = kwargs.copy() kwargs["mode"] = self._mode file = self._opener(*self._args, **kwargs) if self._mode == "w": # ensure file doesn't get overridden when opened again self._mode = "a" self._cache[self._key] = file return file, False else: return file, True def close(self, needs_lock=True): """Explicitly close any associated file object (if necessary).""" # TODO: remove needs_lock if/when we have a reentrant lock in # dask.distributed: https://github.com/dask/dask/issues/3832 with self._optional_lock(needs_lock): default = None file = self._cache.pop(self._key, default) if file is not None: file.close() def __del__(self) -> None: # If we're the only CachingFileManager referencing an unclosed file, # remove it from the cache upon garbage collection. # # We keep track of our own reference count because we don't want to # close files if another identical file manager needs it. This can # happen if a CachingFileManager is pickled and unpickled without # closing the original file. ref_count = self._ref_counter.decrement(self._key) if not ref_count and self._key in self._cache: if acquire(self._lock, blocking=False): # Only close files if we can do so immediately. try: self.close(needs_lock=False) finally: self._lock.release() if OPTIONS["warn_for_unclosed_files"]: warnings.warn( f"deallocating {self}, but file is not already closed. " "This may indicate a bug.", RuntimeWarning, stacklevel=2, ) def __getstate__(self): """State for pickling.""" # cache is intentionally omitted: we don't want to try to serialize # these global objects.
lock = None if self._use_default_lock else self._lock return ( self._opener, self._args, self._mode, self._kwargs, lock, self._manager_id, ) def __setstate__(self, state) -> None: """Restore from a pickle.""" opener, args, mode, kwargs, lock, manager_id = state self.__init__( # type: ignore[misc] opener, *args, mode=mode, kwargs=kwargs, lock=lock, manager_id=manager_id ) def __repr__(self) -> str: args_string = ", ".join(map(repr, self._args)) if self._mode is not _DEFAULT_MODE: args_string += f", mode={self._mode!r}" return ( f"{type(self).__name__}({self._opener!r}, {args_string}, " f"kwargs={self._kwargs}, manager_id={self._manager_id!r})" ) @atexit.register def _remove_del_method(): # We don't need to close unclosed files at program exit, and may not be able # to, because Python is cleaning up imports / globals. del CachingFileManager.__del__ class _RefCounter: """Class for keeping track of reference counts.""" def __init__(self, counts): self._counts = counts self._lock = threading.Lock() def increment(self, name): with self._lock: count = self._counts[name] = self._counts.get(name, 0) + 1 return count def decrement(self, name): with self._lock: count = self._counts[name] - 1 if count: self._counts[name] = count else: del self._counts[name] return count class _HashedSequence(list): """Speedup repeated look-ups by caching hash values. Based on what Python uses internally in functools.lru_cache. Python doesn't perform this optimization automatically: https://bugs.python.org/issue1462796 """ def __init__(self, tuple_value): self[:] = tuple_value self.hashvalue = hash(tuple_value) def __hash__(self): return self.hashvalue class DummyFileManager(FileManager): """FileManager that simply wraps an open file in the FileManager interface.""" def __init__(self, value, *, close=None): if close is None: close = value.close self._value = value self._close = close def acquire(self, needs_lock=True): del needs_lock # ignored return self._value @contextlib.contextmanager def acquire_context(self, needs_lock=True): del needs_lock yield self._value def close(self, needs_lock=True): del needs_lock # ignored self._close() �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/h5netcdf_.py�������������������������������������������������������0000664�0000000�0000000�00000051331�15056206164�0020640�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import io import os from collections.abc import Iterable from typing import TYPE_CHECKING, Any, Self import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, BackendEntrypoint, BytesIOProxy, T_PathFileOrDataStore, WritableCFDataStore, _normalize_path, _open_remote_file, collect_ancestor_dimensions, datatree_from_dict_with_io_cleanup, find_root_and_group, ) from xarray.backends.file_manager import ( CachingFileManager, DummyFileManager, FileManager, ) from xarray.backends.locks import HDF5_LOCK, combine_locks, ensure_lock, get_write_lock from xarray.backends.netCDF4_ import ( BaseNetCDF4Array, _build_and_get_enum, 
_encode_nc4_variable, _ensure_no_forward_slash_in_name, _extract_nc4_variable_encoding, _get_datatype, _nc4_require_group, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.utils import ( FrozenDict, emit_user_level_warning, is_remote_uri, read_magic_number_from_file, try_read_magic_number_from_file_or_path, ) from xarray.core.variable import Variable if TYPE_CHECKING: import h5netcdf from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ReadBuffer class H5NetCDFArrayWrapper(BaseNetCDF4Array): def get_array(self, needs_lock=True): ds = self.datastore._acquire(needs_lock) return ds.variables[self.variable_name] def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem ) def _getitem(self, key): with self.datastore.lock: array = self.get_array(needs_lock=False) return array[key] def _read_attributes(h5netcdf_var): # GH451 # to ensure conventions decoding works properly on Python 3, decode all # bytes attributes to strings attrs = {} for k, v in h5netcdf_var.attrs.items(): if k not in ["_FillValue", "missing_value"] and isinstance(v, bytes): try: v = v.decode("utf-8") except UnicodeDecodeError: emit_user_level_warning( f"'utf-8' codec can't decode bytes for attribute " f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, " f"returning bytes undecoded.", UnicodeWarning, ) attrs[k] = v return attrs _extract_h5nc_encoding = functools.partial( _extract_nc4_variable_encoding, lsd_okay=False, h5py_okay=True, backend="h5netcdf", unlimited_dims=None, ) def _h5netcdf_create_group(dataset, name): return dataset.create_group(name) class H5NetCDFStore(WritableCFDataStore): """Store for reading and writing data via h5netcdf""" __slots__ = ( "_filename", "_group", "_manager", "_mode", "autoclose", "format", "is_remote", "lock", ) def __init__( self, manager: FileManager | h5netcdf.File | h5netcdf.Group, group=None, mode=None, lock=HDF5_LOCK, autoclose=False, ): import h5netcdf if isinstance(manager, h5netcdf.File | h5netcdf.Group): if group is None: root, group = find_root_and_group(manager) else: if type(manager) is not h5netcdf.File: raise ValueError( "must supply a h5netcdf.File if the group argument is provided" ) root = manager manager = DummyFileManager(root) self._manager = manager self._group = group self._mode = mode self.format = None # todo: utilizing find_root_and_group seems a bit clunky # making filename available on h5netcdf.Group seems better self._filename = find_root_and_group(self.ds)[0].filename self.is_remote = is_remote_uri(self._filename) self.lock = ensure_lock(lock) self.autoclose = autoclose def get_child_store(self, group: str) -> Self: if self._group is not None: group = os.path.join(self._group, group) return type(self)( self._manager, group=group, mode=self._mode, lock=self.lock, autoclose=self.autoclose, ) @classmethod def open( cls, filename, mode="r", format=None, group=None, lock=None, autoclose=False, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, storage_options: dict[str, Any] | None = None, ): import h5netcdf if isinstance(filename, str) and is_remote_uri(filename) and driver is None: mode_ = "rb" if mode == "r" else mode filename = _open_remote_file( filename, mode=mode_, storage_options=storage_options ) if isinstance(filename, BytesIOProxy): source = filename filename = 
io.BytesIO() source.getvalue = filename.getbuffer if isinstance(filename, io.IOBase) and mode == "r": magic_number = read_magic_number_from_file(filename) if not magic_number.startswith(b"\211HDF\r\n\032\n"): raise ValueError( f"{magic_number!r} is not the signature of a valid netCDF4 file" ) if format not in [None, "NETCDF4"]: raise ValueError("invalid format for h5netcdf backend") kwargs = { "invalid_netcdf": invalid_netcdf, "decode_vlen_strings": decode_vlen_strings, "driver": driver, } if driver_kwds is not None: kwargs.update(driver_kwds) if phony_dims is not None: kwargs["phony_dims"] = phony_dims if lock is None: if mode == "r": lock = HDF5_LOCK else: lock = combine_locks([HDF5_LOCK, get_write_lock(filename)]) manager = ( CachingFileManager(h5netcdf.File, filename, mode=mode, kwargs=kwargs) if isinstance(filename, str) else h5netcdf.File(filename, mode=mode, **kwargs) ) return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose) def _acquire(self, needs_lock=True): with self._manager.acquire_context(needs_lock) as root: ds = _nc4_require_group( root, self._group, self._mode, create_group=_h5netcdf_create_group ) return ds @property def ds(self): return self._acquire() def open_store_variable(self, name, var): import h5netcdf import h5py dimensions = var.dimensions data = indexing.LazilyIndexedArray(H5NetCDFArrayWrapper(name, self)) attrs = _read_attributes(var) # netCDF4 specific encoding encoding = { "chunksizes": var.chunks, "fletcher32": var.fletcher32, "shuffle": var.shuffle, } if var.chunks: encoding["preferred_chunks"] = dict( zip(var.dimensions, var.chunks, strict=True) ) # Convert h5py-style compression options to NetCDF4-Python # style, if possible if var.compression == "gzip": encoding["zlib"] = True encoding["complevel"] = var.compression_opts elif var.compression is not None: encoding["compression"] = var.compression encoding["compression_opts"] = var.compression_opts # save source so __repr__ can detect if it's local or not encoding["source"] = self._filename encoding["original_shape"] = data.shape vlen_dtype = h5py.check_dtype(vlen=var.dtype) if vlen_dtype is str: encoding["dtype"] = str elif vlen_dtype is not None: # pragma: no cover # xarray doesn't support writing arbitrary vlen dtypes yet. 
pass # just check if datatype is available and create dtype # this check can be removed if h5netcdf >= 1.4.0 for any environment elif (datatype := getattr(var, "datatype", None)) and isinstance( datatype, h5netcdf.core.EnumType ): encoding["dtype"] = np.dtype( data.dtype, metadata={ "enum": datatype.enum_dict, "enum_name": datatype.name, }, ) else: encoding["dtype"] = var.dtype return Variable(dimensions, data, attrs, encoding) def get_variables(self): return FrozenDict( (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items() ) def get_attrs(self): return FrozenDict(_read_attributes(self.ds)) def get_dimensions(self): return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items()) def get_parent_dimensions(self): return FrozenDict(collect_ancestor_dimensions(self.ds)) def get_encoding(self): return { "unlimited_dims": { k for k, v in self.ds.dimensions.items() if v.isunlimited() } } def set_dimension(self, name, length, is_unlimited=False): _ensure_no_forward_slash_in_name(name) if is_unlimited: self.ds.dimensions[name] = None self.ds.resize_dimension(name, length) else: self.ds.dimensions[name] = length def set_attribute(self, key, value): self.ds.attrs[key] = value def encode_variable(self, variable, name=None): return _encode_nc4_variable(variable, name=name) def prepare_variable( self, name, variable, check_encoding=False, unlimited_dims=None ): import h5py _ensure_no_forward_slash_in_name(name) attrs = variable.attrs.copy() dtype = _get_datatype(variable, raise_on_invalid_encoding=check_encoding) fillvalue = attrs.pop("_FillValue", None) if dtype is str: dtype = h5py.special_dtype(vlen=str) # check enum metadata and use h5netcdf.core.EnumType if ( hasattr(self.ds, "enumtypes") and (meta := np.dtype(dtype).metadata) and (e_name := meta.get("enum_name")) and (e_dict := meta.get("enum")) ): dtype = _build_and_get_enum(self, name, dtype, e_name, e_dict) encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding) kwargs = {} # Convert from NetCDF4-Python style compression settings to h5py style # If both styles are used together, h5py takes precedence # If set_encoding=True, raise ValueError in case of mismatch if encoding.pop("zlib", False): if check_encoding and encoding.get("compression") not in (None, "gzip"): raise ValueError("'zlib' and 'compression' encodings mismatch") encoding.setdefault("compression", "gzip") if ( check_encoding and "complevel" in encoding and "compression_opts" in encoding and encoding["complevel"] != encoding["compression_opts"] ): raise ValueError("'complevel' and 'compression_opts' encodings mismatch") complevel = encoding.pop("complevel", 0) if complevel != 0: encoding.setdefault("compression_opts", complevel) encoding["chunks"] = encoding.pop("chunksizes", None) # Do not apply compression, filters or chunking to scalars. 
if variable.shape: for key in [ "compression", "compression_opts", "shuffle", "chunks", "fletcher32", ]: if key in encoding: kwargs[key] = encoding[key] if name not in self.ds: nc4_var = self.ds.create_variable( name, dtype=dtype, dimensions=variable.dims, fillvalue=fillvalue, **kwargs, ) else: nc4_var = self.ds[name] for k, v in attrs.items(): nc4_var.attrs[k] = v target = H5NetCDFArrayWrapper(name, self) return target, variable.data def sync(self): self.ds.sync() def close(self, **kwargs): self._manager.close(**kwargs) def _check_phony_dims(phony_dims): emit_phony_dims_warning = False if phony_dims is None: emit_phony_dims_warning = True phony_dims = "access" return emit_phony_dims_warning, phony_dims def _emit_phony_dims_warning(): emit_user_level_warning( "The 'phony_dims' kwarg now defaults to 'access'. " "Previously 'phony_dims=None' would raise an error. " "For full netcdf equivalence please use phony_dims='sort'.", UserWarning, ) def _normalize_filename_or_obj( filename_or_obj: T_PathFileOrDataStore, ) -> str | ReadBuffer | AbstractDataStore: if isinstance(filename_or_obj, bytes | memoryview): return io.BytesIO(filename_or_obj) else: return _normalize_path(filename_or_obj) class H5netcdfBackendEntrypoint(BackendEntrypoint): """ Backend for netCDF files based on the h5netcdf package. It can open ".nc", ".nc4", ".cdf" files but will only be selected as the default if the "netcdf4" engine is not available. Additionally it can open valid HDF5 files, see https://h5netcdf.org/#invalid-netcdf-files for more info. It will not be detected as valid backend for such files, so make sure to specify ``engine="h5netcdf"`` in ``open_dataset``. For more information about the underlying library, visit: https://h5netcdf.org See Also -------- backends.H5NetCDFStore backends.NetCDF4BackendEntrypoint backends.ScipyBackendEntrypoint """ description = ( "Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using h5netcdf in Xarray" ) url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.H5netcdfBackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: filename_or_obj = _normalize_filename_or_obj(filename_or_obj) magic_number = try_read_magic_number_from_file_or_path(filename_or_obj) if magic_number is not None: return magic_number.startswith(b"\211HDF\r\n\032\n") if isinstance(filename_or_obj, str | os.PathLike): _, ext = os.path.splitext(filename_or_obj) return ext in {".nc", ".nc4", ".cdf"} return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, format=None, group=None, lock=None, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, storage_options: dict[str, Any] | None = None, ) -> Dataset: # Keep this message for some versions # remove and set phony_dims="access" above emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims) filename_or_obj = _normalize_filename_or_obj(filename_or_obj) store = H5NetCDFStore.open( filename_or_obj, format=format, group=group, lock=lock, invalid_netcdf=invalid_netcdf, phony_dims=phony_dims, decode_vlen_strings=decode_vlen_strings, driver=driver, driver_kwds=driver_kwds, storage_options=storage_options, ) store_entrypoint = StoreBackendEntrypoint() ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, 
concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) # only warn if phony_dims exist in file # remove together with the above check # after some versions if store.ds._root._phony_dim_count > 0 and emit_phony_dims_warning: _emit_phony_dims_warning() return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, format=None, group: str | None = None, lock=None, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, **kwargs, ) -> DataTree: groups_dict = self.open_groups_as_dict( filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, format=format, group=group, lock=lock, invalid_netcdf=invalid_netcdf, phony_dims=phony_dims, decode_vlen_strings=decode_vlen_strings, driver=driver, driver_kwds=driver_kwds, **kwargs, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, format=None, group: str | None = None, lock=None, invalid_netcdf=None, phony_dims=None, decode_vlen_strings=True, driver=None, driver_kwds=None, **kwargs, ) -> dict[str, Dataset]: from xarray.backends.common import _iter_nc_groups from xarray.core.treenode import NodePath from xarray.core.utils import close_on_error # Keep this message for some versions # remove and set phony_dims="access" above emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims) filename_or_obj = _normalize_filename_or_obj(filename_or_obj) store = H5NetCDFStore.open( filename_or_obj, format=format, group=group, lock=lock, invalid_netcdf=invalid_netcdf, phony_dims=phony_dims, decode_vlen_strings=decode_vlen_strings, driver=driver, driver_kwds=driver_kwds, ) # Check for a group and make it a parent if it exists if group: parent = NodePath("/") / NodePath(group) else: parent = NodePath("/") manager = store._manager groups_dict = {} for path_group in _iter_nc_groups(store.ds, parent=parent): group_store = H5NetCDFStore(manager, group=path_group, **kwargs) store_entrypoint = StoreBackendEntrypoint() with close_on_error(group_store): group_ds = store_entrypoint.open_dataset( group_store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds # only warn if phony_dims exist in file # remove together with the above check # after some versions if store.ds._phony_dim_count > 0 and emit_phony_dims_warning: _emit_phony_dims_warning() return groups_dict BACKEND_ENTRYPOINTS["h5netcdf"] = ("h5netcdf", H5netcdfBackendEntrypoint) 
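# Illustrative usage sketch (not part of the backend implementation): the
# entrypoint registered above is normally reached through the public xarray
# API by passing ``engine="h5netcdf"``. The file name "example.nc" below is a
# hypothetical placeholder.
def _example_h5netcdf_usage():  # pragma: no cover - documentation example only
    import xarray as xr

    # Open a single group as a Dataset; phony_dims="sort" labels unnamed HDF5
    # dimensions in the order needed for full netCDF equivalence (see the
    # warning emitted above when phony_dims is left unset).
    ds = xr.open_dataset("example.nc", engine="h5netcdf", phony_dims="sort")

    # Open the whole group hierarchy as a DataTree.
    tree = xr.open_datatree("example.nc", engine="h5netcdf")
    return ds, tree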
�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/locks.py�����������������������������������������������������������0000664�0000000�0000000�00000016770�15056206164�0020124�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import multiprocessing import threading import uuid import weakref from collections.abc import Hashable, MutableMapping from typing import Any, ClassVar from weakref import WeakValueDictionary # SerializableLock is adapted from Dask: # https://github.com/dask/dask/blob/74e898f0ec712e8317ba86cc3b9d18b6b9922be0/dask/utils.py#L1160-L1224 # Used under the terms of Dask's license, see licenses/DASK_LICENSE. class SerializableLock: """A Serializable per-process Lock This wraps a normal ``threading.Lock`` object and satisfies the same interface. However, this lock can also be serialized and sent to different processes. It will not block concurrent operations between processes (for this you should look at ``dask.multiprocessing.Lock`` or ``locket.lock_file`` but will consistently deserialize into the same lock. So if we make a lock in one process:: lock = SerializableLock() And then send it over to another process multiple times:: bytes = pickle.dumps(lock) a = pickle.loads(bytes) b = pickle.loads(bytes) Then the deserialized objects will operate as though they were the same lock, and collide as appropriate. This is useful for consistently protecting resources on a per-process level. The creation of locks is itself not threadsafe. """ _locks: ClassVar[WeakValueDictionary[Hashable, threading.Lock]] = ( WeakValueDictionary() ) token: Hashable lock: threading.Lock def __init__(self, token: Hashable | None = None): self.token = token or str(uuid.uuid4()) if self.token in SerializableLock._locks: self.lock = SerializableLock._locks[self.token] else: self.lock = threading.Lock() SerializableLock._locks[self.token] = self.lock def acquire(self, *args, **kwargs): return self.lock.acquire(*args, **kwargs) def release(self, *args, **kwargs): return self.lock.release(*args, **kwargs) def __enter__(self): self.lock.__enter__() def __exit__(self, *args): self.lock.__exit__(*args) def locked(self): return self.lock.locked() def __getstate__(self): return self.token def __setstate__(self, token): self.__init__(token) def __str__(self): return f"<{self.__class__.__name__}: {self.token}>" __repr__ = __str__ # Locks used by multiple backends. # Neither HDF5 nor the netCDF-C library are thread-safe. HDF5_LOCK = SerializableLock() NETCDFC_LOCK = SerializableLock() _FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary() def _get_threaded_lock(key): try: lock = _FILE_LOCKS[key] except KeyError: lock = _FILE_LOCKS[key] = threading.Lock() return lock def _get_multiprocessing_lock(key): # TODO: make use of the key -- maybe use locket.py? 
# https://github.com/mwilliamson/locket.py del key # unused return multiprocessing.Lock() def _get_lock_maker(scheduler=None): """Returns an appropriate function for creating resource locks. Parameters ---------- scheduler : str or None Dask scheduler being used. See Also -------- dask.utils.get_scheduler_lock """ if scheduler is None or scheduler == "threaded": return _get_threaded_lock elif scheduler == "multiprocessing": return _get_multiprocessing_lock elif scheduler == "distributed": # Lazy import distributed since it is can add a significant # amount of time to import try: from dask.distributed import Lock as DistributedLock except ImportError: DistributedLock = None return DistributedLock else: raise KeyError(scheduler) def get_dask_scheduler(get=None, collection=None) -> str | None: """Determine the dask scheduler that is being used. None is returned if no dask scheduler is active. See Also -------- dask.base.get_scheduler """ try: # Fix for bug caused by dask installation that doesn't involve the toolz library # Issue: 4164 import dask from dask.base import get_scheduler actual_get = get_scheduler(get, collection) except ImportError: return None try: from dask.distributed import Client if isinstance(actual_get.__self__, Client): return "distributed" except (ImportError, AttributeError): pass try: # As of dask=2.6, dask.multiprocessing requires cloudpickle to be installed # Dependency removed in https://github.com/dask/dask/pull/5511 if actual_get is dask.multiprocessing.get: return "multiprocessing" except AttributeError: pass return "threaded" def get_write_lock(key): """Get a scheduler appropriate lock for writing to the given resource. Parameters ---------- key : str Name of the resource for which to acquire a lock. Typically a filename. Returns ------- Lock object that can be used like a threading.Lock object. """ scheduler = get_dask_scheduler() lock_maker = _get_lock_maker(scheduler) return lock_maker(key) def acquire(lock, blocking=True): """Acquire a lock, possibly in a non-blocking fashion. Includes backwards compatibility hacks for old versions of Python, dask and dask-distributed. """ if blocking: # no arguments needed return lock.acquire() else: # "blocking" keyword argument not supported for: # - threading.Lock on Python 2. # - dask.SerializableLock with dask v1.0.0 or earlier. # - multiprocessing.Lock calls the argument "block" instead. # - dask.distributed.Lock uses the blocking argument as the first one return lock.acquire(blocking) class CombinedLock: """A combination of multiple locks. Like a locked door, a CombinedLock is locked if any of its constituent locks are locked. 
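For example, the write lock for an HDF5-backed file combines the global
HDF5 lock with a per-file lock; a sketch using names defined in this module,
with a hypothetical file name::

    lock = CombinedLock([HDF5_LOCK, get_write_lock("example.nc")])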
""" def __init__(self, locks): self.locks = tuple(set(locks)) # remove duplicates def acquire(self, blocking=True): return all(acquire(lock, blocking=blocking) for lock in self.locks) def release(self): for lock in self.locks: lock.release() def __enter__(self): for lock in self.locks: lock.__enter__() def __exit__(self, *args): for lock in self.locks: lock.__exit__(*args) def locked(self): return any(lock.locked for lock in self.locks) def __repr__(self): return f"CombinedLock({list(self.locks)!r})" class DummyLock: """DummyLock provides the lock API without any actual locking.""" def acquire(self, blocking=True): pass def release(self): pass def __enter__(self): pass def __exit__(self, *args): pass def locked(self): return False def combine_locks(locks): """Combine a sequence of locks into a single lock.""" all_locks = [] for lock in locks: if isinstance(lock, CombinedLock): all_locks.extend(lock.locks) elif lock is not None: all_locks.append(lock) num_locks = len(all_locks) if num_locks > 1: return CombinedLock(all_locks) elif num_locks == 1: return all_locks[0] else: return DummyLock() def ensure_lock(lock): """Ensure that the given object is a lock.""" if lock is None or lock is False: return DummyLock() return lock ��������xarray-2025.09.0/xarray/backends/lru_cache.py�������������������������������������������������������0000664�0000000�0000000�00000007115�15056206164�0020727�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import threading from collections import OrderedDict from collections.abc import Callable, Iterator, MutableMapping from typing import Any, TypeVar K = TypeVar("K") V = TypeVar("V") class LRUCache(MutableMapping[K, V]): """Thread-safe LRUCache based on an OrderedDict. All dict operations (__getitem__, __setitem__, __contains__) update the priority of the relevant key and take O(1) time. The dict is iterated over in order from the oldest to newest key, which means that a complete pass over the dict should not affect the order of any entries. When a new item is set and the maximum size of the cache is exceeded, the oldest item is dropped and called with ``on_evict(key, value)``. The ``maxsize`` property can be used to view or adjust the capacity of the cache, e.g., ``cache.maxsize = new_size``. """ _cache: OrderedDict[K, V] _maxsize: int _lock: threading.RLock _on_evict: Callable[[K, V], Any] | None __slots__ = ("_cache", "_lock", "_maxsize", "_on_evict") def __init__(self, maxsize: int, on_evict: Callable[[K, V], Any] | None = None): """ Parameters ---------- maxsize : int Integer maximum number of items to hold in the cache. on_evict : callable, optional Function to call like ``on_evict(key, value)`` when items are evicted. 
""" if not isinstance(maxsize, int): raise TypeError("maxsize must be an integer") if maxsize < 0: raise ValueError("maxsize must be non-negative") self._maxsize = maxsize self._cache = OrderedDict() self._lock = threading.RLock() self._on_evict = on_evict def __getitem__(self, key: K) -> V: # record recent use of the key by moving it to the front of the list with self._lock: value = self._cache[key] self._cache.move_to_end(key) return value def _enforce_size_limit(self, capacity: int) -> None: """Shrink the cache if necessary, evicting the oldest items.""" while len(self._cache) > capacity: key, value = self._cache.popitem(last=False) if self._on_evict is not None: self._on_evict(key, value) def __setitem__(self, key: K, value: V) -> None: with self._lock: if key in self._cache: # insert the new value at the end del self._cache[key] self._cache[key] = value elif self._maxsize: # make room if necessary self._enforce_size_limit(self._maxsize - 1) self._cache[key] = value elif self._on_evict is not None: # not saving, immediately evict self._on_evict(key, value) def __delitem__(self, key: K) -> None: del self._cache[key] def __iter__(self) -> Iterator[K]: # create a list, so accessing the cache during iteration cannot change # the iteration order return iter(list(self._cache)) def __len__(self) -> int: return len(self._cache) @property def maxsize(self) -> int: """Maximum number of items can be held in the cache.""" return self._maxsize @maxsize.setter def maxsize(self, size: int) -> None: """Resize the cache, evicting the oldest items if necessary.""" if size < 0: raise ValueError("maxsize must be non-negative") with self._lock: self._enforce_size_limit(size) self._maxsize = size ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/memory.py����������������������������������������������������������0000664�0000000�0000000�00000002765�15056206164�0020320�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import copy import numpy as np from xarray.backends.common import AbstractWritableDataStore from xarray.core import indexing from xarray.core.variable import Variable class InMemoryDataStore(AbstractWritableDataStore): """ Stores dimensions, variables and attributes in ordered dictionaries, making this store fast compared to stores which save to disk. This store exists purely for internal testing purposes. 
""" def __init__(self, variables=None, attributes=None): self._variables = {} if variables is None else variables self._attributes = {} if attributes is None else attributes def get_attrs(self): return self._attributes def get_variables(self): res = {} for k, v in self._variables.items(): v = v.copy(deep=True) res[k] = v v._data = indexing.LazilyIndexedArray(v._data) return res def get_dimensions(self): return {d: s for v in self._variables.values() for d, s in v.dims.items()} def prepare_variable(self, k, v, *args, **kwargs): new_var = Variable(v.dims, np.empty_like(v), v.attrs) self._variables[k] = new_var return new_var, v.data def set_attribute(self, k, v): # copy to imitate writing to disk. self._attributes[k] = copy.deepcopy(v) def set_dimension(self, dim, length, unlimited_dims=None): # in this model, dimensions are accounted for in the variables pass �����������xarray-2025.09.0/xarray/backends/netCDF4_.py��������������������������������������������������������0000664�0000000�0000000�00000064473�15056206164�0020342�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import operator import os from collections.abc import Iterable from contextlib import suppress from typing import TYPE_CHECKING, Any, Self import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, BackendArray, BackendEntrypoint, T_PathFileOrDataStore, WritableCFDataStore, _normalize_path, collect_ancestor_dimensions, datatree_from_dict_with_io_cleanup, find_root_and_group, robust_getitem, ) from xarray.backends.file_manager import CachingFileManager, DummyFileManager from xarray.backends.locks import ( HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock, get_write_lock, ) from xarray.backends.netcdf3 import encode_nc3_attr_value, encode_nc3_variable from xarray.backends.store import StoreBackendEntrypoint from xarray.coding.strings import ( CharacterArrayCoder, EncodedStringCoder, create_vlen_dtype, is_unicode_dtype, ) from xarray.coding.variables import pop_to from xarray.core import indexing from xarray.core.utils import ( FrozenDict, close_on_error, is_remote_uri, try_read_magic_number_from_path, ) from xarray.core.variable import Variable if TYPE_CHECKING: from h5netcdf.core import EnumType as h5EnumType from netCDF4 import EnumType as ncEnumType from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree # This lookup table maps from dtype.byteorder to a readable endian # string used by netCDF4. 
_endian_lookup = {"=": "native", ">": "big", "<": "little", "|": "native"} NETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK]) class BaseNetCDF4Array(BackendArray): __slots__ = ("datastore", "dtype", "shape", "variable_name") def __init__(self, variable_name, datastore): self.datastore = datastore self.variable_name = variable_name array = self.get_array() self.shape = array.shape dtype = array.dtype if dtype is str: # use object dtype (with additional vlen string metadata) because that's # the only way in numpy to represent variable length strings and to # check vlen string dtype in further steps # it also prevents automatic string concatenation via # conventions.decode_cf_variable dtype = create_vlen_dtype(str) self.dtype = dtype def __setitem__(self, key, value): with self.datastore.lock: data = self.get_array(needs_lock=False) data[key] = value if self.datastore.autoclose: self.datastore.close(needs_lock=False) def get_array(self, needs_lock=True): raise NotImplementedError("Virtual Method") class NetCDF4ArrayWrapper(BaseNetCDF4Array): __slots__ = () def get_array(self, needs_lock=True): ds = self.datastore._acquire(needs_lock) variable = ds.variables[self.variable_name] variable.set_auto_maskandscale(False) # only added in netCDF4-python v1.2.8 with suppress(AttributeError): variable.set_auto_chartostring(False) return variable def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER, self._getitem ) def _getitem(self, key): if self.datastore.is_remote: # pragma: no cover getitem = functools.partial(robust_getitem, catch=RuntimeError) else: getitem = operator.getitem try: with self.datastore.lock: original_array = self.get_array(needs_lock=False) array = getitem(original_array, key) except IndexError as err: # Catch IndexError in netCDF4 and return a more informative # error message. This is most often called when an unsorted # indexer is used before the data is loaded from disk. msg = ( "The indexing operation you are attempting to perform " "is not valid on netCDF4.Variable object. Try loading " "your data into memory first by calling .load()." ) raise IndexError(msg) from err return array def _encode_nc4_variable(var, name=None): for coder in [ EncodedStringCoder(allows_unicode=True), CharacterArrayCoder(), ]: var = coder.encode(var, name=name) return var def _check_encoding_dtype_is_vlen_string(dtype): if dtype is not str: raise AssertionError( # pragma: no cover f"unexpected dtype encoding {dtype!r}. This shouldn't happen: please " "file a bug report at github.com/pydata/xarray" ) def _get_datatype( var, nc_format="NETCDF4", raise_on_invalid_encoding=False ) -> np.dtype: if nc_format == "NETCDF4": return _nc4_dtype(var) if "dtype" in var.encoding: encoded_dtype = var.encoding["dtype"] _check_encoding_dtype_is_vlen_string(encoded_dtype) if raise_on_invalid_encoding: raise ValueError( "encoding dtype=str for vlen strings is only supported " "with format='NETCDF4'." 
) return var.dtype def _nc4_dtype(var): if "dtype" in var.encoding: dtype = var.encoding.pop("dtype") _check_encoding_dtype_is_vlen_string(dtype) elif is_unicode_dtype(var.dtype): dtype = str elif var.dtype.kind in ["i", "u", "f", "c", "S"]: dtype = var.dtype else: raise ValueError(f"unsupported dtype for netCDF4 variable: {var.dtype}") return dtype def _netcdf4_create_group(dataset, name): return dataset.createGroup(name) def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group): if group in {None, "", "/"}: # use the root group return ds else: # make sure it's a string if not isinstance(group, str): raise ValueError("group must be a string or None") # support path-like syntax path = group.strip("/").split("/") for key in path: try: ds = ds.groups[key] except KeyError as e: if mode != "r": ds = create_group(ds, key) else: # wrap error to provide slightly more helpful message raise OSError(f"group not found: {key}", e) from e return ds def _ensure_no_forward_slash_in_name(name): if "/" in name: raise ValueError( f"Forward slashes '/' are not allowed in variable and dimension names (got {name!r}). " "Forward slashes are used as hierarchy-separators for " "HDF5-based files ('netcdf4'/'h5netcdf')." ) def _ensure_fill_value_valid(data, attributes): # work around for netCDF4/scipy issue where _FillValue has the wrong type: # https://github.com/Unidata/netcdf4-python/issues/271 if data.dtype.kind == "S" and "_FillValue" in attributes: attributes["_FillValue"] = np.bytes_(attributes["_FillValue"]) def _force_native_endianness(var): # possible values for byteorder are: # = native # < little-endian # > big-endian # | not applicable # Below we check if the data type is not native or NA if var.dtype.byteorder not in ["=", "|"]: # if endianness is specified explicitly, convert to the native type data = var.data.astype(var.dtype.newbyteorder("=")) var = Variable(var.dims, data, var.attrs, var.encoding) # if endian exists, remove it from the encoding. var.encoding.pop("endian", None) # check to see if encoding has a value for endian its 'native' if var.encoding.get("endian", "native") != "native": raise NotImplementedError( "Attempt to write non-native endian type, " "this is not supported by the netCDF4 " "python library." ) return var def _extract_nc4_variable_encoding( variable: Variable, raise_on_invalid=False, lsd_okay=True, h5py_okay=False, backend="netCDF4", unlimited_dims=None, ) -> dict[str, Any]: if unlimited_dims is None: unlimited_dims = () encoding = variable.encoding.copy() safe_to_drop = {"source", "original_shape"} valid_encodings = { "zlib", "complevel", "fletcher32", "contiguous", "chunksizes", "shuffle", "_FillValue", "dtype", "compression", "significant_digits", "quantize_mode", "blosc_shuffle", "szip_coding", "szip_pixels_per_block", "endian", } if lsd_okay: valid_encodings.add("least_significant_digit") if h5py_okay: valid_encodings.add("compression_opts") if not raise_on_invalid and encoding.get("chunksizes") is not None: # It's possible to get encoded chunksizes larger than a dimension size # if the original file had an unlimited dimension. This is problematic # if the new file no longer has an unlimited dimension. 
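        # For example, a variable written with chunksizes=(1000,) along a
        # formerly unlimited dimension that is now fixed at length 500 cannot
        # be re-written as-is, so the stale "chunksizes" entry is dropped below.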
chunksizes = encoding["chunksizes"] chunks_too_big = any( c > d and dim not in unlimited_dims for c, d, dim in zip( chunksizes, variable.shape, variable.dims, strict=False ) ) has_original_shape = "original_shape" in encoding changed_shape = ( has_original_shape and encoding.get("original_shape") != variable.shape ) if chunks_too_big or changed_shape: del encoding["chunksizes"] var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims) if not raise_on_invalid and var_has_unlim_dim and "contiguous" in encoding.keys(): del encoding["contiguous"] for k in safe_to_drop: if k in encoding: del encoding[k] if raise_on_invalid: invalid = [k for k in encoding if k not in valid_encodings] if invalid: raise ValueError( f"unexpected encoding parameters for {backend!r} backend: {invalid!r}. Valid " f"encodings are: {valid_encodings!r}" ) else: for k in list(encoding): if k not in valid_encodings: del encoding[k] return encoding def _is_list_of_strings(value) -> bool: arr = np.asarray(value) return arr.dtype.kind in ["U", "S"] and arr.size > 1 def _build_and_get_enum( store, var_name: str, dtype: np.dtype, enum_name: str, enum_dict: dict[str, int] ) -> ncEnumType | h5EnumType: """ Add or get the netCDF4 Enum based on the dtype in encoding. The return type should be ``netCDF4.EnumType``, but we avoid importing netCDF4 globally for performances. """ if enum_name not in store.ds.enumtypes: create_func = ( store.ds.createEnumType if isinstance(store, NetCDF4DataStore) else store.ds.create_enumtype ) return create_func( dtype, enum_name, enum_dict, ) datatype = store.ds.enumtypes[enum_name] if datatype.enum_dict != enum_dict: error_msg = ( f"Cannot save variable `{var_name}` because an enum" f" `{enum_name}` already exists in the Dataset but has" " a different definition. To fix this error, make sure" " all variables have a uniquely named enum in their" " `encoding['dtype'].metadata` or, if they should share" " the same enum type, make sure the enums are identical." ) raise ValueError(error_msg) return datatype class NetCDF4DataStore(WritableCFDataStore): """Store for reading and writing data via the Python-NetCDF4 library. This store supports NetCDF3, NetCDF4 and OpenDAP datasets. 
""" __slots__ = ( "_filename", "_group", "_manager", "_mode", "autoclose", "format", "is_remote", "lock", ) def __init__( self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False ): import netCDF4 if isinstance(manager, netCDF4.Dataset): if group is None: root, group = find_root_and_group(manager) else: if type(manager) is not netCDF4.Dataset: raise ValueError( "must supply a root netCDF4.Dataset if the group " "argument is provided" ) root = manager manager = DummyFileManager(root) self._manager = manager self._group = group self._mode = mode self.format = self.ds.data_model self._filename = self.ds.filepath() self.is_remote = is_remote_uri(self._filename) self.lock = ensure_lock(lock) self.autoclose = autoclose def get_child_store(self, group: str) -> Self: if self._group is not None: group = os.path.join(self._group, group) return type(self)( self._manager, group=group, mode=self._mode, lock=self.lock, autoclose=self.autoclose, ) @classmethod def open( cls, filename, mode="r", format="NETCDF4", group=None, clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, lock_maker=None, autoclose=False, ): import netCDF4 if isinstance(filename, os.PathLike): filename = os.fspath(filename) if not isinstance(filename, str): raise ValueError( "can only read bytes or file-like objects " "with engine='scipy' or 'h5netcdf'" ) if format is None: format = "NETCDF4" if lock is None: if mode == "r": if is_remote_uri(filename): lock = NETCDFC_LOCK else: lock = NETCDF4_PYTHON_LOCK else: if format is None or format.startswith("NETCDF4"): base_lock = NETCDF4_PYTHON_LOCK else: base_lock = NETCDFC_LOCK lock = combine_locks([base_lock, get_write_lock(filename)]) kwargs = dict( clobber=clobber, diskless=diskless, persist=persist, format=format, ) if auto_complex is not None: kwargs["auto_complex"] = auto_complex manager = CachingFileManager( netCDF4.Dataset, filename, mode=mode, kwargs=kwargs ) return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose) def _acquire(self, needs_lock=True): with self._manager.acquire_context(needs_lock) as root: ds = _nc4_require_group(root, self._group, self._mode) return ds @property def ds(self): return self._acquire() def open_store_variable(self, name: str, var): import netCDF4 dimensions = var.dimensions attributes = {k: var.getncattr(k) for k in var.ncattrs()} data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(name, self)) encoding: dict[str, Any] = {} if isinstance(var.datatype, netCDF4.EnumType): encoding["dtype"] = np.dtype( data.dtype, metadata={ "enum": var.datatype.enum_dict, "enum_name": var.datatype.name, }, ) else: encoding["dtype"] = var.dtype _ensure_fill_value_valid(data, attributes) # netCDF4 specific encoding; save _FillValue for later filters = var.filters() if filters is not None: encoding.update(filters) chunking = var.chunking() if chunking is not None: if chunking == "contiguous": encoding["contiguous"] = True encoding["chunksizes"] = None else: encoding["contiguous"] = False encoding["chunksizes"] = tuple(chunking) encoding["preferred_chunks"] = dict( zip(var.dimensions, chunking, strict=True) ) # TODO: figure out how to round-trip "endian-ness" without raising # warnings from netCDF4 # encoding['endian'] = var.endian() pop_to(attributes, encoding, "least_significant_digit") # save source so __repr__ can detect if it's local or not encoding["source"] = self._filename encoding["original_shape"] = data.shape return Variable(dimensions, data, attributes, encoding) def get_variables(self): return 
FrozenDict( (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items() ) def get_attrs(self): return FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs()) def get_dimensions(self): return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items()) def get_parent_dimensions(self): return FrozenDict(collect_ancestor_dimensions(self.ds)) def get_encoding(self): return { "unlimited_dims": { k for k, v in self.ds.dimensions.items() if v.isunlimited() } } def set_dimension(self, name, length, is_unlimited=False): _ensure_no_forward_slash_in_name(name) dim_length = length if not is_unlimited else None self.ds.createDimension(name, size=dim_length) def set_attribute(self, key, value): if self.format != "NETCDF4": value = encode_nc3_attr_value(value) if _is_list_of_strings(value): # encode as NC_STRING if attr is list of strings self.ds.setncattr_string(key, value) else: self.ds.setncattr(key, value) def encode_variable(self, variable, name=None): variable = _force_native_endianness(variable) if self.format == "NETCDF4": variable = _encode_nc4_variable(variable, name=name) else: variable = encode_nc3_variable(variable, name=name) return variable def prepare_variable( self, name, variable: Variable, check_encoding=False, unlimited_dims=None ): _ensure_no_forward_slash_in_name(name) attrs = variable.attrs.copy() fill_value = attrs.pop("_FillValue", None) datatype: np.dtype | ncEnumType | h5EnumType datatype = _get_datatype( variable, self.format, raise_on_invalid_encoding=check_encoding ) # check enum metadata and use netCDF4.EnumType if ( (meta := np.dtype(datatype).metadata) and (e_name := meta.get("enum_name")) and (e_dict := meta.get("enum")) ): datatype = _build_and_get_enum(self, name, datatype, e_name, e_dict) encoding = _extract_nc4_variable_encoding( variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims ) if name in self.ds.variables: nc4_var = self.ds.variables[name] else: default_args = dict( varname=name, datatype=datatype, dimensions=variable.dims, zlib=False, complevel=4, shuffle=True, fletcher32=False, contiguous=False, chunksizes=None, endian="native", least_significant_digit=None, fill_value=fill_value, ) default_args.update(encoding) default_args.pop("_FillValue", None) nc4_var = self.ds.createVariable(**default_args) nc4_var.setncatts(attrs) target = NetCDF4ArrayWrapper(name, self) return target, variable.data def sync(self): self.ds.sync() def close(self, **kwargs): self._manager.close(**kwargs) class NetCDF4BackendEntrypoint(BackendEntrypoint): """ Backend for netCDF files based on the netCDF4 package. It can open ".nc", ".nc4", ".cdf" files and will be chosen as default for these files. Additionally it can open valid HDF5 files, see https://h5netcdf.org/#invalid-netcdf-files for more info. It will not be detected as valid backend for such files, so make sure to specify ``engine="netcdf4"`` in ``open_dataset``. 
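    A minimal sketch of selecting this backend explicitly, assuming a local
    file named ``example.nc`` (hypothetical name)::

        import xarray as xr

        ds = xr.open_dataset("example.nc", engine="netcdf4")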
For more information about the underlying library, visit: https://unidata.github.io/netcdf4-python See Also -------- backends.NetCDF4DataStore backends.H5netcdfBackendEntrypoint backends.ScipyBackendEntrypoint """ description = ( "Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using netCDF4 in Xarray" ) url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.NetCDF4BackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: if isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj): return True magic_number = try_read_magic_number_from_path(filename_or_obj) if magic_number is not None: # netcdf 3 or HDF5 return magic_number.startswith((b"CDF", b"\211HDF\r\n\032\n")) if isinstance(filename_or_obj, str | os.PathLike): _, ext = os.path.splitext(filename_or_obj) return ext in {".nc", ".nc4", ".cdf"} return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group=None, mode="r", format="NETCDF4", clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, autoclose=False, ) -> Dataset: filename_or_obj = _normalize_path(filename_or_obj) store = NetCDF4DataStore.open( filename_or_obj, mode=mode, format=format, group=group, clobber=clobber, diskless=diskless, persist=persist, auto_complex=auto_complex, lock=lock, autoclose=autoclose, ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, format="NETCDF4", clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, autoclose=False, **kwargs, ) -> DataTree: groups_dict = self.open_groups_as_dict( filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, group=group, format=format, clobber=clobber, diskless=diskless, persist=persist, auto_complex=auto_complex, lock=lock, autoclose=autoclose, **kwargs, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, format="NETCDF4", clobber=True, diskless=False, persist=False, auto_complex=None, lock=None, autoclose=False, **kwargs, ) -> dict[str, Dataset]: from xarray.backends.common import _iter_nc_groups from xarray.core.treenode import NodePath filename_or_obj = _normalize_path(filename_or_obj) store = NetCDF4DataStore.open( filename_or_obj, group=group, format=format, clobber=clobber, diskless=diskless, persist=persist, auto_complex=auto_complex, lock=lock, autoclose=autoclose, ) # Check for a group and make it a parent if it 
exists if group: parent = NodePath("/") / NodePath(group) else: parent = NodePath("/") manager = store._manager groups_dict = {} for path_group in _iter_nc_groups(store.ds, parent=parent): group_store = NetCDF4DataStore(manager, group=path_group, **kwargs) store_entrypoint = StoreBackendEntrypoint() with close_on_error(group_store): group_ds = store_entrypoint.open_dataset( group_store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds return groups_dict BACKEND_ENTRYPOINTS["netcdf4"] = ("netCDF4", NetCDF4BackendEntrypoint) �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/netcdf3.py���������������������������������������������������������0000664�0000000�0000000�00000013140�15056206164�0020323�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import unicodedata import numpy as np from xarray import coding from xarray.core.variable import Variable # Special characters that are permitted in netCDF names except in the # 0th position of the string _specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~' # The following are reserved names in CDL and may not be used as names of # variables, dimension, attributes _reserved_names = { "byte", "char", "short", "ushort", "int", "uint", "int64", "uint64", "float", "real", "double", "bool", "string", } # These data-types aren't supported by netCDF3, so they are automatically # coerced instead as indicated by the "coerce_nc3_dtype" function _nc3_dtype_coercions = { "int64": "int32", "uint64": "int32", "uint32": "int32", "uint16": "int16", "uint8": "int8", "bool": "int8", } # encode all strings as UTF-8 STRING_ENCODING = "utf-8" COERCION_VALUE_ERROR = ( "could not safely cast array from {dtype} to {new_dtype}. While it is not " "always the case, a common reason for this is that xarray has deemed it " "safest to encode np.datetime64[ns] or np.timedelta64[ns] values with " "int64 values representing units of 'nanoseconds'. This is either due to " "the fact that the times are known to require nanosecond precision for an " "accurate round trip, or that the times are unknown prior to writing due " "to being contained in a chunked array. Ways to work around this are " "either to use a backend that supports writing int64 values, or to " "manually specify the encoding['units'] and encoding['dtype'] (e.g. " "'seconds since 1970-01-01' and np.dtype('int32')) on the time " "variable(s) such that the times can be serialized in a netCDF3 file " "(note that depending on the situation, however, this latter option may " "result in an inaccurate round trip)." 
) def coerce_nc3_dtype(arr): """Coerce an array to a data type that can be stored in a netCDF-3 file This function performs the dtype conversions as specified by the ``_nc3_dtype_coercions`` mapping: int64 -> int32 uint64 -> int32 uint32 -> int32 uint16 -> int16 uint8 -> int8 bool -> int8 Data is checked for equality, or equivalence (non-NaN values) using the ``(cast_array == original_array).all()``. """ dtype = str(arr.dtype) if dtype in _nc3_dtype_coercions: new_dtype = _nc3_dtype_coercions[dtype] # TODO: raise a warning whenever casting the data-type instead? cast_arr = arr.astype(new_dtype) if not (cast_arr == arr).all(): raise ValueError( COERCION_VALUE_ERROR.format(dtype=dtype, new_dtype=new_dtype) ) arr = cast_arr return arr def encode_nc3_attr_value(value): if isinstance(value, bytes): pass elif isinstance(value, str): value = value.encode(STRING_ENCODING) else: value = coerce_nc3_dtype(np.atleast_1d(value)) if value.ndim > 1: raise ValueError("netCDF attributes must be 1-dimensional") return value def encode_nc3_attrs(attrs): return {k: encode_nc3_attr_value(v) for k, v in attrs.items()} def _maybe_prepare_times(var): # checks for integer-based time-like and # replaces np.iinfo(np.int64).min with _FillValue or np.nan # this keeps backwards compatibility data = var.data if data.dtype.kind in "iu": units = var.attrs.get("units", None) if units is not None and coding.variables._is_time_like(units): mask = data == np.iinfo(np.int64).min if mask.any(): data = np.where(mask, var.attrs.get("_FillValue", np.nan), data) return data def encode_nc3_variable(var, name=None): for coder in [ coding.strings.EncodedStringCoder(allows_unicode=False), coding.strings.CharacterArrayCoder(), ]: var = coder.encode(var, name=name) data = _maybe_prepare_times(var) data = coerce_nc3_dtype(data) attrs = encode_nc3_attrs(var.attrs) return Variable(var.dims, data, attrs, var.encoding) def _isalnumMUTF8(c): """Return True if the given UTF-8 encoded character is alphanumeric or multibyte. Input is not checked! """ return c.isalnum() or (len(c.encode("utf-8")) > 1) def is_valid_nc3_name(s): """Test whether an object can be validly converted to a netCDF-3 dimension, variable or attribute name Earlier versions of the netCDF C-library reference implementation enforced a more restricted set of characters in creating new names, but permitted reading names containing arbitrary bytes. This specification extends the permitted characters in names to include multi-byte UTF-8 encoded Unicode and additional printing characters from the US-ASCII alphabet. The first character of a name must be alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for special names with meaning to implementations, such as the "_FillValue" attribute). Subsequent characters may also include printing special characters, except for '/' which is not allowed in names. Names that have trailing space characters are also not permitted. 
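    A few illustrative cases:

    >>> is_valid_nc3_name("temperature")
    True
    >>> is_valid_nc3_name("x/y")  # '/' is not allowed in names
    False
    >>> is_valid_nc3_name("trailing ")  # trailing spaces are not permitted
    False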
""" if not isinstance(s, str): return False num_bytes = len(s.encode("utf-8")) return ( (unicodedata.normalize("NFC", s) == s) and (s not in _reserved_names) and (num_bytes >= 0) and ("/" not in s) and (s[-1] != " ") and (_isalnumMUTF8(s[0]) or (s[0] == "_")) and all(_isalnumMUTF8(c) or c in _specialchars for c in s) ) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/plugins.py���������������������������������������������������������0000664�0000000�0000000�00000020154�15056206164�0020461�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import inspect import itertools import warnings from collections.abc import Callable from importlib.metadata import entry_points from typing import TYPE_CHECKING, Any from xarray.backends.common import BACKEND_ENTRYPOINTS, BackendEntrypoint from xarray.core.utils import module_available if TYPE_CHECKING: import os from importlib.metadata import EntryPoint, EntryPoints from xarray.backends.common import AbstractDataStore from xarray.core.types import ReadBuffer NETCDF_BACKENDS_ORDER = ["netcdf4", "h5netcdf", "scipy"] def remove_duplicates(entrypoints: EntryPoints) -> list[EntryPoint]: # sort and group entrypoints by name entrypoints_sorted = sorted(entrypoints, key=lambda ep: ep.name) entrypoints_grouped = itertools.groupby(entrypoints_sorted, key=lambda ep: ep.name) # check if there are multiple entrypoints for the same name unique_entrypoints = [] for name, _matches in entrypoints_grouped: # remove equal entrypoints matches = list(set(_matches)) unique_entrypoints.append(matches[0]) matches_len = len(matches) if matches_len > 1: all_module_names = [e.value.split(":")[0] for e in matches] selected_module_name = all_module_names[0] warnings.warn( f"Found {matches_len} entrypoints for the engine name {name}:" f"\n {all_module_names}.\n " f"The entrypoint {selected_module_name} will be used.", RuntimeWarning, stacklevel=2, ) return unique_entrypoints def detect_parameters(open_dataset: Callable) -> tuple[str, ...]: signature = inspect.signature(open_dataset) parameters = signature.parameters parameters_list = [] for name, param in parameters.items(): if param.kind in ( inspect.Parameter.VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL, ): raise TypeError( f"All the parameters in {open_dataset!r} signature should be explicit. 
" "*args and **kwargs is not supported" ) if name != "self": parameters_list.append(name) return tuple(parameters_list) def backends_dict_from_pkg( entrypoints: list[EntryPoint], ) -> dict[str, type[BackendEntrypoint]]: backend_entrypoints = {} for entrypoint in entrypoints: name = entrypoint.name try: backend = entrypoint.load() backend_entrypoints[name] = backend except Exception as ex: warnings.warn( f"Engine {name!r} loading failed:\n{ex}", RuntimeWarning, stacklevel=2 ) return backend_entrypoints def set_missing_parameters( backend_entrypoints: dict[str, type[BackendEntrypoint]], ) -> None: for backend in backend_entrypoints.values(): if backend.open_dataset_parameters is None: open_dataset = backend.open_dataset backend.open_dataset_parameters = detect_parameters(open_dataset) def sort_backends( backend_entrypoints: dict[str, type[BackendEntrypoint]], ) -> dict[str, type[BackendEntrypoint]]: ordered_backends_entrypoints = {} for be_name in NETCDF_BACKENDS_ORDER: if backend_entrypoints.get(be_name) is not None: ordered_backends_entrypoints[be_name] = backend_entrypoints.pop(be_name) ordered_backends_entrypoints.update( {name: backend_entrypoints[name] for name in sorted(backend_entrypoints)} ) return ordered_backends_entrypoints def build_engines(entrypoints: EntryPoints) -> dict[str, BackendEntrypoint]: backend_entrypoints: dict[str, type[BackendEntrypoint]] = {} for backend_name, (module_name, backend) in BACKEND_ENTRYPOINTS.items(): if module_name is None or module_available(module_name): backend_entrypoints[backend_name] = backend entrypoints_unique = remove_duplicates(entrypoints) external_backend_entrypoints = backends_dict_from_pkg(entrypoints_unique) backend_entrypoints.update(external_backend_entrypoints) backend_entrypoints = sort_backends(backend_entrypoints) set_missing_parameters(backend_entrypoints) return {name: backend() for name, backend in backend_entrypoints.items()} @functools.lru_cache(maxsize=1) def list_engines() -> dict[str, BackendEntrypoint]: """ Return a dictionary of available engines and their BackendEntrypoint objects. Returns ------- dictionary Notes ----- This function lives in the backends namespace (``engs=xr.backends.list_engines()``). If available, more information is available about each backend via ``engs["eng_name"]``. """ entrypoints = entry_points(group="xarray.backends") return build_engines(entrypoints) def refresh_engines() -> None: """Refreshes the backend engines based on installed packages.""" list_engines.cache_clear() def guess_engine( store_spec: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, ) -> str | type[BackendEntrypoint]: engines = list_engines() for engine, backend in engines.items(): try: if backend.guess_can_open(store_spec): return engine except PermissionError: raise except Exception: warnings.warn( f"{engine!r} fails while guessing", RuntimeWarning, stacklevel=2 ) compatible_engines = [] for engine, (_, backend_cls) in BACKEND_ENTRYPOINTS.items(): try: backend = backend_cls() if backend.guess_can_open(store_spec): compatible_engines.append(engine) except Exception: warnings.warn( f"{engine!r} fails while guessing", RuntimeWarning, stacklevel=2 ) installed_engines = [k for k in engines if k != "store"] if not compatible_engines: if installed_engines: error_msg = ( "did not find a match in any of xarray's currently installed IO " f"backends {installed_engines}. 
Consider explicitly selecting one of the " "installed engines via the ``engine`` parameter, or installing " "additional IO dependencies, see:\n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n" "https://docs.xarray.dev/en/stable/user-guide/io.html" ) else: error_msg = ( "xarray is unable to open this file because it has no currently " "installed IO backends. Xarray's read/write support requires " "installing optional IO dependencies, see:\n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n" "https://docs.xarray.dev/en/stable/user-guide/io" ) else: error_msg = ( "found the following matches with the input file in xarray's IO " f"backends: {compatible_engines}. But their dependencies may not be installed, see:\n" "https://docs.xarray.dev/en/stable/user-guide/io.html \n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html" ) raise ValueError(error_msg) def get_backend(engine: str | type[BackendEntrypoint]) -> BackendEntrypoint: """Select open_dataset method based on current engine.""" if isinstance(engine, str): engines = list_engines() if engine not in engines: raise ValueError( f"unrecognized engine '{engine}' must be one of your download engines: {list(engines)}. " "To install additional dependencies, see:\n" "https://docs.xarray.dev/en/stable/user-guide/io.html \n" "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html" ) backend = engines[engine] elif issubclass(engine, BackendEntrypoint): backend = engine() else: raise TypeError( "engine must be a string or a subclass of " f"xarray.backends.BackendEntrypoint: {engine}" ) return backend ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/pydap_.py����������������������������������������������������������0000664�0000000�0000000�00000030533�15056206164�0020256�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Iterable from typing import TYPE_CHECKING, Any import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, AbstractDataStore, BackendArray, BackendEntrypoint, T_PathFileOrDataStore, _normalize_path, datatree_from_dict_with_io_cleanup, robust_getitem, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.utils import ( Frozen, FrozenDict, close_on_error, is_remote_uri, ) from xarray.core.variable import Variable from xarray.namedarray.pycompat import integer_types if TYPE_CHECKING: import os from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ReadBuffer class PydapArrayWrapper(BackendArray): def __init__(self, array): self.array = array @property def shape(self) -> tuple[int, ...]: return self.array.shape @property def dtype(self): return self.array.dtype def __getitem__(self, 
key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.BASIC, self._getitem ) def _getitem(self, key): result = robust_getitem(self.array, key, catch=ValueError) # in some cases, pydap doesn't squeeze axes automatically like numpy result = np.asarray(result) axis = tuple(n for n, k in enumerate(key) if isinstance(k, integer_types)) if result.ndim + len(axis) != self.array.ndim and axis: result = np.squeeze(result, axis) return result def get_group(ds, group): if group in {None, "", "/"}: # use the root group return ds else: try: return ds[group] except KeyError as e: # wrap error to provide slightly more helpful message raise KeyError(f"group not found: {group}", e) from e class PydapDataStore(AbstractDataStore): """Store for accessing OpenDAP datasets with pydap. This store provides an alternative way to access OpenDAP datasets that may be useful if the netCDF4 library is not available. """ def __init__(self, dataset, group=None): """ Parameters ---------- ds : pydap DatasetType group: str or None (default None) The group to open. If None, the root group is opened. """ self.dataset = dataset self.group = group @classmethod def open( cls, url, group=None, application=None, session=None, output_grid=None, timeout=None, verify=None, user_charset=None, ): from pydap.client import open_url from pydap.net import DEFAULT_TIMEOUT if output_grid is not None: # output_grid is no longer passed to pydap.client.open_url from xarray.core.utils import emit_user_level_warning emit_user_level_warning( "`output_grid` is deprecated and will be removed in a future version" " of xarray. Will be set to `None`, the new default. ", DeprecationWarning, ) output_grid = False # new default behavior kwargs = { "url": url, "application": application, "session": session, "output_grid": output_grid or False, "timeout": timeout or DEFAULT_TIMEOUT, "verify": verify or True, "user_charset": user_charset, } if isinstance(url, str): # check uit begins with an acceptable scheme dataset = open_url(**kwargs) elif hasattr(url, "ds"): # pydap dataset dataset = url.ds args = {"dataset": dataset} if group: # only then, change the default args["group"] = group return cls(**args) def open_store_variable(self, var): data = indexing.LazilyIndexedArray(PydapArrayWrapper(var)) try: dimensions = [ dim.split("/")[-1] if dim.startswith("/") else dim for dim in var.dims ] except AttributeError: # GridType does not have a dims attribute - instead get `dimensions` # see https://github.com/pydap/pydap/issues/485 dimensions = var.dimensions return Variable(dimensions, data, var.attributes) def get_variables(self): # get first all variables arrays, excluding any container type like, # `Groups`, `Sequence` or `Structure` types try: _vars = list(self.ds.variables()) _vars += list(self.ds.grids()) # dap2 objects except AttributeError: from pydap.model import GroupType _vars = [ var for var in self.ds.keys() # check the key is not a BaseType or GridType if not isinstance(self.ds[var], GroupType) ] return FrozenDict((k, self.open_store_variable(self.ds[k])) for k in _vars) def get_attrs(self): """Remove any opendap specific attributes""" opendap_attrs = ( "configuration", "build_dmrpp", "bes", "libdap", "invocation", "dimensions", ) attrs = self.ds.attributes list(map(attrs.pop, opendap_attrs, [None] * 6)) return Frozen(attrs) def get_dimensions(self): return Frozen(self.ds.dimensions) @property def ds(self): return get_group(self.dataset, self.group) class PydapBackendEntrypoint(BackendEntrypoint): """ Backend for 
steaming datasets over the internet using the Data Access Protocol, also known as DODS or OPeNDAP based on the pydap package. This backend is selected by default for urls. For more information about the underlying library, visit: https://pydap.github.io/pydap/en/intro.html See Also -------- backends.PydapDataStore """ description = "Open remote datasets via OPeNDAP using pydap in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.PydapBackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: return isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj) def open_dataset( self, filename_or_obj: ( str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore ), *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group=None, application=None, session=None, output_grid=None, timeout=None, verify=None, user_charset=None, ) -> Dataset: store = PydapDataStore.open( url=filename_or_obj, group=group, application=application, session=session, output_grid=output_grid, timeout=timeout, verify=verify, user_charset=user_charset, ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, application=None, session=None, timeout=None, verify=None, user_charset=None, ) -> DataTree: groups_dict = self.open_groups_as_dict( filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, group=group, application=None, session=None, timeout=None, verify=None, user_charset=None, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, application=None, session=None, timeout=None, verify=None, user_charset=None, ) -> dict[str, Dataset]: from xarray.core.treenode import NodePath filename_or_obj = _normalize_path(filename_or_obj) store = PydapDataStore.open( url=filename_or_obj, application=application, session=session, timeout=timeout, verify=verify, user_charset=user_charset, ) # Check for a group and make it a parent if it exists if group: parent = str(NodePath("/") / NodePath(group)) else: parent = str(NodePath("/")) groups_dict = {} group_names = [parent] # construct fully qualified path to group try: # this works for pydap >= 3.5.1 Groups = store.ds[parent].groups() except AttributeError: # THIS IS ONLY NEEDED FOR `pydap == 3.5.0` # `pydap>= 3.5.1` has a new method `groups()` # that returns a dict of group names and their paths def group_fqn(store, path=None, g_fqn=None) -> dict[str, str]: """To be removed for pydap > 
3.5.0. Derives the fully qualifying name of a Group.""" from pydap.model import GroupType if not path: path = "/" # parent if not g_fqn: g_fqn = {} groups = [ store[key].id for key in store.keys() if isinstance(store[key], GroupType) ] for g in groups: g_fqn.update({g: path}) subgroups = [ var for var in store[g] if isinstance(store[g][var], GroupType) ] if len(subgroups) > 0: npath = path + g g_fqn = group_fqn(store[g], npath, g_fqn) return g_fqn Groups = group_fqn(store.ds) group_names += [ str(NodePath(path_to_group) / NodePath(group)) for group, path_to_group in Groups.items() ] for path_group in group_names: # get a group from the store store.group = path_group store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): group_ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds return groups_dict BACKEND_ENTRYPOINTS["pydap"] = ("pydap", PydapBackendEntrypoint) ���������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/scipy_.py����������������������������������������������������������0000664�0000000�0000000�00000031352�15056206164�0020270�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import gzip import io import os from collections.abc import Iterable from typing import TYPE_CHECKING, Any import numpy as np from xarray.backends.common import ( BACKEND_ENTRYPOINTS, BackendArray, BackendEntrypoint, BytesIOProxy, T_PathFileOrDataStore, WritableCFDataStore, _normalize_path, ) from xarray.backends.file_manager import CachingFileManager, DummyFileManager from xarray.backends.locks import ensure_lock, get_write_lock from xarray.backends.netcdf3 import ( encode_nc3_attr_value, encode_nc3_variable, is_valid_nc3_name, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.utils import ( Frozen, FrozenDict, close_on_error, module_available, try_read_magic_number_from_file_or_path, ) from xarray.core.variable import Variable if TYPE_CHECKING: import scipy.io from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset from xarray.core.types import ReadBuffer HAS_NUMPY_2_0 = module_available("numpy", minversion="2.0.0.dev0") def _decode_string(s): if isinstance(s, bytes): return s.decode("utf-8", "replace") return s def _decode_attrs(d): # don't decode _FillValue from bytes -> unicode, because we want to ensure # that its type matches the data exactly return {k: v if k == "_FillValue" else _decode_string(v) for (k, v) in d.items()} class ScipyArrayWrapper(BackendArray): def __init__(self, variable_name, datastore): self.datastore = datastore self.variable_name = variable_name array = self.get_variable().data self.shape = array.shape self.dtype = np.dtype(array.dtype.kind + 
str(array.dtype.itemsize)) def get_variable(self, needs_lock=True): ds = self.datastore._manager.acquire(needs_lock) return ds.variables[self.variable_name] def _getitem(self, key): with self.datastore.lock: data = self.get_variable(needs_lock=False).data return data[key] def __getitem__(self, key): data = indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem ) # Copy data if the source file is mmapped. This makes things consistent # with the netCDF4 library by ensuring we can safely read arrays even # after closing associated files. copy = self.datastore.ds.use_mmap # adapt handling of copy-kwarg to numpy 2.0 # see https://github.com/numpy/numpy/issues/25916 # and https://github.com/numpy/numpy/pull/25922 copy = None if HAS_NUMPY_2_0 and copy is False else copy return np.array(data, dtype=self.dtype, copy=copy) def __setitem__(self, key, value): with self.datastore.lock: data = self.get_variable(needs_lock=False) try: data[key] = value except TypeError: if key is Ellipsis: # workaround for GH: scipy/scipy#6880 data[:] = value else: raise def _open_scipy_netcdf(filename, mode, mmap, version): import scipy.io # if the string ends with .gz, then gunzip and open as netcdf file if isinstance(filename, str) and filename.endswith(".gz"): try: return scipy.io.netcdf_file( gzip.open(filename), mode=mode, mmap=mmap, version=version ) except TypeError as e: # TODO: gzipped loading only works with NetCDF3 files. errmsg = e.args[0] if "is not a valid NetCDF 3 file" in errmsg: raise ValueError( "gzipped file loading only supports NetCDF 3 files." ) from e else: raise try: return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap, version=version) except TypeError as e: # netcdf3 message is obscure in this case errmsg = e.args[0] if "is not a valid NetCDF 3 file" in errmsg: msg = """ If this is a NetCDF4 file, you may need to install the netcdf4 library, e.g., $ pip install netcdf4 """ errmsg += msg raise TypeError(errmsg) from e else: raise class ScipyDataStore(WritableCFDataStore): """Store for reading and writing data via scipy.io.netcdf_file. This store has the advantage of being able to be initialized with a StringIO object, allow for serialization without writing to disk. It only supports the NetCDF3 file-format. 
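    A minimal sketch of the in-memory use case, assuming ``netcdf3_bytes``
    (hypothetical name) holds the contents of a valid NetCDF3 file::

        import io

        store = ScipyDataStore(io.BytesIO(netcdf3_bytes), mode="r")
        variables = store.get_variables()
        store.close()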
""" def __init__( self, filename_or_obj, mode="r", format=None, group=None, mmap=None, lock=None ): if group is not None: raise ValueError("cannot save to a group with the scipy.io.netcdf backend") if format is None or format == "NETCDF3_64BIT": version = 2 elif format == "NETCDF3_CLASSIC": version = 1 else: raise ValueError(f"invalid format for scipy.io.netcdf backend: {format!r}") if lock is None and mode != "r" and isinstance(filename_or_obj, str): lock = get_write_lock(filename_or_obj) self.lock = ensure_lock(lock) if isinstance(filename_or_obj, BytesIOProxy): source = filename_or_obj filename_or_obj = io.BytesIO() source.getvalue = filename_or_obj.getbuffer if isinstance(filename_or_obj, str): # path manager = CachingFileManager( _open_scipy_netcdf, filename_or_obj, mode=mode, lock=lock, kwargs=dict(mmap=mmap, version=version), ) elif hasattr(filename_or_obj, "seek"): # file object # Note: checking for .seek matches the check for file objects # in scipy.io.netcdf_file scipy_dataset = _open_scipy_netcdf( filename_or_obj, mode=mode, mmap=mmap, version=version ) # scipy.io.netcdf_file.close() incorrectly closes file objects that # were passed in as constructor arguments: # https://github.com/scipy/scipy/issues/13905 # Instead of closing such files, only call flush(), which is # equivalent as long as the netcdf_file object is not mmapped. # This suffices to keep BytesIO objects open long enough to read # their contents from to_netcdf(), but underlying files still get # closed when the netcdf_file is garbage collected (via __del__), # and will need to be fixed upstream in scipy. assert not scipy_dataset.use_mmap # no mmap for file objects manager = DummyFileManager(scipy_dataset, close=scipy_dataset.flush) else: raise ValueError( f"cannot open {filename_or_obj=} with scipy.io.netcdf_file" ) self._manager = manager @property def ds(self) -> scipy.io.netcdf_file: return self._manager.acquire() def open_store_variable(self, name, var): return Variable( var.dimensions, indexing.LazilyIndexedArray(ScipyArrayWrapper(name, self)), _decode_attrs(var._attributes), ) def get_variables(self): return FrozenDict( (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items() ) def get_attrs(self): return Frozen(_decode_attrs(self.ds._attributes)) def get_dimensions(self): return Frozen(self.ds.dimensions) def get_encoding(self): return { "unlimited_dims": {k for k, v in self.ds.dimensions.items() if v is None} } def set_dimension(self, name, length, is_unlimited=False): if name in self.ds.dimensions: raise ValueError( f"{type(self).__name__} does not support modifying dimensions" ) dim_length = length if not is_unlimited else None self.ds.createDimension(name, dim_length) def _validate_attr_key(self, key): if not is_valid_nc3_name(key): raise ValueError("Not a valid attribute name") def set_attribute(self, key, value): self._validate_attr_key(key) value = encode_nc3_attr_value(value) setattr(self.ds, key, value) def encode_variable(self, variable, name=None): variable = encode_nc3_variable(variable, name=name) return variable def prepare_variable( self, name, variable, check_encoding=False, unlimited_dims=None ): if ( check_encoding and variable.encoding and variable.encoding != {"_FillValue": None} ): raise ValueError( f"unexpected encoding for scipy backend: {list(variable.encoding)}" ) data = variable.data # nb. this still creates a numpy array in all memory, even though we # don't write the data yet; scipy.io.netcdf does not support incremental # writes. 
if name not in self.ds.variables: self.ds.createVariable(name, data.dtype, variable.dims) scipy_var = self.ds.variables[name] for k, v in variable.attrs.items(): self._validate_attr_key(k) setattr(scipy_var, k, v) target = ScipyArrayWrapper(name, self) return target, data def sync(self): self.ds.sync() def close(self): self._manager.close() def _normalize_filename_or_obj( filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, ) -> str | ReadBuffer | AbstractDataStore: if isinstance(filename_or_obj, bytes | memoryview): return io.BytesIO(filename_or_obj) else: return _normalize_path(filename_or_obj) # type: ignore[return-value] class ScipyBackendEntrypoint(BackendEntrypoint): """ Backend for netCDF files based on the scipy package. It can open ".nc", ".nc4", ".cdf" and ".gz" files but will only be selected as the default if the "netcdf4" and "h5netcdf" engines are not available. It has the advantage that it is a lightweight engine that has no system requirements (unlike netcdf4 and h5netcdf). Additionally it can open gzip compressed (".gz") files. For more information about the underlying library, visit: https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.html See Also -------- backends.ScipyDataStore backends.NetCDF4BackendEntrypoint backends.H5netcdfBackendEntrypoint """ description = "Open netCDF files (.nc, .nc4, .cdf and .gz) using scipy in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ScipyBackendEntrypoint.html" def guess_can_open( self, filename_or_obj: T_PathFileOrDataStore, ) -> bool: filename_or_obj = _normalize_filename_or_obj(filename_or_obj) magic_number = try_read_magic_number_from_file_or_path(filename_or_obj) if magic_number is not None and magic_number.startswith(b"\x1f\x8b"): with gzip.open(filename_or_obj) as f: # type: ignore[arg-type] magic_number = try_read_magic_number_from_file_or_path(f) if magic_number is not None: return magic_number.startswith(b"CDF") if isinstance(filename_or_obj, str | os.PathLike): _, ext = os.path.splitext(filename_or_obj) return ext in {".nc", ".nc4", ".cdf", ".gz"} return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, mode="r", format=None, group=None, mmap=None, lock=None, ) -> Dataset: filename_or_obj = _normalize_filename_or_obj(filename_or_obj) store = ScipyDataStore( filename_or_obj, mode=mode, format=format, group=group, mmap=mmap, lock=lock ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds BACKEND_ENTRYPOINTS["scipy"] = ("scipy", ScipyBackendEntrypoint) 
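# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the upstream
# module): a minimal round trip through the public xarray API that exercises
# this scipy-based backend. The file name "example_scipy.nc" is hypothetical,
# and the __main__ guard keeps the example from running on import.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import xarray as xr

    ds_example = xr.Dataset({"t": ("x", np.arange(3.0))})
    # engine="scipy" restricts output to the NetCDF3 formats handled by
    # ScipyDataStore (NETCDF3_CLASSIC or NETCDF3_64BIT).
    ds_example.to_netcdf("example_scipy.nc", engine="scipy", format="NETCDF3_CLASSIC")
    # Reading back with engine="scipy" dispatches to ScipyBackendEntrypoint.
    roundtripped = xr.open_dataset("example_scipy.nc", engine="scipy")
    print(roundtripped)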
��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/store.py�����������������������������������������������������������0000664�0000000�0000000�00000004453�15056206164�0020140�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Iterable from typing import TYPE_CHECKING from xarray import conventions from xarray.backends.common import ( BACKEND_ENTRYPOINTS, AbstractDataStore, BackendEntrypoint, T_PathFileOrDataStore, ) from xarray.core.coordinates import Coordinates from xarray.core.dataset import Dataset if TYPE_CHECKING: pass class StoreBackendEntrypoint(BackendEntrypoint): description = "Open AbstractDataStore instances in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.StoreBackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: return isinstance(filename_or_obj, AbstractDataStore) def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, set_indexes: bool = True, use_cftime=None, decode_timedelta=None, ) -> Dataset: assert isinstance(filename_or_obj, AbstractDataStore) vars, attrs = filename_or_obj.load() encoding = filename_or_obj.get_encoding() vars, attrs, coord_names = conventions.decode_cf_variables( vars, attrs, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) # split data and coordinate variables (promote dimension coordinates) data_vars = {} coord_vars = {} for name, var in vars.items(): if name in coord_names or var.dims == (name,): coord_vars[name] = var else: data_vars[name] = var # explicit Coordinates object with no index passed coords = Coordinates(coord_vars, indexes={}) ds = Dataset(data_vars, coords=coords, attrs=attrs) ds.set_close(filename_or_obj.close) ds.encoding = encoding return ds BACKEND_ENTRYPOINTS["store"] = (None, StoreBackendEntrypoint) ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/backends/zarr.py������������������������������������������������������������0000664�0000000�0000000�00000215677�15056206164�0017776�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import base64 import json import os import struct from collections.abc import Hashable, 
Iterable, Mapping from typing import TYPE_CHECKING, Any, Literal, Self, cast import numpy as np import pandas as pd from xarray import coding, conventions from xarray.backends.chunks import grid_rechunk, validate_grid_chunks_alignment from xarray.backends.common import ( BACKEND_ENTRYPOINTS, AbstractWritableDataStore, BackendArray, BackendEntrypoint, T_PathFileOrDataStore, _encode_variable_name, _normalize_path, datatree_from_dict_with_io_cleanup, ensure_dtype_not_object, ) from xarray.backends.store import StoreBackendEntrypoint from xarray.core import indexing from xarray.core.treenode import NodePath from xarray.core.types import ZarrWriteModes from xarray.core.utils import ( FrozenDict, HiddenKeyDict, attempt_import, close_on_error, emit_user_level_warning, ) from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import guess_chunkmanager from xarray.namedarray.pycompat import integer_types from xarray.namedarray.utils import module_available if TYPE_CHECKING: from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import ZarrArray, ZarrGroup def _get_mappers(*, storage_options, store, chunk_store): # expand str and path-like arguments store = _normalize_path(store) chunk_store = _normalize_path(chunk_store) kwargs = {} if storage_options is None: mapper = store chunk_mapper = chunk_store else: if not isinstance(store, str): raise ValueError( f"store must be a string to use storage_options. Got {type(store)}" ) if _zarr_v3(): kwargs["storage_options"] = storage_options mapper = store chunk_mapper = chunk_store else: from fsspec import get_mapper mapper = get_mapper(store, **storage_options) if chunk_store is not None: chunk_mapper = get_mapper(chunk_store, **storage_options) else: chunk_mapper = chunk_store return kwargs, mapper, chunk_mapper def _choose_default_mode( *, mode: ZarrWriteModes | None, append_dim: Hashable | None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None, ) -> ZarrWriteModes: if mode is None: if append_dim is not None: mode = "a" elif region is not None: mode = "r+" else: mode = "w-" if mode not in ["a", "a-"] and append_dim is not None: raise ValueError("cannot set append_dim unless mode='a' or mode=None") if mode not in ["a", "a-", "r+"] and region is not None: raise ValueError( "cannot set region unless mode='a', mode='a-', mode='r+' or mode=None" ) if mode not in ["w", "w-", "a", "a-", "r+"]: raise ValueError( "The only supported options for mode are 'w', " f"'w-', 'a', 'a-', and 'r+', but mode={mode!r}" ) return mode def _zarr_v3() -> bool: return module_available("zarr", minversion="3") # need some special secret attributes to tell us the dimensions DIMENSION_KEY = "_ARRAY_DIMENSIONS" ZarrFormat = Literal[2, 3] class FillValueCoder: """Handle custom logic to safely encode and decode fill values in Zarr. Possibly redundant with logic in xarray/coding/variables.py but needs to be isolated from NetCDF-specific logic. """ @classmethod def encode(cls, value: int | float | str | bytes, dtype: np.dtype[Any]) -> Any: if dtype.kind in "S": # byte string, this implies that 'value' must also be `bytes` dtype. assert isinstance(value, bytes) return base64.standard_b64encode(value).decode() elif dtype.kind in "b": # boolean return bool(value) elif dtype.kind in "iu": # todo: do we want to check for decimals? 
return int(value) elif dtype.kind in "f": return base64.standard_b64encode(struct.pack(" list scalar array -> scalar other -> other (no change) """ if isinstance(value, np.ndarray): encoded = value.tolist() elif isinstance(value, np.generic): encoded = value.item() else: encoded = value return encoded def has_zarr_async_index() -> bool: try: import zarr return hasattr(zarr.AsyncArray, "oindex") except (ImportError, AttributeError): return False class ZarrArrayWrapper(BackendArray): __slots__ = ("_array", "dtype", "shape") def __init__(self, zarr_array): # some callers attempt to evaluate an array if an `array` property exists on the object. # we prefix with _ to avoid this inference. # TODO type hint this? self._array = zarr_array self.shape = self._array.shape # preserve vlen string object dtype (GH 7328) if ( not _zarr_v3() and self._array.filters is not None and any(filt.codec_id == "vlen-utf8" for filt in self._array.filters) ): dtype = coding.strings.create_vlen_dtype(str) else: dtype = self._array.dtype self.dtype = dtype def get_array(self): return self._array def _oindex(self, key): return self._array.oindex[key] def _vindex(self, key): return self._array.vindex[key] def _getitem(self, key): return self._array[key] async def _async_getitem(self, key): if not _zarr_v3(): raise NotImplementedError( "For lazy basic async indexing with zarr, zarr-python=>v3.0.0 is required" ) async_array = self._array._async_array return await async_array.getitem(key) async def _async_oindex(self, key): if not has_zarr_async_index(): raise NotImplementedError( "For lazy orthogonal async indexing with zarr, zarr-python=>v3.1.2 is required" ) async_array = self._array._async_array return await async_array.oindex.getitem(key) async def _async_vindex(self, key): if not has_zarr_async_index(): raise NotImplementedError( "For lazy vectorized async indexing with zarr, zarr-python=>v3.1.2 is required" ) async_array = self._array._async_array return await async_array.vindex.getitem(key) def __getitem__(self, key): array = self._array if isinstance(key, indexing.BasicIndexer): method = self._getitem elif isinstance(key, indexing.VectorizedIndexer): method = self._vindex elif isinstance(key, indexing.OuterIndexer): method = self._oindex return indexing.explicit_indexing_adapter( key, array.shape, indexing.IndexingSupport.VECTORIZED, method ) # if self.ndim == 0: # could possibly have a work-around for 0d data here async def async_getitem(self, key): array = self._array if isinstance(key, indexing.BasicIndexer): method = self._async_getitem elif isinstance(key, indexing.VectorizedIndexer): method = self._async_vindex elif isinstance(key, indexing.OuterIndexer): method = self._async_oindex return await indexing.async_explicit_indexing_adapter( key, array.shape, indexing.IndexingSupport.VECTORIZED, method ) def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name): """ Given encoding chunks (possibly None or []) and variable chunks (possibly None or []). """ # zarr chunk spec: # chunks : int or tuple of ints, optional # Chunk shape. If not provided, will be guessed from shape and dtype. 
# if there are no chunks in encoding and the variable data is a numpy # array, then we let zarr use its own heuristics to pick the chunks if not var_chunks and not enc_chunks: return None # if there are no chunks in encoding but there are dask chunks, we try to # use the same chunks in zarr # However, zarr chunks needs to be uniform for each array # https://zarr-specs.readthedocs.io/en/latest/v2/v2.0.html#chunks # while dask chunks can be variable sized # https://dask.pydata.org/en/latest/array-design.html#chunks if var_chunks and not enc_chunks: if any(len(set(chunks[:-1])) > 1 for chunks in var_chunks): raise ValueError( "Zarr requires uniform chunk sizes except for final chunk. " f"Variable named {name!r} has incompatible dask chunks: {var_chunks!r}. " "Consider rechunking using `chunk()`." ) if any((chunks[0] < chunks[-1]) for chunks in var_chunks): raise ValueError( "Final chunk of Zarr array must be the same size or smaller " f"than the first. Variable named {name!r} has incompatible Dask chunks {var_chunks!r}." "Consider either rechunking using `chunk()` or instead deleting " "or modifying `encoding['chunks']`." ) # return the first chunk for each dimension return tuple(chunk[0] for chunk in var_chunks) # From here on, we are dealing with user-specified chunks in encoding # zarr allows chunks to be an integer, in which case it uses the same chunk # size on each dimension. # Here we re-implement this expansion ourselves. That makes the logic of # checking chunk compatibility easier if isinstance(enc_chunks, integer_types): enc_chunks_tuple = ndim * (enc_chunks,) else: enc_chunks_tuple = tuple(enc_chunks) if len(enc_chunks_tuple) != ndim: # throw away encoding chunks, start over return _determine_zarr_chunks( None, var_chunks, ndim, name, ) for x in enc_chunks_tuple: if not isinstance(x, int): raise TypeError( "zarr chunk sizes specified in `encoding['chunks']` " "must be an int or a tuple of ints. " f"Instead found encoding['chunks']={enc_chunks_tuple!r} " f"for variable named {name!r}." ) # if there are chunks in encoding and the variable data is a numpy array, # we use the specified chunks if not var_chunks: return enc_chunks_tuple return enc_chunks_tuple def _get_zarr_dims_and_attrs(zarr_obj, dimension_key, try_nczarr): # Zarr V3 explicitly stores the dimension names in the metadata try: # if this exists, we are looking at a Zarr V3 array # convert None to empty tuple dimensions = zarr_obj.metadata.dimension_names or () except AttributeError: # continue to old code path pass else: attributes = dict(zarr_obj.attrs) if len(zarr_obj.shape) != len(dimensions): raise KeyError( "Zarr object is missing the `dimension_names` metadata which is " "required for xarray to determine variable dimensions." ) return dimensions, attributes # Zarr arrays do not have dimensions. To get around this problem, we add # an attribute that specifies the dimension. We have to hide this attribute # when we send the attributes to the user. # zarr_obj can be either a zarr group or zarr array try: # Xarray-Zarr dimensions = zarr_obj.attrs[dimension_key] except KeyError as e: if not try_nczarr: raise KeyError( f"Zarr object is missing the attribute `{dimension_key}`, which is " "required for xarray to determine variable dimensions." 
) from e # NCZarr defines dimensions through metadata in .zarray zarray_path = os.path.join(zarr_obj.path, ".zarray") if _zarr_v3(): import asyncio zarray_str = asyncio.run(zarr_obj.store.get(zarray_path)).to_bytes() else: zarray_str = zarr_obj.store.get(zarray_path) zarray = json.loads(zarray_str) try: # NCZarr uses Fully Qualified Names dimensions = [ os.path.basename(dim) for dim in zarray["_NCZARR_ARRAY"]["dimrefs"] ] except KeyError as e: raise KeyError( f"Zarr object is missing the attribute `{dimension_key}` and the NCZarr metadata, " "which are required for xarray to determine variable dimensions." ) from e nc_attrs = [attr for attr in zarr_obj.attrs if attr.lower().startswith("_nc")] attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key] + nc_attrs) return dimensions, attributes def extract_zarr_variable_encoding( variable, raise_on_invalid=False, name=None, *, zarr_format: ZarrFormat, ): """ Extract zarr encoding dictionary from xarray Variable Parameters ---------- variable : Variable raise_on_invalid : bool, optional name: str | Hashable, optional zarr_format: Literal[2,3] Returns ------- encoding : dict Zarr encoding for `variable` """ encoding = variable.encoding.copy() safe_to_drop = {"source", "original_shape", "preferred_chunks"} valid_encodings = { "chunks", "shards", "compressor", # TODO: delete when min zarr >=3 "compressors", "filters", "serializer", "cache_metadata", "write_empty_chunks", "chunk_key_encoding", } if zarr_format == 3: valid_encodings.add("fill_value") for k in safe_to_drop: if k in encoding: del encoding[k] if raise_on_invalid: invalid = [k for k in encoding if k not in valid_encodings] if "fill_value" in invalid and zarr_format == 2: msg = " Use `_FillValue` to set the Zarr array `fill_value`" else: msg = "" if invalid: raise ValueError( f"unexpected encoding parameters for zarr backend: {invalid!r}." + msg ) else: for k in list(encoding): if k not in valid_encodings: del encoding[k] chunks = _determine_zarr_chunks( enc_chunks=encoding.get("chunks"), var_chunks=variable.chunks, ndim=variable.ndim, name=name, ) if _zarr_v3() and chunks is None: chunks = "auto" encoding["chunks"] = chunks return encoding # Function below is copied from conventions.encode_cf_variable. # The only change is to raise an error for object dtypes. def encode_zarr_variable(var, needs_copy=True, name=None): """ Converts an Variable into an Variable which follows some of the CF conventions: - Nans are masked using _FillValue (or the deprecated missing_value) - Rescaling via: scale_factor and add_offset - datetimes are converted to the CF 'units since time' format - dtype encodings are enforced. Parameters ---------- var : Variable A variable holding un-encoded data. Returns ------- out : Variable A variable which has been encoded as described above. """ var = conventions.encode_cf_variable(var, name=name) var = ensure_dtype_not_object(var, name=name) # zarr allows unicode, but not variable-length strings, so it's both # simpler and more compact to always encode as UTF-8 explicitly. # TODO: allow toggling this explicitly via dtype in encoding. # TODO: revisit this now that Zarr _does_ allow variable-length strings coder = coding.strings.EncodedStringCoder(allows_unicode=True) var = coder.encode(var, name=name) var = coding.strings.ensure_fixed_length_bytes(var) return var def _validate_datatypes_for_zarr_append(vname, existing_var, new_var): """If variable exists in the store, confirm dtype of the data to append is compatible with existing dtype. 
""" if ( np.issubdtype(new_var.dtype, np.number) or np.issubdtype(new_var.dtype, np.datetime64) or np.issubdtype(new_var.dtype, np.bool_) or new_var.dtype == object or (new_var.dtype.kind in ("S", "U") and existing_var.dtype == object) ): # We can skip dtype equality checks under two conditions: (1) if the var to append is # new to the dataset, because in this case there is no existing var to compare it to; # or (2) if var to append's dtype is known to be easy-to-append, because in this case # we can be confident appending won't cause problems. Examples of dtypes which are not # easy-to-append include length-specified strings of type `|S*` or ` Self: zarr_group = self.zarr_group.require_group(group) return type(self)( zarr_group=zarr_group, mode=self._mode, consolidate_on_close=self._consolidate_on_close, append_dim=self._append_dim, write_region=self._write_region, safe_chunks=self._safe_chunks, write_empty=self._write_empty, close_store_on_close=self._close_store_on_close, use_zarr_fill_value_as_mask=self._use_zarr_fill_value_as_mask, align_chunks=self._align_chunks, cache_members=self._cache_members, ) @property def members(self) -> dict[str, ZarrArray | ZarrGroup]: """ Model the arrays and groups contained in self.zarr_group as a dict. If `self._cache_members` is true, the dict is cached. Otherwise, it is retrieved from storage. """ if not self._cache_members: return self._fetch_members() else: return self._members def _fetch_members(self) -> dict[str, ZarrArray | ZarrGroup]: """ Get the arrays and groups defined in the zarr group modelled by this Store """ import zarr if zarr.__version__ >= "3": return dict(self.zarr_group.members()) else: return dict(self.zarr_group.items()) def array_keys(self) -> tuple[str, ...]: from zarr import Array as ZarrArray return tuple( key for (key, node) in self.members.items() if isinstance(node, ZarrArray) ) def arrays(self) -> tuple[tuple[str, ZarrArray], ...]: from zarr import Array as ZarrArray return tuple( (key, node) for (key, node) in self.members.items() if isinstance(node, ZarrArray) ) @property def ds(self): # TODO: consider deprecating this in favor of zarr_group return self.zarr_group def open_store_variable(self, name): zarr_array = self.members[name] data = indexing.LazilyIndexedArray(ZarrArrayWrapper(zarr_array)) try_nczarr = self._mode == "r" dimensions, attributes = _get_zarr_dims_and_attrs( zarr_array, DIMENSION_KEY, try_nczarr ) attributes = dict(attributes) encoding = { "chunks": zarr_array.chunks, "preferred_chunks": dict(zip(dimensions, zarr_array.chunks, strict=True)), } if _zarr_v3(): encoding.update( { "compressors": zarr_array.compressors, "filters": zarr_array.filters, "shards": zarr_array.shards, } ) if self.zarr_group.metadata.zarr_format == 3: encoding.update({"serializer": zarr_array.serializer}) else: encoding.update( { "compressor": zarr_array.compressor, "filters": zarr_array.filters, } ) if self._use_zarr_fill_value_as_mask: # Setting this attribute triggers CF decoding for missing values # by interpreting Zarr's fill_value to mean the same as netCDF's _FillValue if zarr_array.fill_value is not None: attributes["_FillValue"] = zarr_array.fill_value elif "_FillValue" in attributes: attributes["_FillValue"] = FillValueCoder.decode( attributes["_FillValue"], zarr_array.dtype ) return Variable(dimensions, data, attributes, encoding) def get_variables(self): return FrozenDict((k, self.open_store_variable(k)) for k in self.array_keys()) def get_attrs(self): return { k: v for k, v in self.zarr_group.attrs.asdict().items() if 
not k.lower().startswith("_nc") } def get_dimensions(self): try_nczarr = self._mode == "r" dimensions = {} for _k, v in self.arrays(): dim_names, _ = _get_zarr_dims_and_attrs(v, DIMENSION_KEY, try_nczarr) for d, s in zip(dim_names, v.shape, strict=True): if d in dimensions and dimensions[d] != s: raise ValueError( f"found conflicting lengths for dimension {d} " f"({s} != {dimensions[d]})" ) dimensions[d] = s return dimensions def set_dimensions(self, variables, unlimited_dims=None): if unlimited_dims is not None: raise NotImplementedError( "Zarr backend doesn't know how to handle unlimited dimensions" ) def set_attributes(self, attributes): _put_attrs(self.zarr_group, attributes) def encode_variable(self, variable, name=None): variable = encode_zarr_variable(variable, name=name) return variable def encode_attribute(self, a): return encode_zarr_attr_value(a) def store( self, variables, attributes, check_encoding_set=frozenset(), writer=None, unlimited_dims=None, ): """ Top level method for putting data on this store, this method: - encodes variables/attributes - sets dimensions - sets variables Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs attributes : dict-like Dictionary of key/value (attribute name / attribute) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer : ArrayWriter unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. dimension on which the zarray will be appended only needed in append mode """ if TYPE_CHECKING: import zarr else: zarr = attempt_import("zarr") if self._mode == "w": # always overwrite, so we don't care about existing names, # and consistency of encoding new_variable_names = set(variables) existing_keys = {} existing_variable_names = {} else: existing_keys = self.array_keys() existing_variable_names = { vn for vn in variables if _encode_variable_name(vn) in existing_keys } new_variable_names = set(variables) - existing_variable_names if self._mode == "r+" and ( new_names := [k for k in variables if k not in existing_keys] ): raise ValueError( f"dataset contains non-pre-existing variables {new_names!r}, " "which is not allowed in ``xarray.Dataset.to_zarr()`` with " "``mode='r+'``. To allow writing new variables, set ``mode='a'``." ) if self._append_dim is not None and self._append_dim not in existing_keys: # For dimensions without coordinate values, we must parse # the _ARRAY_DIMENSIONS attribute on *all* arrays to check if it # is a valid existing dimension name. # TODO: This `get_dimensions` method also does shape checking # which isn't strictly necessary for our check. existing_dims = self.get_dimensions() if self._append_dim not in existing_dims: raise ValueError( f"append_dim={self._append_dim!r} does not match any existing " f"dataset dimensions {existing_dims}" ) variables_encoded, attributes = self.encode( {vn: variables[vn] for vn in new_variable_names}, attributes ) if existing_variable_names: # We make sure that values to be appended are encoded *exactly* # as the current values in the store. # To do so, we decode variables directly to access the proper encoding, # without going via xarray.Dataset to avoid needing to load # index variables into memory. 
existing_vars, _, _ = conventions.decode_cf_variables( variables={ k: self.open_store_variable(name=k) for k in existing_variable_names }, # attributes = {} since we don't care about parsing the global # "coordinates" attribute attributes={}, ) # Modified variables must use the same encoding as the store. vars_with_encoding = {} for vn in existing_variable_names: _validate_datatypes_for_zarr_append( vn, existing_vars[vn], variables[vn] ) vars_with_encoding[vn] = variables[vn].copy(deep=False) vars_with_encoding[vn].encoding = existing_vars[vn].encoding vars_with_encoding, _ = self.encode(vars_with_encoding, {}) variables_encoded.update(vars_with_encoding) for var_name in existing_variable_names: variables_encoded[var_name] = _validate_and_transpose_existing_dims( var_name, variables_encoded[var_name], existing_vars[var_name], self._write_region, self._append_dim, ) if self._mode not in ["r", "r+"]: self.set_attributes(attributes) self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims) # if we are appending to an append_dim, only write either # - new variables not already present, OR # - variables with the append_dim in their dimensions # We do NOT overwrite other variables. if self._mode == "a-" and self._append_dim is not None: variables_to_set = { k: v for k, v in variables_encoded.items() if (k not in existing_variable_names) or (self._append_dim in v.dims) } else: variables_to_set = variables_encoded self.set_variables( variables_to_set, check_encoding_set, writer, unlimited_dims=unlimited_dims ) if self._consolidate_on_close: kwargs = {} if _zarr_v3(): kwargs["zarr_format"] = self.zarr_group.metadata.zarr_format zarr.consolidate_metadata(self.zarr_group.store, **kwargs) def _open_existing_array(self, *, name) -> ZarrArray: import zarr from zarr import Array as ZarrArray # TODO: if mode="a", consider overriding the existing variable # metadata. This would need some case work properly with region # and append_dim. if self._write_empty is not None: # Write to zarr_group.chunk_store instead of zarr_group.store # See https://github.com/pydata/xarray/pull/8326#discussion_r1365311316 for a longer explanation # The open_consolidated() enforces a mode of r or r+ # (and to_zarr with region provided enforces a read mode of r+), # and this function makes sure the resulting Group has a store of type ConsolidatedMetadataStore # and a 'normal Store subtype for chunk_store. # The exact type depends on if a local path was used, or a URL of some sort, # but the point is that it's not a read-only ConsolidatedMetadataStore. # It is safe to write chunk data to the chunk_store because no metadata would be changed by # to_zarr with the region parameter: # - Because the write mode is enforced to be r+, no new variables can be added to the store # (this is also checked and enforced in xarray.backends.api.py::to_zarr()). # - Existing variables already have their attrs included in the consolidated metadata file. # - The size of dimensions can not be expanded, that would require a call using `append_dim` # which is mutually exclusive with `region` empty: dict[str, bool] | dict[str, dict[str, bool]] if _zarr_v3(): empty = dict(config={"write_empty_chunks": self._write_empty}) else: empty = dict(write_empty_chunks=self._write_empty) zarr_array = zarr.open( store=( self.zarr_group.store if _zarr_v3() else self.zarr_group.chunk_store ), # TODO: see if zarr should normalize these strings. 
path="/".join([self.zarr_group.name.rstrip("/"), name]).lstrip("/"), **empty, ) else: zarr_array = self.zarr_group[name] return cast(ZarrArray, zarr_array) def _create_new_array( self, *, name, shape, dtype, fill_value, encoding, attrs ) -> ZarrArray: if coding.strings.check_vlen_dtype(dtype) is str: dtype = str if self._write_empty is not None: if ( "write_empty_chunks" in encoding and encoding["write_empty_chunks"] != self._write_empty ): raise ValueError( 'Differing "write_empty_chunks" values in encoding and parameters' f'Got {encoding["write_empty_chunks"] = } and {self._write_empty = }' ) else: encoding["write_empty_chunks"] = self._write_empty if _zarr_v3(): # zarr v3 deprecated origin and write_empty_chunks # instead preferring to pass them via the config argument encoding["config"] = {} for c in ("write_empty_chunks", "order"): if c in encoding: encoding["config"][c] = encoding.pop(c) zarr_array = self.zarr_group.create( name, shape=shape, dtype=dtype, fill_value=fill_value, **encoding, ) zarr_array = _put_attrs(zarr_array, attrs) return zarr_array def set_variables( self, variables: dict[str, Variable], check_encoding_set, writer, unlimited_dims=None, ): """ This provides a centralized method to set the variables on the data store. Parameters ---------- variables : dict-like Dictionary of key/value (variable name / xr.Variable) pairs check_encoding_set : list-like List of variables that should be checked for invalid encoding values writer unlimited_dims : list-like List of dimension names that should be treated as unlimited dimensions. """ existing_keys = self.array_keys() is_zarr_v3_format = _zarr_v3() and self.zarr_group.metadata.zarr_format == 3 for vn, v in variables.items(): name = _encode_variable_name(vn) attrs = v.attrs.copy() dims = v.dims dtype = v.dtype shape = v.shape if self._use_zarr_fill_value_as_mask: fill_value = attrs.pop("_FillValue", None) else: fill_value = v.encoding.pop("fill_value", None) if "_FillValue" in attrs: # replace with encoded fill value fv = attrs.pop("_FillValue") if fv is not None: attrs["_FillValue"] = FillValueCoder.encode(fv, dtype) # _FillValue is never a valid encoding for Zarr # TODO: refactor this logic so we don't need to check this here if "_FillValue" in v.encoding: if v.encoding.get("_FillValue") is not None: raise ValueError("Zarr does not support _FillValue in encoding.") else: del v.encoding["_FillValue"] zarr_shape = None write_region = self._write_region if self._write_region is not None else {} write_region = {dim: write_region.get(dim, slice(None)) for dim in dims} if self._mode != "w" and name in existing_keys: # existing variable zarr_array = self._open_existing_array(name=name) if self._append_dim is not None and self._append_dim in dims: # resize existing variable append_axis = dims.index(self._append_dim) assert write_region[self._append_dim] == slice(None) write_region[self._append_dim] = slice( zarr_array.shape[append_axis], None ) new_shape = ( zarr_array.shape[:append_axis] + (zarr_array.shape[append_axis] + v.shape[append_axis],) + zarr_array.shape[append_axis + 1 :] ) zarr_array.resize(new_shape) zarr_shape = zarr_array.shape region = tuple(write_region[dim] for dim in dims) # We need to do this for both new and existing variables to ensure we're not # writing to a partial chunk, even though we don't use the `encoding` value # when writing to an existing variable. See # https://github.com/pydata/xarray/issues/8371 for details. 
# Note: Ideally there should be two functions, one for validating the chunks and # another one for extracting the encoding. encoding = extract_zarr_variable_encoding( v, raise_on_invalid=vn in check_encoding_set, name=vn, zarr_format=3 if is_zarr_v3_format else 2, ) if self._align_chunks and isinstance(encoding["chunks"], tuple): v = grid_rechunk( v=v, enc_chunks=encoding["chunks"], region=region, ) if self._safe_chunks and isinstance(encoding["chunks"], tuple): # the hard case # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk # this avoids the need to get involved in zarr synchronization / locking # From zarr docs: # "If each worker in a parallel computation is writing to a # separate region of the array, and if region boundaries are perfectly aligned # with chunk boundaries, then no synchronization is required." # TODO: incorporate synchronizer to allow writes from multiple dask # threads shape = zarr_shape or v.shape validate_grid_chunks_alignment( nd_var_chunks=v.chunks, enc_chunks=encoding["chunks"], region=region, allow_partial_chunks=self._mode != "r+", name=name, backend_shape=shape, ) if self._mode == "w" or name not in existing_keys: # new variable encoded_attrs = {k: self.encode_attribute(v) for k, v in attrs.items()} # the magic for storing the hidden dimension data if is_zarr_v3_format: encoding["dimension_names"] = dims else: encoded_attrs[DIMENSION_KEY] = dims encoding["overwrite"] = self._mode == "w" zarr_array = self._create_new_array( name=name, dtype=dtype, shape=shape, fill_value=fill_value, encoding=encoding, attrs=encoded_attrs, ) writer.add(v.data, zarr_array, region) def sync(self) -> None: pass def close(self) -> None: if self._close_store_on_close: self.zarr_group.store.close() def _auto_detect_regions(self, ds, region): for dim, val in region.items(): if val != "auto": continue if dim not in ds._variables: # unindexed dimension region[dim] = slice(0, ds.sizes[dim]) continue variable = conventions.decode_cf_variable( dim, self.open_store_variable(dim).compute() ) assert variable.dims == (dim,) index = pd.Index(variable.data) idxs = index.get_indexer(ds[dim].data) if (idxs == -1).any(): raise KeyError( f"Not all values of coordinate '{dim}' in the new array were" " found in the original store. Writing to a zarr region slice" " requires that no dimensions or metadata are changed by the write." ) if (np.diff(idxs) != 1).any(): raise ValueError( f"The auto-detected region of coordinate '{dim}' for writing new data" " to the original store had non-contiguous indices. Writing to a zarr" " region slice requires that the new data constitute a contiguous subset" " of the original store." ) region[dim] = slice(idxs[0], idxs[-1] + 1) return region def _validate_and_autodetect_region(self, ds: Dataset) -> Dataset: if self._write_region is None: return ds region = self._write_region if region == "auto": region = dict.fromkeys(ds.dims, "auto") if not isinstance(region, dict): raise TypeError(f"``region`` must be a dict, got {type(region)}") if any(v == "auto" for v in region.values()): if self._mode not in ["r+", "a"]: raise ValueError( f"``mode`` must be 'r+' or 'a' when using ``region='auto'``, got {self._mode!r}" ) region = self._auto_detect_regions(ds, region) # validate before attempting to auto-detect since the auto-detection # should always return a valid slice. 
for k, v in region.items(): if k not in ds.dims: raise ValueError( f"all keys in ``region`` are not in Dataset dimensions, got " f"{list(region)} and {list(ds.dims)}" ) if not isinstance(v, slice): raise TypeError( "all values in ``region`` must be slice objects, got " f"region={region}" ) if v.step not in {1, None}: raise ValueError( "step on all slices in ``region`` must be 1 or None, got " f"region={region}" ) non_matching_vars = [ k for k, v in ds.variables.items() if not set(region).intersection(v.dims) ] if non_matching_vars: raise ValueError( f"when setting `region` explicitly in to_zarr(), all " f"variables in the dataset to write must have at least " f"one dimension in common with the region's dimensions " f"{list(region.keys())}, but that is not " f"the case for some variables here. To drop these variables " f"from this dataset before exporting to zarr, write: " f".drop_vars({non_matching_vars!r})" ) if self._append_dim is not None and self._append_dim in region: raise ValueError( f"cannot list the same dimension in both ``append_dim`` and " f"``region`` with to_zarr(), got {self._append_dim} in both" ) self._write_region = region # can't modify indexes with region writes return ds.drop_vars(ds.indexes) def _validate_encoding(self, encoding) -> None: if encoding and self._mode in ["a", "a-", "r+"]: existing_var_names = self.array_keys() for var_name in existing_var_names: if var_name in encoding: raise ValueError( f"variable {var_name!r} already exists, but encoding was provided" ) def open_zarr( store, group=None, synchronizer=None, chunks="auto", decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=None, overwrite_encoded_chunks=False, chunk_store=None, storage_options=None, decode_timedelta=None, use_cftime=None, zarr_version=None, zarr_format=None, use_zarr_fill_value_as_mask=None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, create_default_indexes=True, **kwargs, ): """Load and decode a dataset from a Zarr store. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute or must have NCZarr format. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, optional Group path. (a.k.a. `path` in zarr terminology.) chunks : int, dict, 'auto' or None, default: 'auto' If provided, used to load the data into dask arrays. - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the engine preferred chunks. - ``chunks=None`` skips using dask. This uses xarray's internally private :ref:`lazy indexing classes `, but data is eagerly loaded into memory as numpy arrays when accessed. This can be more efficient for smaller arrays, though results may vary. - ``chunks=-1`` loads the data with dask using a single chunk for all arrays. - ``chunks={}`` loads the data with dask using engine preferred chunks if exposed by the backend, otherwise with a single chunk for all arrays. See dask chunking for more details. 
overwrite_encoded_chunks : bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. By default (`consolidate=None`), attempts to read consolidated metadata, falling back to read non-consolidated metadata if that fails. When the experimental ``zarr_version=3``, ``consolidated`` must be either be ``None`` or ``False``. chunk_store : MutableMapping, optional A separate Zarr store only for chunk data. storage_options : dict, optional Any additional parameters for the storage backend (ignored for local paths). decode_timedelta : bool, optional If True, decode variables and coordinates with time units in {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. zarr_version : int or None, optional .. deprecated:: 2024.9.1 Use ``zarr_format`` instead. zarr_format : int or None, optional The desired zarr format to target (currently 2 or 3). The default of None will attempt to determine the zarr version from ``store`` when possible, otherwise defaulting to the default version used by the zarr-python library installed. use_zarr_fill_value_as_mask : bool, optional If True, use the zarr Array ``fill_value`` to mask the data, the same as done for NetCDF data with ``_FillValue`` or ``missing_value`` attributes. 
If False, the ``fill_value`` is ignored and the data are not masked. If None, this defaults to True for ``zarr_version=2`` and False for ``zarr_version=3``. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the ``ChunkManagerEntrypoint.from_array`` method used to create chunked arrays, via whichever chunk manager is specified through the ``chunked_array_type`` kwarg. Defaults to ``{'manager': 'dask'}``, meaning additional kwargs will be passed eventually to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. create_default_indexes : bool, default: True If True, create pandas indexes for :term:`dimension coordinates `, which loads the coordinate data into memory. Set it to False if you want to avoid loading data into memory. Note that backends can still choose to create other indexes. If you want to control that, please refer to the backend's documentation. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset open_mfdataset References ---------- https://zarr.readthedocs.io/ """ from xarray.backends.api import open_dataset if from_array_kwargs is None: from_array_kwargs = {} if chunks == "auto": try: guess_chunkmanager( chunked_array_type ) # attempt to import that parallel backend chunks = {} except (ValueError, ImportError): chunks = None if kwargs: raise TypeError( "open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys()) ) backend_kwargs = { "synchronizer": synchronizer, "consolidated": consolidated, "overwrite_encoded_chunks": overwrite_encoded_chunks, "chunk_store": chunk_store, "storage_options": storage_options, "zarr_version": zarr_version, "zarr_format": zarr_format, } ds = open_dataset( filename_or_obj=store, group=group, decode_cf=decode_cf, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, engine="zarr", chunks=chunks, drop_variables=drop_variables, create_default_indexes=create_default_indexes, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, backend_kwargs=backend_kwargs, decode_timedelta=decode_timedelta, use_cftime=use_cftime, zarr_version=zarr_version, use_zarr_fill_value_as_mask=use_zarr_fill_value_as_mask, ) return ds class ZarrBackendEntrypoint(BackendEntrypoint): """ Backend for ".zarr" files based on the zarr package. For more information about the underlying library, visit: https://zarr.readthedocs.io/en/stable See Also -------- backends.ZarrStore """ description = "Open zarr files (.zarr) using zarr in Xarray" url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ZarrBackendEntrypoint.html" def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool: if isinstance(filename_or_obj, str | os.PathLike): # allow a trailing slash to account for an autocomplete # adding it. 
_, ext = os.path.splitext(str(filename_or_obj).rstrip("/")) return ext in [".zarr"] return False def open_dataset( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group=None, mode="r", synchronizer=None, consolidated=None, chunk_store=None, storage_options=None, zarr_version=None, zarr_format=None, store=None, engine=None, use_zarr_fill_value_as_mask=None, cache_members: bool = True, ) -> Dataset: filename_or_obj = _normalize_path(filename_or_obj) if not store: store = ZarrStore.open_group( filename_or_obj, group=group, mode=mode, synchronizer=synchronizer, consolidated=consolidated, consolidate_on_close=False, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, use_zarr_fill_value_as_mask=None, zarr_format=zarr_format, cache_members=cache_members, ) store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) return ds def open_datatree( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, mode="r", synchronizer=None, consolidated=None, chunk_store=None, storage_options=None, zarr_version=None, zarr_format=None, ) -> DataTree: filename_or_obj = _normalize_path(filename_or_obj) groups_dict = self.open_groups_as_dict( filename_or_obj=filename_or_obj, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, group=group, mode=mode, synchronizer=synchronizer, consolidated=consolidated, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, ) return datatree_from_dict_with_io_cleanup(groups_dict) def open_groups_as_dict( self, filename_or_obj: T_PathFileOrDataStore, *, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables: str | Iterable[str] | None = None, use_cftime=None, decode_timedelta=None, group: str | None = None, mode="r", synchronizer=None, consolidated=None, chunk_store=None, storage_options=None, zarr_version=None, zarr_format=None, ) -> dict[str, Dataset]: filename_or_obj = _normalize_path(filename_or_obj) # Check for a group and make it a parent if it exists if group: parent = str(NodePath("/") / NodePath(group)) else: parent = str(NodePath("/")) stores = ZarrStore.open_store( filename_or_obj, group=parent, mode=mode, synchronizer=synchronizer, consolidated=consolidated, consolidate_on_close=False, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, ) groups_dict = {} for path_group, store in stores.items(): store_entrypoint = StoreBackendEntrypoint() with close_on_error(store): group_ds = store_entrypoint.open_dataset( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, 
decode_timedelta=decode_timedelta, ) if group: group_name = str(NodePath(path_group).relative_to(parent)) else: group_name = str(NodePath(path_group)) groups_dict[group_name] = group_ds return groups_dict def _iter_zarr_groups(root: ZarrGroup, parent: str = "/") -> Iterable[str]: parent_nodepath = NodePath(parent) yield str(parent_nodepath) for path, group in root.groups(): gpath = parent_nodepath / path yield from _iter_zarr_groups(group, parent=str(gpath)) def _get_open_params( store, mode, synchronizer, group, consolidated, consolidate_on_close, chunk_store, storage_options, zarr_version, use_zarr_fill_value_as_mask, zarr_format, ): if TYPE_CHECKING: import zarr else: zarr = attempt_import("zarr") # zarr doesn't support pathlib.Path objects yet. zarr-python#601 if isinstance(store, os.PathLike): store = os.fspath(store) open_kwargs = dict( # mode='a-' is a handcrafted xarray specialty mode="a" if mode == "a-" else mode, synchronizer=synchronizer, path=group, ) open_kwargs["storage_options"] = storage_options zarr_format = _handle_zarr_version_or_format( zarr_version=zarr_version, zarr_format=zarr_format ) if _zarr_v3(): open_kwargs["zarr_format"] = zarr_format else: open_kwargs["zarr_version"] = zarr_format if chunk_store is not None: open_kwargs["chunk_store"] = chunk_store if consolidated is None: consolidated = False if _zarr_v3(): # TODO: replace AssertionError after https://github.com/zarr-developers/zarr-python/issues/2821 is resolved missing_exc = AssertionError else: missing_exc = zarr.errors.GroupNotFoundError if _zarr_v3(): # zarr 3.0.8 and earlier did not support this property - it was effectively assumed true if not getattr(store, "supports_consolidated_metadata", True): consolidated = consolidate_on_close = False if consolidated in [None, True]: # open the root of the store, in case there is metadata consolidated there group = open_kwargs.pop("path") if consolidated: # TODO: an option to pass the metadata_key keyword zarr_root_group = zarr.open_consolidated(store, **open_kwargs) elif consolidated is None: # same but with more error handling in case no consolidated metadata found try: zarr_root_group = zarr.open_consolidated(store, **open_kwargs) except (ValueError, KeyError): # ValueError in zarr-python 3.x, KeyError in 2.x. try: zarr_root_group = zarr.open_group(store, **open_kwargs) emit_user_level_warning( "Failed to open Zarr store with consolidated metadata, " "but successfully read with non-consolidated metadata. " "This is typically much slower for opening a dataset. " "To silence this warning, consider:\n" "1. Consolidating metadata in this existing store with " "zarr.consolidate_metadata().\n" "2. Explicitly setting consolidated=False, to avoid trying " "to read consolidate metadata, or\n" "3. 
Explicitly setting consolidated=True, to raise an " "error in this case instead of falling back to try " "reading non-consolidated metadata.", RuntimeWarning, ) except missing_exc as err: raise FileNotFoundError( f"No such file or directory: '{store}'" ) from err # but the user should still receive a DataTree whose root is the group they asked for if group and group != "/": zarr_group = zarr_root_group[group.removeprefix("/")] else: zarr_group = zarr_root_group else: if _zarr_v3(): # we have determined that we don't want to use consolidated metadata # so we set that to False to avoid trying to read it open_kwargs["use_consolidated"] = False zarr_group = zarr.open_group(store, **open_kwargs) close_store_on_close = zarr_group.store is not store # we use this to determine how to handle fill_value is_zarr_v3_format = _zarr_v3() and zarr_group.metadata.zarr_format == 3 if use_zarr_fill_value_as_mask is None: if is_zarr_v3_format: # for new data, we use a better default use_zarr_fill_value_as_mask = False else: # this was the default for v2 and should apply to most existing Zarr data use_zarr_fill_value_as_mask = True return ( zarr_group, consolidate_on_close, close_store_on_close, use_zarr_fill_value_as_mask, ) def _handle_zarr_version_or_format( *, zarr_version: ZarrFormat | None, zarr_format: ZarrFormat | None ) -> ZarrFormat | None: """handle the deprecated zarr_version kwarg and return zarr_format""" if ( zarr_format is not None and zarr_version is not None and zarr_format != zarr_version ): raise ValueError( f"zarr_format {zarr_format} does not match zarr_version {zarr_version}, please only set one" ) if zarr_version is not None: emit_user_level_warning( "zarr_version is deprecated, use zarr_format", FutureWarning ) return zarr_version return zarr_format BACKEND_ENTRYPOINTS["zarr"] = ("zarr", ZarrBackendEntrypoint) �����������������������������������������������������������������xarray-2025.09.0/xarray/coders.py�������������������������������������������������������������������0000664�0000000�0000000�00000000323�15056206164�0016501�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" This module provides coder objects that encapsulate the "encoding/decoding" process. 
""" from xarray.coding.times import CFDatetimeCoder, CFTimedeltaCoder __all__ = ["CFDatetimeCoder", "CFTimedeltaCoder"] �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/���������������������������������������������������������������������0000775�0000000�0000000�00000000000�15056206164�0016115�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/__init__.py����������������������������������������������������������0000664�0000000�0000000�00000000000�15056206164�0020214�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/calendar_ops.py������������������������������������������������������0000664�0000000�0000000�00000037424�15056206164�0021133�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import numpy as np import pandas as pd from xarray.coding.cftime_offsets import date_range_like, get_date_type from xarray.coding.cftimeindex import CFTimeIndex from xarray.coding.times import ( _should_cftime_be_used, convert_times, ) from xarray.core.common import ( _contains_datetime_like_objects, full_like, is_np_datetime_like, ) try: import cftime except ImportError: cftime = None _CALENDARS_WITHOUT_YEAR_ZERO = [ "gregorian", "proleptic_gregorian", "julian", "standard", ] def convert_calendar( obj, calendar, dim="time", align_on=None, missing=None, use_cftime=None, ): """Transform a time-indexed Dataset or DataArray to one that uses another calendar. This function only converts the individual timestamps; it does not modify any data except in dropping invalid/surplus dates, or inserting values for missing dates. If the source and target calendars are both from a standard type, only the type of the time array is modified. When converting to a calendar with a leap year from to a calendar without a leap year, the 29th of February will be removed from the array. In the other direction the 29th of February will be missing in the output, unless `missing` is specified, in which case that value is inserted. For conversions involving the `360_day` calendar, see Notes. This method is safe to use with sub-daily data as it doesn't touch the time part of the timestamps. Parameters ---------- obj : DataArray or Dataset Input DataArray or Dataset with a time coordinate of a valid dtype (:py:class:`numpy.datetime64` or :py:class:`cftime.datetime`). 
    calendar : str
        The target calendar name.
    dim : str
        Name of the time coordinate in the input DataArray or Dataset.
    align_on : {None, 'date', 'year', 'random'}
        Must be specified when either the source or target is a `"360_day"`
        calendar; ignored otherwise. See Notes.
    missing : any, optional
        By default, i.e. if the value is None, this method will simply attempt
        to convert the dates in the source calendar to the same dates in the
        target calendar, and drop any of those that are not possible to
        represent. If a value is provided, a new time coordinate will be
        created in the target calendar with the same frequency as the original
        time coordinate; for any dates that are not present in the source, the
        data will be filled with this value. Note that using this mode requires
        that the source data have an inferable frequency; for more information
        see :py:func:`xarray.infer_freq`. For certain frequency, source, and
        target calendar combinations, this could result in many missing values;
        see Notes.
    use_cftime : bool, optional
        Whether to use cftime objects in the output, only used if `calendar`
        is one of {"proleptic_gregorian", "gregorian" or "standard"}.
        If True, the new time axis uses cftime objects.
        If None (default), it uses :py:class:`numpy.datetime64` values if the
        date range permits it, and :py:class:`cftime.datetime` objects if not.
        If False, it uses :py:class:`numpy.datetime64` or fails.

    Returns
    -------
    Copy of source with the time coordinate converted to the target calendar.
    If `missing` was None (default), invalid dates in the new calendar are
    dropped, but missing dates are not inserted.
    If `missing` was given, the new data is reindexed to have a time axis with
    the same frequency as the source, but in the new calendar; any missing
    datapoints are filled with `missing`.

    Notes
    -----
    Passing a value to `missing` is only usable if the source's time coordinate
    has an inferable frequency (see :py:func:`~xarray.infer_freq`) and is only
    appropriate if the target coordinate, generated from this frequency, has
    dates equivalent to the source. It is usually **not** appropriate to use
    this mode with:

    - Period-end frequencies: 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS',
      'QS' and 'MS'.
    - Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where
      `n != 1`, or 'mH' where 24 % m != 0.

    If one of the source or target calendars is `"360_day"`, `align_on` must
    be specified and three options are offered.

    "year"
      The dates are translated according to their relative position in the
      year, ignoring their original month and day information, meaning that
      the missing/surplus days are added/removed at regular intervals.

      From a `360_day` to a standard calendar, the output will be missing the
      following dates (day of year in parentheses):
        To a leap year:
          January 31st (31), March 31st (91), June 1st (153), July 31st (213),
          September 31st (275) and November 30th (335).
        To a non-leap year:
          February 6th (36), April 19th (109), July 2nd (183),
          September 12th (255), November 25th (329).

      From a standard calendar to a `"360_day"`, the following dates in the
      source array will be dropped:
        From a leap year:
          January 31st (31), April 1st (92), June 1st (153), August 1st (214),
          September 31st (275), December 1st (336)
        From a non-leap year:
          February 6th (37), April 20th (110), July 2nd (183),
          September 13th (256), November 25th (329)

      This option is best used on daily and subdaily data.

    "date"
      The month/day information is conserved and invalid dates are dropped
      from the output.
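    As a minimal usage sketch of the behaviour described above (the dataset
    ``ds`` and variable name ``tas`` are illustrative assumptions; the public
    entry point :py:meth:`Dataset.convert_calendar` wraps this function):

    >>> import numpy as np
    >>> import xarray as xr
    >>> time = xr.date_range("2000-01-01", periods=730, freq="D")
    >>> ds = xr.Dataset({"tas": ("time", np.arange(730.0))}, coords={"time": time})
    >>> dropped = ds.convert_calendar("noleap")  # Feb 29, 2000 is dropped
    >>> on_360day = ds.convert_calendar("360_day", align_on="year")  # see "year" above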
This means that when converting from a `"360_day"` to a standard calendar, all 31sts (Jan, March, May, July, August, October and December) will be missing as there is no equivalent dates in the `"360_day"` calendar and the 29th (on non-leap years) and 30th of February will be dropped as there are no equivalent dates in a standard calendar. This option is best used with data on a frequency coarser than daily. "random" Similar to "year", each day of year of the source is mapped to another day of year of the target. However, instead of having always the same missing days according the source and target years, here 5 days are chosen randomly, one for each fifth of the year. However, February 29th is always missing when converting to a leap year, or its value is dropped when converting from a leap year. This is similar to the method used in the LOCA dataset (see Pierce, Cayan, and Thrasher (2014). doi:10.1175/JHM-D-14-0082.1). This option is best used on daily data. """ from xarray.core.dataarray import DataArray time = obj[dim] if not _contains_datetime_like_objects(time.variable): raise ValueError(f"Coordinate {dim} must contain datetime objects.") use_cftime = _should_cftime_be_used(time, calendar, use_cftime) source_calendar = time.dt.calendar # Do nothing if request calendar is the same as the source # AND source is np XOR use_cftime if source_calendar == calendar and is_np_datetime_like(time.dtype) ^ use_cftime: return obj if (time.dt.year == 0).any() and calendar in _CALENDARS_WITHOUT_YEAR_ZERO: raise ValueError( f"Source time coordinate contains dates with year 0, which is not supported by target calendar {calendar}." ) if (source_calendar == "360_day" or calendar == "360_day") and align_on is None: raise ValueError( "Argument `align_on` must be specified with either 'date' or " "'year' when converting to or from a '360_day' calendar." ) if source_calendar != "360_day" and calendar != "360_day": align_on = "date" out = obj.copy() if align_on in ["year", "random"]: # Special case for conversion involving 360_day calendar if align_on == "year": # Instead of translating dates directly, this tries to keep the position within a year similar. new_doy = _interpolate_day_of_year(time, target_calendar=calendar) elif align_on == "random": # The 5 days to remove are randomly chosen, one for each of the five 72-days periods of the year. new_doy = time.groupby(f"{dim}.year").map( _random_day_of_year, target_calendar=calendar, use_cftime=use_cftime ) # Convert the source datetimes, but override the day of year with our new day of years. out[dim] = DataArray( [ _convert_to_new_calendar_with_new_day_of_year( date, newdoy, calendar, use_cftime ) for date, newdoy in zip(time.variable._data.array, new_doy, strict=True) ], dims=(dim,), name=dim, ) # Remove duplicate timestamps, happens when reducing the number of days out = out.isel({dim: np.unique(out[dim], return_index=True)[1]}) elif align_on == "date": new_times = convert_times( time.data, get_date_type(calendar, use_cftime=use_cftime), raise_on_invalid=False, ) out[dim] = new_times # Remove NaN that where put on invalid dates in target calendar out = out.sel({dim: out[dim].notnull()}) if use_cftime: # Reassign times to ensure time index of output is a CFTimeIndex # (previously it was an Index due to the presence of NaN values). # Note this is not needed in the case that the output time index is # a DatetimeIndex, since DatetimeIndexes can handle NaN values. 
out[dim] = CFTimeIndex(out[dim].data) if missing is not None: time_target = date_range_like(time, calendar=calendar, use_cftime=use_cftime) out = out.reindex({dim: time_target}, fill_value=missing) # Copy attrs but remove `calendar` if still present. out[dim].attrs.update(time.attrs) out[dim].attrs.pop("calendar", None) return out def _is_leap_year(years, calendar): func = np.vectorize(cftime.is_leap_year) return func(years, calendar=calendar) def _days_in_year(years, calendar): """The number of days in the year according to given calendar.""" if calendar == "360_day": return full_like(years, 360) return _is_leap_year(years, calendar).astype(int) + 365 def _interpolate_day_of_year(times, target_calendar): """Returns the nearest day in the target calendar of the corresponding "decimal year" in the source calendar.""" source_calendar = times.dt.calendar return np.round( _days_in_year(times.dt.year, target_calendar) * times.dt.dayofyear / _days_in_year(times.dt.year, source_calendar) ).astype(int) def _random_day_of_year(time, target_calendar, use_cftime): """Return a day of year in the new calendar. Removes Feb 29th and five other days chosen randomly within five sections of 72 days. """ year = time.dt.year[0] source_calendar = time.dt.calendar new_doy = np.arange(360) + 1 rm_idx = np.random.default_rng().integers(0, 72, 5) + 72 * np.arange(5) if source_calendar == "360_day": for idx in rm_idx: new_doy[idx + 1 :] = new_doy[idx + 1 :] + 1 if _days_in_year(year, target_calendar) == 366: new_doy[new_doy >= 60] = new_doy[new_doy >= 60] + 1 elif target_calendar == "360_day": new_doy = np.insert(new_doy, rm_idx - np.arange(5), -1) if _days_in_year(year, source_calendar) == 366: new_doy = np.insert(new_doy, 60, -1) return new_doy[time.dt.dayofyear - 1] def _convert_to_new_calendar_with_new_day_of_year( date, day_of_year, calendar, use_cftime ): """Convert a datetime object to another calendar with a new day of year. Redefines the day of year (and thus ignores the month and day information from the source datetime). Nanosecond information is lost as cftime.datetime doesn't support it. """ new_date = cftime.num2date( day_of_year - 1, f"days since {date.year}-01-01", calendar=calendar if use_cftime else "standard", ) try: return get_date_type(calendar, use_cftime)( date.year, new_date.month, new_date.day, date.hour, date.minute, date.second, date.microsecond, ) except ValueError: return np.nan def _decimal_year_cftime(time, year, days_in_year, *, date_class): year_start = date_class(year, 1, 1) delta = np.timedelta64(time - year_start, "ns") days_in_year = np.timedelta64(days_in_year, "D") return year + delta / days_in_year def _decimal_year_numpy(time, year, days_in_year, *, dtype): time = np.asarray(time).astype(dtype) year_start = np.datetime64(int(year) - 1970, "Y").astype(dtype) delta = time - year_start days_in_year = np.timedelta64(days_in_year, "D") return year + delta / days_in_year def _decimal_year(times): """Convert a datetime DataArray to decimal years according to its calendar. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. Ex: '2000-03-01 12:00' is 2000.1653 in a standard calendar, 2000.16301 in a "noleap" or 2000.16806 in a "360_day". 
""" if times.dtype == "O": function = _decimal_year_cftime kwargs = {"date_class": get_date_type(times.dt.calendar, True)} else: function = _decimal_year_numpy kwargs = {"dtype": times.dtype} from xarray.computation.apply_ufunc import apply_ufunc return apply_ufunc( function, times, times.dt.year, times.dt.days_in_year, kwargs=kwargs, vectorize=True, dask="parallelized", output_dtypes=[np.float64], ) def interp_calendar(source, target, dim="time"): """Interpolates a DataArray or Dataset indexed by a time coordinate to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal year equivalent then `source` is interpolated on the target coordinate. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. For example "2000-03-01 12:00" is 2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar. This method should only be used when the time (HH:MM:SS) information of time coordinate is not important. Parameters ---------- source: DataArray or Dataset The source data to interpolate; must have a time coordinate of a valid dtype (:py:class:`numpy.datetime64` or :py:class:`cftime.datetime` objects) target: DataArray, DatetimeIndex, or CFTimeIndex The target time coordinate of a valid dtype (np.datetime64 or cftime objects) dim : str The time coordinate name. Return ------ DataArray or Dataset The source interpolated on the decimal years of target, """ from xarray.core.dataarray import DataArray if isinstance(target, pd.DatetimeIndex | CFTimeIndex): target = DataArray(target, dims=(dim,), name=dim) if not _contains_datetime_like_objects( source[dim].variable ) or not _contains_datetime_like_objects(target.variable): raise ValueError( f"Both 'source.{dim}' and 'target' must contain datetime objects." ) target_calendar = target.dt.calendar if ( source[dim].time.dt.year == 0 ).any() and target_calendar in _CALENDARS_WITHOUT_YEAR_ZERO: raise ValueError( f"Source time coordinate contains dates with year 0, which is not supported by target calendar {target_calendar}." ) out = source.copy() out[dim] = _decimal_year(source[dim]) target_idx = _decimal_year(target) out = out.interp(**{dim: target_idx}) out[dim] = target return out ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/cftime_offsets.py����������������������������������������������������0000664�0000000�0000000�00000172653�15056206164�0021505�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Time offset classes for use with cftime.datetime objects""" # The offset classes and mechanisms for generating time ranges defined in # this module were copied/adapted from those defined in pandas. See in # particular the objects and methods defined in pandas.tseries.offsets # and pandas.core.indexes.datetimes. # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. 
# Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import re import warnings from collections.abc import Mapping from datetime import datetime, timedelta from functools import partial from typing import TYPE_CHECKING, ClassVar, Literal, TypeVar, get_args import numpy as np import pandas as pd from packaging.version import Version from xarray.coding.cftimeindex import CFTimeIndex from xarray.coding.times import ( _is_standard_calendar, _parse_iso8601, _should_cftime_be_used, convert_time_or_go_back, format_cftime_datetime, ) from xarray.compat.pdcompat import ( count_not_none, default_precision_timestamp, ) from xarray.core.common import _contains_datetime_like_objects, is_np_datetime_like from xarray.core.types import InclusiveOptions from xarray.core.utils import attempt_import, emit_user_level_warning if TYPE_CHECKING: from xarray.core.types import ( PDDatetimeUnitOptions, Self, TypeAlias, ) DayOption: TypeAlias = Literal["start", "end"] T_FreqStr = TypeVar("T_FreqStr", str, None) def get_date_type(calendar, use_cftime=True): """Return the cftime date type for a given calendar name.""" if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if _is_standard_calendar(calendar) and not use_cftime: return default_precision_timestamp calendars = { "noleap": cftime.DatetimeNoLeap, "360_day": cftime.Datetime360Day, "365_day": cftime.DatetimeNoLeap, "366_day": cftime.DatetimeAllLeap, "gregorian": cftime.DatetimeGregorian, "proleptic_gregorian": cftime.DatetimeProlepticGregorian, "julian": cftime.DatetimeJulian, "all_leap": cftime.DatetimeAllLeap, "standard": cftime.DatetimeGregorian, } return calendars[calendar] class BaseCFTimeOffset: _freq: ClassVar[str | None] = None _day_option: ClassVar[DayOption | None] = None n: int def __init__(self, n: int = 1) -> None: if not isinstance(n, int): raise TypeError( "The provided multiple 'n' must be an integer. " f"Instead a value of type {type(n)!r} was provided." 
) self.n = n def rule_code(self) -> str | None: return self._freq def __eq__(self, other: object) -> bool: if not isinstance(other, BaseCFTimeOffset): return NotImplemented return self.n == other.n and self.rule_code() == other.rule_code() def __ne__(self, other: object) -> bool: return not self == other def __add__(self, other): return self.__apply__(other) def __sub__(self, other): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(other, cftime.datetime): raise TypeError("Cannot subtract a cftime.datetime from a time offset.") elif type(other) is type(self): return type(self)(self.n - other.n) else: return NotImplemented def __mul__(self, other: int) -> Self: if not isinstance(other, int): return NotImplemented return type(self)(n=other * self.n) def __neg__(self) -> Self: return self * -1 def __rmul__(self, other): return self.__mul__(other) def __radd__(self, other): return self.__add__(other) def __rsub__(self, other): if isinstance(other, BaseCFTimeOffset) and type(self) is not type(other): raise TypeError("Cannot subtract cftime offsets of differing types") return -self + other def __apply__(self, other): return NotImplemented def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" test_date = (self + date) - self return date == test_date def rollforward(self, date): if self.onOffset(date): return date else: return date + type(self)() def rollback(self, date): if self.onOffset(date): return date else: return date - type(self)() def __str__(self): return f"<{type(self).__name__}: n={self.n}>" def __repr__(self): return str(self) def _get_offset_day(self, other): # subclass must implement `_day_option`; calling from the base class # will raise NotImplementedError. return _get_day_of_month(other, self._day_option) class Tick(BaseCFTimeOffset): # analogous https://github.com/pandas-dev/pandas/blob/ccb25ab1d24c4fb9691270706a59c8d319750870/pandas/_libs/tslibs/offsets.pyx#L806 def _next_higher_resolution(self) -> Tick: self_type = type(self) if self_type is Day: return Hour(self.n * 24) if self_type is Hour: return Minute(self.n * 60) if self_type is Minute: return Second(self.n * 60) if self_type is Second: return Millisecond(self.n * 1000) if self_type is Millisecond: return Microsecond(self.n * 1000) raise ValueError("Could not convert to integer offset at any resolution") def __mul__(self, other: int | float) -> Tick: if not isinstance(other, int | float): return NotImplemented if isinstance(other, float): n = other * self.n # If the new `n` is an integer, we can represent it using the # same BaseCFTimeOffset subclass as self, otherwise we need to move up # to a higher-resolution subclass if np.isclose(n % 1, 0): return type(self)(int(n)) new_self = self._next_higher_resolution() return new_self * other return type(self)(n=other * self.n) def as_timedelta(self) -> timedelta: """All Tick subclasses must implement an as_timedelta method.""" raise NotImplementedError def _get_day_of_month(other, day_option: DayOption) -> int: """Find the day in `other`'s month that satisfies a BaseCFTimeOffset's onOffset policy, as described by the `day_option` argument. 
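    As a brief illustrative sketch of the offset arithmetic defined above
    (these lines are an example, assuming ``cftime`` is installed):

    >>> import cftime
    >>> from xarray.coding.cftime_offsets import MonthBegin, to_offset
    >>> date = cftime.DatetimeNoLeap(2000, 1, 15)
    >>> date + MonthBegin(1) == cftime.DatetimeNoLeap(2000, 2, 1)
    True
    >>> MonthBegin(1).rollback(date) == cftime.DatetimeNoLeap(2000, 1, 1)
    True
    >>> to_offset("2MS") == MonthBegin(2)
    True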
Parameters ---------- other : cftime.datetime day_option : 'start', 'end' 'start': returns 1 'end': returns last day of the month Returns ------- day_of_month : int """ if day_option == "start": return 1 elif day_option == "end": return other.daysinmonth elif day_option is None: # Note: unlike `_shift_month`, _get_day_of_month does not # allow day_option = None raise NotImplementedError() raise ValueError(day_option) def _adjust_n_months(other_day, n, reference_day): """Adjust the number of times a monthly offset is applied based on the day of a given date, and the reference day provided. """ if n > 0 and other_day < reference_day: n = n - 1 elif n <= 0 and other_day > reference_day: n = n + 1 return n def _adjust_n_years(other, n, month, reference_day): """Adjust the number of times an annual offset is applied based on another date, and the reference day provided""" if n > 0: if other.month < month or (other.month == month and other.day < reference_day): n -= 1 elif other.month > month or (other.month == month and other.day > reference_day): n += 1 return n def _shift_month(date, months, day_option: DayOption = "start"): """Shift the date to a month start or end a given number of months away.""" _ = attempt_import("cftime") has_year_zero = date.has_year_zero year = date.year + (date.month + months) // 12 month = (date.month + months) % 12 if month == 0: month = 12 year -= 1 if not has_year_zero: if date.year < 0 <= year: year += 1 elif year <= 0 < date.year: year -= 1 # Silence warnings associated with generating dates with years < 1. with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="this date/calendar/year zero") if day_option == "start": day = 1 elif day_option == "end": reference = type(date)(year, month, 1, has_year_zero=has_year_zero) day = reference.daysinmonth else: raise ValueError(day_option) return date.replace(year=year, month=month, day=day) def roll_qtrday( other, n: int, month: int, day_option: DayOption, modby: int = 3 ) -> int: """Possibly increment or decrement the number of periods to shift based on rollforward/rollbackward conventions. Parameters ---------- other : cftime.datetime n : number of periods to increment, before adjusting for rolling month : int reference month giving the first month of the year day_option : 'start', 'end' The convention to use in finding the day in a given month against which to compare for rollforward/rollbackward decisions. modby : int 3 for quarters, 12 for years Returns ------- n : int number of periods to increment See Also -------- _get_day_of_month : Find the day in a month provided an offset. """ months_since = other.month % modby - month % modby if n > 0: if months_since < 0 or ( months_since == 0 and other.day < _get_day_of_month(other, day_option) ): # pretend to roll back if on same month but # before compare_day n -= 1 elif months_since > 0 or ( months_since == 0 and other.day > _get_day_of_month(other, day_option) ): # make sure to roll forward, so negate n += 1 return n def _validate_month(month: int | None, default_month: int) -> int: result_month = default_month if month is None else month if not isinstance(result_month, int): raise TypeError( "'self.month' must be an integer value between 1 " "and 12. Instead, it was set to a value of " f"{result_month!r}" ) elif not (1 <= result_month <= 12): raise ValueError( "'self.month' must be an integer value between 1 " "and 12. 
Instead, it was set to a value of " f"{result_month!r}" ) return result_month class MonthBegin(BaseCFTimeOffset): _freq = "MS" def __apply__(self, other): n = _adjust_n_months(other.day, self.n, 1) return _shift_month(other, n, "start") def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == 1 class MonthEnd(BaseCFTimeOffset): _freq = "ME" def __apply__(self, other): n = _adjust_n_months(other.day, self.n, other.daysinmonth) return _shift_month(other, n, "end") def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == date.daysinmonth _MONTH_ABBREVIATIONS = { 1: "JAN", 2: "FEB", 3: "MAR", 4: "APR", 5: "MAY", 6: "JUN", 7: "JUL", 8: "AUG", 9: "SEP", 10: "OCT", 11: "NOV", 12: "DEC", } class QuarterOffset(BaseCFTimeOffset): """Quarter representation copied off of pandas/tseries/offsets.py""" _default_month: ClassVar[int] month: int def __init__(self, n: int = 1, month: int | None = None) -> None: BaseCFTimeOffset.__init__(self, n) self.month = _validate_month(month, self._default_month) def __apply__(self, other): # months_since: find the calendar quarter containing other.month, # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep]. # Then find the month in that quarter containing an onOffset date for # self. `months_since` is the number of months to shift other.month # to get to this on-offset month. months_since = other.month % 3 - self.month % 3 qtrs = roll_qtrday( other, self.n, self.month, day_option=self._day_option, modby=3 ) months = qtrs * 3 - months_since return _shift_month(other, months, self._day_option) def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" mod_month = (date.month - self.month) % 3 return mod_month == 0 and date.day == self._get_offset_day(date) def __sub__(self, other: Self) -> Self: if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(other, cftime.datetime): raise TypeError("Cannot subtract cftime.datetime from offset.") if type(other) is type(self) and other.month == self.month: return type(self)(self.n - other.n, month=self.month) return NotImplemented def __mul__(self, other): if isinstance(other, float): return NotImplemented return type(self)(n=other * self.n, month=self.month) def rule_code(self) -> str: return f"{self._freq}-{_MONTH_ABBREVIATIONS[self.month]}" def __str__(self): return f"<{type(self).__name__}: n={self.n}, month={self.month}>" class QuarterBegin(QuarterOffset): # When converting a string to an offset, pandas converts # 'QS' to a QuarterBegin offset starting in the month of # January. When creating a QuarterBegin offset directly # from the constructor, however, the default month is March. # We follow that behavior here. _default_month = 3 _freq = "QS" _day_option = "start" def rollforward(self, date): """Roll date forward to nearest start of quarter""" if self.onOffset(date): return date else: return date + QuarterBegin(month=self.month) def rollback(self, date): """Roll date backward to nearest start of quarter""" if self.onOffset(date): return date else: return date - QuarterBegin(month=self.month) class QuarterEnd(QuarterOffset): # When converting a string to an offset, pandas converts # 'Q' to a QuarterEnd offset starting in the month of # December. 
When creating a QuarterEnd offset directly # from the constructor, however, the default month is March. # We follow that behavior here. _default_month = 3 _freq = "QE" _day_option = "end" def rollforward(self, date): """Roll date forward to nearest end of quarter""" if self.onOffset(date): return date else: return date + QuarterEnd(month=self.month) def rollback(self, date): """Roll date backward to nearest end of quarter""" if self.onOffset(date): return date else: return date - QuarterEnd(month=self.month) class YearOffset(BaseCFTimeOffset): _default_month: ClassVar[int] month: int def __init__(self, n: int = 1, month: int | None = None) -> None: BaseCFTimeOffset.__init__(self, n) self.month = _validate_month(month, self._default_month) def __apply__(self, other): reference_day = _get_day_of_month(other, self._day_option) years = _adjust_n_years(other, self.n, self.month, reference_day) months = years * 12 + (self.month - other.month) return _shift_month(other, months, self._day_option) def __sub__(self, other): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(other, cftime.datetime): raise TypeError("Cannot subtract cftime.datetime from offset.") elif type(other) is type(self) and other.month == self.month: return type(self)(self.n - other.n, month=self.month) else: return NotImplemented def __mul__(self, other): if isinstance(other, float): return NotImplemented return type(self)(n=other * self.n, month=self.month) def rule_code(self) -> str: return f"{self._freq}-{_MONTH_ABBREVIATIONS[self.month]}" def __str__(self) -> str: return f"<{type(self).__name__}: n={self.n}, month={self.month}>" class YearBegin(YearOffset): _freq = "YS" _day_option = "start" _default_month = 1 def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == 1 and date.month == self.month def rollforward(self, date): """Roll date forward to nearest start of year""" if self.onOffset(date): return date else: return date + YearBegin(month=self.month) def rollback(self, date): """Roll date backward to nearest start of year""" if self.onOffset(date): return date else: return date - YearBegin(month=self.month) class YearEnd(YearOffset): _freq = "YE" _day_option = "end" _default_month = 12 def onOffset(self, date) -> bool: """Check if the given date is in the set of possible dates created using a length-one version of this offset class.""" return date.day == date.daysinmonth and date.month == self.month def rollforward(self, date): """Roll date forward to nearest end of year""" if self.onOffset(date): return date else: return date + YearEnd(month=self.month) def rollback(self, date): """Roll date backward to nearest end of year""" if self.onOffset(date): return date else: return date - YearEnd(month=self.month) class Day(BaseCFTimeOffset): """Day offset following definition in pandas/_libs/tslibs/offsets.pyx""" _freq = "D" def __apply__(self, other): if isinstance(other, Day): return Day(self.n + other.n) else: return other + timedelta(days=self.n) def onOffset(self, date) -> bool: return True class Hour(Tick): _freq = "h" def as_timedelta(self) -> timedelta: return timedelta(hours=self.n) def __apply__(self, other): return other + self.as_timedelta() class Minute(Tick): _freq = "min" def as_timedelta(self) -> timedelta: return timedelta(minutes=self.n) def __apply__(self, other): return other + self.as_timedelta() class Second(Tick): _freq = "s" def as_timedelta(self) -> 
timedelta: return timedelta(seconds=self.n) def __apply__(self, other): return other + self.as_timedelta() class Millisecond(Tick): _freq = "ms" def as_timedelta(self) -> timedelta: return timedelta(milliseconds=self.n) def __apply__(self, other): return other + self.as_timedelta() class Microsecond(Tick): _freq = "us" def as_timedelta(self) -> timedelta: return timedelta(microseconds=self.n) def __apply__(self, other): return other + self.as_timedelta() def _generate_anchored_offsets( base_freq: str, offset: type[YearOffset | QuarterOffset] ) -> dict[str, type[BaseCFTimeOffset]]: offsets: dict[str, type[BaseCFTimeOffset]] = {} for month, abbreviation in _MONTH_ABBREVIATIONS.items(): anchored_freq = f"{base_freq}-{abbreviation}" offsets[anchored_freq] = partial(offset, month=month) # type: ignore[assignment] return offsets _FREQUENCIES: Mapping[str, type[BaseCFTimeOffset]] = { "A": YearEnd, "AS": YearBegin, "Y": YearEnd, "YE": YearEnd, "YS": YearBegin, "Q": partial(QuarterEnd, month=12), # type: ignore[dict-item] "QE": partial(QuarterEnd, month=12), # type: ignore[dict-item] "QS": partial(QuarterBegin, month=1), # type: ignore[dict-item] "M": MonthEnd, "ME": MonthEnd, "MS": MonthBegin, "D": Day, "H": Hour, "h": Hour, "T": Minute, "min": Minute, "S": Second, "s": Second, "L": Millisecond, "ms": Millisecond, "U": Microsecond, "us": Microsecond, **_generate_anchored_offsets("AS", YearBegin), **_generate_anchored_offsets("A", YearEnd), **_generate_anchored_offsets("YS", YearBegin), **_generate_anchored_offsets("Y", YearEnd), **_generate_anchored_offsets("YE", YearEnd), **_generate_anchored_offsets("QS", QuarterBegin), **_generate_anchored_offsets("Q", QuarterEnd), **_generate_anchored_offsets("QE", QuarterEnd), } _FREQUENCY_CONDITION = "|".join(_FREQUENCIES.keys()) _PATTERN = rf"^((?P[+-]?\d+)|())(?P({_FREQUENCY_CONDITION}))$" # pandas defines these offsets as "Tick" objects, which for instance have # distinct behavior from daily or longer frequencies in resample. CFTIME_TICKS = (Hour, Minute, Second) def _generate_anchored_deprecated_frequencies( deprecated: str, recommended: str ) -> dict[str, str]: pairs = {} for abbreviation in _MONTH_ABBREVIATIONS.values(): anchored_deprecated = f"{deprecated}-{abbreviation}" anchored_recommended = f"{recommended}-{abbreviation}" pairs[anchored_deprecated] = anchored_recommended return pairs _DEPRECATED_FREQUENCIES: dict[str, str] = { "A": "YE", "Y": "YE", "AS": "YS", "Q": "QE", "M": "ME", "H": "h", "T": "min", "S": "s", "L": "ms", "U": "us", **_generate_anchored_deprecated_frequencies("A", "YE"), **_generate_anchored_deprecated_frequencies("Y", "YE"), **_generate_anchored_deprecated_frequencies("AS", "YS"), **_generate_anchored_deprecated_frequencies("Q", "QE"), } _DEPRECATION_MESSAGE = ( "{deprecated_freq!r} is deprecated and will be removed in a future " "version. Please use {recommended_freq!r} instead of " "{deprecated_freq!r}." 
) def _emit_freq_deprecation_warning(deprecated_freq): recommended_freq = _DEPRECATED_FREQUENCIES[deprecated_freq] message = _DEPRECATION_MESSAGE.format( deprecated_freq=deprecated_freq, recommended_freq=recommended_freq ) emit_user_level_warning(message, FutureWarning) def to_offset( freq: BaseCFTimeOffset | str | timedelta | pd.Timedelta | pd.DateOffset, warn: bool = True, ) -> BaseCFTimeOffset: """Convert a frequency string to the appropriate subclass of BaseCFTimeOffset.""" if isinstance(freq, BaseCFTimeOffset): return freq if isinstance(freq, timedelta | pd.Timedelta): return delta_to_tick(freq) if isinstance(freq, pd.DateOffset): freq = _legacy_to_new_freq(freq.freqstr) match = re.match(_PATTERN, freq) if match is None: raise ValueError("Invalid frequency string provided") freq_data = match.groupdict() freq = freq_data["freq"] if warn and freq in _DEPRECATED_FREQUENCIES: _emit_freq_deprecation_warning(freq) multiples = freq_data["multiple"] multiples = 1 if multiples is None else int(multiples) return _FREQUENCIES[freq](n=multiples) def delta_to_tick(delta: timedelta | pd.Timedelta) -> Tick: """Adapted from pandas.tslib.delta_to_tick""" if isinstance(delta, pd.Timedelta) and delta.nanoseconds != 0: # pandas.Timedelta has nanoseconds, but these are not supported raise ValueError( "Unable to convert 'pandas.Timedelta' object with non-zero " "nanoseconds to 'CFTimeOffset' object" ) if delta.microseconds == 0: seconds = delta.days * 86400 + delta.seconds if seconds % 3600 == 0: return Hour(n=seconds // 3600) elif seconds % 60 == 0: return Minute(n=seconds // 60) else: return Second(n=seconds) # Regardless of the days and seconds this will always be a Millisecond # or Microsecond object elif delta.microseconds % 1_000 == 0: return Millisecond(n=delta.microseconds // 1_000) else: return Microsecond(n=delta.microseconds) def to_cftime_datetime(date_str_or_date, calendar=None): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if isinstance(date_str_or_date, str): if calendar is None: raise ValueError( "If converting a string to a cftime.datetime object, " "a calendar type must be provided" ) date, _ = _parse_iso8601(get_date_type(calendar), date_str_or_date) return date elif isinstance(date_str_or_date, cftime.datetime): return date_str_or_date elif isinstance(date_str_or_date, datetime | pd.Timestamp): return cftime.DatetimeProlepticGregorian(*date_str_or_date.timetuple()) else: raise TypeError( "date_str_or_date must be a string or a " "subclass of cftime.datetime. Instead got " f"{date_str_or_date!r}." 
) def normalize_date(date): """Round datetime down to midnight.""" return date.replace(hour=0, minute=0, second=0, microsecond=0) def _get_normalized_cfdate(date, calendar, normalize): """convert to cf datetime and round down to midnight if normalize.""" if date is None: return date cf_date = to_cftime_datetime(date, calendar) return normalize_date(cf_date) if normalize else cf_date def _generate_linear_date_range(start, end, periods): """Generate an equally-spaced sequence of cftime.datetime objects between and including two dates (whose length equals the number of periods).""" if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") total_seconds = (end - start).total_seconds() values = np.linspace(0.0, total_seconds, periods, endpoint=True) units = f"seconds since {format_cftime_datetime(start)}" calendar = start.calendar return cftime.num2date( values, units=units, calendar=calendar, only_use_cftime_datetimes=True ) def _generate_linear_date_range_with_freq(start, end, periods, freq): """Generate a regular range of cftime.datetime objects with a given frequency. Adapted from pandas.tseries.offsets.generate_range (now at pandas.core.arrays.datetimes._generate_range). Parameters ---------- start : cftime.datetime, or None Start of range end : cftime.datetime, or None End of range periods : int, or None Number of elements in the sequence freq: str Step size between cftime.datetime objects. Not None. Returns ------- A generator object of cftime.datetime objects """ offset = to_offset(freq) if start: # From pandas GH 56147 / 56832 to account for negative direction and # range bounds if offset.n >= 0: start = offset.rollforward(start) else: start = offset.rollback(start) if periods is None and end < start and offset.n >= 0: end = None periods = 0 if end is None: end = start + (periods - 1) * offset if start is None: start = end - (periods - 1) * offset current = start if offset.n >= 0: while current <= end: yield current next_date = current + offset if next_date <= current: raise ValueError(f"Offset {offset} did not increment date") current = next_date else: while current >= end: yield current next_date = current + offset if next_date >= current: raise ValueError(f"Offset {offset} did not decrement date") current = next_date def cftime_range( start=None, end=None, periods=None, freq=None, normalize=False, name=None, inclusive: InclusiveOptions = "both", calendar="standard", ) -> CFTimeIndex: """Return a fixed frequency CFTimeIndex. .. deprecated:: 2025.02.0 Use :py:func:`~xarray.date_range` with ``use_cftime=True`` instead. Parameters ---------- start : str or cftime.datetime, optional Left bound for generating dates. end : str or cftime.datetime, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D". normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. .. versionadded:: 2023.02.0 calendar : str, default: "standard" Calendar type for the datetimes. Returns ------- CFTimeIndex Notes ----- This function is an analog of ``pandas.date_range`` for use in generating sequences of ``cftime.datetime`` objects. It supports most of the features of ``pandas.date_range`` (e.g. 
specifying how the index is ``closed`` on either side, or whether or not to ``normalize`` the start and end bounds); however, there are some notable exceptions: - You cannot specify a ``tz`` (time zone) argument. - Start or end dates specified as partial-datetime strings must use the `ISO-8601 format `_. - It supports many, but not all, frequencies supported by ``pandas.date_range``. For example it does not currently support any of the business-related or semi-monthly frequencies. - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as these can easily be written in terms of the finest common resolution, e.g. '61min'. Valid simple frequency strings for use with ``cftime``-calendars include any multiples of the following. +--------+--------------------------+ | Alias | Description | +========+==========================+ | YE | Year-end frequency | +--------+--------------------------+ | YS | Year-start frequency | +--------+--------------------------+ | QE | Quarter-end frequency | +--------+--------------------------+ | QS | Quarter-start frequency | +--------+--------------------------+ | ME | Month-end frequency | +--------+--------------------------+ | MS | Month-start frequency | +--------+--------------------------+ | D | Day frequency | +--------+--------------------------+ | h | Hour frequency | +--------+--------------------------+ | min | Minute frequency | +--------+--------------------------+ | s | Second frequency | +--------+--------------------------+ | ms | Millisecond frequency | +--------+--------------------------+ | us | Microsecond frequency | +--------+--------------------------+ Any multiples of the following anchored offsets are also supported. +------------+--------------------------------------------------------------------+ | Alias | Description | +============+====================================================================+ | Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September | +------------+--------------------------------------------------------------------+ | Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Y(E,S)-NOV | Annual frequency, 
anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ | Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September | +------------+--------------------------------------------------------------------+ | Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ Finally, the following calendar aliases are supported. +--------------------------------+---------------------------------------+ | Alias | Date type | +================================+=======================================+ | standard, gregorian | ``cftime.DatetimeGregorian`` | +--------------------------------+---------------------------------------+ | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` | +--------------------------------+---------------------------------------+ | noleap, 365_day | ``cftime.DatetimeNoLeap`` | +--------------------------------+---------------------------------------+ | all_leap, 366_day | ``cftime.DatetimeAllLeap`` | +--------------------------------+---------------------------------------+ | 360_day | ``cftime.Datetime360Day`` | +--------------------------------+---------------------------------------+ | julian | ``cftime.DatetimeJulian`` | +--------------------------------+---------------------------------------+ Examples -------- This function returns a ``CFTimeIndex``, populated with ``cftime.datetime`` objects associated with the specified calendar type, e.g. >>> xr.date_range( ... start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True ... 
) CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00, 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00], dtype='object', length=6, calendar='noleap', freq='2MS') As in the standard pandas function, three of the ``start``, ``end``, ``periods``, or ``freq`` arguments must be specified at a given time, with the other set to ``None``. See the `pandas documentation `_ for more examples of the behavior of ``date_range`` with each of the parameters. See Also -------- pandas.date_range """ emit_user_level_warning( "cftime_range() is deprecated, please use xarray.date_range(..., use_cftime=True) instead.", DeprecationWarning, ) return date_range( start=start, end=end, periods=periods, freq=freq, normalize=normalize, name=name, inclusive=inclusive, calendar=calendar, use_cftime=True, ) def _cftime_range( start=None, end=None, periods=None, freq=None, normalize=False, name=None, inclusive: InclusiveOptions = "both", calendar="standard", ) -> CFTimeIndex: """Return a fixed frequency CFTimeIndex. Parameters ---------- start : str or cftime.datetime, optional Left bound for generating dates. end : str or cftime.datetime, optional Right bound for generating dates. periods : int, optional Number of periods to generate. freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D". normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. calendar : str, default: "standard" Calendar type for the datetimes. Returns ------- CFTimeIndex Notes ----- see cftime_range """ if freq is None and any(arg is None for arg in [periods, start, end]): freq = "D" # Adapted from pandas.core.indexes.datetimes._generate_range. if count_not_none(start, end, periods, freq) != 3: raise ValueError( "Exactly three of 'start', 'end', 'periods', or 'freq' must be " "specified to generate a date range. Note that 'freq' defaults to " "'D' in the event that any of 'start', 'end', or 'periods' are " "None." ) start = _get_normalized_cfdate(start, calendar, normalize) end = _get_normalized_cfdate(end, calendar, normalize) if freq is None: dates = _generate_linear_date_range(start, end, periods) else: dates = np.array( list(_generate_linear_date_range_with_freq(start, end, periods, freq)) ) if not TYPE_CHECKING and inclusive not in get_args(InclusiveOptions): raise ValueError( f"Argument `inclusive` must be either 'both', 'neither', " f"'left', or 'right'. Got {inclusive}." ) if len(dates) and inclusive != "both": if inclusive != "left" and dates[0] == start: dates = dates[1:] if inclusive != "right" and dates[-1] == end: dates = dates[:-1] return CFTimeIndex(dates, name=name) def date_range( start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, inclusive: InclusiveOptions = "both", unit: PDDatetimeUnitOptions = "ns", calendar="standard", use_cftime=None, ): """Return a fixed frequency datetime index. The type (:py:class:`xarray.CFTimeIndex` or :py:class:`pandas.DatetimeIndex`) of the returned index depends on the requested calendar and on `use_cftime`. Parameters ---------- start : str or datetime-like, optional Left bound for generating dates. end : str or datetime-like, optional Right bound for generating dates. periods : int, optional Number of periods to generate. 
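    As a short illustrative sketch of how the returned index type depends on
    ``calendar`` and ``use_cftime`` (example lines; not an exhaustive
    specification):

    >>> import xarray as xr
    >>> type(xr.date_range("2000-01-01", periods=3, freq="D")).__name__
    'DatetimeIndex'
    >>> type(
    ...     xr.date_range("2000-01-01", periods=3, freq="D", calendar="360_day")
    ... ).__name__
    'CFTimeIndex'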
freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D". tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is timezone-naive. Only valid with pandas DatetimeIndex. normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index inclusive : {"both", "neither", "left", "right"}, default: "both" Include boundaries; whether to set each bound as closed or open. .. versionadded:: 2023.02.0 unit : {"s", "ms", "us", "ns"}, default "ns" Specify the desired resolution of the result. .. versionadded:: 2024.12.0 calendar : str, default: "standard" Calendar type for the datetimes. use_cftime : boolean, optional If True, always return a CFTimeIndex. If False, return a pd.DatetimeIndex if possible or raise a ValueError. If None (default), return a pd.DatetimeIndex if possible, otherwise return a CFTimeIndex. Overridden to False if `tz` is not None. Returns ------- CFTimeIndex or pd.DatetimeIndex Notes ----- When ``use_cftime=True``, or a calendar other than "standard", "gregorian", or "proleptic_gregorian" is provided, this function is an analog of ``pandas.date_range`` for use in generating sequences of ``cftime.datetime`` objects. It supports most of the features of ``pandas.date_range`` (e.g. specifying how the index is ``closed`` on either side, or whether or not to ``normalize`` the start and end bounds); however, there are some notable exceptions: - You cannot specify a ``tz`` (time zone) argument. - Start or end dates specified as partial-datetime strings must use the `ISO-8601 format `_. - It supports many, but not all, frequencies supported by ``pandas.date_range``. For example it does not currently support any of the business-related or semi-monthly frequencies. - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as these can easily be written in terms of the finest common resolution, e.g. '61min'. Valid simple frequency strings for use with ``cftime``-calendars include any multiples of the following. +--------+--------------------------+ | Alias | Description | +========+==========================+ | YE | Year-end frequency | +--------+--------------------------+ | YS | Year-start frequency | +--------+--------------------------+ | QE | Quarter-end frequency | +--------+--------------------------+ | QS | Quarter-start frequency | +--------+--------------------------+ | ME | Month-end frequency | +--------+--------------------------+ | MS | Month-start frequency | +--------+--------------------------+ | D | Day frequency | +--------+--------------------------+ | h | Hour frequency | +--------+--------------------------+ | min | Minute frequency | +--------+--------------------------+ | s | Second frequency | +--------+--------------------------+ | ms | Millisecond frequency | +--------+--------------------------+ | us | Microsecond frequency | +--------+--------------------------+ Any multiples of the following anchored offsets are also supported. 
+------------+--------------------------------------------------------------------+ | Alias | Description | +============+====================================================================+ | Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September | +------------+--------------------------------------------------------------------+ | Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Y(E,S)-NOV | Annual frequency, anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ | Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January | +------------+--------------------------------------------------------------------+ | Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March | +------------+--------------------------------------------------------------------+ | Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April | +------------+--------------------------------------------------------------------+ | Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June | +------------+--------------------------------------------------------------------+ | Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July | +------------+--------------------------------------------------------------------+ | Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August | +------------+--------------------------------------------------------------------+ | Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September | 
+------------+--------------------------------------------------------------------+ | Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October | +------------+--------------------------------------------------------------------+ | Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November | +------------+--------------------------------------------------------------------+ | Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December | +------------+--------------------------------------------------------------------+ Finally, the following calendar aliases are supported. +--------------------------------+---------------------------------------+----------------------------+ | Alias | Date type | Available use_cftime=False | +================================+=======================================+============================+ | standard, gregorian | ``cftime.DatetimeGregorian`` | True | +--------------------------------+---------------------------------------+----------------------------+ | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` | True | +--------------------------------+---------------------------------------+----------------------------+ | noleap, 365_day | ``cftime.DatetimeNoLeap`` | False | +--------------------------------+---------------------------------------+----------------------------+ | all_leap, 366_day | ``cftime.DatetimeAllLeap`` | False | +--------------------------------+---------------------------------------+----------------------------+ | 360_day | ``cftime.Datetime360Day`` | False | +--------------------------------+---------------------------------------+----------------------------+ | julian | ``cftime.DatetimeJulian`` | False | +--------------------------------+---------------------------------------+----------------------------+ As in the standard pandas function, exactly three of ``start``, ``end``, ``periods``, or ``freq`` are required to generate a date range. Note that ``freq`` defaults to ``"D"`` in the event that any of ``start``, ``end``, or ``periods`` are set to ``None``. See :py:func:`pandas.date_range`. for more examples of the behavior of ``date_range`` with each of the parameters. Examples -------- This function returns a ``CFTimeIndex``, populated with ``cftime.datetime`` objects associated with the specified calendar type, e.g. >>> xr.date_range( ... start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True ... ) CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00, 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00], dtype='object', length=6, calendar='noleap', freq='2MS') See also -------- pandas.date_range cftime_range date_range_like """ if tz is not None: use_cftime = False if _is_standard_calendar(calendar) and use_cftime is not True: try: return pd.date_range( # type: ignore[call-overload,unused-ignore] start=start, end=end, periods=periods, # TODO remove translation once requiring pandas >= 2.2 freq=_new_to_legacy_freq(freq), tz=tz, normalize=normalize, name=name, inclusive=inclusive, unit=unit, ) except pd.errors.OutOfBoundsDatetime as err: if use_cftime is False: raise ValueError( "Date range is invalid for pandas DatetimeIndex, try using `use_cftime=True`." ) from err elif use_cftime is False: raise ValueError( f"Invalid calendar {calendar} for pandas DatetimeIndex, try using `use_cftime=True`." 
) return _cftime_range( start=start, end=end, periods=periods, freq=freq, normalize=normalize, name=name, inclusive=inclusive, calendar=calendar, ) def _new_to_legacy_freq(freq): # xarray will now always return "ME" and "QE" for MonthEnd and QuarterEnd # frequencies, but older versions of pandas do not support these as # frequency strings. Until xarray's minimum pandas version is 2.2 or above, # we add logic to continue using the deprecated "M" and "Q" frequency # strings in these circumstances. # NOTE: other conversions ("h" -> "H", ..., "ns" -> "N") not required # TODO: remove once requiring pandas >= 2.2 if not freq or Version(pd.__version__) >= Version("2.2"): return freq try: freq_as_offset = to_offset(freq) except ValueError: # freq may be valid in pandas but not in xarray return freq if isinstance(freq_as_offset, MonthEnd) and "ME" in freq: freq = freq.replace("ME", "M") elif isinstance(freq_as_offset, QuarterEnd) and "QE" in freq: freq = freq.replace("QE", "Q") elif isinstance(freq_as_offset, YearBegin) and "YS" in freq: freq = freq.replace("YS", "AS") elif isinstance(freq_as_offset, YearEnd): # testing for "Y" is required as this was valid in xarray 2023.11 - 2024.01 if "Y-" in freq: # Check for and replace "Y-" instead of just "Y" to prevent # corrupting anchored offsets that contain "Y" in the month # abbreviation, e.g. "Y-MAY" -> "A-MAY". freq = freq.replace("Y-", "A-") elif "YE-" in freq: freq = freq.replace("YE-", "A-") elif "A-" not in freq and freq.endswith("Y"): freq = freq.replace("Y", "A") elif freq.endswith("YE"): freq = freq.replace("YE", "A") return freq def _legacy_to_new_freq(freq: T_FreqStr) -> T_FreqStr: # to avoid internal deprecation warnings when freq is determined using pandas < 2.2 # TODO: remove once requiring pandas >= 2.2 if not freq or Version(pd.__version__) >= Version("2.2"): return freq try: freq_as_offset = to_offset(freq, warn=False) except ValueError: # freq may be valid in pandas but not in xarray return freq if isinstance(freq_as_offset, MonthEnd) and "ME" not in freq: freq = freq.replace("M", "ME") elif isinstance(freq_as_offset, QuarterEnd) and "QE" not in freq: freq = freq.replace("Q", "QE") elif isinstance(freq_as_offset, YearBegin) and "YS" not in freq: freq = freq.replace("AS", "YS") elif isinstance(freq_as_offset, YearEnd): if "A-" in freq: # Check for and replace "A-" instead of just "A" to prevent # corrupting anchored offsets that contain "Y" in the month # abbreviation, e.g. "A-MAY" -> "YE-MAY". freq = freq.replace("A-", "YE-") elif "Y-" in freq: freq = freq.replace("Y-", "YE-") elif freq.endswith("A"): # the "A-MAY" case is already handled above freq = freq.replace("A", "YE") elif "YE" not in freq and freq.endswith("Y"): # the "Y-MAY" case is already handled above freq = freq.replace("Y", "YE") elif isinstance(freq_as_offset, Hour): freq = freq.replace("H", "h") elif isinstance(freq_as_offset, Minute): freq = freq.replace("T", "min") elif isinstance(freq_as_offset, Second): freq = freq.replace("S", "s") elif isinstance(freq_as_offset, Millisecond): freq = freq.replace("L", "ms") elif isinstance(freq_as_offset, Microsecond): freq = freq.replace("U", "us") return freq def date_range_like(source, calendar, use_cftime=None): """Generate a datetime array with the same frequency, start and end as another one, but in a different calendar. Parameters ---------- source : DataArray, CFTimeIndex, or pd.DatetimeIndex 1D datetime array calendar : str New calendar name. 
use_cftime : bool, optional If True, the output uses :py:class:`cftime.datetime` objects. If None (default), :py:class:`numpy.datetime64` values are used if possible. If False, :py:class:`numpy.datetime64` values are used or an error is raised. Returns ------- DataArray 1D datetime coordinate with the same start, end and frequency as the source, but in the new calendar. The start date is assumed to exist in the target calendar. If the end date doesn't exist, the code tries 1 and 2 calendar days before. There is a special case when the source time series is daily or coarser and the end of the input range is on the last day of the month. Then the output range will also end on the last day of the month in the new calendar. """ from xarray.coding.frequencies import infer_freq from xarray.core.dataarray import DataArray if not isinstance(source, pd.DatetimeIndex | CFTimeIndex) and ( (isinstance(source, DataArray) and (source.ndim != 1)) or not _contains_datetime_like_objects(source.variable) ): raise ValueError( "'source' must be a 1D array of datetime objects for inferring its range." ) freq = infer_freq(source) if freq is None: raise ValueError( "`date_range_like` was unable to generate a range as the source frequency was not inferable." ) # TODO remove once requiring pandas >= 2.2 freq = _legacy_to_new_freq(freq) use_cftime = _should_cftime_be_used(source, calendar, use_cftime) source_start = source.values.min() source_end = source.values.max() freq_as_offset = to_offset(freq) if freq_as_offset.n < 0: source_start, source_end = source_end, source_start if is_np_datetime_like(source.dtype): # We want to use datetime fields (datetime64 object don't have them) source_calendar = "standard" source_start = default_precision_timestamp(source_start) source_end = default_precision_timestamp(source_end) elif isinstance(source, CFTimeIndex): source_calendar = source.calendar else: # DataArray source_calendar = source.dt.calendar if calendar == source_calendar and is_np_datetime_like(source.dtype) ^ use_cftime: return source date_type = get_date_type(calendar, use_cftime) start = convert_time_or_go_back(source_start, date_type) end = convert_time_or_go_back(source_end, date_type) # For the cases where the source ends on the end of the month, we expect the same in the new calendar. if source_end.day == source_end.daysinmonth and isinstance( freq_as_offset, YearEnd | QuarterEnd | MonthEnd | Day ): end = end.replace(day=end.daysinmonth) return date_range( start=start.isoformat(), end=end.isoformat(), freq=freq, calendar=calendar, ) �������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/cftimeindex.py�������������������������������������������������������0000664�0000000�0000000�00000074051�15056206164�0020775�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""DatetimeIndex analog for cftime.datetime objects""" # The pandas.Index subclass defined here was copied and adapted for # use with cftime.datetime objects based on the source code defining # pandas.DatetimeIndex. # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. 
# Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import math from datetime import timedelta from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd from packaging.version import Version from xarray.coding.times import ( _STANDARD_CALENDARS, _parse_iso8601, cftime_to_nptime, infer_calendar_name, ) from xarray.core.common import _contains_cftime_datetimes from xarray.core.options import OPTIONS from xarray.core.types import PDDatetimeUnitOptions from xarray.core.utils import attempt_import, emit_user_level_warning, is_scalar if TYPE_CHECKING: from xarray.coding.cftime_offsets import BaseCFTimeOffset from xarray.core.types import Self # constants for cftimeindex.repr CFTIME_REPR_LENGTH = 19 ITEMS_IN_REPR_MAX_ELSE_ELLIPSIS = 100 REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END = 10 OUT_OF_BOUNDS_TIMEDELTA_ERRORS: tuple[type[Exception], ...] try: OUT_OF_BOUNDS_TIMEDELTA_ERRORS = (pd.errors.OutOfBoundsTimedelta, OverflowError) except AttributeError: OUT_OF_BOUNDS_TIMEDELTA_ERRORS = (OverflowError,) def _parsed_string_to_bounds(date_type, resolution, parsed): """Generalization of pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds for use with non-standard calendars and cftime.datetime objects. 
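For example, with ``resolution="month"`` and a parsed date falling in February 2001, this returns ``date_type(2001, 2, 1)`` and ``date_type(2001, 3, 1) - timedelta(microseconds=1)``, i.e. the first and last representable instants of that month in the given calendar.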
""" if resolution == "year": return ( date_type(parsed.year, 1, 1), date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1), ) elif resolution == "month": if parsed.month == 12: end = date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1) else: end = date_type(parsed.year, parsed.month + 1, 1) - timedelta( microseconds=1 ) return date_type(parsed.year, parsed.month, 1), end elif resolution == "day": start = date_type(parsed.year, parsed.month, parsed.day) return start, start + timedelta(days=1, microseconds=-1) elif resolution == "hour": start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour) return start, start + timedelta(hours=1, microseconds=-1) elif resolution == "minute": start = date_type( parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute ) return start, start + timedelta(minutes=1, microseconds=-1) elif resolution == "second": start = date_type( parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute, parsed.second, ) return start, start + timedelta(seconds=1, microseconds=-1) else: raise KeyError def get_date_field(datetimes, field): """Adapted from pandas.tslib.get_date_field""" return np.array([getattr(date, field) for date in datetimes], dtype=np.int64) def _field_accessor(name, docstring=None, min_cftime_version="0.0"): """Adapted from pandas.tseries.index._field_accessor""" def f(self, min_cftime_version=min_cftime_version): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if Version(cftime.__version__) >= Version(min_cftime_version): return get_date_field(self._data, name) else: raise ImportError( f"The {name:!r} accessor requires a minimum " f"version of cftime of {min_cftime_version}. Found an " f"installed version of {cftime.__version__}." ) f.__name__ = name f.__doc__ = docstring return property(f) def get_date_type(self): if self._data.size: return type(self._data[0]) else: return None def assert_all_valid_date_type(data): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if len(data) > 0: sample = data[0] date_type = type(sample) if not isinstance(sample, cftime.datetime): raise TypeError( "CFTimeIndex requires cftime.datetime " f"objects. Got object of {date_type}." ) if not all(isinstance(value, date_type) for value in data): raise TypeError( "CFTimeIndex requires using datetime " f"objects of all the same type. Got\n{data}." 
) def format_row(times, indent=0, separator=", ", row_end=",\n"): """Format a single row from format_times.""" return indent * " " + separator.join(map(str, times)) + row_end def format_times( index, max_width, offset, separator=", ", first_row_offset=0, intermediate_row_end=",\n", last_row_end="", ): """Format values of cftimeindex as pd.Index.""" n_per_row = max(max_width // (CFTIME_REPR_LENGTH + len(separator)), 1) n_rows = math.ceil(len(index) / n_per_row) representation = "" for row in range(n_rows): indent = first_row_offset if row == 0 else offset row_end = last_row_end if row == n_rows - 1 else intermediate_row_end times_for_row = index[row * n_per_row : (row + 1) * n_per_row] representation += format_row( times_for_row, indent=indent, separator=separator, row_end=row_end ) return representation def format_attrs(index, separator=", "): """Format attributes of CFTimeIndex for __repr__.""" attrs = { "dtype": f"'{index.dtype}'", "length": f"{len(index)}", "calendar": f"{index.calendar!r}", "freq": f"{index.freq!r}", } attrs_str = [f"{k}={v}" for k, v in attrs.items()] attrs_str = f"{separator}".join(attrs_str) return attrs_str class CFTimeIndex(pd.Index): """Custom Index for working with CF calendars and dates All elements of a CFTimeIndex must be cftime.datetime objects. Parameters ---------- data : array or CFTimeIndex Sequence of cftime.datetime objects to use in index name : str, default: None Name of the resulting index See Also -------- date_range """ _data: np.ndarray year = _field_accessor("year", "The year of the datetime") month = _field_accessor("month", "The month of the datetime") day = _field_accessor("day", "The days of the datetime") hour = _field_accessor("hour", "The hours of the datetime") minute = _field_accessor("minute", "The minutes of the datetime") second = _field_accessor("second", "The seconds of the datetime") microsecond = _field_accessor("microsecond", "The microseconds of the datetime") dayofyear = _field_accessor( "dayofyr", "The ordinal day of year of the datetime", "1.0.2.1" ) dayofweek = _field_accessor("dayofwk", "The day of week of the datetime", "1.0.2.1") days_in_month = _field_accessor( "daysinmonth", "The number of days in the month of the datetime", "1.1.0.0" ) date_type = property(get_date_type) def __new__(cls, data, name=None, **kwargs): assert_all_valid_date_type(data) if name is None and hasattr(data, "name"): name = data.name result = object.__new__(cls) result._data = np.array(data, dtype="O") result.name = name result._cache = {} return result def __repr__(self): """ Return a string representation for this object. 
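The representation lists the datetimes followed by the ``dtype``, ``length``, ``calendar`` and inferred ``freq`` of the index, wrapping rows to the configured ``display_width`` option.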
""" klass_name = type(self).__name__ display_width = OPTIONS["display_width"] offset = len(klass_name) + 2 if len(self) <= ITEMS_IN_REPR_MAX_ELSE_ELLIPSIS: datastr = format_times( self.values, display_width, offset=offset, first_row_offset=0 ) else: front_str = format_times( self.values[:REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END], display_width, offset=offset, first_row_offset=0, last_row_end=",", ) end_str = format_times( self.values[-REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END:], display_width, offset=offset, first_row_offset=offset, ) datastr = "\n".join([front_str, f"{' ' * offset}...", end_str]) attrs_str = format_attrs(self) # oneliner only if smaller than display_width full_repr_str = f"{klass_name}([{datastr}], {attrs_str})" if len(full_repr_str) > display_width: # if attrs_str too long, one per line if len(attrs_str) >= display_width - offset: attrs_str = attrs_str.replace(",", f",\n{' ' * (offset - 2)}") full_repr_str = ( f"{klass_name}([{datastr}],\n{' ' * (offset - 1)}{attrs_str})" ) return full_repr_str def _partial_date_slice(self, resolution, parsed): """Adapted from pandas.tseries.index.DatetimeIndex._partial_date_slice Note that when using a CFTimeIndex, if a partial-date selection returns a single element, it will never be converted to a scalar coordinate; this is in slight contrast to the behavior when using a DatetimeIndex, which sometimes will return a DataArray with a scalar coordinate depending on the resolution of the datetimes used in defining the index. For example: >>> from cftime import DatetimeNoLeap >>> da = xr.DataArray( ... [1, 2], ... coords=[[DatetimeNoLeap(2001, 1, 1), DatetimeNoLeap(2001, 2, 1)]], ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") Size: 8B array([1]) Coordinates: * time (time) object 8B 2001-01-01 00:00:00 >>> da = xr.DataArray( ... [1, 2], ... coords=[[pd.Timestamp(2001, 1, 1), pd.Timestamp(2001, 2, 1)]], ... dims=["time"], ... ) >>> da.sel(time="2001-01-01") Size: 8B array(1) Coordinates: time datetime64[ns] 8B 2001-01-01 >>> da = xr.DataArray( ... [1, 2], ... coords=[[pd.Timestamp(2001, 1, 1, 1), pd.Timestamp(2001, 2, 1)]], ... dims=["time"], ... 
) >>> da.sel(time="2001-01-01") Size: 8B array([1]) Coordinates: * time (time) datetime64[ns] 8B 2001-01-01T01:00:00 """ start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed) times = self._data if self.is_monotonic_increasing: if len(times) and ( (start < times[0] and end < times[0]) or (start > times[-1] and end > times[-1]) ): # we are out of range raise KeyError # a monotonic (sorted) series can be sliced left = times.searchsorted(start, side="left") right = times.searchsorted(end, side="right") return slice(left, right) lhs_mask = times >= start rhs_mask = times <= end return np.flatnonzero(lhs_mask & rhs_mask) def _get_string_slice(self, key): """Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice""" parsed, resolution = _parse_iso8601(self.date_type, key) try: loc = self._partial_date_slice(resolution, parsed) except KeyError as err: raise KeyError(key) from err return loc def _get_nearest_indexer(self, target, limit, tolerance): """Adapted from pandas.Index._get_nearest_indexer""" left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = abs(self.values[left_indexer] - target.values) right_distances = abs(self.values[right_indexer] - target.values) if self.is_monotonic_increasing: condition = (left_distances < right_distances) | (right_indexer == -1) else: condition = (left_distances <= right_distances) | (right_indexer == -1) indexer = np.where(condition, left_indexer, right_indexer) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance(self, target, indexer, tolerance): """Adapted from pandas.Index._filter_indexer_tolerance""" if isinstance(target, pd.Index): distance = abs(self.values[indexer] - target.values) else: distance = abs(self.values[indexer] - target) indexer = np.where(distance <= tolerance, indexer, -1) return indexer def get_loc(self, key): """Adapted from pandas.tseries.index.DatetimeIndex.get_loc""" if isinstance(key, str): return self._get_string_slice(key) else: return super().get_loc(key) def _maybe_cast_slice_bound(self, label, side): """Adapted from pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound """ if not isinstance(label, str): return label parsed, resolution = _parse_iso8601(self.date_type, label) start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed) if self.is_monotonic_decreasing and len(self) > 1: return end if side == "left" else start return start if side == "left" else end # TODO: Add ability to use integer range outside of iloc? # e.g. series[1:5]. 
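    # A minimal illustrative sketch (assuming cftime is installed; ``da`` is a
    # hypothetical DataArray indexed by ``index`` along "time"): partial
    # datetime strings select ranges via the helpers above, e.g.
    #
    #     index = xr.date_range(
    #         "2000-01-01", periods=12, freq="MS", calendar="noleap", use_cftime=True
    #     )
    #     index.get_loc("2000-02")  # -> slice covering February 2000
    #     da.sel(time=slice("2000-01", "2000-03"))  # goes through _maybe_cast_slice_bound
    #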
def get_value(self, series, key): """Adapted from pandas.tseries.index.DatetimeIndex.get_value""" if np.asarray(key).dtype == np.dtype(bool): return series.iloc[key] elif isinstance(key, slice): return series.iloc[self.slice_indexer(key.start, key.stop, key.step)] else: return series.iloc[self.get_loc(key)] def __contains__(self, key: Any) -> bool: """Adapted from pandas.tseries.base.DatetimeIndexOpsMixin.__contains__""" try: result = self.get_loc(key) return ( is_scalar(result) or isinstance(result, slice) or (isinstance(result, np.ndarray) and result.size > 0) ) except (KeyError, TypeError, ValueError): return False def contains(self, key: Any) -> bool: """Needed for .loc based partial-string indexing""" return self.__contains__(key) def shift( # type: ignore[override,unused-ignore] self, periods: int | float, freq: str | timedelta | BaseCFTimeOffset | None = None, ) -> Self: """Shift the CFTimeIndex a multiple of the given frequency. See the documentation for :py:func:`~xarray.date_range` for a complete listing of valid frequency strings. Parameters ---------- periods : int, float if freq of days or below Periods to shift by freq : str, datetime.timedelta or BaseCFTimeOffset A frequency string or datetime.timedelta object to shift by Returns ------- CFTimeIndex See Also -------- pandas.DatetimeIndex.shift Examples -------- >>> index = xr.date_range("2000", periods=1, freq="ME", use_cftime=True) >>> index CFTimeIndex([2000-01-31 00:00:00], dtype='object', length=1, calendar='standard', freq=None) >>> index.shift(1, "ME") CFTimeIndex([2000-02-29 00:00:00], dtype='object', length=1, calendar='standard', freq=None) >>> index.shift(1.5, "24h") CFTimeIndex([2000-02-01 12:00:00], dtype='object', length=1, calendar='standard', freq=None) """ from xarray.coding.cftime_offsets import BaseCFTimeOffset if freq is None: # None type is required to be compatible with base pd.Index class raise TypeError( f"`freq` argument cannot be None for {type(self).__name__}.shift" ) if isinstance(freq, timedelta): return self + periods * freq if isinstance(freq, str | BaseCFTimeOffset): from xarray.coding.cftime_offsets import to_offset return self + periods * to_offset(freq) raise TypeError( f"'freq' must be of type str or datetime.timedelta, got {type(freq)}." ) def __add__(self, other) -> Self: if isinstance(other, pd.TimedeltaIndex): other = other.to_pytimedelta() return type(self)(np.array(self) + other) def __radd__(self, other) -> Self: if isinstance(other, pd.TimedeltaIndex): other = other.to_pytimedelta() return type(self)(other + np.array(self)) def __sub__(self, other): if _contains_datetime_timedeltas(other): return type(self)(np.array(self) - other) if isinstance(other, pd.TimedeltaIndex): return type(self)(np.array(self) - other.to_pytimedelta()) if _contains_cftime_datetimes(np.array(other)): try: return pd.TimedeltaIndex(np.array(self) - np.array(other)) except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err: raise ValueError( "The time difference exceeds the range of values " "that can be expressed at the nanosecond resolution." ) from err return NotImplemented def __rsub__(self, other): try: return pd.TimedeltaIndex(other - np.array(self)) except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err: raise ValueError( "The time difference exceeds the range of values " "that can be expressed at the nanosecond resolution." ) from err def to_datetimeindex( self, unsafe: bool = False, time_unit: PDDatetimeUnitOptions | None = None ) -> pd.DatetimeIndex: """If possible, convert this index to a pandas.DatetimeIndex. 
Parameters ---------- unsafe : bool Flag to turn off calendar mismatch warnings (default ``False``). time_unit : str Time resolution of resulting DatetimeIndex. Can be one of `"s"`, ``"ms"``, ``"us"``, or ``"ns"`` (default ``"ns"``). Returns ------- pandas.DatetimeIndex Raises ------ ValueError If the CFTimeIndex contains dates that are not possible in the standard calendar or outside the range representable by the specified ``time_unit``. Warns ----- RuntimeWarning If converting from a non-standard calendar, or a Gregorian calendar with dates prior to the reform (1582-10-15). Warnings -------- Note that for non-proleptic Gregorian calendars, this will change the calendar type of the index. In that case the result of this method should be used with caution. Examples -------- >>> times = xr.date_range( ... "2000", periods=2, calendar="gregorian", use_cftime=True ... ) >>> times CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object', length=2, calendar='standard', freq=None) >>> times.to_datetimeindex(time_unit="ns") DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None) """ if not self._data.size: return pd.DatetimeIndex([]) if time_unit is None: emit_user_level_warning( "In a future version of xarray to_datetimeindex will default " "to returning a 'us'-resolution DatetimeIndex instead of a " "'ns'-resolution DatetimeIndex. This warning can be silenced " "by explicitly passing the `time_unit` keyword argument.", FutureWarning, ) time_unit = "ns" nptimes = cftime_to_nptime(self, time_unit=time_unit) calendar = infer_calendar_name(self) if calendar not in _STANDARD_CALENDARS and not unsafe: emit_user_level_warning( "Converting a CFTimeIndex with dates from a non-standard " f"calendar, {calendar!r}, to a pandas.DatetimeIndex, which " "uses dates from the standard calendar. This may lead to " "subtle errors in operations that depend on the length of " "time between dates.", RuntimeWarning, ) if calendar == "standard" and not unsafe: reform_date = self.date_type(1582, 10, 15) if self.min() < reform_date: emit_user_level_warning( "Converting a CFTimeIndex with dates from a Gregorian " "calendar that fall before the reform date of 1582-10-15 " "to a pandas.DatetimeIndex. During this time period the " "Gregorian calendar and the proleptic Gregorian calendar " "of the DatetimeIndex do not exactly align. This warning " "can be silenced by setting unsafe=True.", RuntimeWarning, ) return pd.DatetimeIndex(nptimes) def strftime(self, date_format): """ Return an Index of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc `__ Parameters ---------- date_format : str Date format string (e.g. "%Y-%m-%d") Returns ------- pandas.Index Index of formatted strings Examples -------- >>> rng = xr.date_range( ... start="2000", ... periods=5, ... freq="2MS", ... calendar="noleap", ... use_cftime=True, ... 
) >>> rng.strftime("%B %d, %Y, %r") Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM', 'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM', 'September 01, 2000, 12:00:00 AM'], dtype='object') """ return pd.Index([date.strftime(date_format) for date in self._data]) @property def asi8(self): """Convert to integers with units of microseconds since 1970-01-01.""" from xarray.core.resample_cftime import exact_cftime_datetime_difference if not self._data.size: return np.array([], dtype=np.int64) epoch = self.date_type(1970, 1, 1) return np.array( [ _total_microseconds(exact_cftime_datetime_difference(epoch, date)) for date in self.values ], dtype=np.int64, ) @property def calendar(self): """The calendar used by the datetimes in the index.""" if not self._data.size: return None return infer_calendar_name(self) @property def freq(self): """The frequency used by the dates in the index.""" from xarray.coding.frequencies import infer_freq # min 3 elemtents required to determine freq if self._data.size < 3: return None return infer_freq(self) def _round_via_method(self, freq, method): """Round dates using a specified method.""" from xarray.coding.cftime_offsets import CFTIME_TICKS, Day, to_offset if not self._data.size: return CFTimeIndex(np.array(self)) offset = to_offset(freq) if isinstance(offset, Day): # Following pandas, "In the 'round' context, Day unambiguously # means 24h, not calendar-day" offset_as_timedelta = timedelta(days=offset.n) elif isinstance(offset, CFTIME_TICKS): offset_as_timedelta = offset.as_timedelta() else: raise ValueError(f"{offset} is a non-fixed frequency") unit = _total_microseconds(offset_as_timedelta) values = self.asi8 rounded = method(values, unit) return _cftimeindex_from_i8(rounded, self.date_type, self.name) def floor(self, freq): """Round dates down to fixed frequency. Parameters ---------- freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ for a list of possible values. Returns ------- CFTimeIndex """ return self._round_via_method(freq, _floor_int) def ceil(self, freq): """Round dates up to fixed frequency. Parameters ---------- freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ for a list of possible values. Returns ------- CFTimeIndex """ return self._round_via_method(freq, _ceil_int) def round(self, freq): """Round dates to a fixed frequency. Parameters ---------- freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ for a list of possible values. Returns ------- CFTimeIndex """ return self._round_via_method(freq, _round_to_nearest_half_even) @property def is_leap_year(self): if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") func = np.vectorize(cftime.is_leap_year) return func(self.year, calendar=self.calendar) def _parse_array_of_cftime_strings(strings, date_type): """Create a numpy array from an array of strings. For use in generating dates from strings for use with interp. Assumes the array is either 0-dimensional or 1-dimensional. 
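    For example, an array of strings such as ``["2000-01-01", "2000-02-01"]`` combined with ``date_type=cftime.DatetimeNoLeap`` yields an array of two ``cftime.DatetimeNoLeap`` objects with the same shape as the input.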
Parameters ---------- strings : array of strings Strings to convert to dates date_type : cftime.datetime type Calendar type to use for dates Returns ------- np.array """ return np.array([_parse_iso8601(date_type, s)[0] for s in strings.ravel()]).reshape( strings.shape ) def _contains_datetime_timedeltas(array): """Check if an input array contains datetime.timedelta objects.""" array = np.atleast_1d(array) return isinstance(array[0], timedelta) def _cftimeindex_from_i8(values, date_type, name): """Construct a CFTimeIndex from an array of integers. Parameters ---------- values : np.array Integers representing microseconds since 1970-01-01. date_type : cftime.datetime Type of date for the index. name : str Name of the index. Returns ------- CFTimeIndex """ epoch = date_type(1970, 1, 1) dates = np.array([epoch + timedelta(microseconds=int(value)) for value in values]) return CFTimeIndex(dates, name=name) def _total_microseconds(delta): """Compute the total number of microseconds of a datetime.timedelta. Parameters ---------- delta : datetime.timedelta Input timedelta. Returns ------- int """ return delta / timedelta(microseconds=1) def _floor_int(values, unit): """Copied from pandas.""" return values - np.remainder(values, unit) def _ceil_int(values, unit): """Copied from pandas.""" return values + np.remainder(-values, unit) def _round_to_nearest_half_even(values, unit): """Copied from pandas.""" if unit % 2: return _ceil_int(values - unit // 2, unit) quotient, remainder = np.divmod(values, unit) mask = np.logical_or( remainder > (unit // 2), np.logical_and(remainder == (unit // 2), quotient % 2) ) quotient[mask] += 1 return quotient * unit ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/common.py������������������������������������������������������������0000664�0000000�0000000�00000011715�15056206164�0017764�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Callable, Hashable, MutableMapping from typing import TYPE_CHECKING, Any, Union import numpy as np from xarray.core import indexing from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array if TYPE_CHECKING: T_VarTuple = tuple[tuple[Hashable, ...], Any, dict, dict] T_Name = Union[Hashable, None] class SerializationWarning(RuntimeWarning): """Warnings about encoding/decoding issues in serialization.""" class VariableCoder: """Base class for encoding and decoding transformations on variables. We use coders for transforming variables between xarray's data model and a format suitable for serialization. For example, coders apply CF conventions for how data should be represented in netCDF files. 
Subclasses should implement encode() and decode(), which should satisfy the identity ``coder.decode(coder.encode(variable)) == variable``. If any options are necessary, they should be implemented as arguments to the __init__ method. The optional name argument to encode() and decode() exists solely for the sake of better error messages, and should correspond to the name of variables in the underlying store. """ def encode(self, variable: Variable, name: T_Name = None) -> Variable: """Convert an encoded variable to a decoded variable""" raise NotImplementedError() def decode(self, variable: Variable, name: T_Name = None) -> Variable: """Convert a decoded variable to an encoded variable""" raise NotImplementedError() class _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin): """Lazily computed array holding values of elemwise-function. Do not construct this object directly: call lazy_elemwise_func instead. Values are computed upon indexing or coercion to a NumPy array. """ def __init__(self, array, func: Callable, dtype: np.typing.DTypeLike): assert not is_chunked_array(array) self.array = indexing.as_indexable(array) self.func = func self._dtype = dtype @property def dtype(self) -> np.dtype: return np.dtype(self._dtype) def transpose(self, order): # For elementwise functions, we can compose transpose and function application return type(self)(self.array.transpose(order), self.func, self.dtype) def _oindex_get(self, key): return type(self)(self.array.oindex[key], self.func, self.dtype) def _vindex_get(self, key): return type(self)(self.array.vindex[key], self.func, self.dtype) def __getitem__(self, key): return type(self)(self.array[key], self.func, self.dtype) def get_duck_array(self): return self.func(self.array.get_duck_array()) async def async_get_duck_array(self): return self.func(await self.array.async_get_duck_array()) def __repr__(self) -> str: return f"{type(self).__name__}({self.array!r}, func={self.func!r}, dtype={self.dtype!r})" def lazy_elemwise_func(array, func: Callable, dtype: np.typing.DTypeLike): """Lazily apply an element-wise function to an array. Parameters ---------- array : any valid value of Variable._data func : callable Function to apply to indexed slices of an array. For use with dask, this should be a pickle-able object. dtype : coercible to np.dtype Dtype for the result of this function. Returns ------- Either a dask.array.Array or _ElementwiseFunctionArray. """ if is_chunked_array(array): chunkmanager = get_chunked_array_type(array) return chunkmanager.map_blocks(func, array, dtype=dtype) # type: ignore[arg-type] else: return _ElementwiseFunctionArray(array, func, dtype) def safe_setitem(dest, key: Hashable, value, name: T_Name = None): if key in dest: var_str = f" on variable {name!r}" if name else "" raise ValueError( f"failed to prevent overwriting existing key {key} in attrs{var_str}. " "This is probably an encoding field used by xarray to describe " "how a variable is serialized. To proceed, remove this key from " "the variable's attributes manually." ) dest[key] = value def pop_to( source: MutableMapping, dest: MutableMapping, key: Hashable, name: T_Name = None ) -> Any: """ A convenience function which pops a key k from source to dest. None values are not passed on. If k already exists in dest an error is raised. 
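    A minimal illustration of the intended behaviour:

    >>> attrs = {"units": "days since 2000-01-01"}
    >>> encoding = {}
    >>> pop_to(attrs, encoding, "units")
    'days since 2000-01-01'
    >>> attrs, encoding
    ({}, {'units': 'days since 2000-01-01'})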
""" value = source.pop(key, None) if value is not None: safe_setitem(dest, key, value, name=name) return value def unpack_for_encoding(var: Variable) -> T_VarTuple: return var.dims, var.data, var.attrs.copy(), var.encoding.copy() def unpack_for_decoding(var: Variable) -> T_VarTuple: return var.dims, var._data, var.attrs.copy(), var.encoding.copy() ���������������������������������������������������xarray-2025.09.0/xarray/coding/frequencies.py�������������������������������������������������������0000664�0000000�0000000�00000022244�15056206164�0021004�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""FrequencyInferer analog for cftime.datetime objects""" # The infer_freq method and the _CFTimeFrequencyInferer # subclass defined here were copied and adapted for # use with cftime.datetime objects based on the source code in # pandas.tseries.Frequencies._FrequencyInferer # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import numpy as np import pandas as pd from xarray.coding.cftime_offsets import _MONTH_ABBREVIATIONS, _legacy_to_new_freq from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.common import _contains_datetime_like_objects from xarray.core.dtypes import _is_numpy_subdtype _ONE_MICRO = 1 _ONE_MILLI = _ONE_MICRO * 1000 _ONE_SECOND = _ONE_MILLI * 1000 _ONE_MINUTE = 60 * _ONE_SECOND _ONE_HOUR = 60 * _ONE_MINUTE _ONE_DAY = 24 * _ONE_HOUR def infer_freq(index): """ Infer the most likely frequency given the input index. 
Parameters ---------- index : CFTimeIndex, DataArray, DatetimeIndex, TimedeltaIndex, Series If not passed a CFTimeIndex, this simply calls `pandas.infer_freq`. If passed a Series or a DataArray will use the values of the series (NOT THE INDEX). Returns ------- str or None None if no discernible frequency. Raises ------ TypeError If the index is not datetime-like. ValueError If there are fewer than three values or the index is not 1D. """ from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if isinstance(index, DataArray | pd.Series): if index.ndim != 1: raise ValueError("'index' must be 1D") elif not _contains_datetime_like_objects(Variable("dim", index)): raise ValueError("'index' must contain datetime-like objects") dtype = np.asarray(index).dtype if _is_numpy_subdtype(dtype, "datetime64"): index = pd.DatetimeIndex(index.values) elif _is_numpy_subdtype(dtype, "timedelta64"): index = pd.TimedeltaIndex(index.values) else: index = CFTimeIndex(index.values) if isinstance(index, CFTimeIndex): inferer = _CFTimeFrequencyInferer(index) return inferer.get_freq() return _legacy_to_new_freq(pd.infer_freq(index)) class _CFTimeFrequencyInferer: # (pd.tseries.frequencies._FrequencyInferer): def __init__(self, index): self.index = index self.values = index.asi8 if len(index) < 3: raise ValueError("Need at least 3 dates to infer frequency") self.is_monotonic = ( self.index.is_monotonic_decreasing or self.index.is_monotonic_increasing ) self._deltas = None self._year_deltas = None self._month_deltas = None def get_freq(self): """Find the appropriate frequency string to describe the inferred frequency of self.index Adapted from `pandas.tsseries.frequencies._FrequencyInferer.get_freq` for CFTimeIndexes. Returns ------- str or None """ if not self.is_monotonic or not self.index.is_unique: return None delta = self.deltas[0] # Smallest delta if _is_multiple(delta, _ONE_DAY): return self._infer_daily_rule() # There is no possible intraday frequency with a non-unique delta # Different from pandas: we don't need to manage DST and business offsets in cftime elif len(self.deltas) != 1: return None if _is_multiple(delta, _ONE_HOUR): return _maybe_add_count("h", delta / _ONE_HOUR) elif _is_multiple(delta, _ONE_MINUTE): return _maybe_add_count("min", delta / _ONE_MINUTE) elif _is_multiple(delta, _ONE_SECOND): return _maybe_add_count("s", delta / _ONE_SECOND) elif _is_multiple(delta, _ONE_MILLI): return _maybe_add_count("ms", delta / _ONE_MILLI) else: return _maybe_add_count("us", delta / _ONE_MICRO) def _infer_daily_rule(self): annual_rule = self._get_annual_rule() if annual_rule: nyears = self.year_deltas[0] month = _MONTH_ABBREVIATIONS[self.index[0].month] alias = f"{annual_rule}-{month}" return _maybe_add_count(alias, nyears) quartely_rule = self._get_quartely_rule() if quartely_rule: nquarters = self.month_deltas[0] / 3 mod_dict = {0: 12, 2: 11, 1: 10} month = _MONTH_ABBREVIATIONS[mod_dict[self.index[0].month % 3]] alias = f"{quartely_rule}-{month}" return _maybe_add_count(alias, nquarters) monthly_rule = self._get_monthly_rule() if monthly_rule: return _maybe_add_count(monthly_rule, self.month_deltas[0]) if len(self.deltas) == 1: # Daily as there is no "Weekly" offsets with CFTime days = self.deltas[0] / _ONE_DAY return _maybe_add_count("D", days) # CFTime has no business freq and no "week of month" (WOM) return None def _get_annual_rule(self): if len(self.year_deltas) > 1: return None if len(np.unique(self.index.month)) > 1: return None return {"cs": "YS", "ce": 
"YE"}.get(month_anchor_check(self.index)) def _get_quartely_rule(self): if len(self.month_deltas) > 1: return None if self.month_deltas[0] % 3 != 0: return None return {"cs": "QS", "ce": "QE"}.get(month_anchor_check(self.index)) def _get_monthly_rule(self): if len(self.month_deltas) > 1: return None return {"cs": "MS", "ce": "ME"}.get(month_anchor_check(self.index)) @property def deltas(self): """Sorted unique timedeltas as microseconds.""" if self._deltas is None: self._deltas = _unique_deltas(self.values) return self._deltas @property def year_deltas(self): """Sorted unique year deltas.""" if self._year_deltas is None: self._year_deltas = _unique_deltas(self.index.year) return self._year_deltas @property def month_deltas(self): """Sorted unique month deltas.""" if self._month_deltas is None: self._month_deltas = _unique_deltas(self.index.year * 12 + self.index.month) return self._month_deltas def _unique_deltas(arr): """Sorted unique deltas of numpy array""" return np.sort(np.unique(np.diff(arr))) def _is_multiple(us, mult: int): """Whether us is a multiple of mult""" return us % mult == 0 def _maybe_add_count(base: str, count: float): """If count is greater than 1, add it to the base offset string""" if count != 1: assert count == int(count) count = int(count) return f"{count}{base}" else: return base def month_anchor_check(dates): """Return the monthly offset string. Return "cs" if all dates are the first days of the month, "ce" if all dates are the last day of the month, None otherwise. Replicated pandas._libs.tslibs.resolution.month_position_check but without business offset handling. """ calendar_end = True calendar_start = True for date in dates: if calendar_start: calendar_start &= date.day == 1 if calendar_end: cal = date.day == date.daysinmonth calendar_end &= cal elif not calendar_start: break if calendar_end: return "ce" elif calendar_start: return "cs" else: return None ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/coding/strings.py�����������������������������������������������������������0000664�0000000�0000000�00000024641�15056206164�0020167�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Coders for strings.""" from __future__ import annotations import re from functools import partial import numpy as np from xarray.coding.variables import ( VariableCoder, lazy_elemwise_func, pop_to, safe_setitem, unpack_for_decoding, unpack_for_encoding, ) from xarray.core import indexing from xarray.core.utils import emit_user_level_warning, module_available from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array HAS_NUMPY_2_0 = module_available("numpy", minversion="2.0.0.dev0") def create_vlen_dtype(element_type): if element_type not in (str, bytes): raise TypeError(f"unsupported type for vlen_dtype: {element_type!r}") # based on h5py.special_dtype return 
np.dtype("O", metadata={"element_type": element_type}) def check_vlen_dtype(dtype): if dtype.kind != "O" or dtype.metadata is None: return None else: # check xarray (element_type) as well as h5py (vlen) return dtype.metadata.get("element_type", dtype.metadata.get("vlen")) def is_unicode_dtype(dtype): return dtype.kind == "U" or check_vlen_dtype(dtype) is str def is_bytes_dtype(dtype): return dtype.kind == "S" or check_vlen_dtype(dtype) is bytes class EncodedStringCoder(VariableCoder): """Transforms between unicode strings and fixed-width UTF-8 bytes.""" def __init__(self, allows_unicode=True): self.allows_unicode = allows_unicode def encode(self, variable: Variable, name=None) -> Variable: dims, data, attrs, encoding = unpack_for_encoding(variable) contains_unicode = is_unicode_dtype(data.dtype) encode_as_char = encoding.get("dtype") == "S1" if encode_as_char: del encoding["dtype"] # no longer relevant if contains_unicode and (encode_as_char or not self.allows_unicode): if "_FillValue" in attrs: raise NotImplementedError( f"variable {name!r} has a _FillValue specified, but " "_FillValue is not yet supported on unicode strings: " "https://github.com/pydata/xarray/issues/1647" ) string_encoding = encoding.pop("_Encoding", "utf-8") safe_setitem(attrs, "_Encoding", string_encoding, name=name) # TODO: figure out how to handle this in a lazy way with dask data = encode_string_array(data, string_encoding) return Variable(dims, data, attrs, encoding) else: variable.encoding = encoding return variable def decode(self, variable: Variable, name=None) -> Variable: dims, data, attrs, encoding = unpack_for_decoding(variable) if "_Encoding" in attrs: string_encoding = pop_to(attrs, encoding, "_Encoding") func = partial(decode_bytes_array, encoding=string_encoding) data = lazy_elemwise_func(data, func, np.dtype(object)) return Variable(dims, data, attrs, encoding) def decode_bytes_array(bytes_array, encoding="utf-8"): # This is faster than using np.char.decode() or np.vectorize() bytes_array = np.asarray(bytes_array) decoded = [x.decode(encoding) for x in bytes_array.ravel()] return np.array(decoded, dtype=object).reshape(bytes_array.shape) def encode_string_array(string_array, encoding="utf-8"): string_array = np.asarray(string_array) encoded = [x.encode(encoding) for x in string_array.ravel()] return np.array(encoded, dtype=bytes).reshape(string_array.shape) def ensure_fixed_length_bytes(var: Variable) -> Variable: """Ensure that a variable with vlen bytes is converted to fixed width.""" if check_vlen_dtype(var.dtype) is bytes: dims, data, attrs, encoding = unpack_for_encoding(var) # TODO: figure out how to handle this with dask data = np.asarray(data, dtype=np.bytes_) return Variable(dims, data, attrs, encoding) else: return var def validate_char_dim_name(strlen, encoding, name) -> str: """Check character array dimension naming and size and return it.""" if (char_dim_name := encoding.pop("char_dim_name", None)) is not None: # 1 - extract all characters up to last number sequence # 2 - extract last number sequence match = re.search(r"^(.*?)(\d+)(?!.*\d)", char_dim_name) if match: new_dim_name = match.group(1) if int(match.group(2)) != strlen: emit_user_level_warning( f"String dimension naming mismatch on variable {name!r}. {char_dim_name!r} provided by encoding, but data has length of '{strlen}'. Using '{new_dim_name}{strlen}' instead of {char_dim_name!r} to prevent possible naming clash.\n" "To silence this warning either remove 'char_dim_name' from encoding or provide a fitting name." 
) char_dim_name = f"{new_dim_name}{strlen}" elif ( original_shape := encoding.get("original_shape", [-1])[-1] ) != -1 and original_shape != strlen: emit_user_level_warning( f"String dimension length mismatch on variable {name!r}. '{original_shape}' provided by encoding, but data has length of '{strlen}'. Using '{char_dim_name}{strlen}' instead of {char_dim_name!r} to prevent possible naming clash.\n" f"To silence this warning remove 'original_shape' from encoding." ) char_dim_name = f"{char_dim_name}{strlen}" else: char_dim_name = f"string{strlen}" return char_dim_name class CharacterArrayCoder(VariableCoder): """Transforms between arrays containing bytes and character arrays.""" def encode(self, variable, name=None): variable = ensure_fixed_length_bytes(variable) dims, data, attrs, encoding = unpack_for_encoding(variable) if data.dtype.kind == "S" and encoding.get("dtype") is not str: data = bytes_to_char(data) char_dim_name = validate_char_dim_name(data.shape[-1], encoding, name) dims = dims + (char_dim_name,) return Variable(dims, data, attrs, encoding) def decode(self, variable, name=None): dims, data, attrs, encoding = unpack_for_decoding(variable) if data.dtype == "S1" and dims: encoding["char_dim_name"] = dims[-1] dims = dims[:-1] data = char_to_bytes(data) return Variable(dims, data, attrs, encoding) def bytes_to_char(arr): """Convert numpy/dask arrays from fixed width bytes to characters.""" if arr.dtype.kind != "S": raise ValueError("argument must have a fixed-width bytes dtype") if is_chunked_array(arr): chunkmanager = get_chunked_array_type(arr) return chunkmanager.map_blocks( _numpy_bytes_to_char, arr, dtype="S1", chunks=arr.chunks + ((arr.dtype.itemsize,)), new_axis=[arr.ndim], ) return _numpy_bytes_to_char(arr) def _numpy_bytes_to_char(arr): """Like netCDF4.stringtochar, but faster and more flexible.""" # adapt handling of copy-kwarg to numpy 2.0 # see https://github.com/numpy/numpy/issues/25916 # and https://github.com/numpy/numpy/pull/25922 copy = None if HAS_NUMPY_2_0 else False # ensure the array is contiguous arr = np.array(arr, copy=copy, order="C", dtype=np.bytes_) return arr.reshape(arr.shape + (1,)).view("S1") def char_to_bytes(arr): """Convert numpy/dask arrays from characters to fixed width bytes.""" if arr.dtype != "S1": raise ValueError("argument must have dtype='S1'") if not arr.ndim: # no dimension to concatenate along return arr size = arr.shape[-1] if not size: # can't make an S0 dtype return np.zeros(arr.shape[:-1], dtype=np.bytes_) if is_chunked_array(arr): chunkmanager = get_chunked_array_type(arr) if len(arr.chunks[-1]) > 1: raise ValueError( "cannot stacked dask character array with " f"multiple chunks in the last dimension: {arr}" ) dtype = np.dtype("S" + str(arr.shape[-1])) return chunkmanager.map_blocks( _numpy_char_to_bytes, arr, dtype=dtype, chunks=arr.chunks[:-1], drop_axis=[arr.ndim - 1], ) else: return StackedBytesArray(arr) def _numpy_char_to_bytes(arr): """Like netCDF4.chartostring, but faster and more flexible.""" # adapt handling of copy-kwarg to numpy 2.0 # see https://github.com/numpy/numpy/issues/25916 # and https://github.com/numpy/numpy/pull/25922 copy = None if HAS_NUMPY_2_0 else False # based on: https://stackoverflow.com/a/10984878/809705 arr = np.array(arr, copy=copy, order="C") dtype = "S" + str(arr.shape[-1]) return arr.view(dtype).reshape(arr.shape[:-1]) class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin): """Wrapper around array-like objects to create a new indexable object where values, when accessed, are 
automatically stacked along the last dimension. >>> indexer = indexing.BasicIndexer((slice(None),)) >>> np.array(StackedBytesArray(np.array(["a", "b", "c"], dtype="S1"))[indexer]) array(b'abc', dtype='|S3') """ def __init__(self, array): """ Parameters ---------- array : array-like Original array of values to wrap. """ if array.dtype != "S1": raise ValueError( "can only use StackedBytesArray if argument has dtype='S1'" ) self.array = indexing.as_indexable(array) @property def dtype(self): return np.dtype("S" + str(self.array.shape[-1])) @property def shape(self) -> tuple[int, ...]: return self.array.shape[:-1] def __repr__(self): return f"{type(self).__name__}({self.array!r})" def _vindex_get(self, key): return type(self)(self.array.vindex[key]) def _oindex_get(self, key): return type(self)(self.array.oindex[key]) def __getitem__(self, key): # require slicing the last dimension completely key = type(key)(indexing.expanded_indexer(key.tuple, self.array.ndim)) if key.tuple[-1] != slice(None): raise IndexError("too many indices") return type(self)(self.array[key]) def get_duck_array(self): return _numpy_char_to_bytes(self.array.get_duck_array())

xarray-2025.09.0/xarray/coding/times.py

from __future__ import annotations import contextlib import re import warnings from collections.abc import Callable, Hashable from datetime import datetime, timedelta from functools import partial from typing import TYPE_CHECKING, Union, cast import numpy as np import pandas as pd from pandas.errors import OutOfBoundsDatetime, OutOfBoundsTimedelta from xarray.coding.common import ( SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to, safe_setitem, unpack_for_decoding, unpack_for_encoding, ) from xarray.compat.pdcompat import default_precision_timestamp, timestamp_as_unit from xarray.core import indexing from xarray.core.common import contains_cftime_datetimes, is_np_datetime_like from xarray.core.duck_array_ops import array_all, asarray, ravel, reshape from xarray.core.formatting import first_n_items, format_timestamp, last_item from xarray.core.utils import attempt_import, emit_user_level_warning from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import T_ChunkedArray, get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array, to_numpy from xarray.namedarray.utils import is_duck_dask_array try: import cftime except ImportError: cftime = None from xarray.core.types import ( CFCalendar, CFTimeDatetime, NPDatetimeUnitOptions, PDDatetimeUnitOptions, T_DuckArray, ) T_Name = Union[Hashable, None] # standard calendars recognized by cftime _STANDARD_CALENDARS = {"standard", "gregorian", "proleptic_gregorian"} _NS_PER_TIME_DELTA = { "ns": 1, "us": int(1e3), "ms": int(1e6), "s": int(1e9), "m": int(1e9) * 60, "h": int(1e9) * 60 * 60, "D": int(1e9) * 60 * 60 * 24, } _US_PER_TIME_DELTA = { "microseconds": 1, "milliseconds": 1_000, "seconds": 1_000_000, "minutes": 60 * 1_000_000, "hours": 60 * 60 *
1_000_000, "days": 24 * 60 * 60 * 1_000_000, } _NETCDF_TIME_UNITS_CFTIME = [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", ] _NETCDF_TIME_UNITS_NUMPY = _NETCDF_TIME_UNITS_CFTIME + ["nanoseconds"] TIME_UNITS = frozenset( [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ] ) _INVALID_LITERAL_TIMEDELTA64_ENCODING_KEYS = [ "add_offset", "scale_factor", ] def _is_standard_calendar(calendar: str) -> bool: return calendar.lower() in _STANDARD_CALENDARS def _is_numpy_compatible_time_range(times): if is_np_datetime_like(times.dtype): return True # times array contains cftime objects times = np.asarray(times) tmin = times.min() tmax = times.max() try: # before relaxing the nanosecond constrained # this raised OutOfBoundsDatetime for # times < 1678 and times > 2262 # this isn't the case anymore for other resolutions like "s" # now, we raise for dates before 1582-10-15 _check_date_is_after_shift(tmin, "standard") _check_date_is_after_shift(tmax, "standard") convert_time_or_go_back(tmin, pd.Timestamp) convert_time_or_go_back(tmax, pd.Timestamp) except pd.errors.OutOfBoundsDatetime: return False except ValueError as err: if err.args[0] == "year 0 is out of range": return False raise else: return True def _netcdf_to_numpy_timeunit(units: str) -> NPDatetimeUnitOptions: units = units.lower() if not units.endswith("s"): units = f"{units}s" return cast( NPDatetimeUnitOptions, { "nanoseconds": "ns", "microseconds": "us", "milliseconds": "ms", "seconds": "s", "minutes": "m", "hours": "h", "days": "D", }[units], ) def _numpy_to_netcdf_timeunit(units: NPDatetimeUnitOptions) -> str: return { "ns": "nanoseconds", "us": "microseconds", "ms": "milliseconds", "s": "seconds", "m": "minutes", "h": "hours", "D": "days", }[units] def _numpy_dtype_to_netcdf_timeunit(dtype: np.dtype) -> str: unit, _ = np.datetime_data(dtype) unit = cast(NPDatetimeUnitOptions, unit) return _numpy_to_netcdf_timeunit(unit) def _ensure_padded_year(ref_date: str) -> str: # Reference dates without a padded year (e.g. since 1-1-1 or since 2-3-4) # are ambiguous (is it YMD or DMY?). This can lead to some very odd # behaviour e.g. pandas (via dateutil) passes '1-1-1 00:00:0.0' as # '2001-01-01 00:00:00' (because it assumes a) DMY and b) that year 1 is # shorthand for 2001 (like 02 would be shorthand for year 2002)). # Here we ensure that there is always a four-digit year, with the # assumption being that year comes first if we get something ambiguous. matches_year = re.match(r".*\d{4}.*", ref_date) if matches_year: # all good, return return ref_date # No four-digit strings, assume the first digits are the year and pad # appropriately matches_start_digits = re.match(r"(\d+)(.*)", ref_date) if not matches_start_digits: raise ValueError(f"invalid reference date for time units: {ref_date}") ref_year, everything_else = (s for s in matches_start_digits.groups()) ref_date_padded = f"{int(ref_year):04d}{everything_else}" warning_msg = ( f"Ambiguous reference date string: {ref_date}. The first value is " "assumed to be the year hence will be padded with zeros to remove " f"the ambiguity (the padded reference date string is: {ref_date_padded}). " "To remove this message, remove the ambiguity by padding your reference " "date strings with zeros." 
) warnings.warn(warning_msg, SerializationWarning, stacklevel=2) return ref_date_padded def _unpack_netcdf_time_units(units: str) -> tuple[str, str]: # CF datetime units follow the format: "UNIT since DATE" # this parses out the unit and date allowing for extraneous # whitespace. It also ensures that the year is padded with zeros # so it will be correctly understood by pandas (via dateutil). matches = re.match(r"(.+) since (.+)", units) if not matches: raise ValueError(f"invalid time units: {units}") delta_units, ref_date = (s.strip() for s in matches.groups()) ref_date = _ensure_padded_year(ref_date) return delta_units, ref_date def named(name: str, pattern: str) -> str: return "(?P<" + name + ">" + pattern + ")" def optional(x: str) -> str: return "(?:" + x + ")?" def trailing_optional(xs: list[str]) -> str: if not xs: return "" return xs[0] + optional(trailing_optional(xs[1:])) def build_pattern( date_sep: str = r"\-", datetime_sep: str = r"T", time_sep: str = r"\:", micro_sep: str = r".", ) -> str: pieces = [ (None, "year", r"[+-]?\d{4,5}"), (date_sep, "month", r"\d{2}"), (date_sep, "day", r"\d{2}"), (datetime_sep, "hour", r"\d{2}"), (time_sep, "minute", r"\d{2}"), (time_sep, "second", r"\d{2}"), (micro_sep, "microsecond", r"\d{1,6}"), ] pattern_list = [] for sep, name, sub_pattern in pieces: pattern_list.append((sep or "") + named(name, sub_pattern)) # TODO: allow timezone offsets? return "^" + trailing_optional(pattern_list) + "$" _BASIC_PATTERN = build_pattern(date_sep="", time_sep="") _EXTENDED_PATTERN = build_pattern() _CFTIME_PATTERN = build_pattern(datetime_sep=" ") _PATTERNS = [_BASIC_PATTERN, _EXTENDED_PATTERN, _CFTIME_PATTERN] def parse_iso8601_like(datetime_string: str) -> dict[str, str | None]: for pattern in _PATTERNS: match = re.match(pattern, datetime_string) if match: return match.groupdict() raise ValueError( f"no ISO-8601 or cftime-string-like match for string: {datetime_string}" ) def _parse_iso8601(date_type, timestr): default = date_type(1, 1, 1) result = parse_iso8601_like(timestr) replace = {} for attr in ["year", "month", "day", "hour", "minute", "second", "microsecond"]: value = result.get(attr, None) if value is not None: resolution = attr if attr == "microsecond": if len(value) <= 3: resolution = "millisecond" # convert match string into valid microsecond value value = 10 ** (6 - len(value)) * int(value) replace[attr] = int(value) return default.replace(**replace), resolution def _maybe_strip_tz_from_timestamp(date: pd.Timestamp) -> pd.Timestamp: # If the ref_date Timestamp is timezone-aware, convert to UTC and # make it timezone-naive (GH 2649). 
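    # Editor's note (illustrative example, not part of the upstream source):
    # a timezone-aware reference date such as
    # pd.Timestamp("2000-01-01 12:00", tz="US/Eastern") comes back from this
    # helper as the naive UTC instant Timestamp("2000-01-01 17:00:00"), so the
    # datetime arithmetic further down never mixes aware and naive timestamps.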
if date.tz is not None: return date.tz_convert("UTC").tz_convert(None) return date def _unpack_time_unit_and_ref_date( units: str, ) -> tuple[NPDatetimeUnitOptions, pd.Timestamp]: # same us _unpack_netcdf_time_units but finalizes ref_date for # processing in encode_cf_datetime time_unit, _ref_date = _unpack_netcdf_time_units(units) time_unit = _netcdf_to_numpy_timeunit(time_unit) ref_date = pd.Timestamp(_ref_date) ref_date = _maybe_strip_tz_from_timestamp(ref_date) return time_unit, ref_date def _unpack_time_units_and_ref_date_cftime(units: str, calendar: str): # same as _unpack_netcdf_time_units but finalizes ref_date for # processing in encode_cf_datetime time_units, ref_date = _unpack_netcdf_time_units(units) ref_date = cftime.num2date( 0, units=f"microseconds since {ref_date}", calendar=calendar, only_use_cftime_datetimes=True, ) return time_units, ref_date def _decode_cf_datetime_dtype( data, units: str, calendar: str | None, use_cftime: bool | None, time_unit: PDDatetimeUnitOptions = "ns", ) -> np.dtype: # Verify that at least the first and last date can be decoded # successfully. Otherwise, tracebacks end up swallowed by # Dataset.__repr__ when users try to view their lazily decoded array. values = indexing.ImplicitToExplicitIndexingAdapter(indexing.as_indexable(data)) example_value = np.concatenate( [to_numpy(first_n_items(values, 1)), to_numpy(last_item(values))] ) try: result = decode_cf_datetime( example_value, units, calendar, use_cftime, time_unit ) except Exception as err: calendar_msg = ( "the default calendar" if calendar is None else f"calendar {calendar!r}" ) msg = ( f"unable to decode time units {units!r} with {calendar_msg!r}. Try " "opening your dataset with decode_times=False or installing cftime " "if it is not installed." ) raise ValueError(msg) from err else: dtype = getattr(result, "dtype", np.dtype("object")) return dtype def _decode_datetime_with_cftime( num_dates: np.ndarray, units: str, calendar: str ) -> np.ndarray: if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") if num_dates.size > 0: return np.asarray( cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True) ) else: return np.array([], dtype=object) def _check_date_for_units_since_refdate( date, unit: NPDatetimeUnitOptions, ref_date: pd.Timestamp ) -> pd.Timestamp: # check for out-of-bounds floats and raise if date > np.iinfo("int64").max or date < np.iinfo("int64").min: raise OutOfBoundsTimedelta( f"Value {date} can't be represented as Datetime/Timedelta." ) delta = date * np.timedelta64(1, unit) if not np.isnan(delta): # this will raise on dtype overflow for integer dtypes if date.dtype.kind in "u" and not np.int64(delta) == date: raise OutOfBoundsTimedelta( "DType overflow in Datetime/Timedelta calculation." 
) # this will raise on overflow if ref_date + delta # can't be represented in the current ref_date resolution return timestamp_as_unit(ref_date + delta, ref_date.unit) else: # if date is exactly NaT (np.iinfo("int64").min) return NaT # to make follow-up checks work return pd.Timestamp("NaT") def _check_timedelta_range(value, data_unit, time_unit): if value > np.iinfo("int64").max or value < np.iinfo("int64").min: raise OutOfBoundsTimedelta(f"Value {value} can't be represented as Timedelta.") # on windows multiplying nan leads to RuntimeWarning with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "invalid value encountered in multiply", RuntimeWarning ) delta = value * np.timedelta64(1, data_unit) if not np.isnan(delta): # this will raise on dtype overflow for integer dtypes if value.dtype.kind in "u" and not np.int64(delta) == value: raise OutOfBoundsTimedelta( "DType overflow in Datetime/Timedelta calculation." ) # this will raise on overflow if delta cannot be represented with the # resolutions supported by pandas. pd.to_timedelta(delta) def _align_reference_date_and_unit( ref_date: pd.Timestamp, unit: NPDatetimeUnitOptions ) -> pd.Timestamp: # align to the highest needed resolution of ref_date or unit if np.timedelta64(1, ref_date.unit) > np.timedelta64(1, unit): # this will raise accordingly # if data can't be represented in the higher resolution return timestamp_as_unit(ref_date, cast(PDDatetimeUnitOptions, unit)) return ref_date def _check_date_is_after_shift( date: pd.Timestamp | datetime | CFTimeDatetime, calendar: str ) -> None: # if we have gregorian/standard we need to raise # if we are outside the well-defined date range # proleptic_gregorian and standard/gregorian are only equivalent # if reference date and date range is >= 1582-10-15 if calendar != "proleptic_gregorian" and date < type(date)(1582, 10, 15): raise OutOfBoundsDatetime( f"Dates before 1582-10-15 cannot be decoded " f"with pandas using {calendar!r} calendar: {date}" ) def _check_higher_resolution( flat_num_dates: np.ndarray, time_unit: PDDatetimeUnitOptions, ) -> tuple[np.ndarray, PDDatetimeUnitOptions]: """Iterate until fitting resolution found.""" res: list[PDDatetimeUnitOptions] = ["s", "ms", "us", "ns"] new_units = res[res.index(time_unit) :] for new_time_unit in new_units: if not ((np.unique(flat_num_dates % 1) > 0).any() and new_time_unit != "ns"): break flat_num_dates *= 1000 return flat_num_dates, new_time_unit def _decode_datetime_with_pandas( flat_num_dates: np.ndarray, units: str, calendar: str, time_resolution: PDDatetimeUnitOptions = "ns", ) -> np.ndarray: if not _is_standard_calendar(calendar): raise OutOfBoundsDatetime( f"Cannot decode times from a non-standard calendar, {calendar!r}, using " "pandas." ) # Work around pandas.to_timedelta issue with dtypes smaller than int64 and # NumPy 2.0 by casting all int and uint data to int64 and uint64, # respectively. See https://github.com/pandas-dev/pandas/issues/56996 for # more details.
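    # Editor's note (hypothetical values, not part of the upstream source): an
    # encoded array like np.array([0, 1, 2], dtype="int16") is widened to int64
    # below, and unsigned inputs to uint64, so that the narrow dtype cannot
    # overflow or be rejected by the timedelta conversions that follow.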
if flat_num_dates.dtype.kind == "i": flat_num_dates = flat_num_dates.astype(np.int64) elif flat_num_dates.dtype.kind == "u": flat_num_dates = flat_num_dates.astype(np.uint64) try: time_unit, ref_date = _unpack_time_unit_and_ref_date(units) ref_date = _align_reference_date_and_unit(ref_date, time_unit) # here the highest wanted resolution is set ref_date = _align_reference_date_and_unit(ref_date, time_resolution) except ValueError as err: # ValueError is raised by pd.Timestamp for non-ISO timestamp # strings, in which case we fall back to using cftime raise OutOfBoundsDatetime from err _check_date_is_after_shift(ref_date, calendar) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning) if flat_num_dates.size > 0: # avoid size 0 datetimes GH1329 _check_date_for_units_since_refdate( flat_num_dates.min(), time_unit, ref_date ) _check_date_for_units_since_refdate( flat_num_dates.max(), time_unit, ref_date ) # To avoid integer overflow when converting to nanosecond units for integer # dtypes smaller than np.int64 cast all integer and unsigned integer dtype # arrays to np.int64 (GH 2002, GH 6589). Note this is safe even in the case # of np.uint64 values, because any np.uint64 value that would lead to # overflow when converting to np.int64 would not be representable with a # timedelta64 value, and therefore would raise an error in the lines above. if flat_num_dates.dtype.kind in "iu": flat_num_dates = flat_num_dates.astype(np.int64) elif flat_num_dates.dtype.kind in "f": flat_num_dates = flat_num_dates.astype(np.float64) timedeltas = _numbers_to_timedelta( flat_num_dates, time_unit, ref_date.unit, "datetimes" ) # add timedeltas to ref_date return ref_date + timedeltas def decode_cf_datetime( num_dates, units: str, calendar: str | None = None, use_cftime: bool | None = None, time_unit: PDDatetimeUnitOptions = "ns", ) -> np.ndarray: """Given an array of numeric dates in netCDF format, convert it into a numpy array of date time objects. For standard (Gregorian) calendars, this function uses vectorized operations, which makes it much faster than cftime.num2date. In such a case, the returned array will be of type np.datetime64. Note that time unit in `units` must not be smaller than microseconds and not larger than days. See Also -------- cftime.num2date """ num_dates = to_numpy(num_dates) flat_num_dates = ravel(num_dates) if calendar is None: calendar = "standard" if use_cftime is None: try: dates = _decode_datetime_with_pandas( flat_num_dates, units, calendar, time_unit ) except (KeyError, OutOfBoundsDatetime, OutOfBoundsTimedelta, OverflowError): dates = _decode_datetime_with_cftime( flat_num_dates.astype(float), units, calendar ) # retrieve cftype dates_min = dates[np.nanargmin(num_dates)] dates_max = dates[np.nanargmax(num_dates)] cftype = type(dates_min) # create first day of gregorian calendar in current cf calendar type border = cftype(1582, 10, 15) # "ns" borders # between ['1677-09-21T00:12:43.145224193', '2262-04-11T23:47:16.854775807'] lower = cftype(1677, 9, 21, 0, 12, 43, 145224) upper = cftype(2262, 4, 11, 23, 47, 16, 854775) if dates_min < border: if _is_standard_calendar(calendar): emit_user_level_warning( "Unable to decode time axis into full " "numpy.datetime64 objects, continuing using " "cftime.datetime objects instead, reason: dates prior " "reform date (1582-10-15). 
To silence this warning specify " "'use_cftime=True'.", SerializationWarning, ) elif time_unit == "ns" and (dates_min < lower or dates_max > upper): emit_user_level_warning( "Unable to decode time axis into full " "numpy.datetime64[ns] objects, continuing using " "cftime.datetime objects instead, reason: dates out " "of range. To silence this warning use a coarser resolution " "'time_unit' or specify 'use_cftime=True'.", SerializationWarning, ) elif _is_standard_calendar(calendar): dates = cftime_to_nptime(dates, time_unit=time_unit) elif use_cftime: dates = _decode_datetime_with_cftime(flat_num_dates, units, calendar) else: dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar, time_unit) return reshape(dates, num_dates.shape) def to_datetime_unboxed(value, **kwargs): result = pd.to_datetime(value, **kwargs).to_numpy() assert np.issubdtype(result.dtype, "datetime64") return result def _numbers_to_timedelta( flat_num: np.ndarray, time_unit: NPDatetimeUnitOptions, ref_unit: PDDatetimeUnitOptions, datatype: str, target_unit: PDDatetimeUnitOptions | None = None, ) -> np.ndarray: """Transform numbers to np.timedelta64.""" # keep NaT/nan mask if flat_num.dtype.kind == "f": nan = np.asarray(np.isnan(flat_num)) elif flat_num.dtype.kind == "i": nan = np.asarray(flat_num == np.iinfo(np.int64).min) # in case we need to change the unit, we fix the numbers here # this should be safe, as errors would have been raised above ns_time_unit = _NS_PER_TIME_DELTA[time_unit] ns_ref_date_unit = _NS_PER_TIME_DELTA[ref_unit] if ns_time_unit > ns_ref_date_unit: flat_num = np.asarray(flat_num * np.int64(ns_time_unit / ns_ref_date_unit)) time_unit = ref_unit # estimate fitting resolution for floating point values # this iterates until all floats are fractionless or time_unit == "ns" if flat_num.dtype.kind == "f" and time_unit != "ns": flat_num, new_time_unit = _check_higher_resolution( flat_num, cast(PDDatetimeUnitOptions, time_unit) ) if time_unit != new_time_unit: if target_unit is None or np.timedelta64(1, target_unit) > np.timedelta64( 1, new_time_unit ): if datatype == "datetimes": kwarg = "decode_times" coder = "CFDatetimeCoder" else: kwarg = "decode_timedelta" coder = "CFTimedeltaCoder" formatted_kwarg = f"{kwarg}={coder}(time_unit={new_time_unit!r})" message = ( f"Can't decode floating point {datatype} to {time_unit!r} " f"without precision loss; decoding to {new_time_unit!r} " f"instead. To silence this warning pass {formatted_kwarg} " f"to your opening function." ) emit_user_level_warning(message, SerializationWarning) time_unit = new_time_unit # Cast input ordinals to integers and properly handle NaN/NaT # to prevent casting NaN to int with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) flat_num = flat_num.astype(np.int64) if nan.any(): flat_num[nan] = np.iinfo(np.int64).min # cast to wanted type return flat_num.astype(f"timedelta64[{time_unit}]") def decode_cf_timedelta( num_timedeltas, units: str, time_unit: PDDatetimeUnitOptions = "ns" ) -> np.ndarray: """Given an array of numeric timedeltas in netCDF format, convert it into a numpy timedelta64 ["s", "ms", "us", "ns"] array. 
""" num_timedeltas = to_numpy(num_timedeltas) unit = _netcdf_to_numpy_timeunit(units) # special case empty arrays is_empty_array = num_timedeltas.size == 0 with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN slice encountered", RuntimeWarning) if not is_empty_array: _check_timedelta_range(np.nanmin(num_timedeltas), unit, time_unit) _check_timedelta_range(np.nanmax(num_timedeltas), unit, time_unit) timedeltas = _numbers_to_timedelta( num_timedeltas, unit, "s", "timedeltas", target_unit=time_unit ) pd_timedeltas = pd.to_timedelta(ravel(timedeltas)) if not is_empty_array and np.isnat(timedeltas).all(): empirical_unit = time_unit else: empirical_unit = pd_timedeltas.unit if is_empty_array or np.timedelta64(1, time_unit) > np.timedelta64( 1, empirical_unit ): time_unit = empirical_unit if time_unit not in {"s", "ms", "us", "ns"}: raise ValueError( f"time_unit must be one of 's', 'ms', 'us', or 'ns'. Got: {time_unit}" ) result = pd_timedeltas.as_unit(time_unit).to_numpy() return reshape(result, num_timedeltas.shape) def _unit_timedelta_cftime(units: str) -> timedelta: return timedelta(microseconds=_US_PER_TIME_DELTA[units]) def _unit_timedelta_numpy(units: str) -> np.timedelta64: numpy_units = _netcdf_to_numpy_timeunit(units) return np.timedelta64(1, numpy_units) def _infer_time_units_from_diff(unique_timedeltas) -> str: # todo: check, if this function works correctly wrt np.timedelta64 unit_timedelta: Callable[[str], timedelta] | Callable[[str], np.timedelta64] zero_timedelta: timedelta | np.timedelta64 unique_timedeltas = asarray(unique_timedeltas) if unique_timedeltas.dtype == np.dtype("O"): time_units = _NETCDF_TIME_UNITS_CFTIME unit_timedelta = _unit_timedelta_cftime zero_timedelta = timedelta(microseconds=0) else: time_units = _NETCDF_TIME_UNITS_NUMPY unit_timedelta = _unit_timedelta_numpy zero_timedelta = np.timedelta64(0, "ns") for time_unit in time_units: if array_all(unique_timedeltas % unit_timedelta(time_unit) == zero_timedelta): return time_unit return "seconds" def _time_units_to_timedelta(units: str) -> timedelta: return timedelta(microseconds=_US_PER_TIME_DELTA[units]) def infer_calendar_name(dates) -> CFCalendar: """Given an array of datetimes, infer the CF calendar name""" if is_np_datetime_like(dates.dtype): return "proleptic_gregorian" elif dates.dtype == np.dtype("O") and dates.size > 0: # Logic copied from core.common.contains_cftime_datetimes. if cftime is not None: sample = np.asarray(dates).flat[0] if is_duck_dask_array(sample): sample = sample.compute() if isinstance(sample, np.ndarray): sample = sample.item() if isinstance(sample, cftime.datetime): return sample.calendar # Error raise if dtype is neither datetime or "O", if cftime is not importable, and if element of 'O' dtype is not cftime. 
raise ValueError("Array does not contain datetime objects.") def infer_datetime_units(dates) -> str: """Given an array of datetimes, returns a CF compatible time-unit string of the form "{time_unit} since {date[0]}", where `time_unit` is 'days', 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all unique time deltas in `dates`) """ dates = ravel(np.asarray(dates)) if np.issubdtype(np.asarray(dates).dtype, "datetime64"): dates = to_datetime_unboxed(dates) dates = dates[pd.notnull(dates)] reference_date = dates[0] if len(dates) > 0 else "1970-01-01" reference_date = pd.Timestamp(reference_date) else: reference_date = dates[0] if len(dates) > 0 else "1970-01-01" reference_date = format_cftime_datetime(reference_date) unique_timedeltas = np.unique(np.diff(dates)) units = _infer_time_units_from_diff(unique_timedeltas) return f"{units} since {reference_date}" def format_cftime_datetime(date) -> str: """Converts a cftime.datetime object to a string with the format: YYYY-MM-DD HH:MM:SS.UUUUUU """ return f"{date.year:04d}-{date.month:02d}-{date.day:02d} {date.hour:02d}:{date.minute:02d}:{date.second:02d}.{date.microsecond:06d}" def infer_timedelta_units(deltas) -> str: """Given an array of timedeltas, returns a CF compatible time-unit from {'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly divide all unique time deltas in `deltas`) """ deltas = ravel(deltas) unique_timedeltas = np.unique(deltas[pd.notnull(deltas)]) return _infer_time_units_from_diff(unique_timedeltas) def cftime_to_nptime( times, raise_on_invalid: bool = True, time_unit: PDDatetimeUnitOptions = "ns" ) -> np.ndarray: """Given an array of cftime.datetime objects, return an array of numpy.datetime64 objects of the same size If raise_on_invalid is True (default), invalid dates trigger a ValueError. Otherwise, the invalid element is replaced by np.NaT.""" times = np.asarray(times) new = [] dt: np.datetime64 for _i, t in np.ndenumerate(times): try: # We expect either "us" resolution or "s" resolution depending on # whether 'microseconds' are defined for the input or not. dt = ( pd.Timestamp(np.datetime64(t.isoformat())).as_unit(time_unit).to_numpy() ) except ValueError as e: if raise_on_invalid: raise ValueError( f"Cannot convert date {t} to a date in the " f"standard calendar. Reason: {e}." ) from e else: dt = np.datetime64("NaT") new.append(dt) return np.asarray(new).reshape(times.shape) def convert_times(times, date_type, raise_on_invalid: bool = True) -> np.ndarray: """Given an array of datetimes, return the same dates in another cftime or numpy date type. Useful to convert between calendars in numpy and cftime or between cftime calendars. If raise_on_valid is True (default), invalid dates trigger a ValueError. Otherwise, the invalid element is replaced by np.nan for cftime types and np.NaT for np.datetime64. """ if date_type in (pd.Timestamp, np.datetime64) and not is_np_datetime_like( times.dtype ): return cftime_to_nptime(times, raise_on_invalid=raise_on_invalid) if is_np_datetime_like(times.dtype): # Convert datetime64 objects to Timestamps since those have year, month, day, etc. attributes times = pd.DatetimeIndex(times) new = np.empty(times.shape, dtype="O") for i, t in enumerate(times): try: dt = date_type( t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond ) except ValueError as e: if raise_on_invalid: raise ValueError( f"Cannot convert date {t} to a date in the " f"{date_type(2000, 1, 1).calendar} calendar. Reason: {e}." 
) from e else: dt = np.nan new[i] = dt return new def convert_time_or_go_back(date, date_type): """Convert a single date to a new date_type (cftime.datetime or pd.Timestamp). If the new date is invalid, it goes back a day and tries again. If it is still invalid, goes back a second day. This is meant to convert end-of-month dates into a new calendar. """ if date_type == pd.Timestamp: date_type = default_precision_timestamp try: return date_type( date.year, date.month, date.day, date.hour, date.minute, date.second, date.microsecond, ) except OutOfBoundsDatetime: raise except ValueError: # Day is invalid, happens at the end of months, try again the day before try: return date_type( date.year, date.month, date.day - 1, date.hour, date.minute, date.second, date.microsecond, ) except ValueError: # Still invalid, happens for 360_day to non-leap february. Try again 2 days before date. return date_type( date.year, date.month, date.day - 2, date.hour, date.minute, date.second, date.microsecond, ) def _should_cftime_be_used( source, target_calendar: str, use_cftime: bool | None ) -> bool: """Return whether conversion of the source to the target calendar should result in a cftime-backed array. Source is a 1D datetime array, target_cal a string (calendar name) and use_cftime is a boolean or None. If use_cftime is None, this returns True if the source's range and target calendar are convertible to np.datetime64 objects. """ # Arguments Checks for target if use_cftime is not True: if _is_standard_calendar(target_calendar): if _is_numpy_compatible_time_range(source): # Conversion is possible with pandas, force False if it was None return False elif use_cftime is False: raise ValueError( "Source time range is not valid for numpy datetimes. Try using `use_cftime=True`." ) elif use_cftime is False: raise ValueError( f"Calendar '{target_calendar}' is only valid with cftime. Try using `use_cftime=True`." ) return True def _cleanup_netcdf_time_units(units: str) -> str: time_units, ref_date = _unpack_netcdf_time_units(units) time_units = time_units.lower() if not time_units.endswith("s"): time_units = f"{time_units}s" # don't worry about reifying the units if they're out of bounds or # formatted badly with contextlib.suppress(OutOfBoundsDatetime, ValueError): units = f"{time_units} since {format_timestamp(ref_date)}" return units def _encode_datetime_with_cftime(dates, units: str, calendar: str) -> np.ndarray: """Fallback method for encoding dates using cftime. This method is more flexible than xarray's parsing using datetime64[ns] arrays but also slower because it loops over each element. """ if TYPE_CHECKING: import cftime else: cftime = attempt_import("cftime") dates = np.asarray(dates) original_shape = dates.shape if np.issubdtype(dates.dtype, np.datetime64): # numpy's broken datetime conversion only works for us precision dates = dates.astype("M8[us]").astype(datetime) dates = np.atleast_1d(dates) # Find all the None position none_position = dates == None # noqa: E711 filtered_dates = dates[~none_position] # Since netCDF files do not support storing float128 values, we ensure # that float64 values are used by setting longdouble=False in num2date. # This try except logic can be removed when xarray's minimum version of # cftime is at least 1.6.2. 
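    # Editor's note (illustrative, not part of the upstream source): on recent
    # cftime the call below would behave like
    # cftime.date2num(datetime(2000, 1, 1), "days since 1970-01-01", "standard",
    # longdouble=False) and return 10957.0; on cftime older than 1.6.2 the
    # longdouble keyword raises TypeError, hence the fallback call without it.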
try: encoded_nums = cftime.date2num( filtered_dates, units, calendar, longdouble=False ) except TypeError: encoded_nums = cftime.date2num(filtered_dates, units, calendar) if filtered_dates.size == none_position.size: return encoded_nums.reshape(original_shape) # Create a full matrix of NaN # And fill the num dates in the not NaN or None position result = np.full(dates.shape, np.nan) result[np.nonzero(~none_position)] = encoded_nums return result.reshape(original_shape) def cast_to_int_if_safe(num) -> np.ndarray: int_num = np.asarray(num, dtype=np.int64) if array_all(num == int_num): num = int_num return num def _division(deltas, delta, floor): if floor: # calculate int64 floor division # to preserve integer dtype if possible (GH 4045, GH7817). num = deltas // delta.astype(np.int64) num = num.astype(np.int64, copy=False) else: num = deltas / delta return num def encode_cf_datetime( dates: T_DuckArray, # type: ignore[misc] units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_DuckArray, str, str]: """Given an array of datetime objects, returns the tuple `(num, units, calendar)` suitable for a CF compliant time variable. Unlike `date2num`, this function can handle datetime64 arrays. See Also -------- cftime.date2num """ dates = asarray(dates) if is_chunked_array(dates): return _lazily_encode_cf_datetime(dates, units, calendar, dtype) else: return _eagerly_encode_cf_datetime(dates, units, calendar, dtype) def _infer_needed_units_numpy(ref_date, data_units): needed_units, data_ref_date = _unpack_time_unit_and_ref_date(data_units) needed_units = _numpy_to_netcdf_timeunit(needed_units) ref_delta = abs(data_ref_date - ref_date).to_timedelta64() data_delta = _unit_timedelta_numpy(needed_units) if (ref_delta % data_delta) > np.timedelta64(0, "ns"): needed_units = _infer_time_units_from_diff(ref_delta) return needed_units def _infer_needed_units_cftime(ref_date, data_units, calendar): needed_units, data_ref_date = _unpack_time_units_and_ref_date_cftime( data_units, calendar ) ref_delta = abs(data_ref_date - ref_date) data_delta = _time_units_to_timedelta(needed_units) if (ref_delta % data_delta) > timedelta(seconds=0): needed_units = _infer_time_units_from_diff(ref_delta) return needed_units def _eagerly_encode_cf_datetime( dates: T_DuckArray, # type: ignore[misc] units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, allow_units_modification: bool = True, ) -> tuple[T_DuckArray, str, str]: dates = asarray(dates) data_units = infer_datetime_units(dates) if units is None: units = data_units else: units = _cleanup_netcdf_time_units(units) if calendar is None: calendar = infer_calendar_name(dates) raise_incompatible_units_error = False raise_gregorian_proleptic_gregorian_mismatch_error = False try: if not _is_standard_calendar(calendar) or dates.dtype.kind == "O": # parse with cftime instead raise OutOfBoundsDatetime assert np.issubdtype(dates.dtype, "datetime64") if calendar in ["standard", "gregorian"] and np.nanmin(dates).astype( "=M8[us]" ).astype(datetime) < datetime(1582, 10, 15): raise_gregorian_proleptic_gregorian_mismatch_error = True time_unit, ref_date = _unpack_time_unit_and_ref_date(units) # calendar equivalence only for days after the reform _check_date_is_after_shift(ref_date, calendar) time_delta = np.timedelta64(1, time_unit) # Wrap the dates in a DatetimeIndex to do the subtraction to ensure # an OverflowError is raised if the ref_date is too far away from # dates to be encoded (GH 2272). 
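        # (Editor's note, not from the upstream source: the subtraction below can
        # overflow int64 when the reference date lies very far from the data; the
        # resulting OverflowError is caught by the except branch further down,
        # which falls back to cftime-based encoding instead.)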
# DatetimeIndex will convert to units of ["s", "ms", "us", "ns"] dates_as_index = pd.DatetimeIndex(ravel(dates)) time_deltas = dates_as_index - ref_date # retrieve needed units to faithfully encode to int64 needed_units = _infer_needed_units_numpy(ref_date, data_units) needed_time_delta = _unit_timedelta_numpy(needed_units) floor_division = np.issubdtype(dtype, np.integer) or dtype is None if time_delta > needed_time_delta: floor_division = False if dtype is None: emit_user_level_warning( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Resolution of {needed_units!r} needed. Serializing times to floating point instead. " f"Set encoding['dtype'] to integer dtype to serialize to int64. " f"Set encoding['dtype'] to floating point dtype to silence this warning." ) elif np.issubdtype(dtype, np.integer) and allow_units_modification: new_units = f"{needed_units} since {format_timestamp(ref_date)}" emit_user_level_warning( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Serializing with units {new_units!r} instead. " f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " f"Set encoding['units'] to {new_units!r} to silence this warning ." ) units = new_units time_delta = needed_time_delta floor_division = True elif np.issubdtype(dtype, np.integer) and not allow_units_modification: new_units = f"{needed_units} since {format_timestamp(ref_date)}" raise_incompatible_units_error = True # get resolution of TimedeltaIndex and align time_delta # todo: check, if this works in any case num = _division( time_deltas, time_delta.astype(f"=m8[{time_deltas.unit}]"), floor_division ) num = reshape(num.values, dates.shape) except (OutOfBoundsDatetime, OverflowError, ValueError): time_units, ref_date = _unpack_time_units_and_ref_date_cftime(units, calendar) time_delta_cftime = _time_units_to_timedelta(time_units) needed_units = _infer_needed_units_cftime(ref_date, data_units, calendar) needed_time_delta_cftime = _time_units_to_timedelta(needed_units) if ( np.issubdtype(dtype, np.integer) and time_delta_cftime > needed_time_delta_cftime ): new_units = f"{needed_units} since {format_cftime_datetime(ref_date)}" if allow_units_modification: emit_user_level_warning( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Serializing with units {new_units!r} instead. " f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " f"Set encoding['units'] to {new_units!r} to silence this warning ." ) units = new_units else: raise_incompatible_units_error = True num = _encode_datetime_with_cftime(dates, units, calendar) # do it now only for cftime-based flow # we already covered for this in pandas-based flow num = cast_to_int_if_safe(num) if raise_incompatible_units_error: raise ValueError( f"Times can't be serialized faithfully to int64 with requested units {units!r}. " f"Consider setting encoding['dtype'] to a floating point dtype to serialize with " f"units {units!r}. Consider setting encoding['units'] to {new_units!r} to " f"serialize with an integer dtype." ) if raise_gregorian_proleptic_gregorian_mismatch_error: raise ValueError( f"Unable to encode np.datetime64 values with {calendar} " f"calendar, because some or all values are prior to the reform " f"date of 1582-10-15. To encode these times, set " f"encoding['calendar'] to 'proleptic_gregorian' instead, which " f"is the true calendar that np.datetime64 values use. 
The " f"'standard' or 'gregorian' calendar is only equivalent to the " f"'proleptic_gregorian' calendar after the reform date." ) return num, units, calendar def _encode_cf_datetime_within_map_blocks( dates: T_DuckArray, # type: ignore[misc] units: str, calendar: str, dtype: np.dtype, ) -> T_DuckArray: num, *_ = _eagerly_encode_cf_datetime( dates, units, calendar, dtype, allow_units_modification=False ) return num def _lazily_encode_cf_datetime( dates: T_ChunkedArray, units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_ChunkedArray, str, str]: if calendar is None: # This will only trigger minor compute if dates is an object dtype array. calendar = infer_calendar_name(dates) if units is None and dtype is None: if dates.dtype == "O": units = "microseconds since 1970-01-01" dtype = np.dtype("int64") else: netcdf_unit = _numpy_dtype_to_netcdf_timeunit(dates.dtype) units = f"{netcdf_unit} since 1970-01-01" dtype = np.dtype("int64") if units is None or dtype is None: raise ValueError( f"When encoding chunked arrays of datetime values, both the units " f"and dtype must be prescribed or both must be unprescribed. " f"Prescribing only one or the other is not currently supported. " f"Got a units encoding of {units} and a dtype encoding of {dtype}." ) chunkmanager = get_chunked_array_type(dates) num = chunkmanager.map_blocks( _encode_cf_datetime_within_map_blocks, dates, units, calendar, dtype, dtype=dtype, ) return num, units, calendar def encode_cf_timedelta( timedeltas: T_DuckArray, # type: ignore[misc] units: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_DuckArray, str]: timedeltas = asarray(timedeltas) if is_chunked_array(timedeltas): return _lazily_encode_cf_timedelta(timedeltas, units, dtype) else: return _eagerly_encode_cf_timedelta(timedeltas, units, dtype) def _eagerly_encode_cf_timedelta( timedeltas: T_DuckArray, # type: ignore[misc] units: str | None = None, dtype: np.dtype | None = None, allow_units_modification: bool = True, ) -> tuple[T_DuckArray, str]: data_units = infer_timedelta_units(timedeltas) if units is None: units = data_units # units take precedence in the case of zero-size array if timedeltas.size == 0: data_units = units time_delta = _unit_timedelta_numpy(units) time_deltas = pd.TimedeltaIndex(ravel(timedeltas)) # get resolution of TimedeltaIndex and align time_delta deltas_unit = time_deltas.unit time_delta = time_delta.astype(f"=m8[{deltas_unit}]") # retrieve needed units to faithfully encode to int64 needed_units = data_units if data_units != units: needed_units = _infer_time_units_from_diff(np.unique(time_deltas.dropna())) # needed time delta to encode faithfully to int64 needed_time_delta = _unit_timedelta_numpy(needed_units) floor_division = np.issubdtype(dtype, np.integer) or dtype is None if time_delta > needed_time_delta: floor_division = False if dtype is None: emit_user_level_warning( f"Timedeltas can't be serialized faithfully to int64 with requested units {units!r}. " f"Resolution of {needed_units!r} needed. Serializing timeseries to floating point instead. " f"Set encoding['dtype'] to integer dtype to serialize to int64. " f"Set encoding['dtype'] to floating point dtype to silence this warning." ) elif np.issubdtype(dtype, np.integer) and allow_units_modification: emit_user_level_warning( f"Timedeltas can't be serialized faithfully with requested units {units!r}. " f"Serializing with units {needed_units!r} instead. 
" f"Set encoding['dtype'] to floating point dtype to serialize with units {units!r}. " f"Set encoding['units'] to {needed_units!r} to silence this warning ." ) units = needed_units time_delta = needed_time_delta time_delta = time_delta.astype(f"=m8[{deltas_unit}]") floor_division = True elif np.issubdtype(dtype, np.integer) and not allow_units_modification: raise ValueError( f"Timedeltas can't be serialized faithfully to int64 with requested units {units!r}. " f"Consider setting encoding['dtype'] to a floating point dtype to serialize with " f"units {units!r}. Consider setting encoding['units'] to {needed_units!r} to " f"serialize with an integer dtype." ) num = _division(time_deltas, time_delta, floor_division) num = reshape(num.values, timedeltas.shape) return num, units def _encode_cf_timedelta_within_map_blocks( timedeltas: T_DuckArray, # type: ignore[misc] units: str, dtype: np.dtype, ) -> T_DuckArray: num, _ = _eagerly_encode_cf_timedelta( timedeltas, units, dtype, allow_units_modification=False ) return num def _lazily_encode_cf_timedelta( timedeltas: T_ChunkedArray, units: str | None = None, dtype: np.dtype | None = None ) -> tuple[T_ChunkedArray, str]: if units is None and dtype is None: units = _numpy_dtype_to_netcdf_timeunit(timedeltas.dtype) dtype = np.dtype("int64") if units is None or dtype is None: raise ValueError( f"When encoding chunked arrays of timedelta values, both the " f"units and dtype must be prescribed or both must be " f"unprescribed. Prescribing only one or the other is not " f"currently supported. Got a units encoding of {units} and a " f"dtype encoding of {dtype}." ) chunkmanager = get_chunked_array_type(timedeltas) num = chunkmanager.map_blocks( _encode_cf_timedelta_within_map_blocks, timedeltas, units, dtype, dtype=dtype, ) return num, units class CFDatetimeCoder(VariableCoder): """Coder for CF Datetime coding. Parameters ---------- use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64`` objects. If False, always decode times to ``np.datetime64`` objects; if this is not possible raise an error. May not be supported by all the backends. time_unit : PDDatetimeUnitOptions Target resolution when decoding dates. Defaults to "ns". 
""" def __init__( self, use_cftime: bool | None = None, time_unit: PDDatetimeUnitOptions = "ns", ) -> None: self.use_cftime = use_cftime self.time_unit = time_unit def encode(self, variable: Variable, name: T_Name = None) -> Variable: if np.issubdtype( variable.data.dtype, np.datetime64 ) or contains_cftime_datetimes(variable): dims, data, attrs, encoding = unpack_for_encoding(variable) units = encoding.pop("units", None) calendar = encoding.pop("calendar", None) dtype = encoding.get("dtype", None) # in the case of packed data we need to encode into # float first, the correct dtype will be established # via CFScaleOffsetCoder/CFMaskCoder if "add_offset" in encoding or "scale_factor" in encoding: dtype = data.dtype if data.dtype.kind == "f" else "float64" (data, units, calendar) = encode_cf_datetime(data, units, calendar, dtype) safe_setitem(attrs, "units", units, name=name) safe_setitem(attrs, "calendar", calendar, name=name) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: units = variable.attrs.get("units", None) if isinstance(units, str) and "since" in units: dims, data, attrs, encoding = unpack_for_decoding(variable) units = pop_to(attrs, encoding, "units") calendar = pop_to(attrs, encoding, "calendar") dtype = _decode_cf_datetime_dtype( data, units, calendar, self.use_cftime, self.time_unit ) transform = partial( decode_cf_datetime, units=units, calendar=calendar, use_cftime=self.use_cftime, time_unit=self.time_unit, ) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def has_timedelta64_encoding_dtype(attrs_or_encoding: dict) -> bool: dtype = attrs_or_encoding.get("dtype") return isinstance(dtype, str) and dtype.startswith("timedelta64") def resolve_time_unit_from_attrs_dtype( attrs_dtype: str, name: T_Name ) -> PDDatetimeUnitOptions: dtype = np.dtype(attrs_dtype) resolution, _ = np.datetime_data(dtype) resolution = cast(NPDatetimeUnitOptions, resolution) time_unit: PDDatetimeUnitOptions if np.timedelta64(1, resolution) > np.timedelta64(1, "s"): time_unit = "s" message = ( f"Following pandas, xarray only supports decoding to timedelta64 " f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded " f"values for variable {name!r} have a resolution of " f"{resolution!r}. Attempting to decode to a resolution of 's'. " f"Note, depending on the encoded values, this may lead to an " f"OverflowError. Additionally, data will not be identically round " f"tripped; xarray will choose an encoding dtype of " f"'timedelta64[s]' when re-encoding." ) emit_user_level_warning(message) elif np.timedelta64(1, resolution) < np.timedelta64(1, "ns"): time_unit = "ns" message = ( f"Following pandas, xarray only supports decoding to timedelta64 " f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded " f"values for variable {name!r} have a resolution of " f"{resolution!r}. Attempting to decode to a resolution of 'ns'. " f"Note, depending on the encoded values, this may lead to loss of " f"precision. Additionally, data will not be identically round " f"tripped; xarray will choose an encoding dtype of " f"'timedelta64[ns]' when re-encoding." ) emit_user_level_warning(message) else: time_unit = cast(PDDatetimeUnitOptions, resolution) return time_unit class CFTimedeltaCoder(VariableCoder): """Coder for CF Timedelta coding. 
Parameters ---------- time_unit : PDDatetimeUnitOptions Target resolution when decoding timedeltas via units. Defaults to "ns". When decoding via dtype, the resolution is specified in the dtype attribute, so this parameter is ignored. decode_via_units : bool Whether to decode timedeltas based on the presence of a timedelta-like units attribute, e.g. "seconds". Defaults to True, but in the future will default to False. decode_via_dtype : bool Whether to decode timedeltas based on the presence of a np.timedelta64 dtype attribute, e.g. "timedelta64[s]". Defaults to True. """ def __init__( self, time_unit: PDDatetimeUnitOptions | None = None, decode_via_units: bool = True, decode_via_dtype: bool = True, ) -> None: self.time_unit = time_unit self.decode_via_units = decode_via_units self.decode_via_dtype = decode_via_dtype self._emit_decode_timedelta_future_warning = False def encode(self, variable: Variable, name: T_Name = None) -> Variable: if np.issubdtype(variable.data.dtype, np.timedelta64): dims, data, attrs, encoding = unpack_for_encoding(variable) dtype = encoding.get("dtype", None) units = encoding.pop("units", None) # in the case of packed data we need to encode into # float first, the correct dtype will be established # via CFScaleOffsetCoder/CFMaskCoder if "add_offset" in encoding or "scale_factor" in encoding: dtype = data.dtype if data.dtype.kind == "f" else "float64" resolution, _ = np.datetime_data(variable.dtype) attrs_dtype = f"timedelta64[{resolution}]" safe_setitem(attrs, "dtype", attrs_dtype, name=name) data, units = encode_cf_timedelta(data, units, dtype) safe_setitem(attrs, "units", units, name=name) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: units = variable.attrs.get("units", None) has_timedelta_units = isinstance(units, str) and units in TIME_UNITS has_timedelta_dtype = has_timedelta64_encoding_dtype(variable.attrs) is_dtype_decodable = has_timedelta_units and has_timedelta_dtype is_units_decodable = has_timedelta_units if (is_dtype_decodable and self.decode_via_dtype) or ( is_units_decodable and self.decode_via_units ): dims, data, attrs, encoding = unpack_for_decoding(variable) units = pop_to(attrs, encoding, "units") if is_dtype_decodable: attrs_dtype = attrs.pop("dtype") if self.time_unit is None: time_unit = resolve_time_unit_from_attrs_dtype(attrs_dtype, name) else: time_unit = self.time_unit else: if self._emit_decode_timedelta_future_warning: var_string = f"the variable {name!r}" if name else "" emit_user_level_warning( "In a future version, xarray will not decode " f"{var_string} into a timedelta64 dtype based on the " "presence of a timedelta-like 'units' attribute by " "default. Instead it will rely on the presence of a " "timedelta64 'dtype' attribute, which is now xarray's " "default way of encoding timedelta64 values.\n" "To continue decoding into a timedelta64 dtype, either " "set `decode_timedelta=True` when opening this " "dataset, or add the attribute " "`dtype='timedelta64[ns]'` to this variable on disk.\n" "To opt-in to future behavior, set " "`decode_timedelta=False`.", FutureWarning, ) if self.time_unit is None: time_unit = "ns" else: time_unit = self.time_unit # Handle edge case that decode_via_dtype=False and # decode_via_units=True, and timedeltas were encoded with a # dtype attribute. We need to remove the dtype attribute # to prevent an error during round tripping. 
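                # Editor's note (hypothetical attributes, not from the upstream
                # source): a variable stored with attrs like
                # {"units": "seconds", "dtype": "timedelta64[s]"} that is being
                # decoded via its units alone still carries the stale "dtype"
                # attribute at this point; it is popped below so a later encode()
                # does not conflict with it.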
if has_timedelta_dtype: attrs.pop("dtype") dtype = np.dtype(f"timedelta64[{time_unit}]") transform = partial(decode_cf_timedelta, units=units, time_unit=time_unit) data = lazy_elemwise_func(data, transform, dtype=dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable

xarray-2025.09.0/xarray/coding/variables.py

"""Coders for individual Variable objects.""" from __future__ import annotations import warnings from collections.abc import Hashable, MutableMapping from functools import partial from typing import TYPE_CHECKING, Any, Union import numpy as np import pandas as pd from xarray.coding.common import ( SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to, safe_setitem, unpack_for_decoding, unpack_for_encoding, ) from xarray.coding.times import CFDatetimeCoder, CFTimedeltaCoder from xarray.core import dtypes, duck_array_ops, indexing from xarray.core.types import Self from xarray.core.variable import Variable if TYPE_CHECKING: T_VarTuple = tuple[tuple[Hashable, ...], Any, dict, dict] T_Name = Union[Hashable, None] class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin): """Decode arrays on the fly from non-native to native endianness This is useful for decoding arrays from netCDF3 files (which are all big endian) into native endianness, so they can be used with Cython functions, such as those found in bottleneck and pandas. >>> x = np.arange(5, dtype=">i2") >>> x.dtype dtype('>i2') >>> NativeEndiannessArray(x).dtype dtype('int16') >>> indexer = indexing.BasicIndexer((slice(None),)) >>> NativeEndiannessArray(x)[indexer].dtype dtype('int16') """ __slots__ = ("array",) def __init__(self, array) -> None: self.array = indexing.as_indexable(array) @property def dtype(self) -> np.dtype: return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize)) def _oindex_get(self, key): return type(self)(self.array.oindex[key]) def _vindex_get(self, key): return type(self)(self.array.vindex[key]) def __getitem__(self, key) -> Self: return type(self)(self.array[key]) def get_duck_array(self): return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype) def transpose(self, order): return type(self)(self.array.transpose(order)) class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin): """Decode arrays on the fly from integer to boolean datatype This is useful for decoding boolean arrays from integer typed netCDF variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype="i1") >>> x.dtype dtype('int8') >>> BoolTypeArray(x).dtype dtype('bool') >>> indexer = indexing.BasicIndexer((slice(None),)) >>> BoolTypeArray(x)[indexer].dtype dtype('bool') """ __slots__ = ("array",) def __init__(self, array) -> None: self.array = indexing.as_indexable(array) @property def dtype(self) -> np.dtype: return np.dtype("bool") def _oindex_get(self, key): return type(self)(self.array.oindex[key]) def _vindex_get(self, key): return type(self)(self.array.vindex[key]) def __getitem__(self, key) -> Self: return type(self)(self.array[key]) def get_duck_array(self): return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype) def transpose(self, order): return type(self)(self.array.transpose(order)) def _apply_mask( data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: np.typing.DTypeLike, ) -> np.ndarray: """Mask all matching values in a NumPy arrays.""" data = np.asarray(data, dtype=dtype) condition = False for fv in encoded_fill_values: condition |= data == fv return np.where(condition, decoded_fill_value, data) def _is_time_like(units): # test for time-like # return "datetime" for datetime-like # return "timedelta" for timedelta-like if units is None: return False time_strings = [ "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ] units = str(units) # to prevent detecting units like `days accumulated` as time-like # special casing for datetime-units and timedelta-units (GH-8269) if "since" in units: from xarray.coding.times import _unpack_netcdf_time_units try: _unpack_netcdf_time_units(units) except ValueError: return False return "datetime" else: return "timedelta" if any(tstr == units for tstr in time_strings) else False def _check_fill_values(attrs, name, dtype): """Check _FillValue and missing_value if available. Return dictionary with raw fill values and set with encoded fill values. Issue SerializationWarning if appropriate. 
""" raw_fill_dict = {} for attr in ("missing_value", "_FillValue"): pop_to(attrs, raw_fill_dict, attr, name=name) encoded_fill_values = set() for k in list(raw_fill_dict): v = raw_fill_dict[k] kfill = {fv for fv in np.ravel(v) if not pd.isnull(fv)} if not kfill and np.issubdtype(dtype, np.integer): warnings.warn( f"variable {name!r} has non-conforming {k!r} " f"{v!r} defined, dropping {k!r} entirely.", SerializationWarning, stacklevel=3, ) del raw_fill_dict[k] else: encoded_fill_values |= kfill if len(encoded_fill_values) > 1: warnings.warn( f"variable {name!r} has multiple fill values " f"{encoded_fill_values} defined, decoding all values to NaN.", SerializationWarning, stacklevel=3, ) return raw_fill_dict, encoded_fill_values def _convert_unsigned_fill_value( name: T_Name, data: Any, unsigned: str, raw_fill_value: Any, encoded_fill_values: set, ) -> Any: if data.dtype.kind == "i": if unsigned == "true": unsigned_dtype = np.dtype(f"u{data.dtype.itemsize}") transform = partial(np.asarray, dtype=unsigned_dtype) if raw_fill_value is not None: new_fill = np.array(raw_fill_value, dtype=data.dtype) encoded_fill_values.remove(raw_fill_value) # use view here to prevent OverflowError encoded_fill_values.add(new_fill.view(unsigned_dtype).item()) data = lazy_elemwise_func(data, transform, unsigned_dtype) elif data.dtype.kind == "u": if unsigned == "false": signed_dtype = np.dtype(f"i{data.dtype.itemsize}") transform = partial(np.asarray, dtype=signed_dtype) data = lazy_elemwise_func(data, transform, signed_dtype) if raw_fill_value is not None: new_fill = signed_dtype.type(raw_fill_value) encoded_fill_values.remove(raw_fill_value) encoded_fill_values.add(new_fill) else: warnings.warn( f"variable {name!r} has _Unsigned attribute but is not " "of integer type. Ignoring attribute.", SerializationWarning, stacklevel=3, ) return data def _encode_unsigned_fill_value( name: T_Name, fill_value: Any, encoded_dtype: np.dtype, ) -> Any: try: if hasattr(fill_value, "item"): # if numpy type, convert to python native integer to determine overflow # otherwise numpy unsigned ints will silently cast to the signed counterpart fill_value = fill_value.item() # passes if provided fill value fits in encoded on-disk type new_fill = encoded_dtype.type(fill_value) except OverflowError: encoded_kind_str = "signed" if encoded_dtype.kind == "i" else "unsigned" warnings.warn( f"variable {name!r} will be stored as {encoded_kind_str} integers " f"but _FillValue attribute can't be represented as a " f"{encoded_kind_str} integer.", SerializationWarning, stacklevel=3, ) # user probably provided the fill as the in-memory dtype, # convert to on-disk type to match CF standard orig_kind = "u" if encoded_dtype.kind == "i" else "i" orig_dtype = np.dtype(f"{orig_kind}{encoded_dtype.itemsize}") # use view here to prevent OverflowError new_fill = np.array(fill_value, dtype=orig_dtype).view(encoded_dtype).item() return new_fill class CFMaskCoder(VariableCoder): """Mask or unmask fill values according to CF conventions.""" def __init__( self, decode_times: bool | CFDatetimeCoder = False, decode_timedelta: bool | CFTimedeltaCoder = False, ) -> None: self.decode_times = decode_times self.decode_timedelta = decode_timedelta def encode(self, variable: Variable, name: T_Name = None): dims, data, attrs, encoding = unpack_for_encoding(variable) dtype = np.dtype(encoding.get("dtype", data.dtype)) # from netCDF best practices # https://docs.unidata.ucar.edu/nug/current/best_practices.html#bp_Unsigned-Data # "_Unsigned = "true" to indicate that # integer 
data should be treated as unsigned" has_unsigned = encoding.get("_Unsigned") is not None fv = encoding.get("_FillValue") mv = encoding.get("missing_value") fill_value = None fv_exists = fv is not None mv_exists = mv is not None if not fv_exists and not mv_exists: return variable if fv_exists and mv_exists and not duck_array_ops.allclose_or_equiv(fv, mv): raise ValueError( f"Variable {name!r} has conflicting _FillValue ({fv}) and missing_value ({mv}). Cannot encode data." ) if fv_exists: # Ensure _FillValue is cast to same dtype as data's # but not for packed data if has_unsigned: encoding["_FillValue"] = _encode_unsigned_fill_value(name, fv, dtype) elif "add_offset" not in encoding and "scale_factor" not in encoding: encoding["_FillValue"] = dtype.type(fv) else: encoding["_FillValue"] = fv fill_value = pop_to(encoding, attrs, "_FillValue", name=name) if mv_exists: # try to use _FillValue, if it exists to align both values # or use missing_value and ensure it's cast to same dtype as data's # but not for packed data encoding["missing_value"] = attrs.get( "_FillValue", ( _encode_unsigned_fill_value(name, mv, dtype) if has_unsigned else ( dtype.type(mv) if "add_offset" not in encoding and "scale_factor" not in encoding else mv ) ), ) fill_value = pop_to(encoding, attrs, "missing_value", name=name) # apply fillna if fill_value is not None and not pd.isnull(fill_value): # special case DateTime to properly handle NaT if _is_time_like(attrs.get("units")): if data.dtype.kind in "iu": data = duck_array_ops.where( data != np.iinfo(np.int64).min, data, fill_value ) else: # if we have float data (data was packed prior masking) # we just fillna data = duck_array_ops.fillna(data, fill_value) # but if the fill_value is of integer type # we need to round and cast if np.array(fill_value).dtype.kind in "iu": data = duck_array_ops.astype( duck_array_ops.around(data), type(fill_value) ) else: data = duck_array_ops.fillna(data, fill_value) if fill_value is not None and has_unsigned: pop_to(encoding, attrs, "_Unsigned") # XXX: Is this actually needed? Doesn't the backend handle this? 
# two-stage casting to prevent undefined cast from float to unsigned int # first float -> int with corresponding itemsize # second int -> int/uint to final itemsize signed_dtype = np.dtype(f"i{data.itemsize}") data = duck_array_ops.astype( duck_array_ops.astype( duck_array_ops.around(data), signed_dtype, copy=False ), dtype, copy=False, ) attrs["_FillValue"] = fill_value return Variable(dims, data, attrs, encoding, fastpath=True) def decode(self, variable: Variable, name: T_Name = None): raw_fill_dict, encoded_fill_values = _check_fill_values( variable.attrs, name, variable.dtype ) if "_Unsigned" not in variable.attrs and not raw_fill_dict: return variable dims, data, attrs, encoding = unpack_for_decoding(variable) # Even if _Unsigned is used, retain on-disk _FillValue for attr, value in raw_fill_dict.items(): safe_setitem(encoding, attr, value, name=name) if "_Unsigned" in attrs: unsigned = pop_to(attrs, encoding, "_Unsigned") data = _convert_unsigned_fill_value( name, data, unsigned, raw_fill_dict.get("_FillValue"), encoded_fill_values, ) if encoded_fill_values: dtype: np.typing.DTypeLike decoded_fill_value: Any # in case of packed data we have to decode into float # in any case if "scale_factor" in attrs or "add_offset" in attrs: dtype, decoded_fill_value = ( _choose_float_dtype(data.dtype, attrs), np.nan, ) else: # in case of no-packing special case DateTime/Timedelta to properly # handle NaT, we need to check if time-like will be decoded # or not in further processing is_time_like = _is_time_like(attrs.get("units")) if ( (is_time_like == "datetime" and self.decode_times) or (is_time_like == "timedelta" and self.decode_timedelta) ) and data.dtype.kind in "iu": dtype = np.int64 decoded_fill_value = np.iinfo(np.int64).min else: dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype) transform = partial( _apply_mask, encoded_fill_values=encoded_fill_values, decoded_fill_value=decoded_fill_value, dtype=dtype, ) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding, fastpath=True) def _scale_offset_decoding(data, scale_factor, add_offset, dtype: np.typing.DTypeLike): data = data.astype(dtype=dtype, copy=True) if scale_factor is not None: data *= scale_factor if add_offset is not None: data += add_offset return data def _choose_float_dtype( dtype: np.dtype, mapping: MutableMapping ) -> type[np.floating[Any]]: """Return a float dtype that can losslessly represent `dtype` values.""" # check scale/offset first to derive wanted float dtype # see https://github.com/pydata/xarray/issues/5597#issuecomment-879561954 scale_factor = mapping.get("scale_factor") add_offset = mapping.get("add_offset") if scale_factor is not None or add_offset is not None: # get the type from scale_factor/add_offset to determine # the needed floating point type if scale_factor is not None: scale_type = np.dtype(type(scale_factor)) if add_offset is not None: offset_type = np.dtype(type(add_offset)) # CF conforming, both scale_factor and add-offset are given and # of same floating point type (float32/64) if ( add_offset is not None and scale_factor is not None and offset_type == scale_type and scale_type in [np.float32, np.float64] ): # in case of int32 -> we need upcast to float64 # due to precision issues if dtype.itemsize == 4 and np.issubdtype(dtype, np.integer): return np.float64 return scale_type.type # Not CF conforming and add_offset given: # A scale factor is entirely safe (vanishing into the mantissa), # but a large integer offset could lead to loss of precision. 
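# (for example, float32 cannot represent 2**30 + 1 exactly, so int32 data combined
# with an add_offset of that magnitude would silently lose its low bits in float32)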
# Sensitivity analysis can be tricky, so we just use a float64 # if there's any offset at all - better unoptimised than wrong! if add_offset is not None: return np.float64 # return dtype depending on given scale_factor return scale_type.type # If no scale_factor or add_offset is given, use some general rules. # Keep float32 as-is. Upcast half-precision to single-precision, # because float16 is "intended for storage but not computation" if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating): return np.float32 # float32 can exactly represent all integers up to 24 bits if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer): return np.float32 # For all other types and circumstances, we just use float64. # Todo: with nc-complex from netcdf4-python >= 1.7.0 this is available # (safe because eg. complex numbers are not supported in NetCDF) return np.float64 class CFScaleOffsetCoder(VariableCoder): """Scale and offset variables according to CF conventions. Follows the formula: decode_values = encoded_values * scale_factor + add_offset """ def __init__( self, decode_times: bool | CFDatetimeCoder = False, decode_timedelta: bool | CFTimedeltaCoder = False, ) -> None: self.decode_times = decode_times self.decode_timedelta = decode_timedelta def encode(self, variable: Variable, name: T_Name = None) -> Variable: dims, data, attrs, encoding = unpack_for_encoding(variable) if "scale_factor" in encoding or "add_offset" in encoding: # if we have a _FillValue/masked_value we do not want to cast now # but leave that to CFMaskCoder dtype = data.dtype if "_FillValue" not in encoding and "missing_value" not in encoding: dtype = _choose_float_dtype(data.dtype, encoding) # but still we need a copy prevent changing original data data = duck_array_ops.astype(data, dtype=dtype, copy=True) if "add_offset" in encoding: data -= pop_to(encoding, attrs, "add_offset", name=name) if "scale_factor" in encoding: data /= pop_to(encoding, attrs, "scale_factor", name=name) return Variable(dims, data, attrs, encoding, fastpath=True) def decode(self, variable: Variable, name: T_Name = None) -> Variable: _attrs = variable.attrs if "scale_factor" in _attrs or "add_offset" in _attrs: dims, data, attrs, encoding = unpack_for_decoding(variable) scale_factor = pop_to(attrs, encoding, "scale_factor", name=name) add_offset = pop_to(attrs, encoding, "add_offset", name=name) if duck_array_ops.ndim(scale_factor) > 0: scale_factor = np.asarray(scale_factor).item() if duck_array_ops.ndim(add_offset) > 0: add_offset = np.asarray(add_offset).item() # if we have a _FillValue/masked_value in encoding we already have the wanted # floating point dtype here (via CFMaskCoder), so no check is necessary # only check in other cases and for time-like dtype = data.dtype is_time_like = _is_time_like(attrs.get("units")) if ( ("_FillValue" not in encoding and "missing_value" not in encoding) or (is_time_like == "datetime" and self.decode_times) or (is_time_like == "timedelta" and self.decode_timedelta) ): dtype = _choose_float_dtype(dtype, encoding) transform = partial( _scale_offset_decoding, scale_factor=scale_factor, add_offset=add_offset, dtype=dtype, ) data = lazy_elemwise_func(data, transform, dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable class DefaultFillvalueCoder(VariableCoder): """Encode default _FillValue if needed.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: dims, data, attrs, encoding = unpack_for_encoding(variable) # make NaN the fill value for float types if ( 
"_FillValue" not in attrs and "_FillValue" not in encoding and np.issubdtype(variable.dtype, np.floating) ): attrs["_FillValue"] = variable.dtype.type(np.nan) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: raise NotImplementedError() class BooleanCoder(VariableCoder): """Code boolean values.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: if ( (variable.dtype == bool) and ("dtype" not in variable.encoding) and ("dtype" not in variable.attrs) ): dims, data, attrs, encoding = unpack_for_encoding(variable) attrs["dtype"] = "bool" data = duck_array_ops.astype(data, dtype="i1", copy=True) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: if variable.attrs.get("dtype", False) == "bool": dims, data, attrs, encoding = unpack_for_decoding(variable) # overwrite (!) dtype in encoding, and remove from attrs # needed for correct subsequent encoding encoding["dtype"] = attrs.pop("dtype") data = BoolTypeArray(data) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable class EndianCoder(VariableCoder): """Decode Endianness to native.""" def encode(self): raise NotImplementedError() def decode(self, variable: Variable, name: T_Name = None) -> Variable: dims, data, attrs, encoding = unpack_for_decoding(variable) if not data.dtype.isnative: data = NativeEndiannessArray(data) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable class NonStringCoder(VariableCoder): """Encode NonString variables if dtypes differ.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: if "dtype" in variable.encoding and variable.encoding["dtype"] not in ( "S1", str, ): dims, data, attrs, encoding = unpack_for_encoding(variable) dtype = np.dtype(encoding.pop("dtype")) if dtype != variable.dtype: if np.issubdtype(dtype, np.integer): if ( np.issubdtype(variable.dtype, np.floating) and "_FillValue" not in variable.attrs and "missing_value" not in variable.attrs ): warnings.warn( f"saving variable {name} with floating " "point data as an integer dtype without " "any _FillValue to use for NaNs", SerializationWarning, stacklevel=10, ) data = duck_array_ops.round(data) data = duck_array_ops.astype(data, dtype=dtype) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self): raise NotImplementedError() class ObjectVLenStringCoder(VariableCoder): def encode(self): raise NotImplementedError def decode(self, variable: Variable, name: T_Name = None) -> Variable: if variable.dtype.kind == "O" and variable.encoding.get("dtype", False) is str: variable = variable.astype(variable.encoding["dtype"]) return variable else: return variable class Numpy2StringDTypeCoder(VariableCoder): # Convert Numpy 2 StringDType arrays to object arrays for backwards compatibility # TODO: remove this if / when we decide to allow StringDType arrays in Xarray def encode(self): raise NotImplementedError def decode(self, variable: Variable, name: T_Name = None) -> Variable: if variable.dtype.kind == "T": return variable.astype(object) else: return variable class NativeEnumCoder(VariableCoder): """Encode Enum into variable dtype metadata.""" def encode(self, variable: Variable, name: T_Name = None) -> Variable: if ( "dtype" in variable.encoding and np.dtype(variable.encoding["dtype"]).metadata and "enum" in 
variable.encoding["dtype"].metadata ): dims, data, attrs, encoding = unpack_for_encoding(variable) data = data.astype(dtype=variable.encoding.pop("dtype")) return Variable(dims, data, attrs, encoding, fastpath=True) else: return variable def decode(self, variable: Variable, name: T_Name = None) -> Variable: raise NotImplementedError() �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/���������������������������������������������������������������������0000775�0000000�0000000�00000000000�15056206164�0016135�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/__init__.py����������������������������������������������������������0000664�0000000�0000000�00000000000�15056206164�0020234�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/array_api_compat.py��������������������������������������������������0000664�0000000�0000000�00000004767�15056206164�0022037�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������import numpy as np from xarray.namedarray.pycompat import array_type def is_weak_scalar_type(t): return isinstance(t, bool | int | float | complex | str | bytes) def _future_array_api_result_type(*arrays_and_dtypes, xp): # fallback implementation for `xp.result_type` with python scalars. 
Can be removed once a # version of the Array API that includes https://github.com/data-apis/array-api/issues/805 # can be required strongly_dtyped = [t for t in arrays_and_dtypes if not is_weak_scalar_type(t)] weakly_dtyped = [t for t in arrays_and_dtypes if is_weak_scalar_type(t)] if not strongly_dtyped: strongly_dtyped = [ xp.asarray(x) if not isinstance(x, type) else x for x in weakly_dtyped ] weakly_dtyped = [] dtype = xp.result_type(*strongly_dtyped) if not weakly_dtyped: return dtype possible_dtypes = { complex: "complex64", float: "float32", int: "int8", bool: "bool", str: "str", bytes: "bytes", } dtypes = [possible_dtypes.get(type(x), "object") for x in weakly_dtyped] return xp.result_type(dtype, *dtypes) def result_type(*arrays_and_dtypes, xp) -> np.dtype: if xp is np or any( isinstance(getattr(t, "dtype", t), np.dtype) for t in arrays_and_dtypes ): return xp.result_type(*arrays_and_dtypes) else: return _future_array_api_result_type(*arrays_and_dtypes, xp=xp) def get_array_namespace(*values): def _get_single_namespace(x): if hasattr(x, "__array_namespace__"): return x.__array_namespace__() elif isinstance(x, array_type("cupy")): # cupy is fully compliant from xarray's perspective, but will not expose # __array_namespace__ until at least v14. Special case it for now import cupy as cp return cp else: return np namespaces = {_get_single_namespace(t) for t in values} non_numpy = namespaces - {np} if len(non_numpy) > 1: names = [module.__name__ for module in non_numpy] raise TypeError(f"Mixed array types {names} are not supported.") elif non_numpy: [xp] = non_numpy else: xp = np return xp def to_like_array(array, like): # Mostly for cupy compatibility, because cupy binary ops require all cupy arrays xp = get_array_namespace(like) if xp is not np: return xp.asarray(array) # avoid casting things like pint quantities to numpy arrays return array ���������xarray-2025.09.0/xarray/compat/dask_array_compat.py�������������������������������������������������0000664�0000000�0000000�00000002032�15056206164�0022167�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from typing import Any from xarray.namedarray.utils import module_available def reshape_blockwise( x: Any, shape: int | tuple[int, ...], chunks: tuple[tuple[int, ...], ...] 
| None = None, ): if module_available("dask", "2024.08.2"): from dask.array import reshape_blockwise return reshape_blockwise(x, shape=shape, chunks=chunks) else: return x.reshape(shape) def sliding_window_view( x, window_shape, axis=None, *, automatic_rechunk=True, **kwargs ): # Backcompat for handling `automatic_rechunk`, delete when dask>=2024.11.0 # Note that subok, writeable are unsupported by dask, so we ignore those in kwargs from dask.array.lib.stride_tricks import sliding_window_view if module_available("dask", "2024.11.0"): return sliding_window_view( x, window_shape=window_shape, axis=axis, automatic_rechunk=automatic_rechunk ) else: # automatic_rechunk is not supported return sliding_window_view(x, window_shape=window_shape, axis=axis) ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/dask_array_ops.py����������������������������������������������������0000664�0000000�0000000�00000011022�15056206164�0021504�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import math from xarray.compat.dask_array_compat import reshape_blockwise from xarray.core import dtypes, nputils def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1): """Wrapper to apply bottleneck moving window funcs on dask arrays""" dtype, _ = dtypes.maybe_promote(a.dtype) return a.data.map_overlap( moving_func, depth={axis: (window - 1, 0)}, axis=axis, dtype=dtype, window=window, min_count=min_count, ) def least_squares(lhs, rhs, rcond=None, skipna=False): import dask.array as da # The trick here is that the core dimension is axis 0. # All other dimensions need to be reshaped down to one axis for `lstsq` # (which only accepts 2D input) # and this needs to be undone after running `lstsq` # The order of values in the reshaped axes is irrelevant. # There are big gains to be had by simply reshaping the blocks on a blockwise # basis, and then undoing that transform. # We use a specific `reshape_blockwise` method in dask for this optimization if rhs.ndim > 2: out_shape = rhs.shape reshape_chunks = rhs.chunks rhs = reshape_blockwise(rhs, (rhs.shape[0], math.prod(rhs.shape[1:]))) else: out_shape = None lhs_da = da.from_array(lhs, chunks=(rhs.chunks[0], lhs.shape[1])) if skipna: added_dim = rhs.ndim == 1 if added_dim: rhs = rhs.reshape(rhs.shape[0], 1) results = da.apply_along_axis( nputils._nanpolyfit_1d, 0, rhs, lhs_da, dtype=float, shape=(lhs.shape[1] + 1,), rcond=rcond, ) coeffs = results[:-1, ...] residuals = results[-1, ...] 
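# if a trailing length-1 axis was added above to feed the 1-D rhs through
# apply_along_axis, squeeze it back out of the outputs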
if added_dim: coeffs = coeffs.reshape(coeffs.shape[0]) residuals = residuals.reshape(residuals.shape[0]) else: # Residuals here are (1, 1) but should be (K,) as rhs is (N, K) # See issue dask/dask#6516 coeffs, residuals, _, _ = da.linalg.lstsq(lhs_da, rhs) if out_shape is not None: coeffs = reshape_blockwise( coeffs, shape=(coeffs.shape[0], *out_shape[1:]), chunks=((coeffs.shape[0],), *reshape_chunks[1:]), ) residuals = reshape_blockwise( residuals, shape=out_shape[1:], chunks=reshape_chunks[1:] ) return coeffs, residuals def _fill_with_last_one(a, b): import numpy as np # cumreduction apply the push func over all the blocks first so, # the only missing part is filling the missing values using the # last data of the previous chunk return np.where(np.isnan(b), a, b) def _dtype_push(a, axis, dtype=None): from xarray.core.duck_array_ops import _push # Not sure why the blelloch algorithm force to receive a dtype return _push(a, axis=axis) def push(array, n, axis, method="blelloch"): """ Dask-aware bottleneck.push """ import dask.array as da import numpy as np from xarray.core.duck_array_ops import _push from xarray.core.nputils import nanlast if n is not None and all(n <= size for size in array.chunks[axis]): return array.map_overlap(_push, depth={axis: (n, 0)}, n=n, axis=axis) # TODO: Replace all this function # once https://github.com/pydata/xarray/issues/9229 being implemented pushed_array = da.reductions.cumreduction( func=_dtype_push, binop=_fill_with_last_one, ident=np.nan, x=array, axis=axis, dtype=array.dtype, method=method, preop=nanlast, ) if n is not None and 0 < n < array.shape[axis] - 1: # The idea is to calculate a cumulative sum of a bitmask # created from the isnan method, but every time a False is found the sum # must be restarted, and the final result indicates the amount of contiguous # nan values found in the original array on every position nan_bitmask = da.isnan(array, dtype=int) cumsum_nan = nan_bitmask.cumsum(axis=axis, method=method) valid_positions = da.where(nan_bitmask == 0, cumsum_nan, np.nan) valid_positions = push(valid_positions, None, axis, method=method) # All the NaNs at the beginning are converted to 0 valid_positions = da.nan_to_num(valid_positions) valid_positions = cumsum_nan - valid_positions valid_positions = valid_positions <= n pushed_array = da.where(valid_positions, pushed_array, np.nan) return pushed_array ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/npcompat.py����������������������������������������������������������0000664�0000000�0000000�00000006266�15056206164�0020342�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# Copyright (c) 2005-2011, NumPy Developers. # All rights reserved. 
# Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the NumPy Developers nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations from typing import Any try: # requires numpy>=2.0 from numpy import isdtype # type: ignore[attr-defined,unused-ignore] HAS_STRING_DTYPE = True except ImportError: import numpy as np from numpy.typing import DTypeLike kind_mapping = { "bool": np.bool_, "signed integer": np.signedinteger, "unsigned integer": np.unsignedinteger, "integral": np.integer, "real floating": np.floating, "complex floating": np.complexfloating, "numeric": np.number, } def isdtype( dtype: np.dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...] 
) -> bool: kinds = kind if isinstance(kind, tuple) else (kind,) str_kinds = {k for k in kinds if isinstance(k, str)} type_kinds = {k.type for k in kinds if isinstance(k, np.dtype)} if unknown_kind_types := set(kinds) - str_kinds - type_kinds: raise TypeError( f"kind must be str, np.dtype or a tuple of these, got {unknown_kind_types}" ) if unknown_kinds := {k for k in str_kinds if k not in kind_mapping}: raise ValueError( f"unknown kind: {unknown_kinds}, must be a np.dtype or one of {list(kind_mapping)}" ) # verified the dtypes already, no need to check again translated_kinds = {kind_mapping[k] for k in str_kinds} | type_kinds if isinstance(dtype, np.generic): return isinstance(dtype, translated_kinds) else: return any(np.issubdtype(dtype, k) for k in translated_kinds) HAS_STRING_DTYPE = False ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/pdcompat.py����������������������������������������������������������0000664�0000000�0000000�00000006622�15056206164�0020324�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# For reference, here is a copy of the pandas copyright notice: # BSD 3-Clause License # Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2011-2025, Open source contributors. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
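# The helpers below are small pandas shims: count_not_none and the no_default sentinel
# are copied from private pandas internals, while the Timestamp helpers handle unit
# conversion on older pandas and xarray's default nanosecond precision.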
from __future__ import annotations from enum import Enum from typing import Literal import pandas as pd from xarray.core.types import PDDatetimeUnitOptions def count_not_none(*args) -> int: """Compute the number of non-None arguments. Copied from pandas.core.common.count_not_none (not part of the public API) """ return sum(arg is not None for arg in args) class _NoDefault(Enum): """Used by pandas to specify a default value for a deprecated argument. Copied from pandas._libs.lib._NoDefault. See also: - pandas-dev/pandas#30788 - pandas-dev/pandas#40684 - pandas-dev/pandas#40715 - pandas-dev/pandas#47045 """ no_default = "NO_DEFAULT" def __repr__(self) -> str: return "" no_default = ( _NoDefault.no_default ) # Sentinel indicating the default value following pandas NoDefault = Literal[_NoDefault.no_default] # For typing following pandas def timestamp_as_unit(date: pd.Timestamp, unit: PDDatetimeUnitOptions) -> pd.Timestamp: """Convert the underlying int64 representation to the given unit. Compatibility function for pandas issue where "as_unit" is not defined for pandas.Timestamp in pandas versions < 2.2. Can be removed minimum pandas version is >= 2.2. """ if hasattr(date, "as_unit"): date = date.as_unit(unit) elif hasattr(date, "_as_unit"): date = date._as_unit(unit) return date def default_precision_timestamp(*args, **kwargs) -> pd.Timestamp: """Return a Timestamp object with the default precision. Xarray default is "ns". """ dt = pd.Timestamp(*args, **kwargs) if dt.unit != "ns": dt = timestamp_as_unit(dt, "ns") return dt ��������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/compat/toolzcompat.py�������������������������������������������������������0000664�0000000�0000000�00000004416�15056206164�0021067�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# This file contains functions copied from the toolz library in accordance # with its license. The original copyright notice is duplicated below. # Copyright (c) 2013 Matthew Rocklin # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # a. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # b. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # c. Neither the name of toolz nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. def sliding_window(n, seq): """A sequence of overlapping subsequences >>> list(sliding_window(2, [1, 2, 3, 4])) [(1, 2), (2, 3), (3, 4)] This function creates a sliding window suitable for transformations like sliding means / smoothing >>> mean = lambda seq: float(sum(seq)) / len(seq) >>> list(map(mean, sliding_window(2, [1, 2, 3, 4]))) [1.5, 2.5, 3.5] """ import collections import itertools return zip( *( collections.deque(itertools.islice(it, i), 0) or it for i, it in enumerate(itertools.tee(seq, n)) ), strict=False, ) ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/����������������������������������������������������������������0000775�0000000�0000000�00000000000�15056206164�0017214�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/__init__.py�����������������������������������������������������0000664�0000000�0000000�00000000000�15056206164�0021313�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/apply_ufunc.py��������������������������������������������������0000664�0000000�0000000�00000134621�15056206164�0022122�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Functions for applying functions that act on arrays to xarray's labeled data. 
""" from __future__ import annotations import functools import itertools import operator import warnings from collections import Counter from collections.abc import ( Callable, Hashable, Iterable, Iterator, Mapping, Sequence, ) from collections.abc import ( Set as AbstractSet, ) from typing import TYPE_CHECKING, Any, Literal import numpy as np from xarray.core import duck_array_ops, utils from xarray.core.formatting import limit_lines from xarray.core.indexes import Index, filter_indexes_from_coords from xarray.core.options import _get_keep_attrs from xarray.core.utils import is_dict_like, result_name from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import deep_align from xarray.structure.merge import merge_attrs, merge_coordinates_without_align if TYPE_CHECKING: from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import CombineAttrsOptions, JoinOptions MissingCoreDimOptions = Literal["raise", "copy", "drop"] _NO_FILL_VALUE = utils.ReprObject("") _JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"}) def _first_of_type(args, kind): """Return either first object of type 'kind' or raise if not found.""" for arg in args: if isinstance(arg, kind): return arg raise ValueError("This should be unreachable.") def _all_of_type(args, kind): """Return all objects of type 'kind'""" return [arg for arg in args if isinstance(arg, kind)] class _UFuncSignature: """Core dimensions signature for a given function. Based on the signature provided by generalized ufuncs in NumPy. Attributes ---------- input_core_dims : tuple[tuple, ...] Core dimension names on each input variable. output_core_dims : tuple[tuple, ...] Core dimension names on each output variable. 
""" __slots__ = ( "_all_core_dims", "_all_input_core_dims", "_all_output_core_dims", "input_core_dims", "output_core_dims", ) def __init__(self, input_core_dims, output_core_dims=((),)): self.input_core_dims = tuple(tuple(a) for a in input_core_dims) self.output_core_dims = tuple(tuple(a) for a in output_core_dims) self._all_input_core_dims = None self._all_output_core_dims = None self._all_core_dims = None @property def all_input_core_dims(self): if self._all_input_core_dims is None: self._all_input_core_dims = frozenset( dim for dims in self.input_core_dims for dim in dims ) return self._all_input_core_dims @property def all_output_core_dims(self): if self._all_output_core_dims is None: self._all_output_core_dims = frozenset( dim for dims in self.output_core_dims for dim in dims ) return self._all_output_core_dims @property def all_core_dims(self): if self._all_core_dims is None: self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims return self._all_core_dims @property def dims_map(self): return { core_dim: f"dim{n}" for n, core_dim in enumerate(sorted(self.all_core_dims)) } @property def num_inputs(self): return len(self.input_core_dims) @property def num_outputs(self): return len(self.output_core_dims) def __eq__(self, other): try: return ( self.input_core_dims == other.input_core_dims and self.output_core_dims == other.output_core_dims ) except AttributeError: return False def __ne__(self, other): return not self == other def __repr__(self): return f"{type(self).__name__}({list(self.input_core_dims)!r}, {list(self.output_core_dims)!r})" def __str__(self): comma_separated = ",".join lhs = comma_separated( f"({comma_separated(dims)})" for dims in self.input_core_dims ) rhs = comma_separated( f"({comma_separated(dims)})" for dims in self.output_core_dims ) return f"{lhs}->{rhs}" def to_gufunc_string(self, exclude_dims=frozenset()): """Create an equivalent signature string for a NumPy gufunc. Unlike __str__, handles dimensions that don't map to Python identifiers. Also creates unique names for input_core_dims contained in exclude_dims. """ input_core_dims = [ [self.dims_map[dim] for dim in core_dims] for core_dims in self.input_core_dims ] output_core_dims = [ [self.dims_map[dim] for dim in core_dims] for core_dims in self.output_core_dims ] # enumerate input_core_dims contained in exclude_dims to make them unique if exclude_dims: exclude_dims = [self.dims_map[dim] for dim in exclude_dims] counter: Counter = Counter() def _enumerate(dim): if dim in exclude_dims: n = counter[dim] counter.update([dim]) dim = f"{dim}_{n}" return dim input_core_dims = [ [_enumerate(dim) for dim in arg] for arg in input_core_dims ] alt_signature = type(self)(input_core_dims, output_core_dims) return str(alt_signature) def _get_coords_list(args: Iterable[Any]) -> list[Coordinates]: coords_list = [] for arg in args: try: coords = arg.coords except AttributeError: pass # skip this argument else: coords_list.append(coords) return coords_list def build_output_coords_and_indexes( args: Iterable[Any], signature: _UFuncSignature, exclude_dims: AbstractSet = frozenset(), combine_attrs: CombineAttrsOptions = "override", ) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]: """Build output coordinates and indexes for an operation. Parameters ---------- args : Iterable List of raw operation arguments. Any valid types for xarray operations are OK, e.g., scalars, Variable, DataArray, Dataset. signature : _UfuncSignature Core dimensions signature for the operation. 
exclude_dims : set, optional Dimensions excluded from the operation. Coordinates along these dimensions are dropped. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "drop" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- Dictionaries of Variable and Index objects with merged coordinates. """ coords_list = _get_coords_list(args) if len(coords_list) == 1 and not exclude_dims: # we can skip the expensive merge (unpacked_coords,) = coords_list merged_vars = dict(unpacked_coords.variables) merged_indexes = dict(unpacked_coords.xindexes) else: merged_vars, merged_indexes = merge_coordinates_without_align( coords_list, exclude_dims=exclude_dims, combine_attrs=combine_attrs ) output_coords = [] output_indexes = [] for output_dims in signature.output_core_dims: dropped_dims = signature.all_input_core_dims - set(output_dims) if dropped_dims: filtered_coords = { k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims) } filtered_indexes = filter_indexes_from_coords( merged_indexes, set(filtered_coords) ) else: filtered_coords = merged_vars filtered_indexes = merged_indexes output_coords.append(filtered_coords) output_indexes.append(filtered_indexes) return output_coords, output_indexes def apply_dataarray_vfunc( func, *args, signature: _UFuncSignature, join: JoinOptions = "inner", exclude_dims=frozenset(), keep_attrs="override", ) -> tuple[DataArray, ...] | DataArray: """Apply a variable level function over DataArray, Variable and/or ndarray objects. """ from xarray.core.dataarray import DataArray if len(args) > 1: args = tuple( deep_align( args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False, ) ) objs = _all_of_type(args, DataArray) if keep_attrs == "drop": name = result_name(args) else: first_obj = _first_of_type(args, DataArray) name = first_obj.name result_coords, result_indexes = build_output_coords_and_indexes( args, signature, exclude_dims, combine_attrs=keep_attrs ) data_vars = [getattr(a, "variable", a) for a in args] result_var = func(*data_vars) out: tuple[DataArray, ...] 
| DataArray if signature.num_outputs > 1: out = tuple( DataArray( variable, coords=coords, indexes=indexes, name=name, fastpath=True ) for variable, coords, indexes in zip( result_var, result_coords, result_indexes, strict=True ) ) else: (coords,) = result_coords (indexes,) = result_indexes out = DataArray( result_var, coords=coords, indexes=indexes, name=name, fastpath=True ) attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs) if isinstance(out, tuple): for da in out: da.attrs = attrs else: out.attrs = attrs return out def ordered_set_union(all_keys: list[Iterable]) -> Iterable: return {key: None for keys in all_keys for key in keys}.keys() def ordered_set_intersection(all_keys: list[Iterable]) -> Iterable: intersection = set(all_keys[0]) for keys in all_keys[1:]: intersection.intersection_update(keys) return [key for key in all_keys[0] if key in intersection] def assert_and_return_exact_match(all_keys): first_keys = all_keys[0] for keys in all_keys[1:]: if keys != first_keys: raise ValueError( "exact match required for all data variable names, " f"but {list(keys)} != {list(first_keys)}: {set(keys) ^ set(first_keys)} are not in both." ) return first_keys _JOINERS: dict[str, Callable] = { "inner": ordered_set_intersection, "outer": ordered_set_union, "left": operator.itemgetter(0), "right": operator.itemgetter(-1), "exact": assert_and_return_exact_match, } def join_dict_keys(objects: Iterable[Mapping | Any], how: str = "inner") -> Iterable: joiner = _JOINERS[how] all_keys = [obj.keys() for obj in objects if hasattr(obj, "keys")] return joiner(all_keys) def collect_dict_values( objects: Iterable[Mapping | Any], keys: Iterable, fill_value: object = None ) -> list[list]: return [ [obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects] for key in keys ] def _as_variables_or_variable(arg) -> Variable | tuple[Variable]: try: return arg.variables except AttributeError: try: return arg.variable except AttributeError: return arg def _unpack_dict_tuples( result_vars: Mapping[Any, tuple[Variable, ...]], num_outputs: int ) -> tuple[dict[Hashable, Variable], ...]: out: tuple[dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs)) for name, values in result_vars.items(): for value, results_dict in zip(values, out, strict=True): results_dict[name] = value return out def _check_core_dims(signature, variable_args, name): """ Check if an arg has all the core dims required by the signature. Slightly awkward design, of returning the error message. But we want to give a detailed error message, which requires inspecting the variable in the inner loop. """ missing = [] for i, (core_dims, variable_arg) in enumerate( zip(signature.input_core_dims, variable_args, strict=True) ): # Check whether all the dims are on the variable. Note that we need the # `hasattr` to check for a dims property, to protect against the case where # a numpy array is passed in. if hasattr(variable_arg, "dims") and set(core_dims) - set(variable_arg.dims): missing += [[i, variable_arg, core_dims]] if missing: message = "" for i, variable_arg, core_dims in missing: message += f"Missing core dims {set(core_dims) - set(variable_arg.dims)} from arg number {i + 1} on a variable named `{name}`:\n{variable_arg}\n\n" message += "Either add the core dimension, or if passing a dataset alternatively pass `on_missing_core_dim` as `copy` or `drop`. 
" return message return True def apply_dict_of_variables_vfunc( func, *args, signature: _UFuncSignature, join="inner", fill_value=None, on_missing_core_dim: MissingCoreDimOptions = "raise", ): """Apply a variable level function over dicts of DataArray, DataArray, Variable and ndarray objects. """ args = tuple(_as_variables_or_variable(arg) for arg in args) names = join_dict_keys(args, how=join) grouped_by_name = collect_dict_values(args, names, fill_value) result_vars = {} for name, variable_args in zip(names, grouped_by_name, strict=True): core_dim_present = _check_core_dims(signature, variable_args, name) if core_dim_present is True: result_vars[name] = func(*variable_args) elif on_missing_core_dim == "raise": raise ValueError(core_dim_present) elif on_missing_core_dim == "copy": result_vars[name] = variable_args[0] elif on_missing_core_dim == "drop": pass else: raise ValueError( f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}" ) if signature.num_outputs > 1: return _unpack_dict_tuples(result_vars, signature.num_outputs) else: return result_vars def _fast_dataset( variables: dict[Hashable, Variable], coord_variables: Mapping[Hashable, Variable], indexes: dict[Hashable, Index], ) -> Dataset: """Create a dataset as quickly as possible. Beware: the `variables` dict is modified INPLACE. """ from xarray.core.dataset import Dataset variables.update(coord_variables) coord_names = set(coord_variables) return Dataset._construct_direct(variables, coord_names, indexes=indexes) def apply_dataset_vfunc( func, *args, signature: _UFuncSignature, join="inner", dataset_join="exact", fill_value=_NO_FILL_VALUE, exclude_dims=frozenset(), keep_attrs="override", on_missing_core_dim: MissingCoreDimOptions = "raise", ) -> Dataset | tuple[Dataset, ...]: """Apply a variable level function over Dataset, dict of DataArray, DataArray, Variable and/or ndarray objects. """ from xarray.core.dataset import Dataset if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE: raise TypeError( "to apply an operation to datasets with different " "data variables with apply_ufunc, you must supply the " "dataset_fill_value argument." ) objs = _all_of_type(args, Dataset) if len(args) > 1: args = tuple( deep_align( args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False, ) ) list_of_coords, list_of_indexes = build_output_coords_and_indexes( args, signature, exclude_dims, combine_attrs=keep_attrs ) args = tuple(getattr(arg, "data_vars", arg) for arg in args) result_vars = apply_dict_of_variables_vfunc( func, *args, signature=signature, join=dataset_join, fill_value=fill_value, on_missing_core_dim=on_missing_core_dim, ) out: Dataset | tuple[Dataset, ...] 
if signature.num_outputs > 1: out = tuple( itertools.starmap( _fast_dataset, zip(result_vars, list_of_coords, list_of_indexes, strict=True), ) ) else: (coord_vars,) = list_of_coords (indexes,) = list_of_indexes out = _fast_dataset(result_vars, coord_vars, indexes=indexes) attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs) if isinstance(out, tuple): for ds in out: ds.attrs = attrs else: out.attrs = attrs return out def _iter_over_selections(obj, dim, values): """Iterate over selections of an xarray object in the provided order.""" from xarray.core.groupby import _dummy_copy dummy = None for value in values: try: obj_sel = obj.sel(**{dim: value}) except (KeyError, IndexError): if dummy is None: dummy = _dummy_copy(obj) obj_sel = dummy yield obj_sel def apply_groupby_func(func, *args): """Apply a dataset or datarray level function over GroupBy, Dataset, DataArray, Variable and/or ndarray objects. """ from xarray.core.groupby import GroupBy, peek_at groupbys = [arg for arg in args if isinstance(arg, GroupBy)] assert groupbys, "must have at least one groupby to iterate over" first_groupby = groupbys[0] (grouper,) = first_groupby.groupers if any(not grouper.group.equals(gb.groupers[0].group) for gb in groupbys[1:]): # type: ignore[union-attr] raise ValueError( "apply_ufunc can only perform operations over " "multiple GroupBy objects at once if they are all " "grouped the same way" ) grouped_dim = grouper.name unique_values = grouper.unique_coord.values iterators = [] for arg in args: iterator: Iterator[Any] if isinstance(arg, GroupBy): iterator = (value for _, value in arg) elif hasattr(arg, "dims") and grouped_dim in arg.dims: if isinstance(arg, Variable): raise ValueError( "groupby operations cannot be performed with " "xarray.Variable objects that share a dimension with " "the grouped dimension" ) iterator = _iter_over_selections(arg, grouped_dim, unique_values) else: iterator = itertools.repeat(arg) iterators.append(iterator) applied: Iterator = itertools.starmap(func, zip(*iterators, strict=False)) applied_example, applied = peek_at(applied) combine = first_groupby._combine # type: ignore[attr-defined] if isinstance(applied_example, tuple): combined = tuple(combine(output) for output in zip(*applied, strict=True)) else: combined = combine(applied) return combined def unified_dim_sizes( variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset() ) -> dict[Hashable, int]: dim_sizes: dict[Hashable, int] = {} for var in variables: if len(set(var.dims)) < len(var.dims): raise ValueError( "broadcasting cannot handle duplicate " f"dimensions on a variable: {list(var.dims)}" ) for dim, size in zip(var.dims, var.shape, strict=True): if dim not in exclude_dims: if dim not in dim_sizes: dim_sizes[dim] = size elif dim_sizes[dim] != size: raise ValueError( "operands cannot be broadcast together " "with mismatched lengths for dimension " f"{dim}: {dim_sizes[dim]} vs {size}" ) return dim_sizes SLICE_NONE = slice(None) def broadcast_compat_data( variable: Variable, broadcast_dims: tuple[Hashable, ...], core_dims: tuple[Hashable, ...], ) -> Any: data = variable.data old_dims = variable.dims new_dims = broadcast_dims + core_dims if new_dims == old_dims: # optimize for the typical case return data set_old_dims = set(old_dims) set_new_dims = set(new_dims) unexpected_dims = [d for d in old_dims if d not in set_new_dims] if unexpected_dims: raise ValueError( "operand to apply_ufunc encountered unexpected " f"dimensions {unexpected_dims!r} on an input variable: these are core " 
"dimensions on other input or output variables" ) # for consistency with numpy, keep broadcast dimensions to the left old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims) reordered_dims = old_broadcast_dims + core_dims if reordered_dims != old_dims: order = tuple(old_dims.index(d) for d in reordered_dims) data = duck_array_ops.transpose(data, order) if new_dims != reordered_dims: key_parts: list[slice | None] = [] for dim in new_dims: if dim in set_old_dims: key_parts.append(SLICE_NONE) elif key_parts: # no need to insert new axes at the beginning that are already # handled by broadcasting key_parts.append(np.newaxis) data = data[tuple(key_parts)] return data def _vectorize(func, signature, output_dtypes, exclude_dims): if signature.all_core_dims: func = np.vectorize( func, otypes=output_dtypes, signature=signature.to_gufunc_string(exclude_dims), ) else: func = np.vectorize(func, otypes=output_dtypes) return func def apply_variable_ufunc( func, *args, signature: _UFuncSignature, exclude_dims=frozenset(), dask="forbidden", output_dtypes=None, vectorize=False, keep_attrs="override", dask_gufunc_kwargs=None, ) -> Variable | tuple[Variable, ...]: """Apply a ndarray level function over Variable and/or ndarray objects.""" from xarray.core.formatting import short_array_repr from xarray.core.variable import Variable, as_compatible_data dim_sizes = unified_dim_sizes( (a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims ) broadcast_dims = tuple( dim for dim in dim_sizes if dim not in signature.all_core_dims ) output_dims = [broadcast_dims + out for out in signature.output_core_dims] input_data = [ ( broadcast_compat_data(arg, broadcast_dims, core_dims) if isinstance(arg, Variable) else arg ) for arg, core_dims in zip(args, signature.input_core_dims, strict=True) ] if any(is_chunked_array(array) for array in input_data): if dask == "forbidden": raise ValueError( "apply_ufunc encountered a chunked array on an " "argument, but handling for chunked arrays has not " "been enabled. Either set the ``dask`` argument " "or load your data into memory first with " "``.load()`` or ``.compute()``" ) elif dask == "parallelized": chunkmanager = get_chunked_array_type(*input_data) numpy_func = func if dask_gufunc_kwargs is None: dask_gufunc_kwargs = {} else: dask_gufunc_kwargs = dask_gufunc_kwargs.copy() allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None) if allow_rechunk is None: for n, (data, core_dims) in enumerate( zip(input_data, signature.input_core_dims, strict=True) ): if is_chunked_array(data): # core dimensions cannot span multiple chunks for axis, dim in enumerate(core_dims, start=-len(core_dims)): if len(data.chunks[axis]) != 1: raise ValueError( f"dimension {dim} on {n}th function argument to " "apply_ufunc with dask='parallelized' consists of " "multiple chunks, but is also a core dimension. To " "fix, either rechunk into a single array chunk along " f"this dimension, i.e., ``.chunk(dict({dim}=-1))``, or " "pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` " "but beware that this may significantly increase memory usage." 
) dask_gufunc_kwargs["allow_rechunk"] = True output_sizes = dask_gufunc_kwargs.pop("output_sizes", {}) if output_sizes: output_sizes_renamed = {} for key, value in output_sizes.items(): if key not in signature.all_output_core_dims: raise ValueError( f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims" ) output_sizes_renamed[signature.dims_map[key]] = value dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed for key in signature.all_output_core_dims: if ( key not in signature.all_input_core_dims or key in exclude_dims ) and key not in output_sizes: raise ValueError( f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'" ) def func(*arrays): res = chunkmanager.apply_gufunc( numpy_func, signature.to_gufunc_string(exclude_dims), *arrays, vectorize=vectorize, output_dtypes=output_dtypes, **dask_gufunc_kwargs, ) return res elif dask == "allowed": pass else: raise ValueError( f"unknown setting for chunked array handling in apply_ufunc: {dask}" ) elif vectorize: func = _vectorize( func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims ) result_data = func(*input_data) if signature.num_outputs == 1: result_data = (result_data,) elif ( not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs ): raise ValueError( f"applied function does not have the number of " f"outputs specified in the ufunc signature. " f"Received a {type(result_data)} with {len(result_data)} elements. " f"Expected a tuple of {signature.num_outputs} elements:\n\n" f"{limit_lines(repr(result_data), limit=10)}" ) objs = _all_of_type(args, Variable) attrs = merge_attrs( [obj.attrs for obj in objs], combine_attrs=keep_attrs, ) output: list[Variable] = [] for dims, data in zip(output_dims, result_data, strict=True): data = as_compatible_data(data) if data.ndim != len(dims): raise ValueError( "applied function returned data with an unexpected " f"number of dimensions. Received {data.ndim} dimension(s) but " f"expected {len(dims)} dimensions with names {dims!r}, from:\n\n" f"{short_array_repr(data)}" ) var = Variable(dims, data, fastpath=True) for dim, new_size in var.sizes.items(): if dim in dim_sizes and new_size != dim_sizes[dim]: raise ValueError( f"size of dimension '{dim}' on inputs was unexpectedly " f"changed by applied function from {dim_sizes[dim]} to {new_size}. Only " "dimensions specified in ``exclude_dims`` with " "xarray.apply_ufunc are allowed to change size. " "The data returned was:\n\n" f"{short_array_repr(data)}" ) var.attrs = attrs output.append(var) if signature.num_outputs == 1: return output[0] else: return tuple(output) def apply_array_ufunc(func, *args, dask="forbidden"): """Apply a ndarray level function over ndarray objects.""" if any(is_chunked_array(arg) for arg in args): if dask == "forbidden": raise ValueError( "apply_ufunc encountered a dask array on an " "argument, but handling for dask arrays has not " "been enabled. 
Either set the ``dask`` argument " "or load your data into memory first with " "``.load()`` or ``.compute()``" ) elif dask == "parallelized": raise ValueError( "cannot use dask='parallelized' for apply_ufunc " "unless at least one input is an xarray object" ) elif dask == "allowed": pass else: raise ValueError(f"unknown setting for dask array handling: {dask}") return func(*args) def apply_ufunc( func: Callable, *args: Any, input_core_dims: Sequence[Sequence] | None = None, output_core_dims: Sequence[Sequence] | None = ((),), exclude_dims: AbstractSet = frozenset(), vectorize: bool = False, join: JoinOptions = "exact", dataset_join: str = "exact", dataset_fill_value: object = _NO_FILL_VALUE, keep_attrs: bool | str | None = None, kwargs: Mapping | None = None, dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden", output_dtypes: Sequence | None = None, output_sizes: Mapping[Any, int] | None = None, meta: Any = None, dask_gufunc_kwargs: dict[str, Any] | None = None, on_missing_core_dim: MissingCoreDimOptions = "raise", ) -> Any: """Apply a vectorized function for unlabeled arrays on xarray objects. The function will be mapped over the data variable(s) of the input arguments using xarray's standard rules for labeled computation, including alignment, broadcasting, looping over GroupBy/Dataset variables, and merging of coordinates. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on unlabeled arrays (``.data``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. *args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \ numpy.ndarray, dask.array.Array or scalar Mix of labeled and/or unlabeled arrays to which to apply the function. input_core_dims : sequence of sequence, optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. For example, ``input_core_dims=[[], ['time']]`` indicates that all dimensions on the first argument and all dimensions other than 'time' on the second argument should be broadcast. Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. output_core_dims : list of tuple, optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` outputs exactly one array, with axes corresponding to each broadcast dimension. Core dimensions are assumed to appear as the last dimensions of each output in the provided order. exclude_dims : set, optional Core dimensions on the inputs to exclude from alignment and broadcasting entirely. Any input coordinates along these dimensions will be dropped. Each excluded dimension must also appear in ``input_core_dims`` for at least one argument. Only dimensions listed here are allowed to change size between input and output objects. 
vectorize : bool, optional If True, then assume ``func`` only takes arrays defined over core dimensions as input and vectorize it automatically with :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining variables of Dataset objects with mismatched data variables. - 'outer': take variables from both Dataset objects - 'inner': take only overlapped variables - 'left': take only variables from the first object - 'right': take only variables from the last object - 'exact': data variables on all Dataset objects must match exactly dataset_fill_value : optional Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional - 'drop' or False: empty attrs on returned xarray object. - 'identical': all attrs must be the same on every object. - 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value. - 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped. - 'override' or True: skip comparing and copy attrs from the first object to the result. kwargs : dict, optional Optional keyword arguments passed directly on to call ``func``. dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden" How to handle applying to objects containing lazy data in the form of dask arrays: - 'forbidden' (default): raise an error if a dask array is encountered. - 'allowed': pass dask arrays directly on to ``func``. Prefer this option if ``func`` natively supports dask arrays. - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output arguments are supported. Only use this option if ``func`` does not natively support dask arrays (e.g. converts them to numpy arrays). dask_gufunc_kwargs : dict, optional Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk`` and ``meta``. output_dtypes : list of dtype, optional Optional list of output dtypes. Only used if ``dask='parallelized'`` or ``vectorize=True``. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. Only used if dask='parallelized' and new dimensions (not found on inputs) appear on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs`` parameter. It will be removed as direct parameter in a future version. meta : optional Size-0 object representing the type of array wrapped by dask array. Passed on to :py:func:`dask.array.apply_gufunc`. 
``meta`` should be given in the ``dask_gufunc_kwargs`` parameter . It will be removed as direct parameter a future version. on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise" How to handle missing core dimensions on input variables. Returns ------- Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or numpy.ndarray, the first type on that list to appear on an input. Notes ----- This function is designed for the more common case where ``func`` can work on numpy arrays. If ``func`` needs to manipulate a whole xarray object subset to each block it is possible to use :py:func:`xarray.map_blocks`. Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``. Examples -------- Calculate the vector magnitude of two arguments: >>> def magnitude(a, b): ... func = lambda x, y: np.sqrt(x**2 + y**2) ... return xr.apply_ufunc(func, a, b) ... You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset` objects, with automatically preserved dimensions and coordinates, e.g., >>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) Size: 24B array([1.41421356, 2.82842712, 4.24264069]) Coordinates: * x (x) float64 24B 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: >>> magnitude(3, 4) np.float64(5.0) >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) Size: 24B array([1., 2., 3.]) Coordinates: * x (x) float64 24B 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: Compute the mean (``.mean``) over one dimension: >>> def mean(obj, dim): ... # note: apply always moves core dimensions to the end ... return apply_ufunc( ... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1} ... ) ... Inner product over a specific dimension (like :py:func:`dot`): >>> def _inner(x, y): ... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis]) ... return result[..., 0, 0] ... >>> def inner_product(a, b, dim): ... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]]) ... Stack objects along a new dimension (like :py:func:`concat`): >>> def stack(objects, dim, new_coord): ... # note: this version does not stack coordinates ... func = lambda *x: np.stack(x, axis=-1) ... result = apply_ufunc( ... func, ... *objects, ... output_core_dims=[[dim]], ... join="outer", ... dataset_fill_value=np.nan ... ) ... result[dim] = new_coord ... return result ... If your function is not vectorized but can be applied only to core dimensions, you can use ``vectorize=True`` to turn into a vectorized function. This wraps :py:func:`numpy.vectorize`, so the operation isn't terribly fast. Here we'll use it to calculate the distance between empirical samples from two probability distributions, using a scipy function that needs to be applied to vectors: >>> import scipy.stats >>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"): ... return apply_ufunc( ... scipy.stats.wasserstein_distance, ... first_samples, ... second_samples, ... input_core_dims=[[dim], [dim]], ... vectorize=True, ... ) ... Most of NumPy's builtin functions already broadcast their inputs appropriately for use in ``apply_ufunc``. You may find helper functions such as :py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`. 
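    If your wrapped function natively works only on NumPy arrays but your data
    is chunked with dask, ``dask="parallelized"`` defers the computation via
    :py:func:`dask.array.apply_gufunc`. A minimal sketch (the name ``lazy_std``
    is illustrative only; it assumes dask is installed and that the core
    dimension occupies a single chunk, e.g. after ``.chunk({dim: -1})``):

    >>> def lazy_std(obj, dim):
    ...     # the core dimension is moved to the last axis before ``func`` is called
    ...     return apply_ufunc(
    ...         np.std,
    ...         obj,
    ...         input_core_dims=[[dim]],
    ...         kwargs={"axis": -1},
    ...         dask="parallelized",
    ...         output_dtypes=[float],
    ...     )
    ...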
See Also -------- numpy.broadcast_arrays numba.vectorize numba.guvectorize dask.array.apply_gufunc xarray.map_blocks Notes ----- :ref:`dask.automatic-parallelization` User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`. :doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc` Advanced Tutorial on applying numpy function using :py:func:`apply_ufunc` References ---------- .. [1] https://numpy.org/doc/stable/reference/ufuncs.html .. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html """ from xarray.core.dataarray import DataArray from xarray.core.groupby import GroupBy from xarray.core.variable import Variable if input_core_dims is None: input_core_dims = ((),) * (len(args)) elif len(input_core_dims) != len(args): raise ValueError( f"input_core_dims must be None or a tuple with the length same to " f"the number of arguments. " f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, " f" but number of args is {len(args)}." ) if kwargs is None: kwargs = {} signature = _UFuncSignature(input_core_dims, output_core_dims) if exclude_dims: if not isinstance(exclude_dims, set): raise TypeError( f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead." ) if not exclude_dims <= signature.all_core_dims: raise ValueError( f"each dimension in `exclude_dims` must also be a " f"core dimension in the function signature. " f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension" ) # handle dask_gufunc_kwargs if dask == "parallelized": if dask_gufunc_kwargs is None: dask_gufunc_kwargs = {} else: dask_gufunc_kwargs = dask_gufunc_kwargs.copy() # todo: remove warnings after deprecation cycle if meta is not None: warnings.warn( "``meta`` should be given in the ``dask_gufunc_kwargs`` parameter." " It will be removed as direct parameter in a future version.", FutureWarning, stacklevel=2, ) dask_gufunc_kwargs.setdefault("meta", meta) if output_sizes is not None: warnings.warn( "``output_sizes`` should be given in the ``dask_gufunc_kwargs`` " "parameter. 
It will be removed as direct parameter in a future " "version.", FutureWarning, stacklevel=2, ) dask_gufunc_kwargs.setdefault("output_sizes", output_sizes) if kwargs: if "where" in kwargs and isinstance(kwargs["where"], DataArray): kwargs["where"] = kwargs["where"].data # type:ignore[index] func = functools.partial(func, **kwargs) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) if isinstance(keep_attrs, bool): keep_attrs = "override" if keep_attrs else "drop" variables_vfunc = functools.partial( apply_variable_ufunc, func, signature=signature, exclude_dims=exclude_dims, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, output_dtypes=output_dtypes, dask_gufunc_kwargs=dask_gufunc_kwargs, ) # feed groupby-apply_ufunc through apply_groupby_func if any(isinstance(a, GroupBy) for a in args): this_apply = functools.partial( apply_ufunc, func, input_core_dims=input_core_dims, output_core_dims=output_core_dims, exclude_dims=exclude_dims, join=join, dataset_join=dataset_join, dataset_fill_value=dataset_fill_value, keep_attrs=keep_attrs, dask=dask, vectorize=vectorize, output_dtypes=output_dtypes, dask_gufunc_kwargs=dask_gufunc_kwargs, ) return apply_groupby_func(this_apply, *args) # feed datasets apply_variable_ufunc through apply_dataset_vfunc elif any(is_dict_like(a) for a in args): return apply_dataset_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, dataset_join=dataset_join, fill_value=dataset_fill_value, keep_attrs=keep_attrs, on_missing_core_dim=on_missing_core_dim, ) # feed DataArray apply_variable_ufunc through apply_dataarray_vfunc elif any(isinstance(a, DataArray) for a in args): return apply_dataarray_vfunc( variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, keep_attrs=keep_attrs, ) # feed Variables directly through apply_variable_ufunc elif any(isinstance(a, Variable) for a in args): return variables_vfunc(*args) else: # feed anything else through apply_array_ufunc return apply_array_ufunc(func, *args, dask=dask) ���������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/arithmetic.py���������������������������������������������������0000664�0000000�0000000�00000010351�15056206164�0021717�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Base classes implementing arithmetic for xarray objects.""" from __future__ import annotations import numbers import numpy as np from xarray.computation.ops import IncludeNumpySameMethods, IncludeReduceMethods # _typed_ops.py is a generated file from xarray.core._typed_ops import ( DataArrayGroupByOpsMixin, DataArrayOpsMixin, DatasetGroupByOpsMixin, DatasetOpsMixin, VariableOpsMixin, ) from xarray.core.common import ImplementsArrayReduce, ImplementsDatasetReduce from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.namedarray.utils import is_duck_array class SupportsArithmetic: """Base class for xarray types that support arithmetic. Used by Dataset, DataArray, Variable and GroupBy. """ __slots__ = () # TODO: implement special methods for arithmetic here rather than injecting # them in xarray/computation/ops.py. 
Ideally, do so by inheriting from # numpy.lib.mixins.NDArrayOperatorsMixin. # TODO: allow extending this with some sort of registration system _HANDLED_TYPES = ( np.generic, numbers.Number, bytes, str, ) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): from xarray.computation.apply_ufunc import apply_ufunc # See the docstring example for numpy.lib.mixins.NDArrayOperatorsMixin. out = kwargs.get("out", ()) for x in inputs + out: if not is_duck_array(x) and not isinstance( x, self._HANDLED_TYPES + (SupportsArithmetic,) ): return NotImplemented if ufunc.signature is not None: raise NotImplementedError( f"{ufunc} not supported: xarray objects do not directly implement " "generalized ufuncs. Instead, use xarray.apply_ufunc or " "explicitly convert to xarray objects to NumPy arrays " "(e.g., with `.values`)." ) if method != "__call__": # TODO: support other methods, e.g., reduce and accumulate. raise NotImplementedError( f"{method} method for ufunc {ufunc} is not implemented on xarray objects, " "which currently only support the __call__ method. As an " "alternative, consider explicitly converting xarray objects " "to NumPy arrays (e.g., with `.values`)." ) if any(isinstance(o, SupportsArithmetic) for o in out): # TODO: implement this with logic like _inplace_binary_op. This # will be necessary to use NDArrayOperatorsMixin. raise NotImplementedError( "xarray objects are not yet supported in the `out` argument " "for ufuncs. As an alternative, consider explicitly " "converting xarray objects to NumPy arrays (e.g., with " "`.values`)." ) join = dataset_join = OPTIONS["arithmetic_join"] return apply_ufunc( ufunc, *inputs, input_core_dims=((),) * ufunc.nin, output_core_dims=((),) * ufunc.nout, join=join, dataset_join=dataset_join, dataset_fill_value=np.nan, kwargs=kwargs, dask="allowed", keep_attrs=_get_keep_attrs(default=True), ) class VariableArithmetic( ImplementsArrayReduce, IncludeNumpySameMethods, SupportsArithmetic, VariableOpsMixin, ): __slots__ = () # prioritize our operations over those of numpy.ndarray (priority=0) __array_priority__ = 50 class DatasetArithmetic( ImplementsDatasetReduce, SupportsArithmetic, DatasetOpsMixin, ): __slots__ = () __array_priority__ = 50 class DataArrayArithmetic( ImplementsArrayReduce, IncludeNumpySameMethods, SupportsArithmetic, DataArrayOpsMixin, ): __slots__ = () # priority must be higher than Variable to properly work with binary ufuncs __array_priority__ = 60 class DataArrayGroupbyArithmetic( SupportsArithmetic, DataArrayGroupByOpsMixin, ): __slots__ = () class DatasetGroupbyArithmetic( SupportsArithmetic, DatasetGroupByOpsMixin, ): __slots__ = () class CoarsenArithmetic(IncludeReduceMethods): __slots__ = () ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/computation.py��������������������������������������������������0000664�0000000�0000000�00000075100�15056206164�0022133�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������""" Functions for applying functions that act on arrays to 
xarray's labeled data. NOTE: This module is currently large and contains various computational functionality. The long-term plan is to break it down into more focused submodules. """ from __future__ import annotations import functools from collections import Counter from collections.abc import ( Callable, Hashable, ) from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np from xarray.compat.array_api_compat import to_like_array from xarray.core import dtypes, duck_array_ops, utils from xarray.core.common import zeros_like from xarray.core.duck_array_ops import datetime_to_numeric from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import Dims, T_DataArray from xarray.core.utils import ( is_scalar, parse_dims_as_set, ) from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import align from xarray.util.deprecation_helpers import deprecate_dims if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset MissingCoreDimOptions = Literal["raise", "copy", "drop"] _NO_FILL_VALUE = utils.ReprObject("") _JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"}) def cov( da_a: T_DataArray, da_b: T_DataArray, dim: Dims = None, ddof: int = 1, weights: T_DataArray | None = None, ) -> T_DataArray: """ Compute covariance between two DataArray objects along a shared dimension. Parameters ---------- da_a : DataArray Array to compute. da_b : DataArray Array to compute. dim : str, iterable of hashable, "..." or None, optional The dimension along which the covariance will be computed ddof : int, default: 1 If ddof=1, covariance is normalized by N-1, giving an unbiased estimate, else normalization is by N. weights : DataArray, optional Array of weights. Returns ------- covariance : DataArray See Also -------- pandas.Series.cov : corresponding pandas function xarray.corr : respective function to calculate correlation Examples -------- >>> from xarray import DataArray >>> da_a = DataArray( ... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_a Size: 72B array([[1. , 2. , 3. ], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) Coordinates: * space (space) >> da_b = DataArray( ... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_b Size: 72B array([[ 0.2, 0.4, 0.6], [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) Coordinates: * space (space) >> xr.cov(da_a, da_b) Size: 8B array(-3.53055556) >>> xr.cov(da_a, da_b, dim="time") Size: 24B array([ 0.2 , -0.5 , 1.69333333]) Coordinates: * space (space) >> weights = DataArray( ... [4, 2, 1], ... dims=("space"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ], ... ) >>> weights Size: 24B array([4, 2, 1]) Coordinates: * space (space) >> xr.cov(da_a, da_b, dim="space", weights=weights) Size: 24B array([-4.69346939, -4.49632653, -3.37959184]) Coordinates: * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 """ from xarray.core.dataarray import DataArray if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]): raise TypeError( "Only xr.DataArray is supported." f"Given {[type(arr) for arr in [da_a, da_b]]}." 
) if weights is not None and not isinstance(weights, DataArray): raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.") return _cov_corr(da_a, da_b, weights=weights, dim=dim, ddof=ddof, method="cov") def corr( da_a: T_DataArray, da_b: T_DataArray, dim: Dims = None, weights: T_DataArray | None = None, ) -> T_DataArray: """ Compute the Pearson correlation coefficient between two DataArray objects along a shared dimension. Parameters ---------- da_a : DataArray Array to compute. da_b : DataArray Array to compute. dim : str, iterable of hashable, "..." or None, optional The dimension along which the correlation will be computed weights : DataArray, optional Array of weights. Returns ------- correlation: DataArray See Also -------- pandas.Series.corr : corresponding pandas function xarray.cov : underlying covariance function Examples -------- >>> from xarray import DataArray >>> da_a = DataArray( ... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_a Size: 72B array([[1. , 2. , 3. ], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]) Coordinates: * space (space) >> da_b = DataArray( ... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]), ... dims=("space", "time"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)), ... ], ... ) >>> da_b Size: 72B array([[ 0.2, 0.4, 0.6], [15. , 10. , 5. ], [ 3.2, 0.6, 1.8]]) Coordinates: * space (space) >> xr.corr(da_a, da_b) Size: 8B array(-0.57087777) >>> xr.corr(da_a, da_b, dim="time") Size: 24B array([ 1., -1., 1.]) Coordinates: * space (space) >> weights = DataArray( ... [4, 2, 1], ... dims=("space"), ... coords=[ ... ("space", ["IA", "IL", "IN"]), ... ], ... ) >>> weights Size: 24B array([4, 2, 1]) Coordinates: * space (space) >> xr.corr(da_a, da_b, dim="space", weights=weights) Size: 24B array([-0.50240504, -0.83215028, -0.99057446]) Coordinates: * time (time) datetime64[ns] 24B 2000-01-01 2000-01-02 2000-01-03 """ from xarray.core.dataarray import DataArray if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]): raise TypeError( "Only xr.DataArray is supported." f"Given {[type(arr) for arr in [da_a, da_b]]}." ) if weights is not None and not isinstance(weights, DataArray): raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.") return _cov_corr(da_a, da_b, weights=weights, dim=dim, method="corr") def _cov_corr( da_a: T_DataArray, da_b: T_DataArray, weights: T_DataArray | None = None, dim: Dims = None, ddof: int = 0, method: Literal["cov", "corr"] | None = None, ) -> T_DataArray: """ Internal method for xr.cov() and xr.corr() so only have to sanitize the input arrays once and we don't repeat code. """ # 1. Broadcast the two arrays da_a, da_b = align(da_a, da_b, join="inner", copy=False) # 2. Ignore the nans valid_values = da_a.notnull() & da_b.notnull() da_a = da_a.where(valid_values) da_b = da_b.where(valid_values) # 3. Detrend along the given dim if weights is not None: demeaned_da_a = da_a - da_a.weighted(weights).mean(dim=dim) demeaned_da_b = da_b - da_b.weighted(weights).mean(dim=dim) else: demeaned_da_a = da_a - da_a.mean(dim=dim) demeaned_da_b = da_b - da_b.mean(dim=dim) # 4. Compute covariance along the given dim # N.B. `skipna=True` is required or auto-covariance is computed incorrectly. E.g. 
# Try xr.cov(da,da) for da = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]) if weights is not None: cov = ( (demeaned_da_a.conj() * demeaned_da_b) .weighted(weights) .mean(dim=dim, skipna=True) ) else: cov = (demeaned_da_a.conj() * demeaned_da_b).mean(dim=dim, skipna=True) if method == "cov": # Adjust covariance for degrees of freedom valid_count = valid_values.sum(dim) adjust = valid_count / (valid_count - ddof) # I think the cast is required because of `T_DataArray` + `T_Xarray` (would be # the same with `T_DatasetOrArray`) # https://github.com/pydata/xarray/pull/8384#issuecomment-1784228026 return cast(T_DataArray, cov * adjust) else: # Compute std and corr if weights is not None: da_a_std = da_a.weighted(weights).std(dim=dim) da_b_std = da_b.weighted(weights).std(dim=dim) else: da_a_std = da_a.std(dim=dim) da_b_std = da_b.std(dim=dim) corr = cov / (da_a_std * da_b_std) return cast(T_DataArray, corr) def cross( a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable ) -> DataArray | Variable: """ Compute the cross product of two (arrays of) vectors. The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. The vectors in `a` and `b` are defined by the values along the dimension `dim` and can have sizes 1, 2 or 3. Where the size of either `a` or `b` is 1 or 2, the remaining components of the input vector is assumed to be zero and the cross product calculated accordingly. In cases where both input vectors have dimension 2, the z-component of the cross product is returned. Parameters ---------- a, b : DataArray or Variable Components of the first and second vector(s). dim : hashable The dimension along which the cross product will be computed. Must be available in both vectors. Examples -------- Vector cross-product with 3 dimensions: >>> a = xr.DataArray([1, 2, 3]) >>> b = xr.DataArray([4, 5, 6]) >>> xr.cross(a, b, dim="dim_0") Size: 24B array([-3, 6, -3]) Dimensions without coordinates: dim_0 Vector cross-product with 3 dimensions but zeros at the last axis yields the same results as with 2 dimensions: >>> a = xr.DataArray([1, 2, 0]) >>> b = xr.DataArray([4, 5, 0]) >>> xr.cross(a, b, dim="dim_0") Size: 24B array([ 0, 0, -3]) Dimensions without coordinates: dim_0 Multiple vector cross-products. Note that the direction of the cross product vector is defined by the right-hand rule: >>> a = xr.DataArray( ... [[1, 2, 3], [4, 5, 6]], ... dims=("time", "cartesian"), ... coords=dict( ... time=(["time"], [0, 1]), ... cartesian=(["cartesian"], ["x", "y", "z"]), ... ), ... ) >>> b = xr.DataArray( ... [[4, 5, 6], [1, 2, 3]], ... dims=("time", "cartesian"), ... coords=dict( ... time=(["time"], [0, 1]), ... cartesian=(["cartesian"], ["x", "y", "z"]), ... ), ... ) >>> xr.cross(a, b, dim="cartesian") Size: 48B array([[-3, 6, -3], [ 3, -6, 3]]) Coordinates: * time (time) int64 16B 0 1 * cartesian (cartesian) >> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3]))) >>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6]))) >>> c = xr.cross( ... ds_a.to_dataarray("cartesian"), ... ds_b.to_dataarray("cartesian"), ... dim="cartesian", ... 
) >>> c.to_dataset(dim="cartesian") Size: 24B Dimensions: (dim_0: 1) Dimensions without coordinates: dim_0 Data variables: x (dim_0) int64 8B -3 y (dim_0) int64 8B 6 z (dim_0) int64 8B -3 See Also -------- numpy.cross : Corresponding numpy function """ if dim not in a.dims: raise ValueError(f"Dimension {dim!r} not on a") elif dim not in b.dims: raise ValueError(f"Dimension {dim!r} not on b") if not 1 <= a.sizes[dim] <= 3: raise ValueError( f"The size of {dim!r} on a must be 1, 2, or 3 to be " f"compatible with a cross product but is {a.sizes[dim]}" ) elif not 1 <= b.sizes[dim] <= 3: raise ValueError( f"The size of {dim!r} on b must be 1, 2, or 3 to be " f"compatible with a cross product but is {b.sizes[dim]}" ) all_dims = list(dict.fromkeys(a.dims + b.dims)) if a.sizes[dim] != b.sizes[dim]: # Arrays have different sizes. Append zeros where the smaller # array is missing a value, zeros will not affect np.cross: if ( not isinstance(a, Variable) # Only used to make mypy happy. and dim in getattr(a, "coords", {}) and not isinstance(b, Variable) # Only used to make mypy happy. and dim in getattr(b, "coords", {}) ): # If the arrays have coords we know which indexes to fill # with zeros: a, b = align( a, b, fill_value=0, join="outer", exclude=set(all_dims) - {dim}, ) elif min(a.sizes[dim], b.sizes[dim]) == 2: # If the array doesn't have coords we can only infer # that it has composite values if the size is at least 2. # Once padded, rechunk the padded array because apply_ufunc # requires core dimensions not to be chunked: if a.sizes[dim] < b.sizes[dim]: a = a.pad({dim: (0, 1)}, constant_values=0) # TODO: Should pad or apply_ufunc handle correct chunking? a = a.chunk({dim: -1}) if is_chunked_array(a.data) else a else: b = b.pad({dim: (0, 1)}, constant_values=0) # TODO: Should pad or apply_ufunc handle correct chunking? b = b.chunk({dim: -1}) if is_chunked_array(b.data) else b else: raise ValueError( f"{dim!r} on {'a' if a.sizes[dim] == 1 else 'b'} is incompatible:" " dimensions without coordinates must have have a length of 2 or 3" ) from xarray.computation.apply_ufunc import apply_ufunc c = apply_ufunc( duck_array_ops.cross, a, b, input_core_dims=[[dim], [dim]], output_core_dims=[[dim] if a.sizes[dim] == 3 else []], dask="parallelized", output_dtypes=[np.result_type(a, b)], ) c = c.transpose(*all_dims, missing_dims="ignore") return c @deprecate_dims def dot( *arrays, dim: Dims = None, **kwargs: Any, ): """Generalized dot product for xarray objects. Like ``np.einsum``, but provides a simpler interface based on array dimension names. Parameters ---------- *arrays : DataArray or Variable Arrays to compute. dim : str, iterable of hashable, "..." or None, optional Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. If not specified, then all the common dimensions are summed over. **kwargs : dict Additional keyword arguments passed to ``numpy.einsum`` or ``dask.array.einsum`` Returns ------- DataArray See Also -------- numpy.einsum dask.array.einsum opt_einsum.contract Notes ----- We recommend installing the optional ``opt_einsum`` package, or alternatively passing ``optimize=True``, which is passed through to ``np.einsum``, and works for most array backends. 
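    As a sketch of that recommendation (``fast_dot`` is an illustrative name
    only; whether ``opt_einsum`` is actually used depends on the installed
    backend):

    >>> def fast_dot(a, b, dim):
    ...     # ``optimize`` is forwarded to numpy.einsum / dask.array.einsum
    ...     return xr.dot(a, b, dim=dim, optimize=True)
    ...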
Examples -------- >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"]) >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"]) >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"]) >>> da_a Size: 48B array([[0, 1], [2, 3], [4, 5]]) Dimensions without coordinates: a, b >>> da_b Size: 96B array([[[ 0, 1], [ 2, 3]], [[ 4, 5], [ 6, 7]], [[ 8, 9], [10, 11]]]) Dimensions without coordinates: a, b, c >>> da_c Size: 48B array([[0, 1, 2], [3, 4, 5]]) Dimensions without coordinates: c, d >>> xr.dot(da_a, da_b, dim=["a", "b"]) Size: 16B array([110, 125]) Dimensions without coordinates: c >>> xr.dot(da_a, da_b, dim=["a"]) Size: 32B array([[40, 46], [70, 79]]) Dimensions without coordinates: b, c >>> xr.dot(da_a, da_b, da_c, dim=["b", "c"]) Size: 72B array([[ 9, 14, 19], [ 93, 150, 207], [273, 446, 619]]) Dimensions without coordinates: a, d >>> xr.dot(da_a, da_b) Size: 16B array([110, 125]) Dimensions without coordinates: c >>> xr.dot(da_a, da_b, dim=...) Size: 8B array(235) """ from xarray.core.dataarray import DataArray if any(not isinstance(arr, Variable | DataArray) for arr in arrays): raise TypeError( "Only xr.DataArray and xr.Variable are supported." f"Given {[type(arr) for arr in arrays]}." ) if len(arrays) == 0: raise TypeError("At least one array should be given.") common_dims: set[Hashable] = set.intersection(*(set(arr.dims) for arr in arrays)) all_dims = [] for arr in arrays: all_dims += [d for d in arr.dims if d not in all_dims] einsum_axes = "abcdefghijklmnopqrstuvwxyz" dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)} dot_dims: set[Hashable] if dim is None: # find dimensions that occur more than once dim_counts: Counter = Counter() for arr in arrays: dim_counts.update(arr.dims) dot_dims = {d for d, c in dim_counts.items() if c > 1} else: dot_dims = parse_dims_as_set(dim, all_dims=set(all_dims)) # dimensions to be parallelized broadcast_dims = common_dims - dot_dims input_core_dims = [ [d for d in arr.dims if d not in broadcast_dims] for arr in arrays ] output_core_dims = [ [d for d in all_dims if d not in dot_dims and d not in broadcast_dims] ] # construct einsum subscripts, such as '...abc,...ab->...c' # Note: input_core_dims are always moved to the last position subscripts_list = [ "..." + "".join(dim_map[d] for d in ds) for ds in input_core_dims ] subscripts = ",".join(subscripts_list) subscripts += "->..." + "".join(dim_map[d] for d in output_core_dims[0]) join = OPTIONS["arithmetic_join"] # using "inner" emulates `(a * b).sum()` for all joins (except "exact") if join != "exact": join = "inner" # subscripts should be passed to np.einsum as arg, not as kwargs. We need # to construct a partial function for apply_ufunc to work. func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs) from xarray.computation.apply_ufunc import apply_ufunc result = apply_ufunc( func, *arrays, input_core_dims=input_core_dims, output_core_dims=output_core_dims, join=join, dask="allowed", ) return result.transpose(*all_dims, missing_dims="ignore") def where(cond, x, y, keep_attrs=None): """Return elements from `x` or `y` depending on `cond`. Performs xarray-like broadcasting across input arguments. All dimension coordinates on `x` and `y` must be aligned with each other and with `cond`. Parameters ---------- cond : scalar, array, Variable, DataArray or Dataset When True, return values from `x`, otherwise returns values from `y`. 
x : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is True y : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is False keep_attrs : bool or str or callable, optional How to treat attrs. If True, keep the attrs of `x`. Returns ------- Dataset, DataArray, Variable or array In priority order: Dataset, DataArray, Variable or array, whichever type appears as an input argument. Examples -------- >>> x = xr.DataArray( ... 0.1 * np.arange(10), ... dims=["lat"], ... coords={"lat": np.arange(10)}, ... name="sst", ... ) >>> x Size: 80B array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) Coordinates: * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 >>> xr.where(x < 0.5, x, x * 100) Size: 80B array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ]) Coordinates: * lat (lat) int64 80B 0 1 2 3 4 5 6 7 8 9 >>> y = xr.DataArray( ... 0.1 * np.arange(9).reshape(3, 3), ... dims=["lat", "lon"], ... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)}, ... name="sst", ... ) >>> y Size: 72B array([[0. , 0.1, 0.2], [0.3, 0.4, 0.5], [0.6, 0.7, 0.8]]) Coordinates: * lat (lat) int64 24B 0 1 2 * lon (lon) int64 24B 10 11 12 >>> xr.where(y.lat < 1, y, -1) Size: 72B array([[ 0. , 0.1, 0.2], [-1. , -1. , -1. ], [-1. , -1. , -1. ]]) Coordinates: * lat (lat) int64 24B 0 1 2 * lon (lon) int64 24B 10 11 12 >>> cond = xr.DataArray([True, False], dims=["x"]) >>> x = xr.DataArray([1, 2], dims=["y"]) >>> xr.where(cond, x, 0) Size: 32B array([[1, 2], [0, 0]]) Dimensions without coordinates: x, y See Also -------- numpy.where : corresponding numpy function Dataset.where, DataArray.where : equivalent methods """ from xarray.core.dataset import Dataset if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) # alignment for three arguments is complicated, so don't support it yet from xarray.computation.apply_ufunc import apply_ufunc result = apply_ufunc( duck_array_ops.where, cond, x, y, join="exact", dataset_join="exact", dask="allowed", keep_attrs=keep_attrs, ) # keep the attributes of x, the second parameter, by default to # be consistent with the `where` method of `DataArray` and `Dataset` # rebuild the attrs from x at each level of the output, which could be # Dataset, DataArray, or Variable, and also handle coords if keep_attrs is True and hasattr(result, "attrs"): if isinstance(y, Dataset) and not isinstance(x, Dataset): # handle special case where x gets promoted to Dataset result.attrs = {} if getattr(x, "name", None) in result.data_vars: result[x.name].attrs = getattr(x, "attrs", {}) else: # otherwise, fill in global attrs and variable attrs (if they exist) result.attrs = getattr(x, "attrs", {}) for v in getattr(result, "data_vars", []): result[v].attrs = getattr(getattr(x, v, None), "attrs", {}) for c in getattr(result, "coords", []): # always fill coord attrs of x result[c].attrs = getattr(getattr(x, c, None), "attrs", {}) return result @overload def polyval( coord: DataArray, coeffs: DataArray, degree_dim: Hashable = "degree" ) -> DataArray: ... @overload def polyval( coord: DataArray, coeffs: Dataset, degree_dim: Hashable = "degree" ) -> Dataset: ... @overload def polyval( coord: Dataset, coeffs: DataArray, degree_dim: Hashable = "degree" ) -> Dataset: ... @overload def polyval( coord: Dataset, coeffs: Dataset, degree_dim: Hashable = "degree" ) -> Dataset: ... @overload def polyval( coord: Dataset | DataArray, coeffs: Dataset | DataArray, degree_dim: Hashable = "degree", ) -> Dataset | DataArray: ... 
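# Illustrative usage sketch for ``polyval`` (comments only; ``_coeffs`` and ``_x``
# are hypothetical names, not part of the module). Evaluating
# p(x) = 1 + 2*x + 3*x**2 at x = [0, 1, 2] yields [1, 6, 17]:
#
#     _coeffs = DataArray([1, 2, 3], dims="degree", coords={"degree": [0, 1, 2]})
#     _x = DataArray([0, 1, 2], dims="x")
#     polyval(_x, _coeffs)  # -> DataArray([1, 6, 17]) along dim "x"
#
# The implementation below evaluates the polynomial with Horner's method, so only
# ``max_deg`` multiplications and additions per element are needed.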
def polyval( coord: Dataset | DataArray, coeffs: Dataset | DataArray, degree_dim: Hashable = "degree", ) -> Dataset | DataArray: """Evaluate a polynomial at specific values Parameters ---------- coord : DataArray or Dataset Values at which to evaluate the polynomial. coeffs : DataArray or Dataset Coefficients of the polynomial. degree_dim : Hashable, default: "degree" Name of the polynomial degree dimension in `coeffs`. Returns ------- DataArray or Dataset Evaluated polynomial. See Also -------- xarray.DataArray.polyfit numpy.polynomial.polynomial.polyval """ if degree_dim not in coeffs._indexes: raise ValueError( f"Dimension `{degree_dim}` should be a coordinate variable with labels." ) if not np.issubdtype(coeffs[degree_dim].dtype, np.integer): raise ValueError( f"Dimension `{degree_dim}` should be of integer dtype. Received {coeffs[degree_dim].dtype} instead." ) max_deg = coeffs[degree_dim].max().item() coeffs = coeffs.reindex( {degree_dim: np.arange(max_deg + 1)}, fill_value=0, copy=False ) coord = _ensure_numeric(coord) # using Horner's method # https://en.wikipedia.org/wiki/Horner%27s_method res = zeros_like(coord) + coeffs.isel({degree_dim: max_deg}, drop=True) for deg in range(max_deg - 1, -1, -1): res *= coord res += coeffs.isel({degree_dim: deg}, drop=True) return res def _ensure_numeric(data: Dataset | DataArray) -> Dataset | DataArray: """Converts all datetime64 variables to float64 Parameters ---------- data : DataArray or Dataset Variables with possible datetime dtypes. Returns ------- DataArray or Dataset Variables with datetime64 dtypes converted to float64. """ from xarray.core.dataset import Dataset def _cfoffset(x: DataArray) -> Any: scalar = x.compute().data[0] if not is_scalar(scalar): # we do not get a scalar back on dask == 2021.04.1 scalar = scalar.item() return type(scalar)(1970, 1, 1) def to_floatable(x: DataArray) -> DataArray: if x.dtype.kind in "MO": # datetimes (CFIndexes are object type) offset = ( np.datetime64("1970-01-01") if x.dtype.kind == "M" else _cfoffset(x) ) return x.copy( data=datetime_to_numeric(x.data, offset=offset, datetime_unit="ns"), ) elif x.dtype.kind == "m": # timedeltas return duck_array_ops.astype(x, dtype=float) return x if isinstance(data, Dataset): return data.map(to_floatable) else: return to_floatable(data) def _calc_idxminmax( *, array, func: Callable, dim: Hashable | None = None, skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, ): """Apply common operations for idxmin and idxmax.""" # This function doesn't make sense for scalars so don't try if not array.ndim: raise ValueError("This function does not apply for scalars") if dim is not None: pass # Use the dim if available elif array.ndim == 1: # it is okay to guess the dim if there is only 1 dim = array.dims[0] else: # The dim is not specified and ambiguous. Don't guess. raise ValueError("Must supply 'dim' argument for multidimensional arrays") if dim not in array.dims: raise KeyError( f"Dimension {dim!r} not found in array dimensions {array.dims!r}" ) if dim not in array.coords: raise KeyError( f"Dimension {dim!r} is not one of the coordinates {tuple(array.coords.keys())}" ) # These are dtypes with NaN values argmin and argmax can handle na_dtypes = "cfO" if skipna or (skipna is None and array.dtype.kind in na_dtypes): # Need to skip NaN values since argmin and argmax can't handle them allna = array.isnull().all(dim) array = array.where(~allna, 0) # This will run argmin or argmax. 
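    # The integer positions it returns are then used to look up the corresponding
    # coordinate labels along ``dim``. For chunked (e.g. dask) inputs the
    # coordinate is first wrapped by the same chunk manager so the label lookup
    # stays lazy; afterwards any all-NaN slices are refilled with ``fill_value``
    # and the attrs of the argmin/argmax result are copied onto the output.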
indx = func(array, dim=dim, axis=None, keep_attrs=keep_attrs, skipna=skipna) # Handle chunked arrays (e.g. dask). coord = array[dim]._variable.to_base_variable() if is_chunked_array(array.data): chunkmanager = get_chunked_array_type(array.data) coord_array = chunkmanager.from_array( array[dim].data, chunks=((array.sizes[dim],),) ) coord = coord.copy(data=coord_array) else: coord = coord.copy(data=to_like_array(array[dim].data, array.data)) res = indx._replace(coord[(indx.variable,)]).rename(dim) if skipna or (skipna is None and array.dtype.kind in na_dtypes): # Put the NaN values back in after removing them res = res.where(~allna, fill_value) # Copy attributes from argmin/argmax, if any res.attrs = indx.attrs return res ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/fit.py����������������������������������������������������������0000664�0000000�0000000�00000046672�15056206164�0020367�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Fitting operations for DataArrays and Datasets.""" from __future__ import annotations import inspect import warnings from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from inspect import Parameter from types import MappingProxyType from typing import ( Any, Literal, Union, ) import numpy as np # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning except ImportError: from numpy import RankWarning # type: ignore[no-redef,attr-defined,unused-ignore] from xarray.computation.apply_ufunc import apply_ufunc from xarray.computation.computation import _ensure_numeric, where from xarray.core.dataarray import DataArray from xarray.core.duck_array_ops import is_duck_dask_array, least_squares from xarray.core.types import Dims, ErrorOptions from xarray.core.variable import Variable from xarray.structure.alignment import broadcast def _get_func_args(func, param_names): """Use `inspect.signature` to try accessing `func` args. Otherwise, ensure they are provided by user. """ func_args: Union[dict[str, Parameter], MappingProxyType[str, Parameter]] try: func_args = inspect.signature(func).parameters except ValueError as err: func_args = {} # type: ignore[assignment,unused-ignore] if not param_names: raise ValueError( "Unable to inspect `func` signature, and `param_names` was not provided." ) from err if param_names: params = param_names else: params = list(func_args)[1:] if any( (p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values() ): raise ValueError( "`param_names` must be provided because `func` takes variable length arguments." ) return params, func_args def _initialize_curvefit_params(params, p0, bounds, func_args): """Set initial guess and bounds for curvefit. 
Priority: 1) passed args 2) func signature 3) scipy defaults """ def _initialize_feasible(lb, ub): # Mimics functionality of scipy.optimize.minpack._initialize_feasible lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) p0 = where( lb_finite, where( ub_finite, 0.5 * (lb + ub), # both bounds finite lb + 1, # lower bound finite, upper infinite ), where( ub_finite, ub - 1, # lower bound infinite, upper finite 0, # both bounds infinite ), ) return p0 param_defaults = dict.fromkeys(params, 1) bounds_defaults = dict.fromkeys(params, (-np.inf, np.inf)) for p in params: if p in func_args and func_args[p].default is not func_args[p].empty: param_defaults[p] = func_args[p].default if p in bounds: lb, ub = bounds[p] bounds_defaults[p] = (lb, ub) param_defaults[p] = where( (param_defaults[p] < lb) | (param_defaults[p] > ub), _initialize_feasible(lb, ub), param_defaults[p], ) if p in p0: param_defaults[p] = p0[p] return param_defaults, bounds_defaults def polyfit( obj, dim: Hashable, deg: int, skipna: bool | None = None, rcond: np.floating[Any] | float | None = None, w: Hashable | Any = None, full: bool = False, cov: bool | Literal["unscaled"] = False, ): """ Least squares polynomial fit. This replicates the behaviour of `numpy.polyfit` but differs by skipping invalid values when `skipna = True`. Parameters ---------- obj : Dataset or DataArray Object to perform the polyfit on dim : hashable Coordinate along which to fit the polynomials. deg : int Degree of the fitting polynomial. skipna : bool or None, optional If True, removes all invalid values before fitting each 1D slices of the array. Default is True if data is stored in a dask.array or if there is any invalid values, False otherwise. rcond : float or None, optional Relative condition number to the fit. w : hashable or Any, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, default: False Whether to return the residuals, matrix rank and singular values in addition to the coefficients. cov : bool or "unscaled", default: False Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. Returns ------- Dataset A single dataset which contains (for each "var" in the input dataset): [var]_polyfit_coefficients The coefficients of the best fit for each variable in this dataset. [var]_polyfit_residuals The residuals of the least-square computation for each variable (only included if `full=True`) When the matrix rank is deficient, np.nan is returned. [dim]_matrix_rank The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`) The rank is computed ignoring the NaN values that might be skipped. [dim]_singular_values The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`) [var]_polyfit_covariance The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`) Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is not raised with in-memory (not dask) data and `full=True`. 
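    Examples
    --------
    A minimal sketch using the public ``DataArray.polyfit`` entry point, which is
    implemented in terms of this helper (``fit_line`` is an illustrative name
    only):

    >>> def fit_line(da, dim):
    ...     # returns a Dataset with "polyfit_coefficients" indexed by "degree"
    ...     return da.polyfit(dim=dim, deg=1)
    ...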
See Also -------- numpy.polyfit numpy.polyval xarray.polyval """ variables: dict[Hashable, Variable] = {} skipna_da = skipna x = np.asarray(_ensure_numeric(obj.coords[dim]).astype(np.float64)) xname = f"{obj[dim].name}_" order = int(deg) + 1 degree_coord_values = np.arange(order)[::-1] lhs = np.vander(x, order) if rcond is None: rcond = x.shape[0] * np.finfo(x.dtype).eps # Weights: if w is not None: if isinstance(w, Hashable): w = obj.coords[w] w = np.asarray(w) if w.ndim != 1: raise TypeError("Expected a 1-d array for weights.") if w.shape[0] != lhs.shape[0]: raise TypeError(f"Expected w and {dim} to have the same length") lhs *= w[:, np.newaxis] # Scaling scale = np.sqrt((lhs * lhs).sum(axis=0)) lhs /= scale from xarray.core import utils degree_dim = utils.get_temp_dimname(obj.dims, "degree") rank = np.linalg.matrix_rank(lhs) if full: rank = Variable(dims=(), data=rank) variables[xname + "matrix_rank"] = rank _sing = np.linalg.svd(lhs, compute_uv=False) variables[xname + "singular_values"] = Variable( dims=(degree_dim,), data=np.concatenate([np.full((order - rank.data,), np.nan), _sing]), ) # If we have a coordinate get its underlying dimension. (true_dim,) = obj.coords[dim].dims other_coords = { dim: obj._variables[dim] for dim in set(obj.dims) - {true_dim} if dim in obj._variables } present_dims: set[Hashable] = set() for name, var in obj._variables.items(): if name in obj._coord_names or name in obj.dims: continue if true_dim not in var.dims: continue if is_duck_dask_array(var._data) and (rank != order or full or skipna is None): # Current algorithm with dask and skipna=False neither supports # deficient ranks nor does it output the "full" info (issue dask/dask#6516) skipna_da = True elif skipna is None: skipna_da = bool(np.any(var.isnull())) if var.ndim > 1: rhs = var.transpose(true_dim, ...) other_dims = rhs.dims[1:] scale_da = scale.reshape(-1, *((1,) * len(other_dims))) else: rhs = var scale_da = scale other_dims = () present_dims.update(other_dims) if w is not None: rhs = rhs * w.reshape(-1, *((1,) * len(other_dims))) with warnings.catch_warnings(): if full: # Copy np.polyfit behavior warnings.simplefilter("ignore", RankWarning) else: # Raise only once per variable warnings.simplefilter("once", RankWarning) coeffs, residuals = least_squares( lhs, rhs.data, rcond=rcond, skipna=skipna_da ) from xarray.core.dataarray import _THIS_ARRAY if name is _THIS_ARRAY: # When polyfit is called on a DataArray, ensure the resulting # dataset is backwards compatible with previous behavior name = "" elif isinstance(name, str): name = f"{name}_" else: # For other non-string names name = "" variables[name + "polyfit_coefficients"] = Variable( data=coeffs / scale_da, dims=(degree_dim,) + other_dims ) if full or (cov is True): variables[name + "polyfit_residuals"] = Variable( data=residuals if var.ndim > 1 else residuals.squeeze(), dims=other_dims, ) fac: Variable | int if cov: Vbase = np.linalg.inv(np.dot(lhs.T, lhs)) Vbase /= np.outer(scale, scale) if cov == "unscaled": fac = 1 else: if x.shape[0] <= order: raise ValueError( "The number of data points must exceed order to scale the covariance matrix." 
) fac = variables[name + "polyfit_residuals"] / (x.shape[0] - order) variables[name + "polyfit_covariance"] = ( Variable(data=Vbase, dims=("cov_i", "cov_j")) * fac ) return type(obj)( data_vars=variables, coords={ degree_dim: degree_coord_values, **{ name: coord for name, coord in other_coords.items() if name in present_dims }, }, attrs=obj.attrs.copy(), ) def curvefit( obj, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, skipna: bool = True, p0: Mapping[str, float | DataArray] | None = None, bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None, param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, ): """ Curve fitting optimization for arbitrary functions. Wraps `scipy.optimize.curve_fit` with `apply_ufunc`. Parameters ---------- obj : Dataset or DataArray Object to perform the curvefit on coords : hashable, DataArray, or sequence of hashable or DataArray Independent coordinate(s) over which to perform the curve fitting. Must share at least one dimension with the calling object. When fitting multi-dimensional functions, supply `coords` as a sequence in the same order as arguments in `func`. To fit along existing dimensions of the calling object, `coords` can also be specified as a str or sequence of strs. func : callable User specified function in the form `f(x, *params)` which returns a numpy array of length `len(x)`. `params` are the fittable parameters which are optimized by scipy curve_fit. `x` can also be specified as a sequence containing multiple coordinates, e.g. `f((x0, x1), *params)`. reduce_dims : str, Iterable of Hashable or None, optional Additional dimension(s) over which to aggregate while fitting. For example, calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will aggregate all lat and lon points and fit the specified function along the time dimension. skipna : bool, default: True Whether to skip missing values when fitting. Default is True. p0 : dict-like, optional Optional dictionary of parameter names to initial guesses passed to the `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be assigned initial values following the default scipy behavior. bounds : dict-like, optional Optional dictionary of parameter names to tuples of bounding values passed to the `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be unbounded following the default scipy behavior. param_names : sequence of hashable, optional Sequence of names for the fittable parameters of `func`. If not supplied, this will be automatically determined by arguments of `func`. `param_names` should be manually supplied when fitting a function that takes a variable number of parameters. errors : {"raise", "ignore"}, default: "raise" If 'raise', any errors from the `scipy.optimize_curve_fit` optimization will raise an exception. If 'ignore', the coefficients and covariances for the coordinates where the fitting failed will be NaN. kwargs : optional Additional keyword arguments to passed to scipy curve_fit. Returns ------- Dataset A single dataset which contains: [var]_curvefit_coefficients The coefficients of the best fit. 
[var]_curvefit_covariance The covariance matrix of the coefficient estimates. See Also -------- Dataset.polyfit scipy.optimize.curve_fit """ from scipy.optimize import curve_fit if p0 is None: p0 = {} if bounds is None: bounds = {} if kwargs is None: kwargs = {} reduce_dims_: list[Hashable] if not reduce_dims: reduce_dims_ = [] elif isinstance(reduce_dims, str) or not isinstance(reduce_dims, Iterable): reduce_dims_ = [reduce_dims] else: reduce_dims_ = list(reduce_dims) if isinstance(coords, str | DataArray) or not isinstance(coords, Iterable): coords = [coords] coords_: Sequence[DataArray] = [ obj[coord] if isinstance(coord, str) else coord for coord in coords ] # Determine whether any coords are dims on self for coord in coords_: reduce_dims_ += [c for c in obj.dims if coord.equals(obj[c])] reduce_dims_ = list(set(reduce_dims_)) preserved_dims = list(set(obj.dims) - set(reduce_dims_)) if not reduce_dims_: raise ValueError( "No arguments to `coords` were identified as a dimension on the calling " "object, and no dims were supplied to `reduce_dims`. This would result " "in fitting on scalar data." ) # Check that initial guess and bounds only contain coordinates that are in preserved_dims for param, guess in p0.items(): if isinstance(guess, DataArray): unexpected = set(guess.dims) - set(preserved_dims) if unexpected: raise ValueError( f"Initial guess for '{param}' has unexpected dimensions " f"{tuple(unexpected)}. It should only have dimensions that are in data " f"dimensions {preserved_dims}." ) for param, (lb, ub) in bounds.items(): for label, bound in zip(("Lower", "Upper"), (lb, ub), strict=True): if isinstance(bound, DataArray): unexpected = set(bound.dims) - set(preserved_dims) if unexpected: raise ValueError( f"{label} bound for '{param}' has unexpected dimensions " f"{tuple(unexpected)}. It should only have dimensions that are in data " f"dimensions {preserved_dims}." 
) if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') # Broadcast all coords with each other coords_ = broadcast(*coords_) coords_ = [coord.broadcast_like(obj, exclude=preserved_dims) for coord in coords_] n_coords = len(coords_) params, func_args = _get_func_args(func, param_names) param_defaults, bounds_defaults = _initialize_curvefit_params( params, p0, bounds, func_args ) n_params = len(params) def _wrapper(Y, *args, **kwargs): # Wrap curve_fit with raveled coordinates and pointwise NaN handling # *args contains: # - the coordinates # - initial guess # - lower bounds # - upper bounds coords__ = args[:n_coords] p0_ = args[n_coords + 0 * n_params : n_coords + 1 * n_params] lb = args[n_coords + 1 * n_params : n_coords + 2 * n_params] ub = args[n_coords + 2 * n_params :] x = np.vstack([c.ravel() for c in coords__]) y = Y.ravel() if skipna: mask = np.all([np.any(~np.isnan(x), axis=0), ~np.isnan(y)], axis=0) x = x[:, mask] y = y[mask] if y.size == 0: popt = np.full([n_params], np.nan) pcov = np.full([n_params, n_params], np.nan) return popt, pcov x = np.squeeze(x) try: popt, pcov = curve_fit(func, x, y, p0=p0_, bounds=(lb, ub), **kwargs) except RuntimeError: if errors == "raise": raise popt = np.full([n_params], np.nan) pcov = np.full([n_params, n_params], np.nan) return popt, pcov from xarray.core.dataarray import _THIS_ARRAY result = type(obj)() for name, da in obj.data_vars.items(): if name is _THIS_ARRAY: # When curvefit is called on a DataArray, ensure the resulting # dataset is backwards compatible with previous behavior var_name = "" else: var_name = f"{name}_" input_core_dims = [reduce_dims_ for _ in range(n_coords + 1)] input_core_dims.extend( [[] for _ in range(3 * n_params)] ) # core_dims for p0 and bounds popt, pcov = apply_ufunc( _wrapper, da, *coords_, *param_defaults.values(), *[b[0] for b in bounds_defaults.values()], *[b[1] for b in bounds_defaults.values()], vectorize=True, dask="parallelized", input_core_dims=input_core_dims, output_core_dims=[["param"], ["cov_i", "cov_j"]], dask_gufunc_kwargs={ "output_sizes": { "param": n_params, "cov_i": n_params, "cov_j": n_params, }, }, output_dtypes=(np.float64, np.float64), exclude_dims=set(reduce_dims_), kwargs=kwargs, ) result[var_name + "curvefit_coefficients"] = popt result[var_name + "curvefit_covariance"] = pcov result = result.assign_coords({"param": params, "cov_i": params, "cov_j": params}) result.attrs = obj.attrs.copy() return result ����������������������������������������������������������������������xarray-2025.09.0/xarray/computation/nanops.py�������������������������������������������������������0000664�0000000�0000000�00000012777�15056206164�0021102�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import warnings import numpy as np from xarray.core import dtypes, duck_array_ops, nputils, utils from xarray.core.duck_array_ops import ( astype, count, fillna, isnull, sum_where, where, where_method, ) def _maybe_null_out(result, axis, mask, min_count=1): """ xarray version of pandas.core.nanops._maybe_null_out """ if axis is not None and getattr(result, "ndim", False): null_mask = ( np.take(mask.shape, axis).prod() - 
duck_array_ops.sum(mask, axis) - min_count ) < 0 dtype, fill_value = dtypes.maybe_promote(result.dtype) result = where(null_mask, fill_value, astype(result, dtype)) elif getattr(result, "dtype", None) not in dtypes.NAT_TYPES: null_mask = mask.size - duck_array_ops.sum(mask) result = where(null_mask < min_count, np.nan, result) return result def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs): """In house nanargmin, nanargmax for object arrays. Always return integer type """ valid_count = count(value, axis=axis) value = fillna(value, fill_value) data = getattr(np, func)(value, axis=axis, **kwargs) # TODO This will evaluate dask arrays and might be costly. if duck_array_ops.array_any(valid_count == 0): raise ValueError("All-NaN slice encountered") return data def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs): """In house nanmin and nanmax for object array""" valid_count = count(value, axis=axis) filled_value = fillna(value, fill_value) data = getattr(np, func)(filled_value, axis=axis, **kwargs) if not hasattr(data, "dtype"): # scalar case data = fill_value if valid_count == 0 else data # we've computed a single min, max value of type object. # don't let np.array turn a tuple back into an array return utils.to_0d_object_array(data) return where_method(data, valid_count != 0) def nanmin(a, axis=None, out=None): if a.dtype.kind == "O": return _nan_minmax_object("min", dtypes.get_pos_infinity(a.dtype), a, axis) return nputils.nanmin(a, axis=axis) def nanmax(a, axis=None, out=None): if a.dtype.kind == "O": return _nan_minmax_object("max", dtypes.get_neg_infinity(a.dtype), a, axis) return nputils.nanmax(a, axis=axis) def nanargmin(a, axis=None): if a.dtype.kind == "O": fill_value = dtypes.get_pos_infinity(a.dtype) return _nan_argminmax_object("argmin", fill_value, a, axis=axis) return nputils.nanargmin(a, axis=axis) def nanargmax(a, axis=None): if a.dtype.kind == "O": fill_value = dtypes.get_neg_infinity(a.dtype) return _nan_argminmax_object("argmax", fill_value, a, axis=axis) return nputils.nanargmax(a, axis=axis) def nansum(a, axis=None, dtype=None, out=None, min_count=None): mask = isnull(a) result = sum_where(a, axis=axis, dtype=dtype, where=mask) if min_count is not None: return _maybe_null_out(result, axis, mask, min_count) else: return result def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs): """In house nanmean. 
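Missing values are handled by counting the valid entries, filling NaNs with zero, and dividing the resulting sum by ``valid_count - ddof`` (see the body below), so NaNs never contribute to the mean.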
ddof argument will be used in _nanvar method""" valid_count = count(value, axis=axis) value = fillna(value, 0) # As dtype inference is impossible for object dtype, we assume float # https://github.com/dask/dask/issues/3162 if dtype is None and value.dtype.kind == "O": dtype = float data = np.sum(value, axis=axis, dtype=dtype, **kwargs) data = data / (valid_count - ddof) return where_method(data, valid_count != 0) def nanmean(a, axis=None, dtype=None, out=None): if a.dtype.kind == "O": return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) return nputils.nanmean(a, axis=axis, dtype=dtype) def nanmedian(a, axis=None, out=None): # The dask algorithm works by rechunking to one chunk along axis # Make sure we trigger the dask error when passing all dimensions # so that we don't rechunk the entire array to one chunk and # possibly blow memory if axis is not None and len(np.atleast_1d(axis)) == a.ndim: axis = None return nputils.nanmedian(a, axis=axis) def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs): value_mean = _nanmean_ddof_object( ddof=0, value=value, axis=axis, keepdims=True, **kwargs ) squared = (astype(value, value_mean.dtype) - value_mean) ** 2 return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs) def nanvar(a, axis=None, dtype=None, out=None, ddof=0): if a.dtype.kind == "O": return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof) return nputils.nanvar(a, axis=axis, dtype=dtype, ddof=ddof) def nanstd(a, axis=None, dtype=None, out=None, ddof=0): return nputils.nanstd(a, axis=axis, dtype=dtype, ddof=ddof) def nanprod(a, axis=None, dtype=None, out=None, min_count=None): mask = isnull(a) result = nputils.nanprod(a, axis=axis, dtype=dtype) if min_count is not None: return _maybe_null_out(result, axis, mask, min_count) else: return result def nancumsum(a, axis=None, dtype=None, out=None): return nputils.nancumsum(a, axis=axis, dtype=dtype) def nancumprod(a, axis=None, dtype=None, out=None): return nputils.nancumprod(a, axis=axis, dtype=dtype) �xarray-2025.09.0/xarray/computation/ops.py����������������������������������������������������������0000664�0000000�0000000�00000022214�15056206164�0020370�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Define core operations for xarray objects. TODO(shoyer): rewrite this module, making use of xarray.computation.computation, NumPy's __array_ufunc__ and mixin classes instead of the unintuitive "inject" functions. 
""" from __future__ import annotations import operator from typing import TYPE_CHECKING, Literal import numpy as np from xarray.core import dtypes, duck_array_ops if TYPE_CHECKING: pass try: import bottleneck as bn has_bottleneck = True except ImportError: # use numpy methods instead bn = np has_bottleneck = False NUM_BINARY_OPS = [ "add", "sub", "mul", "truediv", "floordiv", "mod", "pow", "and", "xor", "or", "lshift", "rshift", ] # methods which pass on the numpy return value unchanged # be careful not to list methods that we would want to wrap later NUMPY_SAME_METHODS = ["item", "searchsorted"] # methods which remove an axis REDUCE_METHODS = ["all", "any"] NAN_REDUCE_METHODS = [ "max", "min", "mean", "prod", "sum", "std", "var", "median", ] # TODO: wrap take, dot, sort _CUM_DOCSTRING_TEMPLATE = """\ Apply `{name}` along some dimension of {cls}. Parameters ---------- {extra_args} skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `{name}`. Returns ------- cumvalue : {cls} New {cls} object with `{name}` applied to its data along the indicated dimension. """ _REDUCE_DOCSTRING_TEMPLATE = """\ Reduce this {cls}'s data by applying `{name}` along some dimension(s). Parameters ---------- {extra_args}{skip_na_docs}{min_count_docs} keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to the appropriate array function for calculating `{name}` on this object's data. Returns ------- reduced : {cls} New {cls} object with `{name}` applied to its data and the indicated dimension(s) removed. """ _SKIPNA_DOCSTRING = """ skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64).""" _MINCOUNT_DOCSTRING = """ min_count : int, default: None The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. New in version 0.10.8: Added with the default being None. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array.""" def fillna(data, other, join="left", dataset_join="left"): """Fill missing values in this object with data from the other object. Follows normal broadcasting and alignment rules. 
Parameters ---------- join : {"outer", "inner", "left", "right"}, optional Method for joining the indexes of the passed objects along each dimension - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {"outer", "inner", "left", "right"}, optional Method for joining variables of Dataset objects with mismatched data variables. - "outer": take variables from both Dataset objects - "inner": take only overlapped variables - "left": take only variables from the first object - "right": take only variables from the last object """ from xarray.computation.apply_ufunc import apply_ufunc return apply_ufunc( duck_array_ops.fillna, data, other, join=join, dask="allowed", dataset_join=dataset_join, dataset_fill_value=np.nan, keep_attrs=True, ) # TODO: type this properly def where_method(self, cond, other=dtypes.NA): # type: ignore[unused-ignore,has-type] """Return elements from `self` or `other` depending on `cond`. Parameters ---------- cond : DataArray or Dataset with boolean dtype Locations at which to preserve this objects values. other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, inserts missing values. Returns ------- Same type as caller. """ from xarray.computation.apply_ufunc import apply_ufunc # alignment for three arguments is complicated, so don't support it yet join: Literal["inner", "exact"] = "inner" if other is dtypes.NA else "exact" return apply_ufunc( duck_array_ops.where_method, self, cond, other, join=join, dataset_join=join, dask="allowed", keep_attrs=True, ) def _call_possibly_missing_method(arg, name, args, kwargs): try: method = getattr(arg, name) except AttributeError: duck_array_ops.fail_on_dask_array_input(arg, func_name=name) if hasattr(arg, "data"): duck_array_ops.fail_on_dask_array_input(arg.data, func_name=name) raise else: return method(*args, **kwargs) def _values_method_wrapper(name): def func(self, *args, **kwargs): return _call_possibly_missing_method(self.data, name, args, kwargs) func.__name__ = name func.__doc__ = getattr(np.ndarray, name).__doc__ return func def _method_wrapper(name): def func(self, *args, **kwargs): return _call_possibly_missing_method(self, name, args, kwargs) func.__name__ = name func.__doc__ = getattr(np.ndarray, name).__doc__ return func def _func_slash_method_wrapper(f, name=None): # try to wrap a method, but if not found use the function # this is useful when patching in a function as both a DataArray and # Dataset method if name is None: name = f.__name__ def func(self, *args, **kwargs): try: return getattr(self, name)(*args, **kwargs) except AttributeError: return f(self, *args, **kwargs) func.__name__ = name func.__doc__ = f.__doc__ return func def inject_reduce_methods(cls): methods = ( [ (name, getattr(duck_array_ops, f"array_{name}"), False) for name in REDUCE_METHODS ] + [(name, getattr(duck_array_ops, name), True) for name in NAN_REDUCE_METHODS] + [("count", duck_array_ops.count, False)] ) for name, f, include_skipna in methods: numeric_only = getattr(f, "numeric_only", False) available_min_count = getattr(f, "available_min_count", False) skip_na_docs = _SKIPNA_DOCSTRING if include_skipna else "" min_count_docs = _MINCOUNT_DOCSTRING if available_min_count else "" func = 
cls._reduce_method(f, include_skipna, numeric_only) func.__name__ = name func.__doc__ = _REDUCE_DOCSTRING_TEMPLATE.format( name=name, cls=cls.__name__, extra_args=cls._reduce_extra_args_docstring.format(name=name), skip_na_docs=skip_na_docs, min_count_docs=min_count_docs, ) setattr(cls, name, func) def op_str(name): return f"__{name}__" def get_op(name): return getattr(operator, op_str(name)) NON_INPLACE_OP = {get_op("i" + name): get_op(name) for name in NUM_BINARY_OPS} def inplace_to_noninplace_op(f): return NON_INPLACE_OP[f] # _typed_ops.py uses the following wrapped functions as a kind of unary operator argsort = _method_wrapper("argsort") conj = _method_wrapper("conj") conjugate = _method_wrapper("conj") round_ = _func_slash_method_wrapper(duck_array_ops.around, name="round") def inject_numpy_same(cls): # these methods don't return arrays of the same shape as the input, so # don't try to patch these in for Dataset objects for name in NUMPY_SAME_METHODS: setattr(cls, name, _values_method_wrapper(name)) class IncludeReduceMethods: __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if getattr(cls, "_reduce_method", None): inject_reduce_methods(cls) class IncludeNumpySameMethods: __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) inject_numpy_same(cls) # some methods not applicable to Dataset objects ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/rolling.py������������������������������������������������������0000664�0000000�0000000�00000141613�15056206164�0021242�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import functools import itertools import math import warnings from collections.abc import Callable, Hashable, Iterator, Mapping from typing import TYPE_CHECKING, Any, Generic, TypeVar import numpy as np from xarray.compat import dask_array_ops from xarray.computation.arithmetic import CoarsenArithmetic from xarray.core import dtypes, duck_array_ops, utils from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import CoarsenBoundaryOptions, SideOptions, T_Xarray from xarray.core.utils import ( either_dict_or_kwargs, is_duck_dask_array, module_available, ) from xarray.util.deprecation_helpers import _deprecate_positional_args try: import bottleneck except ImportError: # use numpy methods instead bottleneck = None if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset RollingKey = Any _T = TypeVar("_T") _ROLLING_REDUCE_DOCSTRING_TEMPLATE = """\ Reduce this object's data windows by applying `{name}` along its dimension. Parameters ---------- keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
If None uses the global default. **kwargs : dict Additional keyword arguments passed on to `{name}`. Returns ------- reduced : same type as caller New object with `{name}` applied along its rolling dimension. """ class Rolling(Generic[T_Xarray]): """A object that implements the moving window pattern. See Also -------- xarray.Dataset.groupby xarray.DataArray.groupby xarray.Dataset.rolling xarray.DataArray.rolling """ __slots__ = ("center", "dim", "min_periods", "obj", "window") _attributes = ("window", "min_periods", "center", "dim") dim: list[Hashable] window: list[int] center: list[bool] obj: T_Xarray min_periods: int def __init__( self, obj: T_Xarray, windows: Mapping[Any, int], min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, ) -> None: """ Moving window object. Parameters ---------- obj : Dataset or DataArray Object to window. windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling window along (e.g. `time`) to the size of the moving window. min_periods : int or None, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or dict-like Hashable to bool, default: False Set the labels at the center of the window. If dict-like, set this property per rolling dimension. Returns ------- rolling : type of input argument """ self.dim = [] self.window = [] for d, w in windows.items(): self.dim.append(d) if w <= 0: raise ValueError("window must be > 0") self.window.append(w) self.center = self._mapping_to_list(center, default=False) self.obj = obj missing_dims = tuple(dim for dim in self.dim if dim not in self.obj.dims) if missing_dims: # NOTE: we raise KeyError here but ValueError in Coarsen. raise KeyError( f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} " f"dimensions {tuple(self.obj.dims)}" ) # attributes if min_periods is not None and min_periods <= 0: raise ValueError("min_periods must be greater than zero or None") self.min_periods = ( math.prod(self.window) if min_periods is None else min_periods ) def __repr__(self) -> str: """provide a nice str repr of our rolling object""" attrs = ",".join( f"{k}->{w}{'(center)' if c else ''}" for k, w, c in zip(self.dim, self.window, self.center, strict=True) ) return f"{self.__class__.__name__} [{attrs}]" def __len__(self) -> int: return math.prod(self.obj.sizes[d] for d in self.dim) @property def ndim(self) -> int: return len(self.dim) def _reduce_method( # type: ignore[misc] name: str, fillna: Any, rolling_agg_func: Callable | None = None, automatic_rechunk: bool = False, ) -> Callable[..., T_Xarray]: """Constructs reduction methods built on a numpy reduction function (e.g. sum), a numbagg reduction function (e.g. move_sum), a bottleneck reduction function (e.g. move_sum), or a Rolling reduction (_mean). The logic here for which function to run is quite diffuse, across this method & _array_reduce. Arguably we could refactor this. But one constraint is that we need context of xarray options, of the functions each library offers, of the array (e.g. dtype). Set automatic_rechunk=True when the reduction method makes a memory copy. 
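Note: the ``fillna`` argument supplies the neutral value substituted for missing entries when the reduction goes through the generic ``reduce`` path (``0`` for ``sum``, ``dtypes.NINF`` for ``max``, and so on), as wired up in the method definitions below.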
""" if rolling_agg_func: array_agg_func = None else: array_agg_func = getattr(duck_array_ops, name) bottleneck_move_func = getattr(bottleneck, "move_" + name, None) if module_available("numbagg"): import numbagg numbagg_move_func = getattr(numbagg, "move_" + name, None) else: numbagg_move_func = None def method(self, keep_attrs=None, **kwargs): keep_attrs = self._get_keep_attrs(keep_attrs) return self._array_reduce( array_agg_func=array_agg_func, bottleneck_move_func=bottleneck_move_func, numbagg_move_func=numbagg_move_func, rolling_agg_func=rolling_agg_func, keep_attrs=keep_attrs, fillna=fillna, sliding_window_view_kwargs=dict(automatic_rechunk=automatic_rechunk), **kwargs, ) method.__name__ = name method.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=name) return method def _mean(self, keep_attrs, **kwargs): result = self.sum(keep_attrs=False, **kwargs) # use dtype of result for casting of count # this allows for GH #7062 and GH #8864, fixes GH #10340 result /= duck_array_ops.astype( self.count(keep_attrs=False), dtype=result.dtype, copy=False ) if keep_attrs: result.attrs = self.obj.attrs return result _mean.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="mean") # automatic_rechunk is set to True for reductions that make a copy. # std, var could be optimized after which we can set it to False # See #4325 argmax = _reduce_method("argmax", dtypes.NINF, automatic_rechunk=True) argmin = _reduce_method("argmin", dtypes.INF, automatic_rechunk=True) max = _reduce_method("max", dtypes.NINF) min = _reduce_method("min", dtypes.INF) prod = _reduce_method("prod", 1) sum = _reduce_method("sum", 0) mean = _reduce_method("mean", None, _mean) std = _reduce_method("std", None, automatic_rechunk=True) var = _reduce_method("var", None, automatic_rechunk=True) median = _reduce_method("median", None, automatic_rechunk=True) def _counts(self, keep_attrs: bool | None) -> T_Xarray: raise NotImplementedError() def count(self, keep_attrs: bool | None = None) -> T_Xarray: keep_attrs = self._get_keep_attrs(keep_attrs) rolling_count = self._counts(keep_attrs=keep_attrs) enough_periods = rolling_count >= self.min_periods return rolling_count.where(enough_periods) count.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="count") def _mapping_to_list( self, arg: _T | Mapping[Any, _T], default: _T | None = None, allow_default: bool = True, allow_allsame: bool = True, ) -> list[_T]: if utils.is_dict_like(arg): if allow_default: return [arg.get(d, default) for d in self.dim] for d in self.dim: if d not in arg: raise KeyError(f"Argument has no dimension key {d}.") return [arg[d] for d in self.dim] if allow_allsame: # for single argument return [arg] * self.ndim # type: ignore[list-item] # no check for negatives if self.ndim == 1: return [arg] # type: ignore[list-item] # no check for negatives raise ValueError(f"Mapping argument is necessary for {self.ndim}d-rolling.") def _get_keep_attrs(self, keep_attrs): if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return keep_attrs class DataArrayRolling(Rolling["DataArray"]): __slots__ = ("window_labels",) def __init__( self, obj: DataArray, windows: Mapping[Any, int], min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, ) -> None: """ Moving window object for DataArray. You should use DataArray.rolling() method to construct this object instead of the class constructor. Parameters ---------- obj : DataArray Object to window. 
windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. Returns ------- rolling : type of input argument See Also -------- xarray.DataArray.rolling xarray.DataArray.groupby xarray.Dataset.rolling xarray.Dataset.groupby """ super().__init__(obj, windows, min_periods=min_periods, center=center) # TODO legacy attribute self.window_labels = self.obj[self.dim[0]] def __iter__(self) -> Iterator[tuple[DataArray, DataArray]]: if self.ndim > 1: raise ValueError("__iter__ is only supported for 1d-rolling") dim0 = self.dim[0] window0 = int(self.window[0]) offset = (window0 + 1) // 2 if self.center[0] else 1 stops = np.arange(offset, self.obj.sizes[dim0] + offset) starts = stops - window0 starts[: window0 - offset] = 0 for label, start, stop in zip(self.window_labels, starts, stops, strict=True): window = self.obj.isel({dim0: slice(start, stop)}) counts = window.count(dim=[dim0]) window = window.where(counts >= self.min_periods) yield (label, window) @_deprecate_positional_args("v2024.11.0") def construct( self, window_dim: Hashable | Mapping[Any, Hashable] | None = None, *, stride: int | Mapping[Any, int] = 1, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **window_dim_kwargs: Hashable, ) -> DataArray: """ Convert this rolling object to xr.DataArray, where the window dimension is stacked as a new dimension Parameters ---------- window_dim : Hashable or dict-like to Hashable, optional A mapping from dimension name to the new window dimension names. stride : int or mapping of int, default: 1 Size of stride for the rolling window. fill_value : default: dtypes.NA Filling value to match the dimension size. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. sliding_window_view_kwargs : Mapping Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **window_dim_kwargs : Hashable, optional The keyword arguments form of ``window_dim`` {dim: new_name, ...}. Returns ------- DataArray a view of the original array. By default, the returned array is not writeable. For numpy arrays, one can pass ``writeable=True`` in ``sliding_window_view_kwargs``. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. 
Examples -------- >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> rolling = da.rolling(b=3) >>> rolling.construct("window_dim") Size: 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) Dimensions without coordinates: a, b, window_dim >>> rolling = da.rolling(b=3, center=True) >>> rolling.construct("window_dim") Size: 192B array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) Dimensions without coordinates: a, b, window_dim """ if sliding_window_view_kwargs is None: sliding_window_view_kwargs = {} return self._construct( self.obj, window_dim=window_dim, stride=stride, fill_value=fill_value, keep_attrs=keep_attrs, sliding_window_view_kwargs=sliding_window_view_kwargs, **window_dim_kwargs, ) def _construct( self, obj: DataArray, *, window_dim: Hashable | Mapping[Any, Hashable] | None = None, stride: int | Mapping[Any, int] = 1, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **window_dim_kwargs: Hashable, ) -> DataArray: from xarray.core.dataarray import DataArray if sliding_window_view_kwargs is None: sliding_window_view_kwargs = {} keep_attrs = self._get_keep_attrs(keep_attrs) if window_dim is None: if len(window_dim_kwargs) == 0: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) window_dim = {d: window_dim_kwargs[str(d)] for d in self.dim} window_dims = self._mapping_to_list( window_dim, allow_default=False, allow_allsame=False ) strides = self._mapping_to_list(stride, default=1) window = obj.variable.rolling_window( self.dim, self.window, window_dims, center=self.center, fill_value=fill_value, **sliding_window_view_kwargs, ) attrs = obj.attrs if keep_attrs else {} result = DataArray( window, dims=obj.dims + tuple(window_dims), coords=obj.coords, attrs=attrs, name=obj.name, ) return result.isel( {d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)} ) def reduce( self, func: Callable, keep_attrs: bool | None = None, *, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **kwargs: Any, ) -> DataArray: """Reduce each window by applying `func`. Equivalent to ``.construct(...).reduce(func, ...)``. Parameters ---------- func : callable Function which can be called in the form `func(x, **kwargs)` to return the result of collapsing an np.ndarray over an the rolling dimension. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. sliding_window_view_kwargs Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. 
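Note also that ``reduce`` always materialises the windowed view via ``construct`` and then reduces over the window dimension with ``func``, so the memory considerations above apply here as well.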
Examples -------- >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> rolling = da.rolling(b=3) >>> rolling.construct("window_dim") Size: 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) Dimensions without coordinates: a, b, window_dim >>> rolling.reduce(np.sum) Size: 64B array([[nan, nan, 3., 6.], [nan, nan, 15., 18.]]) Dimensions without coordinates: a, b >>> rolling = da.rolling(b=3, min_periods=1) >>> rolling.reduce(np.nansum) Size: 64B array([[ 0., 1., 3., 6.], [ 4., 9., 15., 18.]]) Dimensions without coordinates: a, b """ keep_attrs = self._get_keep_attrs(keep_attrs) rolling_dim = { d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}") for d in self.dim } # save memory with reductions GH4325 fillna = kwargs.pop("fillna", dtypes.NA) if fillna is not dtypes.NA: obj = self.obj.fillna(fillna) else: obj = self.obj windows = self._construct( obj, window_dim=rolling_dim, keep_attrs=keep_attrs, fill_value=fillna, sliding_window_view_kwargs=sliding_window_view_kwargs, ) dim = list(rolling_dim.values()) result = windows.reduce(func, dim=dim, keep_attrs=keep_attrs, **kwargs) # Find valid windows based on count. counts = self._counts(keep_attrs=False) return result.where(counts >= self.min_periods) def _counts(self, keep_attrs: bool | None) -> DataArray: """Number of non-nan entries in each rolling window.""" rolling_dim = { d: utils.get_temp_dimname(self.obj.dims, f"_rolling_dim_{d}") for d in self.dim } # We use False as the fill_value instead of np.nan, since boolean # array is faster to be reduced than object array. # The use of skipna==False is also faster since it does not need to # copy the strided array. dim = list(rolling_dim.values()) counts = ( self.obj.notnull(keep_attrs=keep_attrs) .rolling( dict(zip(self.dim, self.window, strict=True)), center={d: self.center[i] for i, d in enumerate(self.dim)}, ) .construct(rolling_dim, fill_value=False, keep_attrs=keep_attrs) .sum(dim=dim, skipna=False, keep_attrs=keep_attrs) ) return counts def _numbagg_reduce(self, func, keep_attrs, **kwargs): # Some of this is copied from `_bottleneck_reduce`, we could reduce this as part # of a wider refactor. axis = self.obj.get_axis_num(self.dim[0]) padded = self.obj.variable if self.center[0]: if is_duck_dask_array(padded.data): # workaround to make the padded chunk size larger than # self.window - 1 shift = -(self.window[0] + 1) // 2 offset = (self.window[0] - 1) // 2 valid = (slice(None),) * axis + ( slice(offset, offset + self.obj.shape[axis]), ) else: shift = (-self.window[0] // 2) + 1 valid = (slice(None),) * axis + (slice(-shift, None),) padded = padded.pad({self.dim[0]: (0, -shift)}, mode="constant") if is_duck_dask_array(padded.data) and False: raise AssertionError("should not be reachable") else: values = func( padded.data, window=self.window[0], min_count=self.min_periods, axis=axis, ) if self.center[0]: values = values[valid] attrs = self.obj.attrs if keep_attrs else {} return self.obj.__class__( values, self.obj.coords, attrs=attrs, name=self.obj.name ) def _bottleneck_reduce(self, func, keep_attrs, **kwargs): # bottleneck doesn't allow min_count to be 0, although it should # work the same as if min_count = 1 # Note bottleneck only works with 1d-rolling. 
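# bottleneck's move_* functions interpret min_count as the minimum number of
# non-NaN observations a window must contain before a result is emitted
# (otherwise the output is NaN), which is why 0 is clamped to 1 just below.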
if self.min_periods == 0: min_count = 1 else: min_count = self.min_periods axis = self.obj.get_axis_num(self.dim[0]) padded = self.obj.variable if self.center[0]: if is_duck_dask_array(padded.data): # workaround to make the padded chunk size larger than # self.window - 1 shift = -(self.window[0] + 1) // 2 offset = (self.window[0] - 1) // 2 valid = (slice(None),) * axis + ( slice(offset, offset + self.obj.shape[axis]), ) else: shift = (-self.window[0] // 2) + 1 valid = (slice(None),) * axis + (slice(-shift, None),) padded = padded.pad({self.dim[0]: (0, -shift)}, mode="constant") if is_duck_dask_array(padded.data): values = dask_array_ops.dask_rolling_wrapper( func, padded, axis=axis, window=self.window[0], min_count=min_count ) else: values = func( padded.data, window=self.window[0], min_count=min_count, axis=axis ) # index 0 is at the rightmost edge of the window # need to reverse index here # see GH #8541 if func in [bottleneck.move_argmin, bottleneck.move_argmax]: values = self.window[0] - 1 - values if self.center[0]: values = values[valid] attrs = self.obj.attrs if keep_attrs else {} return self.obj.__class__( values, self.obj.coords, attrs=attrs, name=self.obj.name ) def _array_reduce( self, array_agg_func, bottleneck_move_func, numbagg_move_func, rolling_agg_func, keep_attrs, fillna, **kwargs, ): if "dim" in kwargs: warnings.warn( f"Reductions are applied along the rolling dimension(s) " f"'{self.dim}'. Passing the 'dim' kwarg to reduction " f"operations has no effect.", DeprecationWarning, stacklevel=3, ) del kwargs["dim"] xp = duck_array_ops.get_array_namespace(self.obj.data) if ( OPTIONS["use_numbagg"] and module_available("numbagg") and numbagg_move_func is not None # TODO: we could at least allow this for the equivalent of `apply_ufunc`'s # "parallelized". `rolling_exp` does this, as an example (but rolling_exp is # much simpler) and not is_duck_dask_array(self.obj.data) # Numbagg doesn't handle object arrays and generally has dtype consistency, # so doesn't deal well with bool arrays which are expected to change type. and self.obj.data.dtype.kind not in "ObMm" # TODO: we could also allow this, probably as part of a refactoring of this # module, so we can use the machinery in `self.reduce`. and self.ndim == 1 and xp is np ): import numbagg # Numbagg has a default ddof of 1. I (@max-sixty) think we should make # this the default in xarray too, but until we do, don't use numbagg for # std and var unless ddof is set to 1. 
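# If neither this numbagg fast path nor the bottleneck fast path below applies,
# the reduction falls through to the generic construct()/reduce() implementation
# (or, for mean, the dedicated rolling_agg_func) at the end of this method.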
if ( numbagg_move_func not in [numbagg.move_std, numbagg.move_var] or kwargs.get("ddof") == 1 ): return self._numbagg_reduce( numbagg_move_func, keep_attrs=keep_attrs, **kwargs ) if ( OPTIONS["use_bottleneck"] and bottleneck_move_func is not None and ( not is_duck_dask_array(self.obj.data) or module_available("dask", "2024.11.0") ) and self.ndim == 1 and xp is np ): return self._bottleneck_reduce( bottleneck_move_func, keep_attrs=keep_attrs, **kwargs ) if rolling_agg_func: return rolling_agg_func(self, keep_attrs=self._get_keep_attrs(keep_attrs)) if fillna is not None: if fillna is dtypes.INF: fillna = dtypes.get_pos_infinity(self.obj.dtype, max_for_int=True) elif fillna is dtypes.NINF: fillna = dtypes.get_neg_infinity(self.obj.dtype, min_for_int=True) kwargs.setdefault("skipna", False) kwargs.setdefault("fillna", fillna) return self.reduce(array_agg_func, keep_attrs=keep_attrs, **kwargs) class DatasetRolling(Rolling["Dataset"]): __slots__ = ("rollings",) def __init__( self, obj: Dataset, windows: Mapping[Any, int], min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, ) -> None: """ Moving window object for Dataset. You should use Dataset.rolling() method to construct this object instead of the class constructor. Parameters ---------- obj : Dataset Object to window. windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or mapping of hashable to bool, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. Returns ------- rolling : type of input argument See Also -------- xarray.Dataset.rolling xarray.DataArray.rolling xarray.Dataset.groupby xarray.DataArray.groupby """ super().__init__(obj, windows, min_periods, center) # Keep each Rolling object as a dictionary self.rollings = {} for key, da in self.obj.data_vars.items(): # keeps rollings only for the dataset depending on self.dim dims, center = [], {} for i, d in enumerate(self.dim): if d in da.dims: dims.append(d) center[d] = self.center[i] if dims: w = {d: windows[d] for d in dims} self.rollings[key] = DataArrayRolling(da, w, min_periods, center) def _dataset_implementation(self, func, keep_attrs, **kwargs): from xarray.core.dataset import Dataset keep_attrs = self._get_keep_attrs(keep_attrs) reduced = {} for key, da in self.obj.data_vars.items(): if any(d in da.dims for d in self.dim): reduced[key] = func(self.rollings[key], keep_attrs=keep_attrs, **kwargs) else: reduced[key] = self.obj[key].copy() # we need to delete the attrs of the copied DataArray if not keep_attrs: reduced[key].attrs = {} attrs = self.obj.attrs if keep_attrs else {} return Dataset(reduced, coords=self.obj.coords, attrs=attrs) def reduce( self, func: Callable, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, **kwargs)` to return the result of collapsing an np.ndarray over an the rolling dimension. 
keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. sliding_window_view_kwargs : Mapping Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. """ return self._dataset_implementation( functools.partial(DataArrayRolling.reduce, func=func), keep_attrs=keep_attrs, sliding_window_view_kwargs=sliding_window_view_kwargs, **kwargs, ) def _counts(self, keep_attrs: bool | None) -> Dataset: return self._dataset_implementation( DataArrayRolling._counts, keep_attrs=keep_attrs ) def _array_reduce( self, array_agg_func, bottleneck_move_func, rolling_agg_func, keep_attrs, **kwargs, ): return self._dataset_implementation( functools.partial( DataArrayRolling._array_reduce, array_agg_func=array_agg_func, bottleneck_move_func=bottleneck_move_func, rolling_agg_func=rolling_agg_func, ), keep_attrs=keep_attrs, **kwargs, ) @_deprecate_positional_args("v2024.11.0") def construct( self, window_dim: Hashable | Mapping[Any, Hashable] | None = None, *, stride: int | Mapping[Any, int] = 1, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, sliding_window_view_kwargs: Mapping[Any, Any] | None = None, **window_dim_kwargs: Hashable, ) -> Dataset: """ Convert this rolling object to xr.Dataset, where the window dimension is stacked as a new dimension Parameters ---------- window_dim : str or mapping, optional A mapping from dimension name to the new window dimension names. Just a string can be used for 1d-rolling. stride : int, optional size of stride for the rolling window. fill_value : Any, default: dtypes.NA Filling value to match the dimension size. sliding_window_view_kwargs Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. **window_dim_kwargs : {dim: new_name, ...}, optional The keyword arguments form of ``window_dim``. Returns ------- Dataset Dataset with views of the original arrays. By default, the returned arrays are not writeable. For numpy arrays, one can pass ``writeable=True`` in ``sliding_window_view_kwargs``. See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Notes ----- With dask arrays, it's possible to pass the ``automatic_rechunk`` kwarg as ``sliding_window_view_kwargs={"automatic_rechunk": True}``. This controls whether dask should automatically rechunk the output to avoid exploding chunk sizes. Automatically rechunking is the default behaviour. Importantly, each chunk will be a view of the data so large chunk sizes are only safe if *no* copies are made later. 
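Examples
--------
A minimal sketch (the dataset, variable and dimension names here are purely illustrative)::

    ds = xr.Dataset({"foo": (("a", "b"), np.arange(8).reshape(2, 4))})
    ds.rolling(b=3).construct("window_dim")

Every variable containing the rolling dimension ``b`` gains a new ``window_dim`` axis of length 3 holding the windowed values; variables without ``b`` are copied through unchanged.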
""" from xarray.core.dataset import Dataset keep_attrs = self._get_keep_attrs(keep_attrs) if window_dim is None: if len(window_dim_kwargs) == 0: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) window_dim = {d: window_dim_kwargs[str(d)] for d in self.dim} window_dims = self._mapping_to_list( window_dim, allow_default=False, allow_allsame=False ) strides = self._mapping_to_list(stride, default=1) dataset = {} for key, da in self.obj.data_vars.items(): # keeps rollings only for the dataset depending on self.dim dims = [d for d in self.dim if d in da.dims] if dims: wi = {d: window_dims[i] for i, d in enumerate(self.dim) if d in da.dims} st = {d: strides[i] for i, d in enumerate(self.dim) if d in da.dims} dataset[key] = self.rollings[key].construct( window_dim=wi, fill_value=fill_value, stride=st, keep_attrs=keep_attrs, sliding_window_view_kwargs=sliding_window_view_kwargs, ) else: dataset[key] = da.copy() # as the DataArrays can be copied we need to delete the attrs if not keep_attrs: dataset[key].attrs = {} # Need to stride coords as well. TODO: is there a better way? coords = self.obj.isel( {d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)} ).coords attrs = self.obj.attrs if keep_attrs else {} return Dataset(dataset, coords=coords, attrs=attrs) class Coarsen(CoarsenArithmetic, Generic[T_Xarray]): """A object that implements the coarsen. See Also -------- Dataset.coarsen DataArray.coarsen """ __slots__ = ( "boundary", "coord_func", "obj", "side", "trim_excess", "windows", ) _attributes = ("windows", "side", "trim_excess") obj: T_Xarray windows: Mapping[Hashable, int] side: SideOptions | Mapping[Hashable, SideOptions] boundary: CoarsenBoundaryOptions coord_func: Mapping[Hashable, str | Callable] def __init__( self, obj: T_Xarray, windows: Mapping[Any, int], boundary: CoarsenBoundaryOptions, side: SideOptions | Mapping[Any, SideOptions], coord_func: str | Callable | Mapping[Any, str | Callable], ) -> None: """ Moving window object. Parameters ---------- obj : Dataset or DataArray Object to window. windows : mapping of hashable to int A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. boundary : {"exact", "trim", "pad"} If 'exact', a ValueError will be raised if dimension size is not a multiple of window size. If 'trim', the excess indexes are trimmed. If 'pad', NA will be padded. side : 'left' or 'right' or mapping from dimension to 'left' or 'right' coord_func : function (name) or mapping from coordinate name to function (name). 
Returns ------- coarsen """ self.obj = obj self.windows = windows self.side = side self.boundary = boundary missing_dims = tuple(dim for dim in windows.keys() if dim not in self.obj.dims) if missing_dims: raise ValueError( f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} " f"dimensions {tuple(self.obj.dims)}" ) if utils.is_dict_like(coord_func): coord_func_map = coord_func else: coord_func_map = dict.fromkeys(self.obj.dims, coord_func) for c in self.obj.coords: if c not in coord_func_map: coord_func_map[c] = duck_array_ops.mean # type: ignore[index] self.coord_func = coord_func_map def _get_keep_attrs(self, keep_attrs): if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) return keep_attrs def __repr__(self) -> str: """provide a nice str repr of our coarsen object""" attrs = ",".join( f"{k}->{getattr(self, k)}" for k in self._attributes if getattr(self, k, None) is not None ) return f"{self.__class__.__name__} [{attrs}]" def construct( self, window_dim=None, keep_attrs=None, **window_dim_kwargs, ) -> T_Xarray: """ Convert this Coarsen object to a DataArray or Dataset, where the coarsening dimension is split or reshaped to two new dimensions. Parameters ---------- window_dim: mapping A mapping from existing dimension name to new dimension names. The size of the second dimension will be the length of the coarsening window. keep_attrs: bool, optional Preserve attributes if True **window_dim_kwargs : {dim: new_name, ...} The keyword arguments form of ``window_dim``. Returns ------- Dataset or DataArray with reshaped dimensions Examples -------- >>> da = xr.DataArray(np.arange(24), dims="time") >>> da.coarsen(time=12).construct(time=("year", "month")) Size: 192B array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]]) Dimensions without coordinates: year, month See Also -------- DataArrayRolling.construct DatasetRolling.construct """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset window_dim = either_dict_or_kwargs( window_dim, window_dim_kwargs, "Coarsen.construct" ) if not window_dim: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) bad_new_dims = tuple( win for win, dims in window_dim.items() if len(dims) != 2 or isinstance(dims, str) ) if bad_new_dims: raise ValueError( f"Please provide exactly two dimension names for the following coarsening dimensions: {bad_new_dims}" ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) missing_dims = set(window_dim) - set(self.windows) if missing_dims: raise ValueError( f"'window_dim' must contain entries for all dimensions to coarsen. 
Missing {missing_dims}" ) extra_windows = set(self.windows) - set(window_dim) if extra_windows: raise ValueError( f"'window_dim' includes dimensions that will not be coarsened: {extra_windows}" ) reshaped = Dataset() if isinstance(self.obj, DataArray): obj = self.obj._to_temp_dataset() else: obj = self.obj reshaped.attrs = obj.attrs if keep_attrs else {} for key, var in obj.variables.items(): reshaped_dims = tuple( itertools.chain(*[window_dim.get(dim, [dim]) for dim in list(var.dims)]) ) if reshaped_dims != var.dims: windows = {w: self.windows[w] for w in window_dim if w in var.dims} reshaped_var, _ = var.coarsen_reshape(windows, self.boundary, self.side) attrs = var.attrs if keep_attrs else {} reshaped[key] = (reshaped_dims, reshaped_var, attrs) else: reshaped[key] = var # should handle window_dim being unindexed should_be_coords = (set(window_dim) & set(self.obj.coords)) | set( self.obj.coords ) result = reshaped.set_coords(should_be_coords) if isinstance(self.obj, DataArray): return self.obj._from_temp_dataset(result) else: return result class DataArrayCoarsen(Coarsen["DataArray"]): __slots__ = () _reduce_extra_args_docstring = """""" @classmethod def _reduce_method( cls, func: Callable, include_skipna: bool = False, numeric_only: bool = False ) -> Callable[..., DataArray]: """ Return a wrapped function for injecting reduction methods. see ops.inject_reduce_methods """ kwargs: dict[str, Any] = {} if include_skipna: kwargs["skipna"] = None def wrapped_func( self: DataArrayCoarsen, keep_attrs: bool | None = None, **kwargs ) -> DataArray: from xarray.core.dataarray import DataArray keep_attrs = self._get_keep_attrs(keep_attrs) reduced = self.obj.variable.coarsen( self.windows, func, self.boundary, self.side, keep_attrs, **kwargs ) coords = {} for c, v in self.obj.coords.items(): if c == self.obj.name: coords[c] = reduced elif any(d in self.windows for d in v.dims): coords[c] = v.variable.coarsen( self.windows, self.coord_func[c], self.boundary, self.side, keep_attrs, **kwargs, ) else: coords[c] = v return DataArray( reduced, dims=self.obj.dims, coords=coords, name=self.obj.name ) return wrapped_func def reduce( self, func: Callable, keep_attrs: bool | None = None, **kwargs ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis, **kwargs)` to return the result of collapsing an np.ndarray over the coarsening dimensions. It must be possible to provide the `axis` argument with a tuple of integers. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data. Examples -------- >>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b")) >>> coarsen = da.coarsen(b=2) >>> coarsen.reduce(np.sum) Size: 32B array([[ 1, 5], [ 9, 13]]) Dimensions without coordinates: a, b """ wrapped_func = self._reduce_method(func) return wrapped_func(self, keep_attrs=keep_attrs, **kwargs) class DatasetCoarsen(Coarsen["Dataset"]): __slots__ = () _reduce_extra_args_docstring = """""" @classmethod def _reduce_method( cls, func: Callable, include_skipna: bool = False, numeric_only: bool = False ) -> Callable[..., Dataset]: """ Return a wrapped function for injecting reduction methods. 
see ops.inject_reduce_methods """ kwargs: dict[str, Any] = {} if include_skipna: kwargs["skipna"] = None def wrapped_func( self: DatasetCoarsen, keep_attrs: bool | None = None, **kwargs ) -> Dataset: from xarray.core.dataset import Dataset keep_attrs = self._get_keep_attrs(keep_attrs) if keep_attrs: attrs = self.obj.attrs else: attrs = {} reduced = {} for key, da in self.obj.data_vars.items(): reduced[key] = da.variable.coarsen( self.windows, func, self.boundary, self.side, keep_attrs=keep_attrs, **kwargs, ) coords = {} for c, v in self.obj.coords.items(): # variable.coarsen returns variables not containing the window dims # unchanged (maybe removes attrs) coords[c] = v.variable.coarsen( self.windows, self.coord_func[c], self.boundary, self.side, keep_attrs=keep_attrs, **kwargs, ) return Dataset(reduced, coords=coords, attrs=attrs) return wrapped_func def reduce(self, func: Callable, keep_attrs=None, **kwargs) -> Dataset: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis, **kwargs)` to return the result of collapsing an np.ndarray over the coarsening dimensions. It must be possible to provide the `axis` argument with a tuple of integers. keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Dataset Arrays with summarized data. """ wrapped_func = self._reduce_method(func) return wrapped_func(self, keep_attrs=keep_attrs, **kwargs) ���������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/rolling_exp.py��������������������������������������������������0000664�0000000�0000000�00000022255�15056206164�0022116�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Mapping from typing import Any, Generic import numpy as np from xarray.compat.pdcompat import count_not_none from xarray.computation.apply_ufunc import apply_ufunc from xarray.core.options import _get_keep_attrs from xarray.core.types import T_DataWithCoords from xarray.core.utils import module_available def _get_alpha( com: float | None = None, span: float | None = None, halflife: float | None = None, alpha: float | None = None, ) -> float: """ Convert com, span, halflife to alpha. 
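The conversions mirror the code below:

- ``alpha = 1 / (com + 1)``
- ``alpha = 2 / (span + 1)``
- ``alpha = 1 - exp(log(0.5) / halflife)``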
""" valid_count = count_not_none(com, span, halflife, alpha) if valid_count > 1: raise ValueError("com, span, halflife, and alpha are mutually exclusive") # Convert to alpha if com is not None: if com < 0: raise ValueError("commust satisfy: com>= 0") return 1 / (com + 1) elif span is not None: if span < 1: raise ValueError("span must satisfy: span >= 1") return 2 / (span + 1) elif halflife is not None: if halflife <= 0: raise ValueError("halflife must satisfy: halflife > 0") return 1 - np.exp(np.log(0.5) / halflife) elif alpha is not None: if not 0 < alpha <= 1: raise ValueError("alpha must satisfy: 0 < alpha <= 1") return alpha else: raise ValueError("Must pass one of comass, span, halflife, or alpha") class RollingExp(Generic[T_DataWithCoords]): """ Exponentially-weighted moving window object. Similar to EWM in pandas Parameters ---------- obj : Dataset or DataArray Object to window. windows : mapping of hashable to int (or float for alpha type) A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html Returns ------- RollingExp : type of input argument """ def __init__( self, obj: T_DataWithCoords, windows: Mapping[Any, int | float], window_type: str = "span", min_weight: float = 0.0, ): if not module_available("numbagg"): raise ImportError( "numbagg >= 0.2.1 is required for rolling_exp but currently numbagg is not installed" ) self.obj: T_DataWithCoords = obj dim, window = next(iter(windows.items())) self.dim = dim self.alpha = _get_alpha(**{window_type: window}) self.min_weight = min_weight # Don't pass min_weight=0 so we can support older versions of numbagg kwargs = dict(alpha=self.alpha, axis=-1) if min_weight > 0: kwargs["min_weight"] = min_weight self.kwargs = kwargs def mean(self, keep_attrs: bool | None = None) -> T_DataWithCoords: """ Exponentially weighted moving average. Parameters ---------- keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").mean() Size: 40B array([1. , 1. , 1.69230769, 1.9 , 1.96694215]) Dimensions without coordinates: x """ import numbagg if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) dim_order = self.obj.dims return apply_ufunc( numbagg.move_exp_nanmean, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=keep_attrs, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def sum(self, keep_attrs: bool | None = None) -> T_DataWithCoords: """ Exponentially weighted moving sum. Parameters ---------- keep_attrs : bool, default: None If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. If None uses the global default. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").sum() Size: 40B array([1. 
, 1.33333333, 2.44444444, 2.81481481, 2.9382716 ]) Dimensions without coordinates: x """ import numbagg if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) dim_order = self.obj.dims return apply_ufunc( numbagg.move_exp_nansum, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=keep_attrs, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def std(self) -> T_DataWithCoords: """ Exponentially weighted moving standard deviation. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").std() Size: 40B array([ nan, 0. , 0.67936622, 0.42966892, 0.25389527]) Dimensions without coordinates: x """ import numbagg dim_order = self.obj.dims return apply_ufunc( numbagg.move_exp_nanstd, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def var(self) -> T_DataWithCoords: """ Exponentially weighted moving variance. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").var() Size: 40B array([ nan, 0. , 0.46153846, 0.18461538, 0.06446281]) Dimensions without coordinates: x """ dim_order = self.obj.dims import numbagg return apply_ufunc( numbagg.move_exp_nanvar, self.obj, input_core_dims=[[self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def cov(self, other: T_DataWithCoords) -> T_DataWithCoords: """ Exponentially weighted moving covariance. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").cov(da**2) Size: 40B array([ nan, 0. , 1.38461538, 0.55384615, 0.19338843]) Dimensions without coordinates: x """ dim_order = self.obj.dims import numbagg return apply_ufunc( numbagg.move_exp_nancov, self.obj, other, input_core_dims=[[self.dim], [self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) def corr(self, other: T_DataWithCoords) -> T_DataWithCoords: """ Exponentially weighted moving correlation. `keep_attrs` is always True for this method. Drop attrs separately to remove attrs. 
Examples -------- >>> da = xr.DataArray([1, 1, 2, 2, 2], dims="x") >>> da.rolling_exp(x=2, window_type="span").corr(da.shift(x=1)) Size: 40B array([ nan, nan, nan, 0.4330127 , 0.48038446]) Dimensions without coordinates: x """ dim_order = self.obj.dims import numbagg return apply_ufunc( numbagg.move_exp_nancorr, self.obj, other, input_core_dims=[[self.dim], [self.dim]], kwargs=self.kwargs, output_core_dims=[[self.dim]], keep_attrs=True, on_missing_core_dim="copy", dask="parallelized", ).transpose(*dim_order) ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/computation/weighted.py�����������������������������������������������������0000664�0000000�0000000�00000046432�15056206164�0021377�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations from collections.abc import Hashable, Iterable, Sequence from typing import TYPE_CHECKING, Generic, Literal, cast import numpy as np from numpy.typing import ArrayLike from xarray.computation.apply_ufunc import apply_ufunc from xarray.computation.computation import dot from xarray.core import duck_array_ops, utils from xarray.core.types import Dims, T_DataArray, T_Xarray from xarray.namedarray.utils import is_duck_dask_array from xarray.structure.alignment import align, broadcast # Weighted quantile methods are a subset of the numpy supported quantile methods. QUANTILE_METHODS = Literal[ "linear", "interpolated_inverted_cdf", "hazen", "weibull", "median_unbiased", "normal_unbiased", ] _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE = """ Reduce this {cls}'s data by a weighted ``{fcn}`` along some dimension(s). Parameters ---------- dim : Hashable or Iterable of Hashable, optional Dimension(s) over which to apply the weighted ``{fcn}``. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- reduced : {cls} New {cls} object with weighted ``{fcn}`` applied to its data and the indicated dimension(s) removed. Notes ----- Returns {on_zero} if the ``weights`` sum to 0.0 along the reduced dimension(s). """ _SUM_OF_WEIGHTS_DOCSTRING = """ Calculate the sum of weights, accounting for missing values in the data. Parameters ---------- dim : str or sequence of str, optional Dimension(s) over which to sum the weights. keep_attrs : bool, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- reduced : {cls} New {cls} object with the sum of the weights over the given dimension. 
""" _WEIGHTED_QUANTILE_DOCSTRING_TEMPLATE = """ Apply a weighted ``quantile`` to this {cls}'s data along some dimension(s). Weights are interpreted as *sampling weights* (or probability weights) and describe how a sample is scaled to the whole population [1]_. There are other possible interpretations for weights, *precision weights* describing the precision of observations, or *frequency weights* counting the number of identical observations, however, they are not implemented here. For compatibility with NumPy's non-weighted ``quantile`` (which is used by ``DataArray.quantile`` and ``Dataset.quantile``), the only interpolation method supported by this weighted version corresponds to the default "linear" option of ``numpy.quantile``. This is "Type 7" option, described in Hyndman and Fan (1996) [2]_. The implementation is largely inspired by a blog post from A. Akinshin's (2023) [3]_. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply the weighted ``quantile``. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- quantiles : {cls} New {cls} object with weighted ``quantile`` applied to its data and the indicated dimension(s) removed. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile, DataArray.quantile Notes ----- Returns NaN if the ``weights`` sum to 0.0 along the reduced dimension(s). References ---------- .. [1] https://notstatschat.rbind.io/2020/08/04/weights-in-statistics/ .. [2] Hyndman, R. J. & Fan, Y. (1996). Sample Quantiles in Statistical Packages. The American Statistician, 50(4), 361–365. https://doi.org/10.2307/2684934 .. [3] Akinshin, A. (2023) "Weighted quantile estimators" arXiv:2304.07265 [stat.ME] https://arxiv.org/abs/2304.07265 """ if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset class Weighted(Generic[T_Xarray]): """An object that implements weighted operations. You should create a Weighted object by using the ``DataArray.weighted`` or ``Dataset.weighted`` methods. See Also -------- Dataset.weighted DataArray.weighted """ __slots__ = ("obj", "weights") def __init__(self, obj: T_Xarray, weights: T_DataArray) -> None: """ Create a Weighted object Parameters ---------- obj : DataArray or Dataset Object over which the weighted reduction operation is applied. weights : DataArray An array of weights associated with the values in the obj. Each value in the obj contributes to the reduction operation according to its associated weight. Notes ----- ``weights`` must be a ``DataArray`` and cannot contain missing values. Missing values can be replaced by ``weights.fillna(0)``. """ from xarray.core.dataarray import DataArray if not isinstance(weights, DataArray): raise ValueError("`weights` must be a DataArray") def _weight_check(w): # Ref https://github.com/pydata/xarray/pull/4559/files#r515968670 if duck_array_ops.array_any(duck_array_ops.isnull(w)): raise ValueError( "`weights` cannot contain missing values. 
" "Missing values can be replaced by `weights.fillna(0)`." ) return w if is_duck_dask_array(weights.data): # assign to copy - else the check is not triggered weights = weights.copy( data=weights.data.map_blocks(_weight_check, dtype=weights.dtype), # type: ignore[call-arg, arg-type] deep=False, ) else: _weight_check(weights.data) self.obj: T_Xarray = obj self.weights: T_DataArray = weights def _check_dim(self, dim: Dims): """raise an error if any dimension is missing""" dims: list[Hashable] if isinstance(dim, str) or not isinstance(dim, Iterable): dims = [dim] if dim else [] else: dims = list(dim) all_dims = set(self.obj.dims).union(set(self.weights.dims)) missing_dims = set(dims) - all_dims if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in {self.__class__.__name__} dimensions {tuple(all_dims)}" ) @staticmethod def _reduce( da: T_DataArray, weights: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """reduce using dot; equivalent to (da * weights).sum(dim, skipna) for internal use only """ # need to infer dims as we use `dot` if dim is None: dim = ... # need to mask invalid values in da, as `dot` does not implement skipna if skipna or (skipna is None and da.dtype.kind in "cfO"): da = da.fillna(0.0) # `dot` does not broadcast arrays, so this avoids creating a large # DataArray (if `weights` has additional dimensions) return dot(da, weights, dim=dim) def _sum_of_weights(self, da: T_DataArray, dim: Dims = None) -> T_DataArray: """Calculate the sum of weights, accounting for missing values""" # we need to mask data values that are nan; else the weights are wrong mask = da.notnull() # bool -> int, because ``xr.dot([True, True], [True, True])`` -> True # (and not 2); GH4074 if self.weights.dtype == bool: sum_of_weights = self._reduce( mask, duck_array_ops.astype(self.weights, dtype=int), dim=dim, skipna=False, ) else: sum_of_weights = self._reduce(mask, self.weights, dim=dim, skipna=False) # 0-weights are not valid valid_weights = sum_of_weights != 0.0 return sum_of_weights.where(valid_weights) def _sum_of_squares( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``sum_of_squares`` along some dimension(s).""" demeaned = da - da.weighted(self.weights).mean(dim=dim) # TODO: unsure why mypy complains about these being DataArray return types # rather than T_DataArray? 
return self._reduce((demeaned**2), self.weights, dim=dim, skipna=skipna) # type: ignore[return-value] def _weighted_sum( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``sum`` along some dimension(s).""" return self._reduce(da, self.weights, dim=dim, skipna=skipna) # type: ignore[return-value] def _weighted_mean( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``mean`` along some dimension(s).""" weighted_sum = self._weighted_sum(da, dim=dim, skipna=skipna) sum_of_weights = self._sum_of_weights(da, dim=dim) return weighted_sum / sum_of_weights def _weighted_var( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``var`` along some dimension(s).""" sum_of_squares = self._sum_of_squares(da, dim=dim, skipna=skipna) sum_of_weights = self._sum_of_weights(da, dim=dim) return sum_of_squares / sum_of_weights def _weighted_std( self, da: T_DataArray, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Reduce a DataArray by a weighted ``std`` along some dimension(s).""" return cast("T_DataArray", np.sqrt(self._weighted_var(da, dim, skipna))) def _weighted_quantile( self, da: T_DataArray, q: ArrayLike, dim: Dims = None, skipna: bool | None = None, ) -> T_DataArray: """Apply a weighted ``quantile`` to a DataArray along some dimension(s).""" def _get_h(n: float, q: np.ndarray, method: QUANTILE_METHODS) -> np.ndarray: """Return the interpolation parameter.""" # Note that options are not yet exposed in the public API. h: np.ndarray if method == "linear": h = (n - 1) * q + 1 elif method == "interpolated_inverted_cdf": h = n * q elif method == "hazen": h = n * q + 0.5 elif method == "weibull": h = (n + 1) * q elif method == "median_unbiased": h = (n + 1 / 3) * q + 1 / 3 elif method == "normal_unbiased": h = (n + 1 / 4) * q + 3 / 8 else: raise ValueError(f"Invalid method: {method}.") return h.clip(1, n) def _weighted_quantile_1d( data: np.ndarray, weights: np.ndarray, q: np.ndarray, skipna: bool, method: QUANTILE_METHODS = "linear", ) -> np.ndarray: # This algorithm has been adapted from: # https://aakinshin.net/posts/weighted-quantiles/#reference-implementation is_nan = np.isnan(data) if skipna: # Remove nans from data and weights not_nan = ~is_nan data = data[not_nan] weights = weights[not_nan] elif is_nan.any(): # Return nan if data contains any nan return np.full(q.size, np.nan) # Filter out data (and weights) associated with zero weights, which also flattens them nonzero_weights = weights != 0 data = data[nonzero_weights] weights = weights[nonzero_weights] n = data.size if n == 0: # Possibly empty after nan or zero weight filtering above return np.full(q.size, np.nan) # Kish's effective sample size nw = weights.sum() ** 2 / (weights**2).sum() # Sort data and weights sorter = np.argsort(data) data = data[sorter] weights = weights[sorter] # Normalize and sum the weights weights = weights / weights.sum() weights_cum = np.append(0, weights.cumsum()) # Vectorize the computation by transposing q with respect to weights q = np.atleast_2d(q).T # Get the interpolation parameter for each q h = _get_h(nw, q, method) # Find the samples contributing to the quantile computation (at *positions* between (h-1)/nw and h/nw) u = np.maximum((h - 1) / nw, np.minimum(h / nw, weights_cum)) # Compute their relative weight v = u * nw - h + 1 w = np.diff(v) # Apply the weights return (data * 
w).sum(axis=1) if skipna is None and da.dtype.kind in "cfO": skipna = True q = np.atleast_1d(np.asarray(q, dtype=np.float64)) if q.ndim > 1: raise ValueError("q must be a scalar or 1d") if np.any((q < 0) | (q > 1)): raise ValueError("q values must be between 0 and 1") if dim is None: dim = da.dims if utils.is_scalar(dim): dim = [dim] # To satisfy mypy dim = cast(Sequence, dim) # need to align *and* broadcast # - `_weighted_quantile_1d` requires arrays with the same shape # - broadcast does an outer join, which can introduce NaN to weights # - therefore we first need to do align(..., join="inner") # TODO: use broadcast(..., join="inner") once available # see https://github.com/pydata/xarray/issues/6304 da, weights = align(da, self.weights, join="inner") da, weights = broadcast(da, weights) result = apply_ufunc( _weighted_quantile_1d, da, weights, input_core_dims=[dim, dim], output_core_dims=[["quantile"]], output_dtypes=[np.float64], dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}), dask="parallelized", vectorize=True, kwargs={"q": q, "skipna": skipna}, ) result = result.transpose("quantile", ...) result = result.assign_coords(quantile=q).squeeze() return result def _implementation(self, func, dim, **kwargs): raise NotImplementedError("Use `Dataset.weighted` or `DataArray.weighted`") def sum_of_weights( self, dim: Dims = None, *, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._sum_of_weights, dim=dim, keep_attrs=keep_attrs ) def sum_of_squares( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._sum_of_squares, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_sum, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def var( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_var, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def std( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, ) -> T_Xarray: return self._implementation( self._weighted_std, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def quantile( self, q: ArrayLike, *, dim: Dims = None, keep_attrs: bool | None = None, skipna: bool = True, ) -> T_Xarray: return self._implementation( self._weighted_quantile, q=q, dim=dim, skipna=skipna, keep_attrs=keep_attrs ) def __repr__(self) -> str: """provide a nice str repr of our Weighted object""" klass = self.__class__.__name__ weight_dims = ", ".join(map(str, self.weights.dims)) return f"{klass} with weights along dimensions: {weight_dims}" class DataArrayWeighted(Weighted["DataArray"]): def _implementation(self, func, dim, **kwargs) -> DataArray: self._check_dim(dim) dataset = self.obj._to_temp_dataset() dataset = dataset.map(func, dim=dim, **kwargs) return self.obj._from_temp_dataset(dataset) class DatasetWeighted(Weighted["Dataset"]): def _implementation(self, func, dim, **kwargs) -> Dataset: self._check_dim(dim) return self.obj.map(func, dim=dim, **kwargs) def _inject_docstring(cls, cls_name): cls.sum_of_weights.__doc__ = _SUM_OF_WEIGHTS_DOCSTRING.format(cls=cls_name) 
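    # [Editorial note, assumption-flagged.] Each assignment in this helper
    # specialises one of the module-level docstring templates per class, so
    # e.g. DataArray.weighted(...).mean and Dataset.weighted(...).mean share
    # wording and differ only in the substituted class name, reduction name,
    # and the value reported when the weights sum to zero.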
cls.sum.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="sum", on_zero="0" ) cls.mean.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="mean", on_zero="NaN" ) cls.sum_of_squares.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="sum_of_squares", on_zero="0" ) cls.var.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="var", on_zero="NaN" ) cls.std.__doc__ = _WEIGHTED_REDUCE_DOCSTRING_TEMPLATE.format( cls=cls_name, fcn="std", on_zero="NaN" ) cls.quantile.__doc__ = _WEIGHTED_QUANTILE_DOCSTRING_TEMPLATE.format(cls=cls_name) _inject_docstring(DataArrayWeighted, "DataArray") _inject_docstring(DatasetWeighted, "Dataset") ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/conventions.py��������������������������������������������������������������0000664�0000000�0000000�00000076224�15056206164�0017604�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import itertools import warnings from collections import defaultdict from collections.abc import Hashable, Iterable, Mapping, MutableMapping from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, cast import numpy as np from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.coding import strings, variables from xarray.coding.variables import SerializationWarning, pop_to from xarray.core import indexing from xarray.core.common import ( _contains_datetime_like_objects, contains_cftime_datetimes, ) from xarray.core.utils import emit_user_level_warning from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.utils import is_duck_array CF_RELATED_DATA = ( "bounds", "grid_mapping", "climatology", "geometry", "node_coordinates", "node_count", "part_node_count", "interior_ring", "cell_measures", "formula_terms", ) CF_RELATED_DATA_NEEDS_PARSING = ( "grid_mapping", "cell_measures", "formula_terms", ) if TYPE_CHECKING: from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset T_VarTuple = tuple[tuple[Hashable, ...], Any, dict, dict] T_Name = Union[Hashable, None] T_Variables = Mapping[Any, Variable] T_Attrs = MutableMapping[Any, Any] T_DropVariables = Union[str, Iterable[Hashable], None] T_DatasetOrAbstractstore = Union[Dataset, AbstractDataStore] def ensure_not_multiindex(var: Variable, name: T_Name = None) -> None: # only the pandas multi-index dimension coordinate cannot be serialized (tuple values) if isinstance(var._data, indexing.PandasMultiIndexingAdapter): if name is None and isinstance(var, IndexVariable): name = var.name if var.dims == (name,): raise NotImplementedError( f"variable {name!r} is a MultiIndex, which cannot yet be " "serialized. Instead, either use reset_index() " "to convert MultiIndex levels into coordinate variables instead " "or use https://cf-xarray.readthedocs.io/en/latest/coding.html." 
) def encode_cf_variable( var: Variable, needs_copy: bool = True, name: T_Name = None ) -> Variable: """ Converts a Variable into a Variable which follows some of the CF conventions: - Nans are masked using _FillValue (or the deprecated missing_value) - Rescaling via: scale_factor and add_offset - datetimes are converted to the CF 'units since time' format - dtype encodings are enforced. Parameters ---------- var : Variable A variable holding un-encoded data. Returns ------- out : Variable A variable which has been encoded as described above. """ ensure_not_multiindex(var, name=name) for coder in [ CFDatetimeCoder(), CFTimedeltaCoder(), variables.CFScaleOffsetCoder(), variables.CFMaskCoder(), variables.NativeEnumCoder(), variables.NonStringCoder(), variables.DefaultFillvalueCoder(), variables.BooleanCoder(), ]: var = coder.encode(var, name=name) for attr_name in CF_RELATED_DATA: pop_to(var.encoding, var.attrs, attr_name) return var def decode_cf_variable( name: Hashable, var: Variable, concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool | CFDatetimeCoder = True, decode_endianness: bool = True, stack_char_dim: bool = True, use_cftime: bool | None = None, decode_timedelta: bool | CFTimedeltaCoder | None = None, ) -> Variable: """ Decodes a variable which may hold CF encoded information. This includes variables that have been masked and scaled, which hold CF style time variables (this is almost always the case if the dataset has been serialized) and which have strings encoded as character arrays. Parameters ---------- name : str Name of the variable. Used for better error messages. var : Variable A variable holding potentially CF encoded information. concat_characters : bool Should character arrays be concatenated to strings, for example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). If the _Unsigned attribute is present treat integer arrays as unsigned. decode_times : bool or CFDatetimeCoder Decode cf times ("hours since 2000-01-01") to np.datetime64. decode_endianness : bool Decode arrays from non-native to native endianness. stack_char_dim : bool Whether to stack characters into bytes along the last dimension of this array. Passed as an argument because we need to look at the full dataset to figure out if this is appropriate. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. decode_timedelta : None, bool, or CFTimedeltaCoder Decode cf timedeltas ("hours") to np.timedelta64. Returns ------- out : Variable A variable holding the decoded equivalent of var. 
""" # Ensure datetime-like Variables are passed through unmodified (GH 6453) if _contains_datetime_like_objects(var): return var original_dtype = var.dtype decode_timedelta_was_none = decode_timedelta is None if decode_timedelta is None: if isinstance(decode_times, CFDatetimeCoder): decode_timedelta = CFTimedeltaCoder(time_unit=decode_times.time_unit) else: decode_timedelta = bool(decode_times) if concat_characters: if stack_char_dim: var = strings.CharacterArrayCoder().decode(var, name=name) var = strings.EncodedStringCoder().decode(var) if original_dtype.kind == "O": var = variables.ObjectVLenStringCoder().decode(var) original_dtype = var.dtype if original_dtype.kind == "T": var = variables.Numpy2StringDTypeCoder().decode(var) if mask_and_scale: for coder in [ variables.CFMaskCoder( decode_times=decode_times, decode_timedelta=decode_timedelta ), variables.CFScaleOffsetCoder( decode_times=decode_times, decode_timedelta=decode_timedelta ), ]: var = coder.decode(var, name=name) if decode_timedelta: if isinstance(decode_timedelta, bool): decode_timedelta = CFTimedeltaCoder( decode_via_units=decode_timedelta, decode_via_dtype=decode_timedelta ) decode_timedelta._emit_decode_timedelta_future_warning = ( decode_timedelta_was_none ) var = decode_timedelta.decode(var, name=name) if decode_times: # remove checks after end of deprecation cycle if not isinstance(decode_times, CFDatetimeCoder): if use_cftime is not None: emit_user_level_warning( "Usage of 'use_cftime' as a kwarg is deprecated. " "Please pass a 'CFDatetimeCoder' instance initialized " "with 'use_cftime' to the 'decode_times' kwarg instead.\n" "Example usage:\n" " time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n" " ds = xr.open_dataset(decode_times=time_coder)\n", DeprecationWarning, ) decode_times = CFDatetimeCoder(use_cftime=use_cftime) elif use_cftime is not None: raise TypeError( "Usage of 'use_cftime' as a kwarg is not allowed " "if a 'CFDatetimeCoder' instance is passed to " "'decode_times'. Please set 'use_cftime' " "when initializing 'CFDatetimeCoder' instead.\n" "Example usage:\n" " time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n" " ds = xr.open_dataset(decode_times=time_coder)\n", ) var = decode_times.decode(var, name=name) if decode_endianness and not var.dtype.isnative: var = variables.EndianCoder().decode(var) original_dtype = var.dtype var = variables.BooleanCoder().decode(var) dimensions, data, attributes, encoding = variables.unpack_for_decoding(var) encoding.setdefault("dtype", original_dtype) if ( # we don't need to lazily index duck arrays not is_duck_array(data) # These arrays already support lazy indexing # OR for IndexingAdapters, it makes no sense to wrap them and not isinstance(data, indexing.ExplicitlyIndexedNDArrayMixin) ): # this path applies to bare BackendArray objects. # It is not hit for any internal Xarray backend data = indexing.LazilyIndexedArray(data) return Variable(dimensions, data, attributes, encoding=encoding, fastpath=True) def _update_bounds_attributes(variables: T_Variables) -> None: """Adds time attributes to time bounds variables. Variables handling time bounds ("Cell boundaries" in the CF conventions) do not necessarily carry the necessary attributes to be decoded. This copies the attributes from the time variable to the associated boundaries. 
See Also: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/ cf-conventions.html#cell-boundaries https://github.com/pydata/xarray/issues/2565 """ # For all time variables with bounds for v in variables.values(): attrs = v.attrs units = attrs.get("units") has_date_units = isinstance(units, str) and "since" in units if has_date_units and "bounds" in attrs and attrs["bounds"] in variables: bounds_attrs = variables[attrs["bounds"]].attrs bounds_attrs.setdefault("units", attrs["units"]) if "calendar" in attrs: bounds_attrs.setdefault("calendar", attrs["calendar"]) def _update_bounds_encoding(variables: T_Variables) -> None: """Adds time encoding to time bounds variables. Variables handling time bounds ("Cell boundaries" in the CF conventions) do not necessarily carry the necessary attributes to be decoded. This copies the encoding from the time variable to the associated bounds variable so that we write CF-compliant files. See Also: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/ cf-conventions.html#cell-boundaries https://github.com/pydata/xarray/issues/2565 """ # For all time variables with bounds for name, v in variables.items(): attrs = v.attrs encoding = v.encoding has_date_units = "units" in encoding and "since" in encoding["units"] is_datetime_type = np.issubdtype( v.dtype, np.datetime64 ) or contains_cftime_datetimes(v) if ( is_datetime_type and not has_date_units and "bounds" in attrs and attrs["bounds"] in variables ): emit_user_level_warning( f"Variable {name} has datetime type and a " f"bounds variable but {name}.encoding does not have " f"units specified. The units encodings for {name} " f"and {attrs['bounds']} will be determined independently " "and may not be equal, counter to CF-conventions. " "If this is a concern, specify a units encoding for " f"{name} before writing to a file.", ) if has_date_units and "bounds" in attrs and attrs["bounds"] in variables: bounds_encoding = variables[attrs["bounds"]].encoding bounds_encoding.setdefault("units", encoding["units"]) if "calendar" in encoding: bounds_encoding.setdefault("calendar", encoding["calendar"]) T = TypeVar("T") U = TypeVar("U") def _item_or_default(obj: Mapping[Any, T | U] | T, key: Hashable, default: T) -> T | U: """ Return item by key if obj is mapping and key is present, else return default value. """ return obj.get(key, default) if isinstance(obj, Mapping) else obj def decode_cf_variables( variables: T_Variables, attributes: T_Attrs, concat_characters: bool | Mapping[str, bool] = True, mask_and_scale: bool | Mapping[str, bool] = True, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] = True, decode_coords: bool | Literal["coordinates", "all"] = True, drop_variables: T_DropVariables = None, use_cftime: bool | Mapping[str, bool] | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, ) -> tuple[T_Variables, T_Attrs, set[Hashable]]: """ Decode several CF encoded variables. See: decode_cf_variable """ # Only emit one instance of the decode_timedelta default change # FutureWarning. This can be removed once this change is made. 
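    # [Editorial note, illustrative sketch -- not original library text.] This
    # function is the per-variable counterpart of ``decode_cf`` below; a
    # typical round trip through it via the public API looks like:
    #
    #     import numpy as np
    #     import xarray as xr
    #     raw = xr.Dataset(
    #         {"t": ("time", np.array([0, 12, 24]), {"units": "hours since 2000-01-01"})}
    #     )
    #     decoded = xr.decode_cf(raw)  # calls decode_cf_variables internally
    #     assert np.issubdtype(decoded["t"].dtype, np.datetime64)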
warnings.filterwarnings("once", "decode_timedelta", FutureWarning) dimensions_used_by = defaultdict(list) for v in variables.values(): for d in v.dims: dimensions_used_by[d].append(v) def stackable(dim: Hashable) -> bool: # figure out if a dimension can be concatenated over if dim in variables: return False for v in dimensions_used_by[dim]: if v.dtype.kind != "S" or dim != v.dims[-1]: return False return True coord_names = set() if isinstance(drop_variables, str): drop_variables = [drop_variables] elif drop_variables is None: drop_variables = [] drop_variables = set(drop_variables) # Time bounds coordinates might miss the decoding attributes if decode_times: _update_bounds_attributes(variables) new_vars = {} for k, v in variables.items(): if k in drop_variables: continue stack_char_dim = ( _item_or_default(concat_characters, k, True) and v.dtype == "S1" and v.ndim > 0 and stackable(v.dims[-1]) ) try: new_vars[k] = decode_cf_variable( k, v, concat_characters=_item_or_default(concat_characters, k, True), mask_and_scale=_item_or_default(mask_and_scale, k, True), decode_times=cast( bool | CFDatetimeCoder, _item_or_default(decode_times, k, True) ), stack_char_dim=stack_char_dim, use_cftime=_item_or_default(use_cftime, k, None), decode_timedelta=_item_or_default(decode_timedelta, k, None), ) except Exception as e: raise type(e)(f"Failed to decode variable {k!r}: {e}") from e if decode_coords in [True, "coordinates", "all"]: var_attrs = new_vars[k].attrs if "coordinates" in var_attrs: var_coord_names = [ c for c in var_attrs["coordinates"].split() if c in variables ] # propagate as is new_vars[k].encoding["coordinates"] = var_attrs["coordinates"] del var_attrs["coordinates"] # but only use as coordinate if existing if var_coord_names: coord_names.update(var_coord_names) if decode_coords == "all": for attr_name in CF_RELATED_DATA: if attr_name in var_attrs: # fixes stray colon attr_val = var_attrs[attr_name].replace(" :", ":") var_names = attr_val.split() # if grid_mapping is a single string, do not enter here if ( attr_name in CF_RELATED_DATA_NEEDS_PARSING and len(var_names) > 1 ): # map the keys to list of strings # "A: b c d E: f g" returns # {"A": ["b", "c", "d"], "E": ["f", "g"]} roles_and_names = defaultdict(list) key = None for vname in var_names: if ":" in vname: key = vname.strip(":") else: if key is None: raise ValueError( f"First element {vname!r} of [{attr_val!r}] misses ':', " f"cannot decode {attr_name!r}." ) roles_and_names[key].append(vname) # for grid_mapping keys are var_names if attr_name == "grid_mapping": var_names = list(roles_and_names.keys()) else: # for cell_measures and formula_terms values are var names var_names = list(itertools.chain(*roles_and_names.values())) # consistency check (one element per key) if len(var_names) != len(roles_and_names.keys()): emit_user_level_warning( f"Attribute {attr_name!r} has malformed content [{attr_val!r}], " f"decoding {var_names!r} to coordinates." 
) if all(var_name in variables for var_name in var_names): new_vars[k].encoding[attr_name] = attr_val coord_names.update(var_names) else: referenced_vars_not_in_variables = [ proj_name for proj_name in var_names if proj_name not in variables ] emit_user_level_warning( f"Variable(s) referenced in {attr_name} not in variables: {referenced_vars_not_in_variables}", ) del var_attrs[attr_name] if decode_coords and isinstance(attributes.get("coordinates", None), str): attributes = dict(attributes) crds = attributes.pop("coordinates") coord_names.update(crds.split()) return new_vars, attributes, coord_names def decode_cf( obj: T_DatasetOrAbstractstore, concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] = True, decode_coords: bool | Literal["coordinates", "all"] = True, drop_variables: T_DropVariables = None, use_cftime: bool | None = None, decode_timedelta: bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None = None, ) -> Dataset: """Decode the given Dataset or Datastore according to CF conventions into a new Dataset. Parameters ---------- obj : Dataset or DataStore Object to decode. concat_characters : bool, optional Should character arrays be concatenated to strings, for example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool, optional Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder], optional Decode cf times (e.g., integers since "hours since 2000-01-01") to np.datetime64. decode_coords : bool or {"coordinates", "all"}, optional Controls which variables are set as coordinate variables: - "coordinates" or True: Set variables referred to in the ``'coordinates'`` attribute of the datasets or individual variables as coordinate variables. - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and other attributes as coordinate variables. drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to ``cftime.datetime`` objects, regardless of whether or not they can be represented using ``np.datetime64[ns]`` objects. If False, always decode times to ``np.datetime64[ns]`` objects; if this is not possible raise an error. .. deprecated:: 2025.01.1 Please pass a :py:class:`coders.CFDatetimeCoder` instance initialized with ``use_cftime`` to the ``decode_times`` kwarg instead. decode_timedelta : bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder], optional If True or :py:class:`CFTimedeltaCoder`, decode variables and coordinates with time units in {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same behavior as decode_times. The resolution of the decoded timedeltas can be configured with the ``time_unit`` argument in the :py:class:`CFTimedeltaCoder` passed. 
Returns ------- decoded : Dataset """ from xarray.backends.common import AbstractDataStore from xarray.core.dataset import Dataset vars: T_Variables attrs: T_Attrs if isinstance(obj, Dataset): vars = obj._variables attrs = obj.attrs extra_coords = set(obj.coords) close = obj._close encoding = obj.encoding elif isinstance(obj, AbstractDataStore): vars, attrs = obj.load() extra_coords = set() close = obj.close encoding = obj.get_encoding() else: raise TypeError("can only decode Dataset or DataStore objects") vars, attrs, coord_names = decode_cf_variables( vars, attrs, concat_characters, mask_and_scale, decode_times, decode_coords, drop_variables=drop_variables, use_cftime=use_cftime, decode_timedelta=decode_timedelta, ) ds = Dataset(vars, attrs=attrs) ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars)) ds.set_close(close) ds.encoding = encoding return ds def cf_decoder( variables: T_Variables, attributes: T_Attrs, concat_characters: bool = True, mask_and_scale: bool = True, decode_times: bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] = True, ) -> tuple[T_Variables, T_Attrs]: """ Decode a set of CF encoded variables and attributes. Parameters ---------- variables : dict A dictionary mapping from variable name to xarray.Variable attributes : dict A dictionary mapping from attribute name to value concat_characters : bool Should character arrays be concatenated to strings, for example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] Decode cf times ("hours since 2000-01-01") to np.datetime64. Returns ------- decoded_variables : dict A dictionary mapping from variable name to xarray.Variable objects. decoded_attributes : dict A dictionary mapping from attribute name to values. See Also -------- decode_cf_variable """ variables, attributes, _ = decode_cf_variables( variables, attributes, concat_characters, mask_and_scale, decode_times, ) return variables, attributes def _encode_coordinates( variables: T_Variables, attributes: T_Attrs, non_dim_coord_names ): # calculate global and variable specific coordinates non_dim_coord_names = set(non_dim_coord_names) for name in list(non_dim_coord_names): if isinstance(name, str) and " " in name: emit_user_level_warning( f"coordinate {name!r} has a space in its name, which means it " "cannot be marked as a coordinate on disk and will be " "saved as a data variable instead", category=SerializationWarning, ) non_dim_coord_names.discard(name) global_coordinates = non_dim_coord_names.copy() variable_coordinates = defaultdict(set) not_technically_coordinates = set() for coord_name in non_dim_coord_names: target_dims = variables[coord_name].dims for k, v in variables.items(): if ( k not in non_dim_coord_names and k not in v.dims and set(target_dims) <= set(v.dims) ): variable_coordinates[k].add(coord_name) if any( coord_name in v.encoding.get(attr_name, tuple()) for attr_name in CF_RELATED_DATA ): not_technically_coordinates.add(coord_name) global_coordinates.discard(coord_name) variables = {k: v.copy(deep=False) for k, v in variables.items()} # keep track of variable names written to file under the "coordinates" attributes written_coords = set() for name, var in variables.items(): encoding = var.encoding attrs = var.attrs if "coordinates" in attrs and "coordinates" in encoding: raise ValueError( f"'coordinates' found in both attrs and encoding for variable {name!r}." 
) # if coordinates set to None, don't write coordinates attribute if ("coordinates" in attrs and attrs.get("coordinates") is None) or ( "coordinates" in encoding and encoding.get("coordinates") is None ): # make sure "coordinates" is removed from attrs/encoding attrs.pop("coordinates", None) encoding.pop("coordinates", None) continue # this will copy coordinates from encoding to attrs if "coordinates" in attrs # after the next line, "coordinates" is never in encoding # we get support for attrs["coordinates"] for free. coords_str = pop_to(encoding, attrs, "coordinates") or attrs.get("coordinates") if not coords_str and variable_coordinates[name]: coordinates_text = " ".join( str(coord_name) for coord_name in sorted(variable_coordinates[name]) if coord_name not in not_technically_coordinates ) if coordinates_text: attrs["coordinates"] = coordinates_text if "coordinates" in attrs: written_coords.update(attrs["coordinates"].split()) # These coordinates are not associated with any particular variables, so we # save them under a global 'coordinates' attribute so xarray can roundtrip # the dataset faithfully. Because this serialization goes beyond CF # conventions, only do it if necessary. # Reference discussion: # https://cfconventions.org/mailing-list-archive/Data/7400.html global_coordinates.difference_update(written_coords) if global_coordinates: attributes = dict(attributes) if "coordinates" in attributes: emit_user_level_warning( f"cannot serialize global coordinates {global_coordinates!r} because the global " f"attribute 'coordinates' already exists. This may prevent faithful roundtripping" f"of xarray datasets", category=SerializationWarning, ) else: attributes["coordinates"] = " ".join(sorted(map(str, global_coordinates))) return variables, attributes def encode_dataset_coordinates(dataset: Dataset): """Encode coordinates on the given dataset object into variable specific and global attributes. When possible, this is done according to CF conventions. Parameters ---------- dataset : Dataset Object to encode. Returns ------- variables : dict attrs : dict """ non_dim_coord_names = set(dataset.coords) - set(dataset.dims) return _encode_coordinates( dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names ) def cf_encoder(variables: T_Variables, attributes: T_Attrs): """ Encode a set of CF encoded variables and attributes. Takes a dicts of variables and attributes and encodes them to conform to CF conventions as much as possible. This includes masking, scaling, character array handling, and CF-time encoding. Parameters ---------- variables : dict A dictionary mapping from variable name to xarray.Variable attributes : dict A dictionary mapping from attribute name to value Returns ------- encoded_variables : dict A dictionary mapping from variable name to xarray.Variable, encoded_attributes : dict A dictionary mapping from attribute name to value See Also -------- decode_cf_variable, encode_cf_variable """ # add encoding for time bounds variables if present. 
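    # [Editorial note, hedged sketch -- not original library text.] The
    # per-variable encoding applied below can also be exercised directly with
    # ``encode_cf_variable`` from this module, e.g.:
    #
    #     import pandas as pd
    #     import xarray as xr
    #     from xarray.conventions import encode_cf_variable
    #     var = xr.Variable("time", pd.date_range("2000-01-01", periods=3))
    #     enc = encode_cf_variable(var)
    #     # enc now carries numeric data plus CF "units"/"calendar" attributes
    #     # instead of datetime64 values.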
_update_bounds_encoding(variables) new_vars = {} for k, v in variables.items(): try: new_vars[k] = encode_cf_variable(v, name=k) except Exception as e: e.add_note(f"Raised while encoding variable {k!r} with value {v!r}") raise # Remove attrs from bounds variables (issue #2921) for var in new_vars.values(): bounds = var.attrs.get("bounds") if bounds and bounds in new_vars: # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries for attr in [ "units", "standard_name", "axis", "positive", "calendar", "long_name", "leap_month", "leap_year", "month_lengths", ]: if ( attr in new_vars[bounds].attrs and attr in var.attrs and new_vars[bounds].attrs[attr] == var.attrs[attr] ): new_vars[bounds].attrs.pop(attr) return new_vars, attributes ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/convert.py������������������������������������������������������������������0000664�0000000�0000000�00000014763�15056206164�0016717�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Functions for converting to and from xarray objects""" from collections import Counter import numpy as np from xarray.coders import CFDatetimeCoder from xarray.coding.times import CFTimedeltaCoder from xarray.conventions import decode_cf from xarray.core import duck_array_ops from xarray.core.dataarray import DataArray from xarray.core.dtypes import get_fill_value from xarray.namedarray.pycompat import array_type iris_forbidden_keys = { "standard_name", "long_name", "units", "bounds", "axis", "calendar", "leap_month", "leap_year", "month_lengths", "coordinates", "grid_mapping", "climatology", "cell_methods", "formula_terms", "compress", "missing_value", "add_offset", "scale_factor", "valid_max", "valid_min", "valid_range", "_FillValue", } cell_methods_strings = { "point", "sum", "maximum", "median", "mid_range", "minimum", "mean", "mode", "standard_deviation", "variance", } def encode(var): return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable)) def _filter_attrs(attrs, ignored_attrs): """Return attrs that are not in ignored_attrs""" return {k: v for k, v in attrs.items() if k not in ignored_attrs} def _pick_attrs(attrs, keys): """Return attrs with keys in keys list""" return {k: v for k, v in attrs.items() if k in keys} def _get_iris_args(attrs): """Converts the xarray attrs into args that can be passed into Iris""" # iris.unit is deprecated in Iris v1.9 import cf_units args = {"attributes": _filter_attrs(attrs, iris_forbidden_keys)} args.update(_pick_attrs(attrs, ("standard_name", "long_name"))) unit_args = _pick_attrs(attrs, ("calendar",)) if "units" in attrs: args["units"] = cf_units.Unit(attrs["units"], **unit_args) return args # TODO: Add converting bounds from xarray to Iris and back def to_iris(dataarray): """Convert a DataArray into a Iris Cube""" # Iris not a hard dependency import iris from iris.fileformats.netcdf import parse_cell_methods dim_coords = 
[] aux_coords = [] for coord_name in dataarray.coords: coord = encode(dataarray.coords[coord_name]) coord_args = _get_iris_args(coord.attrs) coord_args["var_name"] = coord_name axis = None if coord.dims: axis = dataarray.get_axis_num(coord.dims) if coord_name in dataarray.dims: try: iris_coord = iris.coords.DimCoord(coord.values, **coord_args) dim_coords.append((iris_coord, axis)) except ValueError: iris_coord = iris.coords.AuxCoord(coord.values, **coord_args) aux_coords.append((iris_coord, axis)) else: iris_coord = iris.coords.AuxCoord(coord.values, **coord_args) aux_coords.append((iris_coord, axis)) args = _get_iris_args(dataarray.attrs) args["var_name"] = dataarray.name args["dim_coords_and_dims"] = dim_coords args["aux_coords_and_dims"] = aux_coords if "cell_methods" in dataarray.attrs: args["cell_methods"] = parse_cell_methods(dataarray.attrs["cell_methods"]) masked_data = duck_array_ops.masked_invalid(dataarray.data) cube = iris.cube.Cube(masked_data, **args) return cube def _iris_obj_to_attrs(obj): """Return a dictionary of attrs when given a Iris object""" attrs = {"standard_name": obj.standard_name, "long_name": obj.long_name} if obj.units.calendar: attrs["calendar"] = obj.units.calendar if obj.units.origin != "1" and not obj.units.is_unknown(): attrs["units"] = obj.units.origin attrs.update(obj.attributes) return {k: v for k, v in attrs.items() if v is not None} def _iris_cell_methods_to_str(cell_methods_obj): """Converts a Iris cell methods into a string""" cell_methods = [] for cell_method in cell_methods_obj: names = "".join(f"{n}: " for n in cell_method.coord_names) intervals = " ".join( f"interval: {interval}" for interval in cell_method.intervals ) comments = " ".join(f"comment: {comment}" for comment in cell_method.comments) extra = f"{intervals} {comments}".strip() if extra: extra = f" ({extra})" cell_methods.append(names + cell_method.method + extra) return " ".join(cell_methods) def _name(iris_obj, default="unknown"): """Mimics `iris_obj.name()` but with different name resolution order. Similar to iris_obj.name() method, but using iris_obj.var_name first to enable roundtripping. 
""" return iris_obj.var_name or iris_obj.standard_name or iris_obj.long_name or default def from_iris(cube): """Convert a Iris cube into an DataArray""" import iris.exceptions name = _name(cube) if name == "unknown": name = None dims = [] for i in range(cube.ndim): try: dim_coord = cube.coord(dim_coords=True, dimensions=(i,)) dims.append(_name(dim_coord)) except iris.exceptions.CoordinateNotFoundError: dims.append(f"dim_{i}") if len(set(dims)) != len(dims): duplicates = [k for k, v in Counter(dims).items() if v > 1] raise ValueError(f"Duplicate coordinate name {duplicates}.") coords = {} for coord in cube.coords(): coord_attrs = _iris_obj_to_attrs(coord) coord_dims = [dims[i] for i in cube.coord_dims(coord)] if coord_dims: coords[_name(coord)] = (coord_dims, coord.points, coord_attrs) else: coords[_name(coord)] = ((), coord.points.item(), coord_attrs) array_attrs = _iris_obj_to_attrs(cube) cell_methods = _iris_cell_methods_to_str(cube.cell_methods) if cell_methods: array_attrs["cell_methods"] = cell_methods # Deal with iris 1.* and 2.* cube_data = cube.core_data() if hasattr(cube, "core_data") else cube.data # Deal with dask and numpy masked arrays dask_array_type = array_type("dask") if isinstance(cube_data, dask_array_type): from dask.array import ma as dask_ma filled_data = dask_ma.filled(cube_data, get_fill_value(cube.dtype)) elif isinstance(cube_data, np.ma.MaskedArray): filled_data = np.ma.filled(cube_data, get_fill_value(cube.dtype)) else: filled_data = cube_data dataarray = DataArray( filled_data, coords=coords, name=name, attrs=array_attrs, dims=dims ) decoded_ds = decode_cf(dataarray._to_temp_dataset()) return dataarray._from_temp_dataset(decoded_ds) �������������xarray-2025.09.0/xarray/core/�����������������������������������������������������������������������0000775�0000000�0000000�00000000000�15056206164�0015602�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/core/__init__.py������������������������������������������������������������0000664�0000000�0000000�00000000000�15056206164�0017701�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/core/_aggregations.py�������������������������������������������������������0000664�0000000�0000000�00001224230�15056206164�0020771�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������"""Mixin classes with reduction operations.""" # This file was generated using xarray.util.generate_aggregations. Do not edit manually. 
from __future__ import annotations from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, Any from xarray.core import duck_array_ops from xarray.core.options import OPTIONS from xarray.core.types import Dims, Self from xarray.core.utils import contains_only_chunked_or_numpy, module_available if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset flox_available = module_available("flox") class DataTreeAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.count() Group: / Dimensions: () Data variables: foo int64 8B 5 """ return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict( ... foo=( ... "time", ... 
np.array([True, True, True, True, True, False], dtype=bool), ... ) ... ), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.all() Group: / Dimensions: () Data variables: foo bool 1B False """ return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict( ... foo=( ... "time", ... np.array([True, True, True, True, True, False], dtype=bool), ... ) ... ), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.any() Group: / Dimensions: () Data variables: foo bool 1B True """ return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : DataTree New DataTree with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.max() Group: / Dimensions: () Data variables: foo float64 8B 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.max(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.min() Group: / Dimensions: () Data variables: foo float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.min(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." 
or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.mean() Group: / Dimensions: () Data variables: foo float64 8B 1.6 Use ``skipna`` to control whether NaNs are ignored. >>> dt.mean(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. 
Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.prod() Group: / Dimensions: () Data variables: foo float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.prod(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> dt.prod(skipna=True, min_count=2) Group: / Dimensions: () Data variables: foo float64 8B 0.0 """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.sum() Group: / Dimensions: () Data variables: foo float64 8B 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.sum(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> dt.sum(skipna=True, min_count=2) Group: / Dimensions: () Data variables: foo float64 8B 8.0 """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.std() Group: / Dimensions: () Data variables: foo float64 8B 1.02 Use ``skipna`` to control whether NaNs are ignored. >>> dt.std(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> dt.std(skipna=True, ddof=1) Group: / Dimensions: () Data variables: foo float64 8B 1.14 """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.var() Group: / Dimensions: () Data variables: foo float64 8B 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> dt.var(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> dt.var(skipna=True, ddof=1) Group: / Dimensions: () Data variables: foo float64 8B 1.3 """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... 
) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.median() Group: / Dimensions: () Data variables: foo float64 8B 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.median(skipna=False) Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumsum DataTree.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.cumsum() Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.cumsum(skipna=False) Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumprod DataTree.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> dt.cumprod() Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.cumprod(skipna=False) Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DatasetAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... 
labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.count() Size: 8B Dimensions: () Data variables: da int64 8B 5 """ return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.all() Size: 1B Dimensions: () Data variables: da bool 1B False """ return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.any() Size: 1B Dimensions: () Data variables: da bool 1B True """ return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.max() Size: 8B Dimensions: () Data variables: da float64 8B 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.max(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : Dataset New Dataset with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.min() Size: 8B Dimensions: () Data variables: da float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.min(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.mean() Size: 8B Dimensions: () Data variables: da float64 8B 1.6 Use ``skipna`` to control whether NaNs are ignored. >>> ds.mean(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. 
``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.prod() Size: 8B Dimensions: () Data variables: da float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.prod(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.prod(skipna=True, min_count=2) Size: 8B Dimensions: () Data variables: da float64 8B 0.0 """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. 
keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.sum() Size: 8B Dimensions: () Data variables: da float64 8B 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.sum(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.sum(skipna=True, min_count=2) Size: 8B Dimensions: () Data variables: da float64 8B 8.0 """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... 
) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.std() Size: 8B Dimensions: () Data variables: da float64 8B 1.02 Use ``skipna`` to control whether NaNs are ignored. >>> ds.std(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.std(skipna=True, ddof=1) Size: 8B Dimensions: () Data variables: da float64 8B 1.14 """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.var() Size: 8B Dimensions: () Data variables: da float64 8B 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> ds.var(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.var(skipna=True, ddof=1) Size: 8B Dimensions: () Data variables: da float64 8B 1.3 """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. 
skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.median() Size: 8B Dimensions: () Data variables: da float64 8B 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.median(skipna=False) Size: 8B Dimensions: () Data variables: da float64 8B nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum DataArray.cumsum Dataset.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... 
labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.cumsum() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.cumsum(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod DataArray.cumprod Dataset.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.cumprod() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.cumprod(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DataArrayAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.count() Size: 8B array(5) """ return self.reduce( duck_array_ops.count, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... 
) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.all() Size: 1B array(False) """ return self.reduce( duck_array_ops.array_all, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.any() Size: 1B array(True) """ return self.reduce( duck_array_ops.array_any, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... 
) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.max() Size: 8B array(3.) Use ``skipna`` to control whether NaNs are ignored. >>> da.max(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.min() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.min(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : DataArray New DataArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.mean() Size: 8B array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> da.mean(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.prod() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> da.prod(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.prod(skipna=True, min_count=2) Size: 8B array(0.) 
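If fewer valid values than ``min_count`` are present, the result is NaN; ``da`` above has only five non-NaN values (illustrative, using the same ``da`` as above): >>> da.prod(skipna=True, min_count=6) Size: 8B array(nan)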
""" return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.sum() Size: 8B array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> da.sum(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.sum(skipna=True, min_count=2) Size: 8B array(8.) """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.std() Size: 8B array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> da.std(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.std(skipna=True, ddof=1) Size: 8B array(1.14017543) """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.var() Size: 8B array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> da.var(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> da.var(skipna=True, ddof=1) Size: 8B array(1.3) """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.median() Size: 8B array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> da.median(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : DataArray New DataArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumsum() Size: 48B array([1., 3., 6., 6., 8., 8.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumsum(skipna=False) Size: 48B array([ 1., 3., 6., 6., 8., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) Self: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumprod() Size: 48B array([1., 2., 6., 0., 0., 0.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.cumprod(skipna=False) Size: 48B array([ 1., 2., 6., 0., 0., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) Dataset: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> Dataset: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").count() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) int64 24B 1 2 2 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all :ref:`groupby` User guide on groupby operations. 
Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").all() Size: 27B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) bool 3B False True True """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.groupby("labels").any() Size: 27B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) bool 3B True True True """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").max() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").max(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 3.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). 
Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").min() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").min(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 0.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").mean() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").mean(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 1.5 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : Dataset New Dataset with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").prod() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 4.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").prod(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 0.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").prod(skipna=True, min_count=2) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 0.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. 
These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").sum() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 4.0 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").sum(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 3.0 Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.groupby("labels").sum(skipna=True, min_count=2) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 4.0 3.0 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : Dataset New Dataset with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").std() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 0.0 0.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").std(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 1.5 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").std(skipna=True, ddof=1) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 2.121 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var :ref:`groupby` User guide on groupby operations. 
Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").var() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 0.0 0.0 2.25 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").var(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 2.25 Specify ``ddof=1`` for an unbiased estimate. >>> ds.groupby("labels").var(skipna=True, ddof=1) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 0.0 4.5 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... 
coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").median() Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B 1.0 2.0 1.5 Use ``skipna`` to control whether NaNs are ignored. >>> ds.groupby("labels").median(skipna=False) Size: 48B Dimensions: (labels: 3) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Data variables: da (labels) float64 24B nan 2.0 1.5 """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum Dataset.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").cumsum() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 3.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").cumsum(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 3.0 4.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod Dataset.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.groupby("labels").cumprod() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 0.0 4.0 1.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.groupby("labels").cumprod(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 3.0 0.0 4.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DatasetResampleAggregations: _obj: Dataset def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Dataset: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> Dataset: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").count() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) int64 24B 1 3 1 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. 
If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").all() Size: 27B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) bool 3B True True False """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 78B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").any() Size: 27B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) bool 3B True True True """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").max() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 3.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").max(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 3.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").min() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").min(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, numeric_only=False, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").mean() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 1.667 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").mean(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 1.667 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").prod() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").prod(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 0.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3ME").prod(skipna=True, min_count=2) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 0.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").sum() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 5.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").sum(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 5.0 nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> ds.resample(time="3ME").sum(skipna=True, min_count=2) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 5.0 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").std() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.247 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").std(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.247 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3ME").std(skipna=True, ddof=1) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 1.528 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> ds.resample(time="3ME").var() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.556 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").var(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 0.0 1.556 nan Specify ``ddof=1`` for an unbiased estimate. >>> ds.resample(time="3ME").var(skipna=True, ddof=1) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B nan 2.333 nan """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").median() Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> ds.resample(time="3ME").median(skipna=False) Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Data variables: da (time) float64 24B 1.0 2.0 nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum Dataset.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").cumsum() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").cumsum(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 5.0 5.0 2.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Dataset: """ Reduce this Dataset's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. 
``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : Dataset New Dataset with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod Dataset.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da)) >>> ds Size: 120B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> ds.resample(time="3ME").cumprod() Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> ds.resample(time="3ME").cumprod(skipna=False) Size: 48B Dimensions: (time: 6) Dimensions without coordinates: time Data variables: da (time) float64 48B 1.0 2.0 6.0 0.0 2.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) class DataArrayGroupByAggregations: _obj: DataArray def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> DataArray: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> DataArray: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count DataArray.count :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").count() Size: 24B array([1, 2, 2]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all DataArray.all :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.groupby("labels").all() Size: 3B array([False, True, True]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any DataArray.any :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").any() Size: 3B array([ True, True, True]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max DataArray.max :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").max() Size: 24B array([1., 2., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").max(skipna=False) Size: 24B array([nan, 2., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min DataArray.min :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... 
labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").min() Size: 24B array([1., 2., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").min(skipna=False) Size: 24B array([nan, 2., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean DataArray.mean :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").mean() Size: 24B array([1. , 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").mean(skipna=False) Size: 24B array([nan, 2. 
, 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod DataArray.prod :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").prod() Size: 24B array([1., 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").prod(skipna=False) Size: 24B array([nan, 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> da.groupby("labels").prod(skipna=True, min_count=2) Size: 24B array([nan, 4., 0.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum DataArray.sum :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").sum() Size: 24B array([1., 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").sum(skipna=False) Size: 24B array([nan, 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``min_count`` for finer control over when NaNs are ignored. 
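On the ``da`` defined above, group ``'a'`` holds the values ``[1., nan]`` and so contributes only one non-NA value; requesting ``min_count=2`` therefore turns its result into NaN while groups ``'b'`` and ``'c'`` are unaffected (with ``min_count=1`` the result for ``'a'`` would instead be ``1.0``):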
>>> da.groupby("labels").sum(skipna=True, min_count=2) Size: 24B array([nan, 4., 3.]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std DataArray.std :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").std() Size: 24B array([0. , 0. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").std(skipna=False) Size: 24B array([nan, 0. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").std(skipna=True, ddof=1) Size: 24B array([ nan, 0. 
, 2.12132034]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var DataArray.var :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").var() Size: 24B array([0. , 0. , 2.25]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").var(skipna=False) Size: 24B array([ nan, 0. , 2.25]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Specify ``ddof=1`` for an unbiased estimate. >>> da.groupby("labels").var(skipna=True, ddof=1) Size: 24B array([nan, 0. 
, 4.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median DataArray.median :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").median() Size: 24B array([1. , 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' Use ``skipna`` to control whether NaNs are ignored. >>> da.groupby("labels").median(skipna=False) Size: 24B array([nan, 2. , 1.5]) Coordinates: * labels (labels) object 24B 'a' 'b' 'c' """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum DataArray.cumsum DataArray.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumsum() Size: 48B array([1., 2., 3., 3., 4., 1.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumsum(skipna=False) Size: 48B array([ 1., 2., 3., 3., 4., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) DataArray: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the GroupBy dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod DataArray.cumprod DataArray.cumulative :ref:`groupby` User guide on groupby operations. Notes ----- Use the ``flox`` package to significantly speed up groupby computations, especially with dask arrays. Xarray will use flox by default if installed. 
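Flox can be disabled globally with ``xr.set_options(use_flox=False)``, which is the ``OPTIONS["use_flox"]`` switch checked by the reductions in this module.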
Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumprod() Size: 48B array([1., 2., 3., 0., 4., 1.]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.groupby("labels").cumprod(skipna=False) Size: 48B array([ 1., 2., 3., 0., 4., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) DataArray: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> DataArray: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count DataArray.count :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.resample(time="3ME").count() Size: 24B array([1, 3, 1]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="count", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.count, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all DataArray.all :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").all() Size: 3B array([ True, True, False]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="all", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_all, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
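For dask-backed data, for instance, ``split_every`` limits how many intermediate chunks are combined at each step of the tree reduction; like any other extra keyword argument it is simply forwarded to the underlying reduction.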
Returns ------- reduced : DataArray New DataArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any DataArray.any :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([True, True, True, True, True, False], dtype=bool), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 6B array([ True, True, True, True, True, False]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").any() Size: 3B array([ True, True, True]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="any", dim=dim, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.array_any, dim=dim, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max DataArray.max :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.resample(time="3ME").max() Size: 24B array([1., 3., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").max(skipna=False) Size: 24B array([ 1., 3., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="max", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min DataArray.min :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").min() Size: 24B array([1., 0., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. 
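The last three-month bin (May and June 2001) contains the NaN at 2001-06-30, so with ``skipna=False`` the result for that bin is NaN: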
>>> da.resample(time="3ME").min(skipna=False) Size: 24B array([ 1., 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="min", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean DataArray.mean :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").mean() Size: 24B array([1. , 1.66666667, 2. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").mean(skipna=False) Size: 24B array([1. , 1.66666667, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="mean", dim=dim, skipna=skipna, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``prod`` along some dimension(s). 
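Each bin produced by the resampler is reduced independently, so the result contains one value per resampling period (one product per three-month window in the example below).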
Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod DataArray.prod :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").prod() Size: 24B array([1., 0., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").prod(skipna=False) Size: 24B array([ 1., 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3ME").prod(skipna=True, min_count=2) Size: 24B array([nan, 0., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="prod", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." 
or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum DataArray.sum :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").sum() Size: 24B array([1., 5., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").sum(skipna=False) Size: 24B array([ 1., 5., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``min_count`` for finer control over when NaNs are ignored. >>> da.resample(time="3ME").sum(skipna=True, min_count=2) Size: 24B array([nan, 5., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="sum", dim=dim, skipna=skipna, min_count=min_count, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. 
If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std DataArray.std :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").std() Size: 24B array([0. , 1.24721913, 0. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").std(skipna=False) Size: 24B array([0. , 1.24721913, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3ME").std(skipna=True, ddof=1) Size: 24B array([ nan, 1.52752523, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="std", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var DataArray.var :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").var() Size: 24B array([0. , 1.55555556, 0. ]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").var(skipna=False) Size: 24B array([0. , 1.55555556, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Specify ``ddof=1`` for an unbiased estimate. >>> da.resample(time="3ME").var(skipna=True, ddof=1) Size: 24B array([ nan, 2.33333333, nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ if ( flox_available and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="var", dim=dim, skipna=skipna, ddof=ddof, # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median DataArray.median :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").median() Size: 24B array([1., 2., 2.]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 Use ``skipna`` to control whether NaNs are ignored. >>> da.resample(time="3ME").median(skipna=False) Size: 24B array([ 1., 2., nan]) Coordinates: * time (time) datetime64[ns] 24B 2001-01-31 2001-04-30 2001-07-31 """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> DataArray: """ Reduce this DataArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum DataArray.cumsum DataArray.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. 
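For example, a cumulative sum over the full ``time`` dimension (not restarted in each resampling bin) can typically be written with that API as ``da.cumulative("time").sum()``.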
Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) >> da.resample(time="3ME").cumsum() Size: 48B array([1., 2., 5., 5., 2., 2.]) Coordinates: labels (time) >> da.resample(time="3ME").cumsum(skipna=False) Size: 48B array([ 1., 2., 5., 5., 2., nan]) Coordinates: labels (time) DataArray: """ Reduce this DataArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the Resample dimensions. If "...", will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataArray New DataArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod DataArray.cumprod DataArray.cumulative :ref:`resampling` User guide on resampling operations. Notes ----- Use the ``flox`` package to significantly speed up resampling computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more. Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> da = xr.DataArray( ... np.array([1, 2, 3, 0, 2, np.nan]), ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> da Size: 48B array([ 1., 2., 3., 0., 2., nan]) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) >> da.resample(time="3ME").cumprod() Size: 48B array([1., 2., 6., 0., 2., 2.]) Coordinates: labels (time) >> da.resample(time="3ME").cumprod(skipna=False) Size: 48B array([ 1., 2., 6., 0., 2., nan]) Coordinates: labels (time) Self: raise NotImplementedError def __add__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.add) def __sub__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.sub) def __mul__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mul) def __pow__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.pow) def __truediv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.floordiv) def __mod__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mod) def __and__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.and_) def __xor__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.xor) def __or__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.or_) def __lshift__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.lshift) def __rshift__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.rshift) def __lt__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.lt) def __le__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.le) def __gt__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.gt) def __ge__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.ge) def __eq__(self, other: DtCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: DtCompatible) -> Self: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: DtCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return 
self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class DatasetOpsMixin: __slots__ = () def _binary_op( self, other: DsCompatible, f: Callable, reflexive: bool = False ) -> Self: raise NotImplementedError @overload def __add__(self, other: DataTree) -> DataTree: ... @overload def __add__(self, other: DsCompatible) -> Self: ... def __add__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.add) @overload def __sub__(self, other: DataTree) -> DataTree: ... @overload def __sub__(self, other: DsCompatible) -> Self: ... def __sub__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.sub) @overload def __mul__(self, other: DataTree) -> DataTree: ... @overload def __mul__(self, other: DsCompatible) -> Self: ... def __mul__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.mul) @overload def __pow__(self, other: DataTree) -> DataTree: ... @overload def __pow__(self, other: DsCompatible) -> Self: ... def __pow__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.pow) @overload def __truediv__(self, other: DataTree) -> DataTree: ... @overload def __truediv__(self, other: DsCompatible) -> Self: ... def __truediv__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.truediv) @overload def __floordiv__(self, other: DataTree) -> DataTree: ... @overload def __floordiv__(self, other: DsCompatible) -> Self: ... 
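# Note: the ``@overload`` signatures above are only seen by static type checkers;
# they encode that a binary operation whose right-hand operand is a DataTree is
# typed as returning a DataTree, while any other ``DsCompatible`` operand keeps the
# Dataset (``Self``) type. At runtime the stubs are discarded and every operator
# simply delegates to ``_binary_op`` with the matching ``operator`` function, e.g.
# ``ds // other`` ends up calling ``ds._binary_op(other, operator.floordiv)``.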
def __floordiv__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.floordiv) @overload def __mod__(self, other: DataTree) -> DataTree: ... @overload def __mod__(self, other: DsCompatible) -> Self: ... def __mod__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.mod) @overload def __and__(self, other: DataTree) -> DataTree: ... @overload def __and__(self, other: DsCompatible) -> Self: ... def __and__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.and_) @overload def __xor__(self, other: DataTree) -> DataTree: ... @overload def __xor__(self, other: DsCompatible) -> Self: ... def __xor__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.xor) @overload def __or__(self, other: DataTree) -> DataTree: ... @overload def __or__(self, other: DsCompatible) -> Self: ... def __or__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.or_) @overload def __lshift__(self, other: DataTree) -> DataTree: ... @overload def __lshift__(self, other: DsCompatible) -> Self: ... def __lshift__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.lshift) @overload def __rshift__(self, other: DataTree) -> DataTree: ... @overload def __rshift__(self, other: DsCompatible) -> Self: ... def __rshift__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.rshift) @overload def __lt__(self, other: DataTree) -> DataTree: ... @overload def __lt__(self, other: DsCompatible) -> Self: ... def __lt__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.lt) @overload def __le__(self, other: DataTree) -> DataTree: ... @overload def __le__(self, other: DsCompatible) -> Self: ... def __le__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.le) @overload def __gt__(self, other: DataTree) -> DataTree: ... @overload def __gt__(self, other: DsCompatible) -> Self: ... def __gt__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.gt) @overload def __ge__(self, other: DataTree) -> DataTree: ... @overload def __ge__(self, other: DsCompatible) -> Self: ... def __ge__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, operator.ge) @overload # type:ignore[override] def __eq__(self, other: DataTree) -> DataTree: ... @overload def __eq__(self, other: DsCompatible) -> Self: ... def __eq__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, nputils.array_eq) @overload # type:ignore[override] def __ne__(self, other: DataTree) -> DataTree: ... @overload def __ne__(self, other: DsCompatible) -> Self: ... 
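# Note: ``__eq__`` and ``__ne__`` are routed through ``nputils.array_eq`` and
# ``nputils.array_ne`` rather than ``operator.eq``/``operator.ne``, so that ``==``
# and ``!=`` keep element-wise (numpy-style) comparison semantics instead of
# Python's default object comparison; the ``__hash__`` annotation below is the
# companion to overriding ``__eq__``.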
def __ne__(self, other: DsCompatible) -> Self | DataTree: return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: DsCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _inplace_binary_op(self, other: DsCompatible, f: Callable) -> Self: raise NotImplementedError def __iadd__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) def __isub__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) def __imul__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imul) def __ipow__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) def __itruediv__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) def __ifloordiv__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) def __imod__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) def __iand__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) def __ixor__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) def __ior__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) def __ilshift__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ilshift) def __irshift__(self, other: DsCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) def conj(self, *args: Any, 
**kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __iadd__.__doc__ = operator.iadd.__doc__ __isub__.__doc__ = operator.isub.__doc__ __imul__.__doc__ = operator.imul.__doc__ __ipow__.__doc__ = operator.ipow.__doc__ __itruediv__.__doc__ = operator.itruediv.__doc__ __ifloordiv__.__doc__ = operator.ifloordiv.__doc__ __imod__.__doc__ = operator.imod.__doc__ __iand__.__doc__ = operator.iand.__doc__ __ixor__.__doc__ = operator.ixor.__doc__ __ior__.__doc__ = operator.ior.__doc__ __ilshift__.__doc__ = operator.ilshift.__doc__ __irshift__.__doc__ = operator.irshift.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class DataArrayOpsMixin: __slots__ = () def _binary_op( self, other: DaCompatible, f: Callable, reflexive: bool = False ) -> Self: raise NotImplementedError @overload def __add__(self, other: Dataset) -> Dataset: ... @overload def __add__(self, other: DataTree) -> DataTree: ... @overload def __add__(self, other: DaCompatible) -> Self: ... def __add__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.add) @overload def __sub__(self, other: Dataset) -> Dataset: ... @overload def __sub__(self, other: DataTree) -> DataTree: ... @overload def __sub__(self, other: DaCompatible) -> Self: ... def __sub__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.sub) @overload def __mul__(self, other: Dataset) -> Dataset: ... @overload def __mul__(self, other: DataTree) -> DataTree: ... @overload def __mul__(self, other: DaCompatible) -> Self: ... def __mul__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.mul) @overload def __pow__(self, other: Dataset) -> Dataset: ... @overload def __pow__(self, other: DataTree) -> DataTree: ... @overload def __pow__(self, other: DaCompatible) -> Self: ... 
def __pow__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.pow) @overload def __truediv__(self, other: Dataset) -> Dataset: ... @overload def __truediv__(self, other: DataTree) -> DataTree: ... @overload def __truediv__(self, other: DaCompatible) -> Self: ... def __truediv__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.truediv) @overload def __floordiv__(self, other: Dataset) -> Dataset: ... @overload def __floordiv__(self, other: DataTree) -> DataTree: ... @overload def __floordiv__(self, other: DaCompatible) -> Self: ... def __floordiv__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.floordiv) @overload def __mod__(self, other: Dataset) -> Dataset: ... @overload def __mod__(self, other: DataTree) -> DataTree: ... @overload def __mod__(self, other: DaCompatible) -> Self: ... def __mod__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.mod) @overload def __and__(self, other: Dataset) -> Dataset: ... @overload def __and__(self, other: DataTree) -> DataTree: ... @overload def __and__(self, other: DaCompatible) -> Self: ... def __and__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.and_) @overload def __xor__(self, other: Dataset) -> Dataset: ... @overload def __xor__(self, other: DataTree) -> DataTree: ... @overload def __xor__(self, other: DaCompatible) -> Self: ... def __xor__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.xor) @overload def __or__(self, other: Dataset) -> Dataset: ... @overload def __or__(self, other: DataTree) -> DataTree: ... @overload def __or__(self, other: DaCompatible) -> Self: ... def __or__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.or_) @overload def __lshift__(self, other: Dataset) -> Dataset: ... @overload def __lshift__(self, other: DataTree) -> DataTree: ... @overload def __lshift__(self, other: DaCompatible) -> Self: ... def __lshift__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.lshift) @overload def __rshift__(self, other: Dataset) -> Dataset: ... @overload def __rshift__(self, other: DataTree) -> DataTree: ... @overload def __rshift__(self, other: DaCompatible) -> Self: ... def __rshift__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.rshift) @overload def __lt__(self, other: Dataset) -> Dataset: ... @overload def __lt__(self, other: DataTree) -> DataTree: ... @overload def __lt__(self, other: DaCompatible) -> Self: ... def __lt__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.lt) @overload def __le__(self, other: Dataset) -> Dataset: ... @overload def __le__(self, other: DataTree) -> DataTree: ... @overload def __le__(self, other: DaCompatible) -> Self: ... def __le__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.le) @overload def __gt__(self, other: Dataset) -> Dataset: ... @overload def __gt__(self, other: DataTree) -> DataTree: ... @overload def __gt__(self, other: DaCompatible) -> Self: ... def __gt__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.gt) @overload def __ge__(self, other: Dataset) -> Dataset: ... 
@overload def __ge__(self, other: DataTree) -> DataTree: ... @overload def __ge__(self, other: DaCompatible) -> Self: ... def __ge__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, operator.ge) @overload # type:ignore[override] def __eq__(self, other: Dataset) -> Dataset: ... @overload def __eq__(self, other: DataTree) -> DataTree: ... @overload def __eq__(self, other: DaCompatible) -> Self: ... def __eq__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, nputils.array_eq) @overload # type:ignore[override] def __ne__(self, other: Dataset) -> Dataset: ... @overload def __ne__(self, other: DataTree) -> DataTree: ... @overload def __ne__(self, other: DaCompatible) -> Self: ... def __ne__(self, other: DaCompatible) -> Self | Dataset | DataTree: return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: DaCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self: raise NotImplementedError def __iadd__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) def __isub__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) def __imul__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imul) def __ipow__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) def __itruediv__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) def __ifloordiv__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) def __imod__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) def __iand__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) def __ixor__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) def __ior__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) def __ilshift__(self, other: DaCompatible) -> Self: # type:ignore[misc] return 
self._inplace_binary_op(other, operator.ilshift) def __irshift__(self, other: DaCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, **kwargs) def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __iadd__.__doc__ = operator.iadd.__doc__ __isub__.__doc__ = operator.isub.__doc__ __imul__.__doc__ = operator.imul.__doc__ __ipow__.__doc__ = operator.ipow.__doc__ __itruediv__.__doc__ = operator.itruediv.__doc__ __ifloordiv__.__doc__ = operator.ifloordiv.__doc__ __imod__.__doc__ = operator.imod.__doc__ __iand__.__doc__ = operator.iand.__doc__ __ixor__.__doc__ = operator.ixor.__doc__ __ior__.__doc__ = operator.ior.__doc__ __ilshift__.__doc__ = operator.ilshift.__doc__ __irshift__.__doc__ = operator.irshift.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class VariableOpsMixin: __slots__ = () def _binary_op( self, other: VarCompatible, f: Callable, reflexive: bool = False ) -> Self: raise NotImplementedError @overload def __add__(self, other: T_DA) -> T_DA: ... @overload def __add__(self, other: Dataset) -> Dataset: ... @overload def __add__(self, other: DataTree) -> DataTree: ... @overload def __add__(self, other: VarCompatible) -> Self: ... def __add__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.add) @overload def __sub__(self, other: T_DA) -> T_DA: ... 
@overload def __sub__(self, other: Dataset) -> Dataset: ... @overload def __sub__(self, other: DataTree) -> DataTree: ... @overload def __sub__(self, other: VarCompatible) -> Self: ... def __sub__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.sub) @overload def __mul__(self, other: T_DA) -> T_DA: ... @overload def __mul__(self, other: Dataset) -> Dataset: ... @overload def __mul__(self, other: DataTree) -> DataTree: ... @overload def __mul__(self, other: VarCompatible) -> Self: ... def __mul__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.mul) @overload def __pow__(self, other: T_DA) -> T_DA: ... @overload def __pow__(self, other: Dataset) -> Dataset: ... @overload def __pow__(self, other: DataTree) -> DataTree: ... @overload def __pow__(self, other: VarCompatible) -> Self: ... def __pow__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.pow) @overload def __truediv__(self, other: T_DA) -> T_DA: ... @overload def __truediv__(self, other: Dataset) -> Dataset: ... @overload def __truediv__(self, other: DataTree) -> DataTree: ... @overload def __truediv__(self, other: VarCompatible) -> Self: ... def __truediv__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.truediv) @overload def __floordiv__(self, other: T_DA) -> T_DA: ... @overload def __floordiv__(self, other: Dataset) -> Dataset: ... @overload def __floordiv__(self, other: DataTree) -> DataTree: ... @overload def __floordiv__(self, other: VarCompatible) -> Self: ... def __floordiv__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.floordiv) @overload def __mod__(self, other: T_DA) -> T_DA: ... @overload def __mod__(self, other: Dataset) -> Dataset: ... @overload def __mod__(self, other: DataTree) -> DataTree: ... @overload def __mod__(self, other: VarCompatible) -> Self: ... def __mod__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.mod) @overload def __and__(self, other: T_DA) -> T_DA: ... @overload def __and__(self, other: Dataset) -> Dataset: ... @overload def __and__(self, other: DataTree) -> DataTree: ... @overload def __and__(self, other: VarCompatible) -> Self: ... def __and__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.and_) @overload def __xor__(self, other: T_DA) -> T_DA: ... @overload def __xor__(self, other: Dataset) -> Dataset: ... @overload def __xor__(self, other: DataTree) -> DataTree: ... @overload def __xor__(self, other: VarCompatible) -> Self: ... def __xor__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.xor) @overload def __or__(self, other: T_DA) -> T_DA: ... @overload def __or__(self, other: Dataset) -> Dataset: ... @overload def __or__(self, other: DataTree) -> DataTree: ... @overload def __or__(self, other: VarCompatible) -> Self: ... def __or__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.or_) @overload def __lshift__(self, other: T_DA) -> T_DA: ... @overload def __lshift__(self, other: Dataset) -> Dataset: ... @overload def __lshift__(self, other: DataTree) -> DataTree: ... @overload def __lshift__(self, other: VarCompatible) -> Self: ... 
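# Note: for Variable the overloads also encode type promotion -- combining a
# Variable with a DataArray is typed as that DataArray type (``T_DA``), and
# combining with a Dataset or DataTree is typed as those classes; at runtime the
# richer object is generally the one that assembles the result, with Variable's
# ``_binary_op`` handling the plain ``VarCompatible`` cases.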
def __lshift__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.lshift) @overload def __rshift__(self, other: T_DA) -> T_DA: ... @overload def __rshift__(self, other: Dataset) -> Dataset: ... @overload def __rshift__(self, other: DataTree) -> DataTree: ... @overload def __rshift__(self, other: VarCompatible) -> Self: ... def __rshift__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.rshift) @overload def __lt__(self, other: T_DA) -> T_DA: ... @overload def __lt__(self, other: Dataset) -> Dataset: ... @overload def __lt__(self, other: DataTree) -> DataTree: ... @overload def __lt__(self, other: VarCompatible) -> Self: ... def __lt__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.lt) @overload def __le__(self, other: T_DA) -> T_DA: ... @overload def __le__(self, other: Dataset) -> Dataset: ... @overload def __le__(self, other: DataTree) -> DataTree: ... @overload def __le__(self, other: VarCompatible) -> Self: ... def __le__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.le) @overload def __gt__(self, other: T_DA) -> T_DA: ... @overload def __gt__(self, other: Dataset) -> Dataset: ... @overload def __gt__(self, other: DataTree) -> DataTree: ... @overload def __gt__(self, other: VarCompatible) -> Self: ... def __gt__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.gt) @overload def __ge__(self, other: T_DA) -> T_DA: ... @overload def __ge__(self, other: Dataset) -> Dataset: ... @overload def __ge__(self, other: DataTree) -> DataTree: ... @overload def __ge__(self, other: VarCompatible) -> Self: ... def __ge__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, operator.ge) @overload # type:ignore[override] def __eq__(self, other: T_DA) -> T_DA: ... @overload def __eq__(self, other: Dataset) -> Dataset: ... @overload def __eq__(self, other: DataTree) -> DataTree: ... @overload def __eq__(self, other: VarCompatible) -> Self: ... def __eq__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, nputils.array_eq) @overload # type:ignore[override] def __ne__(self, other: T_DA) -> T_DA: ... @overload def __ne__(self, other: Dataset) -> Dataset: ... @overload def __ne__(self, other: DataTree) -> DataTree: ... @overload def __ne__(self, other: VarCompatible) -> Self: ... 
def __ne__(self, other: VarCompatible) -> Self | T_DA | Dataset | DataTree: return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: VarCompatible) -> Self: return self._binary_op(other, operator.or_, reflexive=True) def _inplace_binary_op(self, other: VarCompatible, f: Callable) -> Self: raise NotImplementedError def __iadd__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iadd) def __isub__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.isub) def __imul__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imul) def __ipow__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ipow) def __itruediv__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.itruediv) def __ifloordiv__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ifloordiv) def __imod__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.imod) def __iand__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.iand) def __ixor__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ixor) def __ior__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ior) def __ilshift__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.ilshift) def __irshift__(self, other: VarCompatible) -> Self: # type:ignore[misc] return self._inplace_binary_op(other, operator.irshift) def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError def __neg__(self) -> Self: return self._unary_op(operator.neg) def __pos__(self) -> Self: return self._unary_op(operator.pos) def __abs__(self) -> Self: return self._unary_op(operator.abs) def __invert__(self) -> Self: return self._unary_op(operator.invert) def round(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.round_, *args, **kwargs) def argsort(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.argsort, *args, 
**kwargs) def conj(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conj, *args, **kwargs) def conjugate(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op(ops.conjugate, *args, **kwargs) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ __iadd__.__doc__ = operator.iadd.__doc__ __isub__.__doc__ = operator.isub.__doc__ __imul__.__doc__ = operator.imul.__doc__ __ipow__.__doc__ = operator.ipow.__doc__ __itruediv__.__doc__ = operator.itruediv.__doc__ __ifloordiv__.__doc__ = operator.ifloordiv.__doc__ __imod__.__doc__ = operator.imod.__doc__ __iand__.__doc__ = operator.iand.__doc__ __ixor__.__doc__ = operator.ixor.__doc__ __ior__.__doc__ = operator.ior.__doc__ __ilshift__.__doc__ = operator.ilshift.__doc__ __irshift__.__doc__ = operator.irshift.__doc__ __neg__.__doc__ = operator.neg.__doc__ __pos__.__doc__ = operator.pos.__doc__ __abs__.__doc__ = operator.abs.__doc__ __invert__.__doc__ = operator.invert.__doc__ round.__doc__ = ops.round_.__doc__ argsort.__doc__ = ops.argsort.__doc__ conj.__doc__ = ops.conj.__doc__ conjugate.__doc__ = ops.conjugate.__doc__ class DatasetGroupByOpsMixin: __slots__ = () def _binary_op( self, other: Dataset | DataArray, f: Callable, reflexive: bool = False ) -> Dataset: raise NotImplementedError def __add__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.add) def __sub__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.sub) def __mul__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mul) def __pow__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.pow) def __truediv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.floordiv) def __mod__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mod) def __and__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.and_) def __xor__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.xor) def __or__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.or_) def __lshift__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.lshift) def __rshift__(self, other: Dataset | 
DataArray) -> Dataset: return self._binary_op(other, operator.rshift) def __lt__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.lt) def __le__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.le) def __gt__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.gt) def __ge__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.ge) def __eq__(self, other: Dataset | DataArray) -> Dataset: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: Dataset | DataArray) -> Dataset: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: Dataset | DataArray) -> Dataset: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ class DataArrayGroupByOpsMixin: __slots__ = () def _binary_op( self, other: T_Xarray, f: Callable, reflexive: bool = False ) -> T_Xarray: raise NotImplementedError def __add__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add) def __sub__(self, other: T_Xarray) -> T_Xarray: return 
self._binary_op(other, operator.sub) def __mul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul) def __pow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow) def __truediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv) def __floordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv) def __mod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod) def __and__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_) def __xor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor) def __or__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_) def __lshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lshift) def __rshift__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.rshift) def __lt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.lt) def __le__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.le) def __gt__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.gt) def __ge__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.ge) def __eq__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_eq) def __ne__(self, other: T_Xarray) -> T_Xarray: # type:ignore[override] return self._binary_op(other, nputils.array_ne) # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment] def __radd__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.add, reflexive=True) def __rsub__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.sub, reflexive=True) def __rmul__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mul, reflexive=True) def __rpow__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.pow, reflexive=True) def __rtruediv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.truediv, reflexive=True) def __rfloordiv__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.floordiv, reflexive=True) def __rmod__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.mod, reflexive=True) def __rand__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.and_, reflexive=True) def __rxor__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.xor, reflexive=True) def __ror__(self, other: T_Xarray) -> T_Xarray: return self._binary_op(other, operator.or_, reflexive=True) __add__.__doc__ = operator.add.__doc__ __sub__.__doc__ = operator.sub.__doc__ __mul__.__doc__ = operator.mul.__doc__ __pow__.__doc__ = operator.pow.__doc__ __truediv__.__doc__ = operator.truediv.__doc__ __floordiv__.__doc__ = operator.floordiv.__doc__ __mod__.__doc__ = operator.mod.__doc__ __and__.__doc__ = operator.and_.__doc__ __xor__.__doc__ = operator.xor.__doc__ __or__.__doc__ = operator.or_.__doc__ __lshift__.__doc__ = operator.lshift.__doc__ __rshift__.__doc__ = operator.rshift.__doc__ __lt__.__doc__ = operator.lt.__doc__ __le__.__doc__ = operator.le.__doc__ __gt__.__doc__ = operator.gt.__doc__ __ge__.__doc__ = operator.ge.__doc__ __eq__.__doc__ = nputils.array_eq.__doc__ __ne__.__doc__ = 
nputils.array_ne.__doc__ __radd__.__doc__ = operator.add.__doc__ __rsub__.__doc__ = operator.sub.__doc__ __rmul__.__doc__ = operator.mul.__doc__ __rpow__.__doc__ = operator.pow.__doc__ __rtruediv__.__doc__ = operator.truediv.__doc__ __rfloordiv__.__doc__ = operator.floordiv.__doc__ __rmod__.__doc__ = operator.mod.__doc__ __rand__.__doc__ = operator.and_.__doc__ __rxor__.__doc__ = operator.xor.__doc__ __ror__.__doc__ = operator.or_.__doc__ xarray-2025.09.0/xarray/core/accessor_dt.py 0000664 0000000 0000000 00000055566 15056206164 0020466 0 ustar 00root root 0000000 0000000 from __future__ import annotations import warnings from typing import TYPE_CHECKING, Generic import numpy as np import pandas as pd from xarray.coding.calendar_ops import _decimal_year from xarray.coding.times import infer_calendar_name from xarray.core import duck_array_ops from xarray.core.common import ( _contains_datetime_like_objects, full_like, is_np_datetime_like, is_np_timedelta_like, ) from xarray.core.types import T_DataArray from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.utils import is_duck_dask_array if TYPE_CHECKING: from typing import Self from numpy.typing import DTypeLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import CFCalendar def _season_from_months(months): """Compute season (DJF, MAM, JJA, SON) from month ordinal""" # TODO: Move "season" accessor upstream into pandas seasons = np.array(["DJF", "MAM", "JJA", "SON", "nan"]) months = np.asarray(months) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="invalid value encountered in floor_divide" ) warnings.filterwarnings( "ignore", message="invalid value encountered in remainder" ) idx = (months // 3) % 4 idx[np.isnan(idx)] = 4 return seasons[idx.astype(int)] def _access_through_cftimeindex(values, name): """Coerce an array of datetime-like values to a CFTimeIndex and access requested datetime component """ from xarray.coding.cftimeindex import CFTimeIndex if not isinstance(values, CFTimeIndex): values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values)) else: values_as_cftimeindex = values if name == "season": months = values_as_cftimeindex.month field_values = _season_from_months(months) elif name == "date": raise AttributeError( "'CFTimeIndex' object has no attribute `date`. Consider using the floor method " "instead, for instance: `.time.dt.floor('D')`."
) else: field_values = getattr(values_as_cftimeindex, name) return field_values.reshape(values.shape) def _access_through_series(values, name): """Coerce an array of datetime-like values to a pandas Series and access requested datetime component """ values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False) if name == "season": months = values_as_series.dt.month.values field_values = _season_from_months(months) elif name == "total_seconds": field_values = values_as_series.dt.total_seconds().values elif name == "isocalendar": # special NaT-handling can be removed when # https://github.com/pandas-dev/pandas/issues/54657 is resolved field_values = values_as_series.dt.isocalendar() # test for and apply needed dtype hasna = any(field_values.year.isnull()) if hasna: field_values = np.dstack( [ getattr(field_values, name).astype(np.float64, copy=False).values for name in ["year", "week", "day"] ] ) else: field_values = np.array(field_values, dtype=np.int64) # isocalendar returns iso- year, week, and weekday -> reshape return field_values.T.reshape(3, *values.shape) else: field_values = getattr(values_as_series.dt, name).values return field_values.reshape(values.shape) def _get_date_field(values, name, dtype): """Indirectly access pandas' libts.get_date_field by wrapping data as a Series and calling through `.dt` attribute. Parameters ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values name : str Name of datetime field to access dtype : dtype-like dtype for output date field values Returns ------- datetime_fields : same type as values Array-like of datetime fields accessed for each element in values """ if is_np_datetime_like(values.dtype): access_method = _access_through_series else: access_method = _access_through_cftimeindex if is_duck_dask_array(values): from dask.array import map_blocks new_axis = chunks = None # isocalendar adds an axis if name == "isocalendar": chunks = (3,) + values.chunksize new_axis = 0 return map_blocks( access_method, values, name, dtype=dtype, new_axis=new_axis, chunks=chunks ) else: out = access_method(values, name) # cast only for integer types to keep float64 in presence of NaT # see https://github.com/pydata/xarray/issues/7928 if np.issubdtype(out.dtype, np.integer): out = out.astype(dtype, copy=False) return out def _round_through_series_or_index(values, name, freq): """Coerce an array of datetime-like values to a pandas Series or xarray CFTimeIndex and apply requested rounding """ from xarray.coding.cftimeindex import CFTimeIndex if is_np_datetime_like(values.dtype): values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False) method = getattr(values_as_series.dt, name) else: values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values)) method = getattr(values_as_cftimeindex, name) field_values = method(freq=freq).values return field_values.reshape(values.shape) def _round_field(values, name, freq): """Indirectly access rounding functions by wrapping data as a Series or CFTimeIndex Parameters ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values name : {"ceil", "floor", "round"} Name of rounding function freq : str a freq string indicating the rounding resolution Returns ------- rounded timestamps : same type as values Array-like of datetime fields accessed for each element in values """ if is_duck_dask_array(values): from dask.array import map_blocks dtype = np.datetime64 if is_np_datetime_like(values.dtype) else np.dtype("O") return map_blocks( 
_round_through_series_or_index, values, name, freq=freq, dtype=dtype ) else: return _round_through_series_or_index(values, name, freq) def _strftime_through_cftimeindex(values, date_format: str): """Coerce an array of cftime-like values to a CFTimeIndex and access requested datetime component """ from xarray.coding.cftimeindex import CFTimeIndex values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values)) field_values = values_as_cftimeindex.strftime(date_format) return field_values.to_numpy().reshape(values.shape) def _strftime_through_series(values, date_format: str): """Coerce an array of datetime-like values to a pandas Series and apply string formatting """ values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False) strs = values_as_series.dt.strftime(date_format) return strs.to_numpy().reshape(values.shape) def _strftime(values, date_format): if is_np_datetime_like(values.dtype): access_method = _strftime_through_series else: access_method = _strftime_through_cftimeindex if is_duck_dask_array(values): from dask.array import map_blocks return map_blocks(access_method, values, date_format) else: return access_method(values, date_format) def _index_or_data(obj): if isinstance(obj.variable, IndexVariable): return obj.to_index() else: return obj.data class TimeAccessor(Generic[T_DataArray]): __slots__ = ("_obj",) def __init__(self, obj: T_DataArray) -> None: self._obj = obj def _date_field(self, name: str, dtype: DTypeLike) -> T_DataArray: if dtype is None: dtype = self._obj.dtype result = _get_date_field(_index_or_data(self._obj), name, dtype) newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name=name) def _tslib_round_accessor(self, name: str, freq: str) -> T_DataArray: result = _round_field(_index_or_data(self._obj), name, freq) newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name=name) def floor(self, freq: str) -> T_DataArray: """ Round timestamps downward to specified frequency resolution. Parameters ---------- freq : str a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- floor-ed timestamps : same type as values Array-like of datetime fields accessed for each element in values """ return self._tslib_round_accessor("floor", freq) def ceil(self, freq: str) -> T_DataArray: """ Round timestamps upward to specified frequency resolution. Parameters ---------- freq : str a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- ceil-ed timestamps : same type as values Array-like of datetime fields accessed for each element in values """ return self._tslib_round_accessor("ceil", freq) def round(self, freq: str) -> T_DataArray: """ Round timestamps to specified frequency resolution. Parameters ---------- freq : str a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- rounded timestamps : same type as values Array-like of datetime fields accessed for each element in values """ return self._tslib_round_accessor("round", freq) class DatetimeAccessor(TimeAccessor[T_DataArray]): """Access datetime fields for DataArrays with datetime-like dtypes. Fields can be accessed through the `.dt` attribute for applicable DataArrays. 
Examples --------- >>> dates = pd.date_range(start="2000/01/01", freq="D", periods=10) >>> ts = xr.DataArray(dates, dims=("time")) >>> ts Size: 80B array(['2000-01-01T00:00:00.000000000', '2000-01-02T00:00:00.000000000', '2000-01-03T00:00:00.000000000', '2000-01-04T00:00:00.000000000', '2000-01-05T00:00:00.000000000', '2000-01-06T00:00:00.000000000', '2000-01-07T00:00:00.000000000', '2000-01-08T00:00:00.000000000', '2000-01-09T00:00:00.000000000', '2000-01-10T00:00:00.000000000'], dtype='datetime64[ns]') Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.dayofyear Size: 80B array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 >>> ts.dt.quarter Size: 80B array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-01 2000-01-02 ... 2000-01-10 """ def strftime(self, date_format: str) -> T_DataArray: """ Return an array of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc `__ Parameters ---------- date_format : str date format string (e.g. "%Y-%m-%d") Returns ------- formatted strings : same type as values Array-like of strings formatted for each element in values Examples -------- >>> import datetime >>> rng = xr.Dataset({"time": datetime.datetime(2000, 1, 1)}) >>> rng["time"].dt.strftime("%B %d, %Y, %r") Size: 8B array('January 01, 2000, 12:00:00 AM', dtype=object) """ obj_type = type(self._obj) result = _strftime(self._obj.data, date_format) return obj_type( result, name="strftime", coords=self._obj.coords, dims=self._obj.dims ) def isocalendar(self) -> Dataset: """Dataset containing ISO year, week number, and weekday. Notes ----- The iso year and weekday differ from the nominal year and weekday. """ from xarray.core.dataset import Dataset if not is_np_datetime_like(self._obj.data.dtype): raise AttributeError("'CFTimeIndex' object has no attribute 'isocalendar'") values = _get_date_field(self._obj.data, "isocalendar", np.int64) obj_type = type(self._obj) data_vars = {} for i, name in enumerate(["year", "week", "weekday"]): data_vars[name] = obj_type( values[i], name=name, coords=self._obj.coords, dims=self._obj.dims ) return Dataset(data_vars) @property def year(self) -> T_DataArray: """The year of the datetime""" return self._date_field("year", np.int64) @property def month(self) -> T_DataArray: """The month as January=1, December=12""" return self._date_field("month", np.int64) @property def day(self) -> T_DataArray: """The days of the datetime""" return self._date_field("day", np.int64) @property def hour(self) -> T_DataArray: """The hours of the datetime""" return self._date_field("hour", np.int64) @property def minute(self) -> T_DataArray: """The minutes of the datetime""" return self._date_field("minute", np.int64) @property def second(self) -> T_DataArray: """The seconds of the datetime""" return self._date_field("second", np.int64) @property def microsecond(self) -> T_DataArray: """The microseconds of the datetime""" return self._date_field("microsecond", np.int64) @property def nanosecond(self) -> T_DataArray: """The nanoseconds of the datetime""" return self._date_field("nanosecond", np.int64) @property def weekofyear(self) -> DataArray: "The week ordinal of the year" warnings.warn( "dt.weekofyear and dt.week have been deprecated. 
Please use " "dt.isocalendar().week instead.", FutureWarning, stacklevel=2, ) weekofyear = self.isocalendar().week return weekofyear week = weekofyear @property def dayofweek(self) -> T_DataArray: """The day of the week with Monday=0, Sunday=6""" return self._date_field("dayofweek", np.int64) weekday = dayofweek @property def dayofyear(self) -> T_DataArray: """The ordinal day of the year""" return self._date_field("dayofyear", np.int64) @property def quarter(self) -> T_DataArray: """The quarter of the date""" return self._date_field("quarter", np.int64) @property def days_in_month(self) -> T_DataArray: """The number of days in the month""" return self._date_field("days_in_month", np.int64) daysinmonth = days_in_month @property def season(self) -> T_DataArray: """Season of the year""" return self._date_field("season", object) @property def time(self) -> T_DataArray: """Timestamps corresponding to datetimes""" return self._date_field("time", object) @property def date(self) -> T_DataArray: """Date corresponding to datetimes""" return self._date_field("date", object) @property def is_month_start(self) -> T_DataArray: """Indicate whether the date is the first day of the month""" return self._date_field("is_month_start", bool) @property def is_month_end(self) -> T_DataArray: """Indicate whether the date is the last day of the month""" return self._date_field("is_month_end", bool) @property def is_quarter_start(self) -> T_DataArray: """Indicate whether the date is the first day of a quarter""" return self._date_field("is_quarter_start", bool) @property def is_quarter_end(self) -> T_DataArray: """Indicate whether the date is the last day of a quarter""" return self._date_field("is_quarter_end", bool) @property def is_year_start(self) -> T_DataArray: """Indicate whether the date is the first day of a year""" return self._date_field("is_year_start", bool) @property def is_year_end(self) -> T_DataArray: """Indicate whether the date is the last day of the year""" return self._date_field("is_year_end", bool) @property def is_leap_year(self) -> T_DataArray: """Indicate if the date belongs to a leap year""" return self._date_field("is_leap_year", bool) @property def calendar(self) -> CFCalendar: """The name of the calendar of the dates. Only relevant for arrays of :py:class:`cftime.datetime` objects, returns "proleptic_gregorian" for arrays of :py:class:`numpy.datetime64` values. """ return infer_calendar_name(self._obj.data) @property def days_in_year(self) -> T_DataArray: """Each datetime as the year plus the fraction of the year elapsed.""" if self.calendar == "360_day": result = full_like(self.year, 360) else: result = self.is_leap_year.astype(int) + 365 newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name="days_in_year") @property def decimal_year(self) -> T_DataArray: """Convert the dates as a fractional year.""" result = _decimal_year(self._obj) newvar = Variable( dims=self._obj.dims, attrs=self._obj.attrs, encoding=self._obj.encoding, data=result, ) return self._obj._replace(newvar, name="decimal_year") class TimedeltaAccessor(TimeAccessor[T_DataArray]): """Access Timedelta fields for DataArrays with Timedelta-like dtypes. Fields can be accessed through the `.dt` attribute for applicable DataArrays. 
Examples -------- >>> dates = pd.timedelta_range(start="1 day", freq="6h", periods=20) >>> ts = xr.DataArray(dates, dims=("time")) >>> ts Size: 160B array([ 86400000000000, 108000000000000, 129600000000000, 151200000000000, 172800000000000, 194400000000000, 216000000000000, 237600000000000, 259200000000000, 280800000000000, 302400000000000, 324000000000000, 345600000000000, 367200000000000, 388800000000000, 410400000000000, 432000000000000, 453600000000000, 475200000000000, 496800000000000], dtype='timedelta64[ns]') Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt # doctest: +ELLIPSIS >>> ts.dt.days Size: 160B array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.microseconds Size: 160B array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.seconds Size: 160B array([ 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800, 0, 21600, 43200, 64800]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 >>> ts.dt.total_seconds() Size: 160B array([ 86400., 108000., 129600., 151200., 172800., 194400., 216000., 237600., 259200., 280800., 302400., 324000., 345600., 367200., 388800., 410400., 432000., 453600., 475200., 496800.]) Coordinates: * time (time) timedelta64[ns] 160B 1 days 00:00:00 ... 5 days 18:00:00 """ @property def days(self) -> T_DataArray: """Number of days for each element""" return self._date_field("days", np.int64) @property def seconds(self) -> T_DataArray: """Number of seconds (>= 0 and less than 1 day) for each element""" return self._date_field("seconds", np.int64) @property def microseconds(self) -> T_DataArray: """Number of microseconds (>= 0 and less than 1 second) for each element""" return self._date_field("microseconds", np.int64) @property def nanoseconds(self) -> T_DataArray: """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element""" return self._date_field("nanoseconds", np.int64) # Not defined as a property in order to match the Pandas API def total_seconds(self) -> T_DataArray: """Total duration of each element expressed in seconds.""" return self._date_field("total_seconds", np.float64) class CombinedDatetimelikeAccessor( DatetimeAccessor[T_DataArray], TimedeltaAccessor[T_DataArray] ): def __new__(cls, obj: T_DataArray) -> Self: # CombinedDatetimelikeAccessor isn't really instantiated. Instead # we need to choose which parent (datetime or timedelta) is # appropriate. Since we're checking the dtypes anyway, we'll just # do all the validation here. if not _contains_datetime_like_objects(obj.variable): # We use an AttributeError here so that `obj.dt` raises an error that # `getattr` expects; https://github.com/pydata/xarray/issues/8718. It's a # bit unusual in a `__new__`, but that's the only case where we use this # class. raise AttributeError( "'.dt' accessor only available for " "DataArray with datetime64 timedelta64 dtype or " "for arrays containing cftime datetime " "objects." 
) if is_np_timedelta_like(obj.dtype): return TimedeltaAccessor(obj) # type: ignore[return-value] else: return DatetimeAccessor(obj) # type: ignore[return-value] ������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/core/accessor_str.py��������������������������������������������������������0000664�0000000�0000000�00000302521�15056206164�0020651�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# The StringAccessor class defined below is an adaptation of the # pandas string methods source code (see pd.core.strings) # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
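# A minimal usage sketch of the accessor defined below, which xarray exposes as
# ``DataArray.str``. It assumes only methods documented in this module
# (``len``, ``upper``, ``cat``); they apply elementwise and broadcast against
# array-like arguments:
#
#     import xarray as xr
#
#     da = xr.DataArray(["abc", "de"], dims="x")
#     da.str.len()        # element lengths -> [3, 2]
#     da.str.upper()      # -> ['ABC', 'DE']
#     suffix = xr.DataArray(["_1", "_2"], dims="y")
#     da.str.cat(suffix)  # broadcasts x against y -> 2x2 array of joined strings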
from __future__ import annotations import codecs import re import textwrap from collections.abc import Callable, Hashable, Mapping from functools import reduce from operator import or_ as set_union from re import Pattern from typing import TYPE_CHECKING, Any, Generic from unicodedata import normalize import numpy as np from xarray.core import duck_array_ops from xarray.core.types import T_DataArray if TYPE_CHECKING: from numpy.typing import DTypeLike from xarray.core.dataarray import DataArray _cpython_optimized_encoders = ( "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii", ) _cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32") def _contains_obj_type(*, pat: Any, checker: Any) -> bool: """Determine if the object fits some rule or is array of objects that do so.""" if isinstance(checker, type): targtype = checker checker = lambda x: isinstance(x, targtype) if checker(pat): return True # If it is not an object array it can't contain compiled re if getattr(pat, "dtype", "no") != np.object_: return False return _apply_str_ufunc(func=checker, obj=pat).all() def _contains_str_like(pat: Any) -> bool: """Determine if the object is a str-like or array of str-like.""" if isinstance(pat, str | bytes): return True if not hasattr(pat, "dtype"): return False return pat.dtype.kind in ["U", "S"] def _contains_compiled_re(pat: Any) -> bool: """Determine if the object is a compiled re or array of compiled re.""" return _contains_obj_type(pat=pat, checker=re.Pattern) def _contains_callable(pat: Any) -> bool: """Determine if the object is a callable or array of callables.""" return _contains_obj_type(pat=pat, checker=callable) def _apply_str_ufunc( *, func: Callable, obj: Any, dtype: DTypeLike = None, output_core_dims: list | tuple = ((),), output_sizes: Mapping[Any, int] | None = None, func_args: tuple = (), func_kwargs: Mapping = {}, ) -> Any: # TODO handling of na values ? if dtype is None: dtype = obj.dtype dask_gufunc_kwargs = dict() if output_sizes is not None: dask_gufunc_kwargs["output_sizes"] = output_sizes from xarray.computation.apply_ufunc import apply_ufunc return apply_ufunc( func, obj, *func_args, vectorize=True, dask="parallelized", output_dtypes=[dtype], output_core_dims=output_core_dims, dask_gufunc_kwargs=dask_gufunc_kwargs, **func_kwargs, ) class StringAccessor(Generic[T_DataArray]): r"""Vectorized string functions for string-like arrays. Similar to pandas, fields can be accessed through the `.str` attribute for applicable DataArrays. >>> da = xr.DataArray(["some", "text", "in", "an", "array"]) >>> da.str.len() Size: 40B array([4, 4, 2, 2, 5]) Dimensions without coordinates: dim_0 It also implements ``+``, ``*``, and ``%``, which operate as elementwise versions of the corresponding ``str`` methods. These will automatically broadcast for array-like inputs. 
>>> da1 = xr.DataArray(["first", "second", "third"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1.str + da2 Size: 252B array([['first1', 'first2', 'first3'], ['second1', 'second2', 'second3'], ['third1', 'third2', 'third3']], dtype='>> da1 = xr.DataArray(["a", "b", "c", "d"], dims=["X"]) >>> reps = xr.DataArray([3, 4], dims=["Y"]) >>> da1.str * reps Size: 128B array([['aaa', 'aaaa'], ['bbb', 'bbbb'], ['ccc', 'cccc'], ['ddd', 'dddd']], dtype='>> da1 = xr.DataArray(["%s_%s", "%s-%s", "%s|%s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2], dims=["Y"]) >>> da3 = xr.DataArray([0.1, 0.2], dims=["Z"]) >>> da1.str % (da2, da3) Size: 240B array([[['1_0.1', '1_0.2'], ['2_0.1', '2_0.2']], [['1-0.1', '1-0.2'], ['2-0.1', '2-0.2']], [['1|0.1', '1|0.2'], ['2|0.1', '2|0.2']]], dtype='>> da1 = xr.DataArray(["%(a)s"], dims=["X"]) >>> da2 = xr.DataArray([1, 2, 3], dims=["Y"]) >>> da1 % {"a": da2} Size: 8B array([' Size: 24B\narray([1, 2, 3])\nDimensions without coordinates: Y'], dtype=object) Dimensions without coordinates: X """ __slots__ = ("_obj",) def __init__(self, obj: T_DataArray) -> None: self._obj = obj def _stringify(self, invar: Any) -> str | bytes | Any: """ Convert a string-like to the correct string/bytes type. This is mostly here to tell mypy a pattern is a str/bytes not a re.Pattern. """ if hasattr(invar, "astype"): return invar.astype(self._obj.dtype.kind) else: return self._obj.dtype.type(invar) def _apply( self, *, func: Callable, dtype: DTypeLike = None, output_core_dims: list | tuple = ((),), output_sizes: Mapping[Any, int] | None = None, func_args: tuple = (), func_kwargs: Mapping = {}, ) -> T_DataArray: return _apply_str_ufunc( obj=self._obj, func=func, dtype=dtype, output_core_dims=output_core_dims, output_sizes=output_sizes, func_args=func_args, func_kwargs=func_kwargs, ) def _re_compile( self, *, pat: str | bytes | Pattern | Any, flags: int = 0, case: bool | None = None, ) -> Pattern | Any: is_compiled_re = isinstance(pat, re.Pattern) if is_compiled_re and flags != 0: raise ValueError("Flags cannot be set when pat is a compiled regex.") if is_compiled_re and case is not None: raise ValueError("Case cannot be set when pat is a compiled regex.") if is_compiled_re: # no-op, needed to tell mypy this isn't a string return re.compile(pat) if case is None: case = True # The case is handled by the re flags internally. # Add it to the flags if necessary. if not case: flags |= re.IGNORECASE if getattr(pat, "dtype", None) != np.object_: pat = self._stringify(pat) def func(x): return re.compile(x, flags=flags) if isinstance(pat, np.ndarray): # apply_ufunc doesn't work for numpy arrays with output object dtypes func_ = np.vectorize(func) return func_(pat) else: return _apply_str_ufunc(func=func, obj=pat, dtype=np.object_) def len(self) -> T_DataArray: """ Compute the length of each string in the array. 
Returns ------- lengths array : array of int """ return self._apply(func=len, dtype=int) def __getitem__( self, key: int | slice, ) -> T_DataArray: if isinstance(key, slice): return self.slice(start=key.start, stop=key.stop, step=key.step) else: return self.get(key) def __add__(self, other: Any) -> T_DataArray: return self.cat(other, sep="") def __mul__( self, num: int | Any, ) -> T_DataArray: return self.repeat(num) def __mod__( self, other: Any, ) -> T_DataArray: if isinstance(other, dict): other = {key: self._stringify(val) for key, val in other.items()} return self._apply(func=lambda x: x % other) elif isinstance(other, tuple): other = tuple(self._stringify(x) for x in other) return self._apply(func=lambda x, *y: x % y, func_args=other) else: return self._apply(func=lambda x, y: x % y, func_args=(other,)) def get( self, i: int | Any, default: str | bytes = "", ) -> T_DataArray: """ Extract character number `i` from each string in the array. If `i` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- i : int or array-like of int Position of element to extract. If array-like, it is broadcast. default : str or bytes, default: "" Value for out-of-range index. Returns ------- items : array of object """ def f(x, iind): islice = slice(-1, None) if iind == -1 else slice(iind, iind + 1) item = x[islice] return item or default return self._apply(func=f, func_args=(i,)) def slice( self, start: int | Any | None = None, stop: int | Any | None = None, step: int | Any | None = None, ) -> T_DataArray: """ Slice substrings from each string in the array. If `start`, `stop`, or 'step` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- start : int or array-like of int, optional Start position for slice operation. If array-like, it is broadcast. stop : int or array-like of int, optional Stop position for slice operation. If array-like, it is broadcast. step : int or array-like of int, optional Step size for slice operation. If array-like, it is broadcast. Returns ------- sliced strings : same type as values """ f = lambda x, istart, istop, istep: x[slice(istart, istop, istep)] return self._apply(func=f, func_args=(start, stop, step)) def slice_replace( self, start: int | Any | None = None, stop: int | Any | None = None, repl: str | bytes | Any = "", ) -> T_DataArray: """ Replace a positional slice of a string with another value. If `start`, `stop`, or 'repl` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- start : int or array-like of int, optional Left index position to use for the slice. If not specified (None), the slice is unbounded on the left, i.e. slice from the start of the string. If array-like, it is broadcast. stop : int or array-like of int, optional Right index position to use for the slice. If not specified (None), the slice is unbounded on the right, i.e. slice until the end of the string. If array-like, it is broadcast. repl : str or array-like of str, default: "" String for replacement. If not specified, the sliced region is replaced with an empty string. If array-like, it is broadcast. 
Returns ------- replaced : same type as values """ repl = self._stringify(repl) def func(x, istart, istop, irepl): if len(x[istart:istop]) == 0: local_stop = istart else: local_stop = istop y = self._stringify("") if istart is not None: y += x[:istart] y += irepl if istop is not None: y += x[local_stop:] return y return self._apply(func=func, func_args=(start, stop, repl)) def cat(self, *others, sep: str | bytes | Any = "") -> T_DataArray: """ Concatenate strings elementwise in the DataArray with other strings. The other strings can either be string scalars or other array-like. Dimensions are automatically broadcast together. An optional separator `sep` can also be specified. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- *others : str or array-like of str Strings or array-like of strings to concatenate elementwise with the current DataArray. sep : str or array-like of str, default: "". Separator to use between strings. It is broadcast in the same way as the other input strings. If array-like, its dimensions will be placed at the end of the output array dimensions. Returns ------- concatenated : same type as values Examples -------- Create a string array >>> myarray = xr.DataArray( ... ["11111", "4"], ... dims=["X"], ... ) Create some arrays to concatenate with it >>> values_1 = xr.DataArray( ... ["a", "bb", "cccc"], ... dims=["Y"], ... ) >>> values_2 = np.array(3.4) >>> values_3 = "" >>> values_4 = np.array("test", dtype=np.str_) Determine the separator to use >>> seps = xr.DataArray( ... [" ", ", "], ... dims=["ZZ"], ... ) Concatenate the arrays using the separator >>> myarray.str.cat(values_1, values_2, values_3, values_4, sep=seps) Size: 1kB array([[['11111 a 3.4 test', '11111, a, 3.4, , test'], ['11111 bb 3.4 test', '11111, bb, 3.4, , test'], ['11111 cccc 3.4 test', '11111, cccc, 3.4, , test']], [['4 a 3.4 test', '4, a, 3.4, , test'], ['4 bb 3.4 test', '4, bb, 3.4, , test'], ['4 cccc 3.4 test', '4, cccc, 3.4, , test']]], dtype=' T_DataArray: """ Concatenate strings in a DataArray along a particular dimension. An optional separator `sep` can also be specified. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable, optional Dimension along which the strings should be concatenated. Only one dimension is allowed at a time. Optional for 0D or 1D DataArrays, required for multidimensional DataArrays. sep : str or array-like, default: "". Separator to use between strings. It is broadcast in the same way as the other input strings. If array-like, its dimensions will be placed at the end of the output array dimensions. Returns ------- joined : same type as values Examples -------- Create an array >>> values = xr.DataArray( ... [["a", "bab", "abc"], ["abcd", "", "abcdef"]], ... dims=["X", "Y"], ... ) Determine the separator >>> seps = xr.DataArray( ... ["-", "_"], ... dims=["ZZ"], ... 
) Join the strings along a given dimension >>> values.str.join(dim="Y", sep=seps) Size: 192B array([['a-bab-abc', 'a_bab_abc'], ['abcd--abcdef', 'abcd__abcdef']], dtype=' 1 and dim is None: raise ValueError("Dimension must be specified for multidimensional arrays.") if self._obj.ndim > 1: # Move the target dimension to the start and split along it dimshifted = list(self._obj.transpose(dim, ...)) elif self._obj.ndim == 1: dimshifted = list(self._obj) else: dimshifted = [self._obj] start, *others = dimshifted # concatenate the resulting arrays return start.str.cat(*others, sep=sep) def format( self, *args: Any, **kwargs: Any, ) -> T_DataArray: """ Perform python string formatting on each element of the DataArray. This is equivalent to calling `str.format` on every element of the DataArray. The replacement values can either be a string-like scalar or array-like of string-like values. If array-like, the values will be broadcast and applied elementwiseto the input DataArray. .. note:: Array-like values provided as `*args` will have their dimensions added even if those arguments are not used in any string formatting. .. warning:: Array-like arguments are only applied elementwise for `*args`. For `**kwargs`, values are used as-is. Parameters ---------- *args : str or bytes or array-like of str or bytes Values for positional formatting. If array-like, the values are broadcast and applied elementwise. The dimensions will be placed at the end of the output array dimensions in the order they are provided. **kwargs : str or bytes or array-like of str or bytes Values for keyword-based formatting. These are **not** broadcast or applied elementwise. Returns ------- formatted : same type as values Examples -------- Create an array to format. >>> values = xr.DataArray( ... ["{} is {adj0}", "{} and {} are {adj1}"], ... dims=["X"], ... ) Set the values to fill. >>> noun0 = xr.DataArray( ... ["spam", "egg"], ... dims=["Y"], ... ) >>> noun1 = xr.DataArray( ... ["lancelot", "arthur"], ... dims=["ZZ"], ... ) >>> adj0 = "unexpected" >>> adj1 = "like a duck" Insert the values into the array >>> values.str.format(noun0, noun1, adj0=adj0, adj1=adj1) Size: 1kB array([[['spam is unexpected', 'spam is unexpected'], ['egg is unexpected', 'egg is unexpected']], [['spam and lancelot are like a duck', 'spam and arthur are like a duck'], ['egg and lancelot are like a duck', 'egg and arthur are like a duck']]], dtype=' T_DataArray: """ Convert strings in the array to be capitalized. Returns ------- capitalized : same type as values Examples -------- >>> da = xr.DataArray( ... ["temperature", "PRESSURE", "PreCipiTation", "daily rainfall"], dims="x" ... ) >>> da Size: 224B array(['temperature', 'PRESSURE', 'PreCipiTation', 'daily rainfall'], dtype='>> capitalized = da.str.capitalize() >>> capitalized Size: 224B array(['Temperature', 'Pressure', 'Precipitation', 'Daily rainfall'], dtype=' T_DataArray: """ Convert strings in the array to lowercase. Returns ------- lowered : same type as values Examples -------- >>> da = xr.DataArray(["Temperature", "PRESSURE"], dims="x") >>> da Size: 88B array(['Temperature', 'PRESSURE'], dtype='>> lowered = da.str.lower() >>> lowered Size: 88B array(['temperature', 'pressure'], dtype=' T_DataArray: """ Convert strings in the array to be swapcased. 
Returns ------- swapcased : same type as values Examples -------- >>> import xarray as xr >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da Size: 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='>> swapcased = da.str.swapcase() >>> swapcased Size: 132B array(['TEMPERATURE', 'pressure', 'hUmIdItY'], dtype=' T_DataArray: """ Convert strings in the array to titlecase. Returns ------- titled : same type as values Examples -------- >>> da = xr.DataArray(["temperature", "PRESSURE", "HuMiDiTy"], dims="x") >>> da Size: 132B array(['temperature', 'PRESSURE', 'HuMiDiTy'], dtype='>> titled = da.str.title() >>> titled Size: 132B array(['Temperature', 'Pressure', 'Humidity'], dtype=' T_DataArray: """ Convert strings in the array to uppercase. Returns ------- uppered : same type as values Examples -------- >>> da = xr.DataArray(["temperature", "HuMiDiTy"], dims="x") >>> da Size: 88B array(['temperature', 'HuMiDiTy'], dtype='>> uppered = da.str.upper() >>> uppered Size: 88B array(['TEMPERATURE', 'HUMIDITY'], dtype=' T_DataArray: """ Convert strings in the array to be casefolded. Casefolding is similar to converting to lowercase, but removes all case distinctions. This is important in some languages that have more complicated cases and case conversions. For example, the 'ß' character in German is case-folded to 'ss', whereas it is lowercased to 'ß'. Returns ------- casefolded : same type as values Examples -------- >>> da = xr.DataArray(["TEMPERATURE", "HuMiDiTy"], dims="x") >>> da Size: 88B array(['TEMPERATURE', 'HuMiDiTy'], dtype='>> casefolded = da.str.casefold() >>> casefolded Size: 88B array(['temperature', 'humidity'], dtype='>> da = xr.DataArray(["ß", "Δ°"], dims="x") >>> da Size: 8B array(['ß', 'Δ°'], dtype='>> casefolded = da.str.casefold() >>> casefolded Size: 16B array(['ss', 'iΜ‡'], dtype=' T_DataArray: """ Return the Unicode normal form for the strings in the datarray. For more information on the forms, see the documentation for :func:`unicodedata.normalize`. Parameters ---------- form : {"NFC", "NFKC", "NFD", "NFKD"} Unicode form. Returns ------- normalized : same type as values """ return self._apply(func=lambda x: normalize(form, x)) # type: ignore[arg-type] def isalnum(self) -> T_DataArray: """ Check whether all characters in each string are alphanumeric. Returns ------- isalnum : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["H2O", "NaCl-"], dims="x") >>> da Size: 40B array(['H2O', 'NaCl-'], dtype='>> isalnum = da.str.isalnum() >>> isalnum Size: 2B array([ True, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isalnum(), dtype=bool) def isalpha(self) -> T_DataArray: """ Check whether all characters in each string are alphabetic. Returns ------- isalpha : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["Mn", "H2O", "NaCl-"], dims="x") >>> da Size: 60B array(['Mn', 'H2O', 'NaCl-'], dtype='>> isalpha = da.str.isalpha() >>> isalpha Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isalpha(), dtype=bool) def isdecimal(self) -> T_DataArray: """ Check whether all characters in each string are decimal. Returns ------- isdecimal : array of bool Array of boolean values with the same shape as the original array. 
Examples -------- >>> da = xr.DataArray(["2.3", "123", "0"], dims="x") >>> da Size: 36B array(['2.3', '123', '0'], dtype='>> isdecimal = da.str.isdecimal() >>> isdecimal Size: 3B array([False, True, True]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isdecimal(), dtype=bool) def isdigit(self) -> T_DataArray: """ Check whether all characters in each string are digits. Returns ------- isdigit : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["123", "1.2", "0", "CO2", "NaCl"], dims="x") >>> da Size: 80B array(['123', '1.2', '0', 'CO2', 'NaCl'], dtype='>> isdigit = da.str.isdigit() >>> isdigit Size: 5B array([ True, False, True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isdigit(), dtype=bool) def islower(self) -> T_DataArray: """ Check whether all characters in each string are lowercase. Returns ------- islower : array of bool Array of boolean values with the same shape as the original array indicating whether all characters of each element of the string array are lowercase (True) or not (False). Examples -------- >>> da = xr.DataArray(["temperature", "HUMIDITY", "pREciPiTaTioN"], dims="x") >>> da Size: 156B array(['temperature', 'HUMIDITY', 'pREciPiTaTioN'], dtype='>> islower = da.str.islower() >>> islower Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.islower(), dtype=bool) def isnumeric(self) -> T_DataArray: """ Check whether all characters in each string are numeric. Returns ------- isnumeric : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["123", "2.3", "H2O", "NaCl-", "Mn"], dims="x") >>> da Size: 100B array(['123', '2.3', 'H2O', 'NaCl-', 'Mn'], dtype='>> isnumeric = da.str.isnumeric() >>> isnumeric Size: 5B array([ True, False, False, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isnumeric(), dtype=bool) def isspace(self) -> T_DataArray: """ Check whether all characters in each string are spaces. Returns ------- isspace : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray(["", " ", "\\t", "\\n"], dims="x") >>> da Size: 16B array(['', ' ', '\\t', '\\n'], dtype='>> isspace = da.str.isspace() >>> isspace Size: 4B array([False, True, True, True]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isspace(), dtype=bool) def istitle(self) -> T_DataArray: """ Check whether all characters in each string are titlecase. Returns ------- istitle : array of bool Array of boolean values with the same shape as the original array. Examples -------- >>> da = xr.DataArray( ... [ ... "The Evolution Of Species", ... "The Theory of relativity", ... "the quantum mechanics of atoms", ... ], ... dims="title", ... ) >>> da Size: 360B array(['The Evolution Of Species', 'The Theory of relativity', 'the quantum mechanics of atoms'], dtype='>> istitle = da.str.istitle() >>> istitle Size: 3B array([ True, False, False]) Dimensions without coordinates: title """ return self._apply(func=lambda x: x.istitle(), dtype=bool) def isupper(self) -> T_DataArray: """ Check whether all characters in each string are uppercase. Returns ------- isupper : array of bool Array of boolean values with the same shape as the original array. 
Examples -------- >>> da = xr.DataArray(["TEMPERATURE", "humidity", "PreCIpiTAtioN"], dims="x") >>> da Size: 156B array(['TEMPERATURE', 'humidity', 'PreCIpiTAtioN'], dtype='>> isupper = da.str.isupper() >>> isupper Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ return self._apply(func=lambda x: x.isupper(), dtype=bool) def count( self, pat: str | bytes | Pattern | Any, flags: int = 0, case: bool | None = None ) -> T_DataArray: """ Count occurrences of pattern in each string of the array. This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the :class:`~xarray.DataArray`. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. Returns ------- counts : array of int Examples -------- >>> da = xr.DataArray(["jjklmn", "opjjqrs", "t-JJ99vwx"], dims="x") >>> da Size: 108B array(['jjklmn', 'opjjqrs', 't-JJ99vwx'], dtype='>> da.str.count("jj") Size: 24B array([1, 1, 0]) Dimensions without coordinates: x Enable case-insensitive matching by setting case to false: >>> counts = da.str.count("jj", case=False) >>> counts Size: 24B array([1, 1, 1]) Dimensions without coordinates: x Using regex: >>> pat = "JJ[0-9]{2}[a-z]{3}" >>> counts = da.str.count(pat) >>> counts Size: 24B array([0, 0, 1]) Dimensions without coordinates: x Using an array of strings (the pattern will be broadcast against the array): >>> pat = xr.DataArray(["jj", "JJ"], dims="y") >>> counts = da.str.count(pat) >>> counts Size: 48B array([[1, 0], [1, 0], [0, 1]]) Dimensions without coordinates: x, y """ pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat: len(ipat.findall(x)) return self._apply(func=func, func_args=(pat,), dtype=int) def startswith(self, pat: str | bytes | Any) -> T_DataArray: """ Test if the start of each string in the array matches a pattern. The pattern `pat` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. If array-like, it is broadcast. Returns ------- startswith : array of bool An array of booleans indicating whether the given pattern matches the start of each string element. Examples -------- >>> da = xr.DataArray(["$100", "Β£23", "100"], dims="x") >>> da Size: 48B array(['$100', 'Β£23', '100'], dtype='>> startswith = da.str.startswith("$") >>> startswith Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ pat = self._stringify(pat) func = lambda x, y: x.startswith(y) return self._apply(func=func, func_args=(pat,), dtype=bool) def endswith(self, pat: str | bytes | Any) -> T_DataArray: """ Test if the end of each string in the array matches a pattern. The pattern `pat` can either be a ``str`` or array-like of ``str``. 
If array-like, it will be broadcast and applied elementwise. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. If array-like, it is broadcast. Returns ------- endswith : array of bool A Series of booleans indicating whether the given pattern matches the end of each string element. Examples -------- >>> da = xr.DataArray(["10C", "10c", "100F"], dims="x") >>> da Size: 48B array(['10C', '10c', '100F'], dtype='>> endswith = da.str.endswith("C") >>> endswith Size: 3B array([ True, False, False]) Dimensions without coordinates: x """ pat = self._stringify(pat) func = lambda x, y: x.endswith(y) return self._apply(func=func, func_args=(pat,), dtype=bool) def pad( self, width: int | Any, side: str = "left", fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad strings in the array up to width. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with character defined in ``fillchar``. If array-like, it is broadcast. side : {"left", "right", "both"}, default: "left" Side from which to fill resulting string. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. Returns ------- filled : same type as values Array with a minimum number of char in each element. Examples -------- Pad strings in the array with a single string on the left side. Define the string in the array. >>> da = xr.DataArray(["PAR184", "TKO65", "NBO9139", "NZ39"], dims="x") >>> da Size: 112B array(['PAR184', 'TKO65', 'NBO9139', 'NZ39'], dtype='>> filled = da.str.pad(8, side="left", fillchar="0") >>> filled Size: 128B array(['00PAR184', '000TKO65', '0NBO9139', '0000NZ39'], dtype='>> filled = da.str.pad(8, side="right", fillchar="0") >>> filled Size: 128B array(['PAR18400', 'TKO65000', 'NBO91390', 'NZ390000'], dtype='>> filled = da.str.pad(8, side="both", fillchar="0") >>> filled Size: 128B array(['0PAR1840', '0TKO6500', 'NBO91390', '00NZ3900'], dtype='>> width = xr.DataArray([8, 10], dims="y") >>> filled = da.str.pad(width, side="left", fillchar="0") >>> filled Size: 320B array([['00PAR184', '0000PAR184'], ['000TKO65', '00000TKO65'], ['0NBO9139', '000NBO9139'], ['0000NZ39', '000000NZ39']], dtype='>> fillchar = xr.DataArray(["0", "-"], dims="y") >>> filled = da.str.pad(8, side="left", fillchar=fillchar) >>> filled Size: 256B array([['00PAR184', '--PAR184'], ['000TKO65', '---TKO65'], ['0NBO9139', '-NBO9139'], ['0000NZ39', '----NZ39']], dtype=' T_DataArray: """ Wrapper function to handle padding operations """ fillchar = self._stringify(fillchar) def overfunc(x, iwidth, ifillchar): if len(ifillchar) != 1: raise TypeError("fillchar must be a character, not str") return func(x, int(iwidth), ifillchar) return self._apply(func=overfunc, func_args=(width, fillchar)) def center( self, width: int | Any, fillchar: str | bytes | Any = " " ) -> T_DataArray: """ Pad left and right side of each string in the array. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with ``fillchar``. If array-like, it is broadcast. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. 
Returns ------- filled : same type as values """ func = self._obj.dtype.type.center return self._padder(func=func, width=width, fillchar=fillchar) def ljust( self, width: int | Any, fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad right side of each string in the array. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with ``fillchar``. If array-like, it is broadcast. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. Returns ------- filled : same type as values """ func = self._obj.dtype.type.ljust return self._padder(func=func, width=width, fillchar=fillchar) def rjust( self, width: int | Any, fillchar: str | bytes | Any = " ", ) -> T_DataArray: """ Pad left side of each string in the array. If `width` or 'fillchar` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum width of resulting string; additional characters will be filled with ``fillchar``. If array-like, it is broadcast. fillchar : str or array-like of str, default: " " Additional character for filling, default is a space. If array-like, it is broadcast. Returns ------- filled : same type as values """ func = self._obj.dtype.type.rjust return self._padder(func=func, width=width, fillchar=fillchar) def zfill(self, width: int | Any) -> T_DataArray: """ Pad each string in the array by prepending '0' characters. Strings in the array are padded with '0' characters on the left of the string to reach a total string length `width`. Strings in the array with length greater or equal to `width` are unchanged. If `width` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Minimum length of resulting string; strings with length less than `width` be prepended with '0' characters. If array-like, it is broadcast. Returns ------- filled : same type as values """ return self.rjust(width, fillchar="0") def contains( self, pat: str | bytes | Pattern | Any, case: bool | None = None, flags: int = 0, regex: bool = True, ) -> T_DataArray: """ Test if pattern or regex is contained within each string of the array. Return boolean array based on whether a given pattern or regex is contained within a string of the array. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern Character sequence, a string containing a regular expression, or a compiled regular expression object. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. regex : bool, default: True If True, assumes the pat is a regular expression. If False, treats the pat as a literal string. Cannot be set to `False` if `pat` is a compiled regex. 
Returns ------- contains : array of bool An array of boolean values indicating whether the given pattern is contained within the string of each element of the array. """ is_compiled_re = _contains_compiled_re(pat) if is_compiled_re and not regex: raise ValueError( "Must use regular expression matching for regular expression object." ) if regex: if not is_compiled_re: pat = self._re_compile(pat=pat, flags=flags, case=case) def func(x, ipat): if ipat.groups > 0: # pragma: no cover raise ValueError("This pattern has match groups.") return bool(ipat.search(x)) else: pat = self._stringify(pat) if case or case is None: func = lambda x, ipat: ipat in x elif self._obj.dtype.char == "U": uppered = self.casefold() uppat = StringAccessor(pat).casefold() # type: ignore[type-var] # hack? return uppered.str.contains(uppat, regex=False) # type: ignore[return-value] else: uppered = self.upper() uppat = StringAccessor(pat).upper() # type: ignore[type-var] # hack? return uppered.str.contains(uppat, regex=False) # type: ignore[return-value] return self._apply(func=func, func_args=(pat,), dtype=bool) def match( self, pat: str | bytes | Pattern | Any, case: bool | None = None, flags: int = 0, ) -> T_DataArray: """ Determine if each string in the array matches a regular expression. The pattern `pat` can either be a single ``str`` or `re.Pattern` or array-like of ``str`` or `re.Pattern`. If array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- matched : array of bool """ pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat: bool(ipat.match(x)) return self._apply(func=func, func_args=(pat,), dtype=bool) def strip( self, to_strip: str | bytes | Any = None, side: str = "both" ) -> T_DataArray: """ Remove leading and trailing characters. Strip whitespaces (including newlines) or a set of specified characters from each string in the array from left and/or right sides. `to_strip` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- to_strip : str or array-like of str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. If array-like, it is broadcast. side : {"left", "right", "both"}, default: "both" Side from which to strip. Returns ------- stripped : same type as values """ if to_strip is not None: to_strip = self._stringify(to_strip) if side == "both": func = lambda x, y: x.strip(y) elif side == "left": func = lambda x, y: x.lstrip(y) elif side == "right": func = lambda x, y: x.rstrip(y) else: # pragma: no cover raise ValueError("Invalid side") return self._apply(func=func, func_args=(to_strip,)) def lstrip(self, to_strip: str | bytes | Any = None) -> T_DataArray: """ Remove leading characters. 
Strip whitespaces (including newlines) or a set of specified characters from each string in the array from the left side. `to_strip` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- to_strip : str or array-like of str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. If array-like, it is broadcast. Returns ------- stripped : same type as values """ return self.strip(to_strip, side="left") def rstrip(self, to_strip: str | bytes | Any = None) -> T_DataArray: """ Remove trailing characters. Strip whitespaces (including newlines) or a set of specified characters from each string in the array from the right side. `to_strip` can either be a ``str`` or array-like of ``str``. If array-like, it will be broadcast and applied elementwise. Parameters ---------- to_strip : str or array-like of str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. If array-like, it is broadcast. Returns ------- stripped : same type as values """ return self.strip(to_strip, side="right") def wrap(self, width: int | Any, **kwargs) -> T_DataArray: """ Wrap long strings in the array in paragraphs with length less than `width`. This method has the same keyword parameters and defaults as :class:`textwrap.TextWrapper`. If `width` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- width : int or array-like of int Maximum line-width. If array-like, it is broadcast. **kwargs keyword arguments passed into :class:`textwrap.TextWrapper`. Returns ------- wrapped : same type as values """ ifunc = lambda x: textwrap.TextWrapper(width=x, **kwargs) tw = StringAccessor(width)._apply(func=ifunc, dtype=np.object_) # type: ignore[type-var] # hack? func = lambda x, itw: "\n".join(itw.wrap(x)) return self._apply(func=func, func_args=(tw,)) # Mapping is only covariant in its values, maybe use a custom CovariantMapping? def translate(self, table: Mapping[Any, str | bytes | int | None]) -> T_DataArray: """ Map characters of each string through the given mapping table. Parameters ---------- table : dict-like from and to str or bytes or int A a mapping of Unicode ordinals to Unicode ordinals, strings, int or None. Unmapped characters are left untouched. Characters mapped to None are deleted. :meth:`str.maketrans` is a helper function for making translation tables. Returns ------- translated : same type as values """ func = lambda x: x.translate(table) return self._apply(func=func) def repeat( self, repeats: int | Any, ) -> T_DataArray: """ Repeat each string in the array. If `repeats` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- repeats : int or array-like of int Number of repetitions. If array-like, it is broadcast. Returns ------- repeated : same type as values Array of repeated string objects. """ func = lambda x, y: x * y return self._apply(func=func, func_args=(repeats,)) def find( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, side: str = "left", ) -> T_DataArray: """ Return lowest or highest indexes in each strings in the array where the substring is fully contained between [start:end]. Return -1 on failure. 
If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. side : {"left", "right"}, default: "left" Starting side for search. Returns ------- found : array of int """ sub = self._stringify(sub) if side == "left": method = "find" elif side == "right": method = "rfind" else: # pragma: no cover raise ValueError("Invalid side") func = lambda x, isub, istart, iend: getattr(x, method)(isub, istart, iend) return self._apply(func=func, func_args=(sub, start, end), dtype=int) def rfind( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, ) -> T_DataArray: """ Return highest indexes in each strings in the array where the substring is fully contained between [start:end]. Return -1 on failure. If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. Returns ------- found : array of int """ return self.find(sub, start=start, end=end, side="right") def index( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, side: str = "left", ) -> T_DataArray: """ Return lowest or highest indexes in each strings where the substring is fully contained between [start:end]. This is the same as ``str.find`` except instead of returning -1, it raises a ValueError when the substring is not found. If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. side : {"left", "right"}, default: "left" Starting side for search. Returns ------- found : array of int Raises ------ ValueError substring is not found """ sub = self._stringify(sub) if side == "left": method = "index" elif side == "right": method = "rindex" else: # pragma: no cover raise ValueError("Invalid side") func = lambda x, isub, istart, iend: getattr(x, method)(isub, istart, iend) return self._apply(func=func, func_args=(sub, start, end), dtype=int) def rindex( self, sub: str | bytes | Any, start: int | Any = 0, end: int | Any = None, ) -> T_DataArray: """ Return highest indexes in each strings where the substring is fully contained between [start:end]. This is the same as ``str.rfind`` except instead of returning -1, it raises a ValueError when the substring is not found. If `start`, `end`, or 'sub` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- sub : str or array-like of str Substring being searched. If array-like, it is broadcast. start : int or array-like of int Left edge index. If array-like, it is broadcast. end : int or array-like of int Right edge index. If array-like, it is broadcast. 
Returns ------- found : array of int Raises ------ ValueError substring is not found """ return self.index(sub, start=start, end=end, side="right") def replace( self, pat: str | bytes | Pattern | Any, repl: str | bytes | Callable | Any, n: int | Any = -1, case: bool | None = None, flags: int = 0, regex: bool = True, ) -> T_DataArray: """ Replace occurrences of pattern/regex in the array with some string. If `pat`, `repl`, or 'n` is array-like, they are broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern String can be a character sequence or regular expression. If array-like, it is broadcast. repl : str or callable or array-like of str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. If array-like, it is broadcast. n : int or array of int, default: -1 Number of replacements to make from start. Use ``-1`` to replace all. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. regex : bool, default: True If True, assumes the passed-in pattern is a regular expression. If False, treats the pattern as a literal string. Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. Returns ------- replaced : same type as values A copy of the object with all matching occurrences of `pat` replaced by `repl`. """ if _contains_str_like(repl): repl = self._stringify(repl) elif not _contains_callable(repl): # pragma: no cover raise TypeError("repl must be a string or callable") is_compiled_re = _contains_compiled_re(pat) if not regex and is_compiled_re: raise ValueError( "Cannot use a compiled regex as replacement pattern with regex=False" ) if not regex and callable(repl): raise ValueError("Cannot use a callable replacement when regex=False") if regex: pat = self._re_compile(pat=pat, flags=flags, case=case) func = lambda x, ipat, irepl, i_n: ipat.sub( repl=irepl, string=x, count=max(i_n, 0) ) else: pat = self._stringify(pat) func = lambda x, ipat, irepl, i_n: x.replace(ipat, irepl, i_n) return self._apply(func=func, func_args=(pat, repl, n)) def extract( self, pat: str | bytes | Pattern | Any, dim: Hashable, case: bool | None = None, flags: int = 0, ) -> T_DataArray: r""" Extract the first match of capture groups in the regex pat as a new dimension in a DataArray. For each string in the DataArray, extract groups from the first match of regular expression pat. If `pat` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern or array-like of str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. dim : hashable or None Name of the new dimension to store the captured strings in. If None, the pattern must have only one capture group and the resulting DataArray will have the same size as the original. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. 
`re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- extracted : same type as values or object array Raises ------ ValueError `pat` has no capture groups. ValueError `dim` is None and there is more than one capture group. ValueError `case` is set when `pat` is a compiled regular expression. KeyError The given dimension is already present in the DataArray. Examples -------- Create a string array >>> value = xr.DataArray( ... [ ... [ ... "a_Xy_0", ... "ab_xY_10-bab_Xy_110-baab_Xy_1100", ... "abc_Xy_01-cbc_Xy_2210", ... ], ... [ ... "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", ... "", ... "abcdef_Xy_101-fef_Xy_5543210", ... ], ... ], ... dims=["X", "Y"], ... ) Extract matches >>> value.str.extract(r"(\w+)_Xy_(\d*)", dim="match") Size: 288B array([[['a', '0'], ['bab', '110'], ['abc', '01']], [['abcd', ''], ['', ''], ['abcdef', '101']]], dtype=' T_DataArray: r""" Extract all matches of capture groups in the regex pat as new dimensions in a DataArray. For each string in the DataArray, extract groups from all matches of regular expression pat. Equivalent to applying re.findall() to all the elements in the DataArray and splitting the results across dimensions. If `pat` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. group_dim : hashable Name of the new dimensions corresponding to the capture groups. This dimension is added to the new DataArray first. match_dim : hashable Name of the new dimensions corresponding to the matches for each group. This dimension is added to the new DataArray second. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- extracted : same type as values or object array Raises ------ ValueError `pat` has no capture groups. ValueError `case` is set when `pat` is a compiled regular expression. KeyError Either of the given dimensions is already present in the DataArray. KeyError The given dimensions names are the same. Examples -------- Create a string array >>> value = xr.DataArray( ... [ ... [ ... "a_Xy_0", ... "ab_xY_10-bab_Xy_110-baab_Xy_1100", ... "abc_Xy_01-cbc_Xy_2210", ... ], ... [ ... "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", ... "", ... "abcdef_Xy_101-fef_Xy_5543210", ... ], ... ], ... dims=["X", "Y"], ... ) Extract matches >>> value.str.extractall( ... r"(\w+)_Xy_(\d*)", group_dim="group", match_dim="match" ... ) Size: 1kB array([[[['a', '0'], ['', ''], ['', '']], [['bab', '110'], ['baab', '1100'], ['', '']], [['abc', '01'], ['cbc', '2210'], ['', '']]], [[['abcd', ''], ['dcd', '33210'], ['dccd', '332210']], [['', ''], ['', ''], ['', '']], [['abcdef', '101'], ['fef', '5543210'], ['', '']]]], dtype=' T_DataArray: r""" Find all occurrences of pattern or regular expression in the DataArray. Equivalent to applying re.findall() to all the elements in the DataArray. Results in an object array of lists. If there is only one capture group, the lists will be a sequence of matches. 
If there are multiple capture groups, the lists will be a sequence of lists, each of which contains a sequence of matches. If `pat` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- pat : str or re.Pattern A string containing a regular expression or a compiled regular expression object. If array-like, it is broadcast. case : bool, default: True If True, case sensitive. Cannot be set if `pat` is a compiled regex. Equivalent to setting the `re.IGNORECASE` flag. flags : int, default: 0 Flags to pass through to the re module, e.g. `re.IGNORECASE`. see `compilation-flags `_. ``0`` means no flags. Flags can be combined with the bitwise or operator ``|``. Cannot be set if `pat` is a compiled regex. Returns ------- extracted : object array Raises ------ ValueError `pat` has no capture groups. ValueError `case` is set when `pat` is a compiled regular expression. Examples -------- Create a string array >>> value = xr.DataArray( ... [ ... [ ... "a_Xy_0", ... "ab_xY_10-bab_Xy_110-baab_Xy_1100", ... "abc_Xy_01-cbc_Xy_2210", ... ], ... [ ... "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", ... "", ... "abcdef_Xy_101-fef_Xy_5543210", ... ], ... ], ... dims=["X", "Y"], ... ) Extract matches >>> value.str.findall(r"(\w+)_Xy_(\d*)") Size: 48B array([[list([('a', '0')]), list([('bab', '110'), ('baab', '1100')]), list([('abc', '01'), ('cbc', '2210')])], [list([('abcd', ''), ('dcd', '33210'), ('dccd', '332210')]), list([]), list([('abcdef', '101'), ('fef', '5543210')])]], dtype=object) Dimensions without coordinates: X, Y See Also -------- DataArray.str.extract DataArray.str.extractall re.compile re.findall pandas.Series.str.findall """ pat = self._re_compile(pat=pat, flags=flags, case=case) def func(x, ipat): if ipat.groups == 0: raise ValueError("No capture groups found in pattern.") return ipat.findall(x) return self._apply(func=func, func_args=(pat,), dtype=np.object_) def _partitioner( self, *, func: Callable, dim: Hashable | None, sep: str | bytes | Any | None, ) -> T_DataArray: """ Implements logic for `partition` and `rpartition`. """ sep = self._stringify(sep) if dim is None: listfunc = lambda x, isep: list(func(x, isep)) return self._apply(func=listfunc, func_args=(sep,), dtype=np.object_) # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) arrfunc = lambda x, isep: np.array(func(x, isep), dtype=self._obj.dtype) # dtype MUST be object or strings can be truncated # See: https://github.com/numpy/numpy/issues/8352 return duck_array_ops.astype( self._apply( func=arrfunc, func_args=(sep,), dtype=np.object_, output_core_dims=[[dim]], output_sizes={dim: 3}, ), self._obj.dtype.kind, ) def partition( self, dim: Hashable | None, sep: str | bytes | Any = " ", ) -> T_DataArray: """ Split the strings in the DataArray at the first occurrence of separator `sep`. This method splits the string at the first occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 elements containing the string itself, followed by two empty strings. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the 3 elements in. If `None`, place the results as list elements in an object DataArray. sep : str or bytes or array-like, default: " " String to split on. If array-like, it is broadcast. 
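As an illustrative sketch (``arr`` below is a hypothetical string DataArray, used only for illustration):

        >>> arr = xr.DataArray(["a_b_c", "nosep"], dims="x")
        >>> parts = arr.str.partition(dim="parts", sep="_")
        >>> parts.sizes["parts"]
        3

        The three entries along the new ``"parts"`` dimension hold the text before
        the first ``"_"``, the separator itself, and the remainder (empty strings
        where no separator occurs).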
Returns ------- partitioned : same type as values or object array See Also -------- DataArray.str.rpartition str.partition pandas.Series.str.partition """ return self._partitioner(func=self._obj.dtype.type.partition, dim=dim, sep=sep) def rpartition( self, dim: Hashable | None, sep: str | bytes | Any = " ", ) -> T_DataArray: """ Split the strings in the DataArray at the last occurrence of separator `sep`. This method splits the string at the last occurrence of `sep`, and returns 3 elements containing the part before the separator, the separator itself, and the part after the separator. If the separator is not found, return 3 elements containing two empty strings, followed by the string itself. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the 3 elements in. If `None`, place the results as list elements in an object DataArray. sep : str or bytes or array-like, default: " " String to split on. If array-like, it is broadcast. Returns ------- rpartitioned : same type as values or object array See Also -------- DataArray.str.partition str.rpartition pandas.Series.str.rpartition """ return self._partitioner(func=self._obj.dtype.type.rpartition, dim=dim, sep=sep) def _splitter( self, *, func: Callable, pre: bool, dim: Hashable, sep: str | bytes | Any | None, maxsplit: int, ) -> DataArray: """ Implements logic for `split` and `rsplit`. """ if sep is not None: sep = self._stringify(sep) if dim is None: f_none = lambda x, isep: func(x, isep, maxsplit) return self._apply(func=f_none, func_args=(sep,), dtype=np.object_) # _apply breaks on an empty array in this case if not self._obj.size: return self._obj.copy().expand_dims({dim: 0}, axis=-1) f_count = lambda x, isep: max(len(func(x, isep, maxsplit)), 1) maxsplit = ( self._apply(func=f_count, func_args=(sep,), dtype=np.int_).max().data.item() - 1 ) def _dosplit(mystr, sep, maxsplit=maxsplit, dtype=self._obj.dtype): res = func(mystr, sep, maxsplit) if len(res) < maxsplit + 1: pad = [""] * (maxsplit + 1 - len(res)) if pre: res += pad else: res = pad + res return np.array(res, dtype=dtype) # dtype MUST be object or strings can be truncated # See: https://github.com/numpy/numpy/issues/8352 return duck_array_ops.astype( self._apply( func=_dosplit, func_args=(sep,), dtype=np.object_, output_core_dims=[[dim]], output_sizes={dim: maxsplit}, ), self._obj.dtype.kind, ) def split( self, dim: Hashable | None, sep: str | bytes | Any = None, maxsplit: int = -1, ) -> DataArray: r""" Split strings in a DataArray around the given separator/delimiter `sep`. Splits the string in the DataArray from the beginning, at the specified delimiter string. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the results in. If `None`, place the results as list elements in an object DataArray. sep : str, default: None String to split on. If ``None`` (the default), split on any whitespace. If array-like, it is broadcast. maxsplit : int, default: -1 Limit number of splits in output, starting from the beginning. If -1 (the default), return all splits. Returns ------- splitted : same type as values or object array Examples -------- Create a string DataArray >>> values = xr.DataArray( ... [ ... ["abc def", "spam\t\teggs\tswallow", "red_blue"], ... ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ... ], ... dims=["X", "Y"], ... 
) Split once and put the results in a new dimension >>> values.str.split(dim="splitted", maxsplit=1) Size: 864B array([[['abc', 'def'], ['spam', 'eggs\tswallow'], ['red_blue', '']], [['test0', 'test1\ntest2\n\ntest3'], ['', ''], ['abra', 'ka\nda\tbra']]], dtype='>> values.str.split(dim="splitted") Size: 768B array([[['abc', 'def', '', ''], ['spam', 'eggs', 'swallow', ''], ['red_blue', '', '', '']], [['test0', 'test1', 'test2', 'test3'], ['', '', '', ''], ['abra', 'ka', 'da', 'bra']]], dtype='>> values.str.split(dim=None, maxsplit=1) Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs\tswallow']), list(['red_blue'])], [list(['test0', 'test1\ntest2\n\ntest3']), list([]), list(['abra', 'ka\nda\tbra'])]], dtype=object) Dimensions without coordinates: X, Y Split as many times as needed and put the results in a list >>> values.str.split(dim=None) Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), list(['abra', 'ka', 'da', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split only on spaces >>> values.str.split(dim="splitted", sep=" ") Size: 2kB array([[['abc', 'def', ''], ['spam\t\teggs\tswallow', '', ''], ['red_blue', '', '']], [['test0\ntest1\ntest2\n\ntest3', '', ''], ['', '', ''], ['abra', '', 'ka\nda\tbra']]], dtype=' DataArray: r""" Split strings in a DataArray around the given separator/delimiter `sep`. Splits the string in the DataArray from the end, at the specified delimiter string. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable or None Name for the dimension to place the results in. If `None`, place the results as list elements in an object DataArray sep : str, default: None String to split on. If ``None`` (the default), split on any whitespace. If array-like, it is broadcast. maxsplit : int, default: -1 Limit number of splits in output, starting from the end. If -1 (the default), return all splits. The final number of split values may be less than this if there are no DataArray elements with that many values. Returns ------- rsplitted : same type as values or object array Examples -------- Create a string DataArray >>> values = xr.DataArray( ... [ ... ["abc def", "spam\t\teggs\tswallow", "red_blue"], ... ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ... ], ... dims=["X", "Y"], ... 
) Split once and put the results in a new dimension >>> values.str.rsplit(dim="splitted", maxsplit=1) Size: 816B array([[['abc', 'def'], ['spam\t\teggs', 'swallow'], ['', 'red_blue']], [['test0\ntest1\ntest2', 'test3'], ['', ''], ['abra ka\nda', 'bra']]], dtype='>> values.str.rsplit(dim="splitted") Size: 768B array([[['', '', 'abc', 'def'], ['', 'spam', 'eggs', 'swallow'], ['', '', '', 'red_blue']], [['test0', 'test1', 'test2', 'test3'], ['', '', '', ''], ['abra', 'ka', 'da', 'bra']]], dtype='>> values.str.rsplit(dim=None, maxsplit=1) Size: 48B array([[list(['abc', 'def']), list(['spam\t\teggs', 'swallow']), list(['red_blue'])], [list(['test0\ntest1\ntest2', 'test3']), list([]), list(['abra ka\nda', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split as many times as needed and put the results in a list >>> values.str.rsplit(dim=None) Size: 48B array([[list(['abc', 'def']), list(['spam', 'eggs', 'swallow']), list(['red_blue'])], [list(['test0', 'test1', 'test2', 'test3']), list([]), list(['abra', 'ka', 'da', 'bra'])]], dtype=object) Dimensions without coordinates: X, Y Split only on spaces >>> values.str.rsplit(dim="splitted", sep=" ") Size: 2kB array([[['', 'abc', 'def'], ['', '', 'spam\t\teggs\tswallow'], ['', '', 'red_blue']], [['', '', 'test0\ntest1\ntest2\n\ntest3'], ['', '', ''], ['abra', '', 'ka\nda\tbra']]], dtype=' DataArray: """ Return DataArray of dummy/indicator variables. Each string in the DataArray is split at `sep`. A new dimension is created with coordinates for each unique result, and the corresponding element of that dimension is `True` if that result is present and `False` if not. If `sep` is array-like, it is broadcast against the array and applied elementwise. Parameters ---------- dim : hashable Name for the dimension to place the results in. sep : str, default: "|". String to split on. If array-like, it is broadcast. Returns ------- dummies : array of bool Examples -------- Create a string array >>> values = xr.DataArray( ... [ ... ["a|ab~abc|abc", "ab", "a||abc|abcd"], ... ["abcd|ab|a", "abc|ab~abc", "|a"], ... ], ... dims=["X", "Y"], ... ) Extract dummy values >>> values.str.get_dummies(dim="dummies") Size: 30B array([[[ True, False, True, False, True], [False, True, False, False, False], [ True, False, True, True, False]], [[ True, True, False, True, False], [False, False, True, False, True], [ True, False, False, False, False]]]) Coordinates: * dummies (dummies) T_DataArray: """ Decode character string in the array using indicated encoding. Parameters ---------- encoding : str The encoding to use. Please see the Python documentation `codecs standard encoders `_ section for a list of encodings handlers. errors : str, default: "strict" The handler for encoding errors. Please see the Python documentation `codecs error handlers `_ for a list of error handlers. Returns ------- decoded : same type as values """ if encoding in _cpython_optimized_decoders: func = lambda x: x.decode(encoding, errors) else: decoder = codecs.getdecoder(encoding) func = lambda x: decoder(x, errors)[0] return self._apply(func=func, dtype=np.str_) def encode(self, encoding: str, errors: str = "strict") -> T_DataArray: """ Encode character string in the array using indicated encoding. Parameters ---------- encoding : str The encoding to use. Please see the Python documentation `codecs standard encoders `_ section for a list of encodings handlers. errors : str, default: "strict" The handler for encoding errors. 
Please see the Python documentation `codecs error handlers `_ for a list of error handlers. Returns ------- encoded : same type as values """ if encoding in _cpython_optimized_encoders: func = lambda x: x.encode(encoding, errors) else: encoder = codecs.getencoder(encoding) func = lambda x: encoder(x, errors)[0] return self._apply(func=func, dtype=np.bytes_) �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������xarray-2025.09.0/xarray/core/common.py��������������������������������������������������������������0000664�0000000�0000000�00000223011�15056206164�0017443�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������from __future__ import annotations import datetime import warnings from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping from contextlib import suppress from html import escape from textwrap import dedent from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, Union, overload import numpy as np import pandas as pd from xarray.core import dtypes, duck_array_ops, formatting, formatting_html from xarray.core.indexing import BasicIndexer, ExplicitlyIndexed from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ResampleCompatible from xarray.core.utils import ( Frozen, either_dict_or_kwargs, is_scalar, ) from xarray.namedarray.core import _raise_if_any_duplicate_dimensions from xarray.namedarray.parallelcompat import get_chunked_array_type, guess_chunkmanager from xarray.namedarray.pycompat import is_chunked_array try: import cftime except ImportError: cftime = None # Used as a sentinel value to indicate a all dimensions ALL_DIMS = ... if TYPE_CHECKING: from numpy.typing import DTypeLike from xarray.computation.rolling_exp import RollingExp from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.indexes import Index from xarray.core.resample import Resample from xarray.core.types import ( DatetimeLike, DTypeLikeSave, ScalarOrArray, Self, SideOptions, T_Chunks, T_DataWithCoords, T_Variable, ) from xarray.core.variable import Variable from xarray.groupers import Resampler DTypeMaybeMapping = Union[DTypeLikeSave, Mapping[Any, DTypeLikeSave]] T_Resample = TypeVar("T_Resample", bound="Resample") C = TypeVar("C") T = TypeVar("T") P = ParamSpec("P") class ImplementsArrayReduce: __slots__ = () @classmethod def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool): if include_skipna: def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs): return self.reduce( func=func, dim=dim, axis=axis, skipna=skipna, **kwargs ) else: def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore[misc] return self.reduce(func=func, dim=dim, axis=axis, **kwargs) return wrapped_func _reduce_extra_args_docstring = dedent( """\ dim : str or sequence of str, optional Dimension(s) over which to apply `{name}`. axis : int or sequence of int, optional Axis(es) over which to apply `{name}`. Only one of the 'dim' and 'axis' arguments can be supplied. 
If neither are supplied, then `{name}` is calculated over axes.""" ) _cum_extra_args_docstring = dedent( """\ dim : str or sequence of str, optional Dimension over which to apply `{name}`. axis : int or sequence of int, optional Axis over which to apply `{name}`. Only one of the 'dim' and 'axis' arguments can be supplied.""" ) class ImplementsDatasetReduce: __slots__ = () @classmethod def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool): if include_skipna: def wrapped_func(self, dim=None, skipna=None, **kwargs): return self.reduce( func=func, dim=dim, skipna=skipna, numeric_only=numeric_only, **kwargs, ) else: def wrapped_func(self, dim=None, **kwargs): # type: ignore[misc] return self.reduce( func=func, dim=dim, numeric_only=numeric_only, **kwargs ) return wrapped_func _reduce_extra_args_docstring = dedent( """ dim : str or sequence of str, optional Dimension(s) over which to apply `{name}`. By default `{name}` is applied over all dimensions. """ ).strip() _cum_extra_args_docstring = dedent( """ dim : str or sequence of str, optional Dimension over which to apply `{name}`. axis : int or sequence of int, optional Axis over which to apply `{name}`. Only one of the 'dim' and 'axis' arguments can be supplied. """ ).strip() class AbstractArray: """Shared base class for DataArray and Variable.""" __slots__ = () def __bool__(self: Any) -> bool: return bool(self.values) def __float__(self: Any) -> float: return float(self.values) def __int__(self: Any) -> int: return int(self.values) def __complex__(self: Any) -> complex: return complex(self.values) def __array__( self: Any, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: if not copy: if np.lib.NumpyVersion(np.__version__) >= "2.0.0": copy = None elif np.lib.NumpyVersion(np.__version__) <= "1.28.0": copy = False else: # 2.0.0 dev versions, handle cases where copy may or may not exist try: np.array([1]).__array__(copy=None) copy = None except TypeError: copy = False return np.array(self.values, dtype=dtype, copy=copy) def __repr__(self) -> str: return formatting.array_repr(self) def _repr_html_(self): if OPTIONS["display_style"] == "text": return f"
<pre>{escape(repr(self))}</pre>
" return formatting_html.array_repr(self) def __format__(self: Any, format_spec: str = "") -> str: if format_spec != "": if self.shape == (): # Scalar values might be ok use format_spec with instead of repr: return self.data.__format__(format_spec) else: # TODO: If it's an array the formatting.array_repr(self) should # take format_spec as an input. If we'd only use self.data we # lose all the information about coords for example which is # important information: raise NotImplementedError( "Using format_spec is only supported" f" when shape is (). Got shape = {self.shape}." ) else: return self.__repr__() def _iter(self: Any) -> Iterator[Any]: for n in range(len(self)): yield self[n] def __iter__(self: Any) -> Iterator[Any]: if self.ndim == 0: raise TypeError("iteration over a 0-d array") return self._iter() @overload def get_axis_num(self, dim: str) -> int: ... # type: ignore [overload-overlap] @overload def get_axis_num(self, dim: Iterable[Hashable]) -> tuple[int, ...]: ... @overload def get_axis_num(self, dim: Hashable) -> int: ... def get_axis_num(self, dim: Hashable | Iterable[Hashable]) -> int | tuple[int, ...]: """Return axis number(s) corresponding to dimension(s) in this array. Parameters ---------- dim : str or iterable of str Dimension name(s) for which to lookup axes. Returns ------- int or tuple of int Axis number or numbers corresponding to the given dimensions. """ if not isinstance(dim, str) and isinstance(dim, Iterable): return tuple(self._get_axis_num(d) for d in dim) else: return self._get_axis_num(dim) def _get_axis_num(self: Any, dim: Hashable) -> int: _raise_if_any_duplicate_dimensions(self.dims) try: return self.dims.index(dim) except ValueError as err: raise ValueError( f"{dim!r} not found in array dimensions {self.dims!r}" ) from err @property def sizes(self: Any) -> Mapping[Hashable, int]: """Ordered mapping from dimension names to lengths. Immutable. See Also -------- Dataset.sizes """ return Frozen(dict(zip(self.dims, self.shape, strict=True))) class AttrAccessMixin: """Mixin class that allows getting keys with attribute access""" __slots__ = () def __init_subclass__(cls, **kwargs): """Verify that all subclasses explicitly define ``__slots__``. If they don't, raise error in the core xarray module and a FutureWarning in third-party extensions. 
""" if not hasattr(object.__new__(cls), "__dict__"): pass elif cls.__module__.startswith("xarray."): raise AttributeError(f"{cls.__name__} must explicitly define __slots__") else: cls.__setattr__ = cls._setattr_dict warnings.warn( f"xarray subclass {cls.__name__} should explicitly define __slots__", FutureWarning, stacklevel=2, ) super().__init_subclass__(**kwargs) @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from () @property def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for key-autocompletion""" yield from () def __getattr__(self, name: str) -> Any: if name not in {"__dict__", "__setstate__"}: # this avoids an infinite loop when pickle looks for the # __setstate__ attribute before the xarray object is initialized for source in self._attr_sources: with suppress(KeyError): return source[name] raise AttributeError( f"{type(self).__name__!r} object has no attribute {name!r}" ) # This complicated two-method design boosts overall performance of simple operations # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by # a whopping 8% compared to a single method that checks hasattr(self, "__dict__") at # runtime before every single assignment. All of this is just temporary until the # FutureWarning can be changed into a hard crash. def _setattr_dict(self, name: str, value: Any) -> None: """Deprecated third party subclass (see ``__init_subclass__`` above)""" object.__setattr__(self, name, value) if name in self.__dict__: # Custom, non-slotted attr, or improperly assigned variable? warnings.warn( f"Setting attribute {name!r} on a {type(self).__name__!r} object. Explicitly define __slots__ " "to suppress this warning for legitimate custom attributes and " "raise an error when attempting variables assignments.", FutureWarning, stacklevel=2, ) def __setattr__(self, name: str, value: Any) -> None: """Objects with ``__slots__`` raise AttributeError if you try setting an undeclared attribute. This is desirable, but the error message could use some improvement. """ try: object.__setattr__(self, name, value) except AttributeError as e: # Don't accidentally shadow custom AttributeErrors, e.g. # DataArray.dims.setter if str(e) != f"{type(self).__name__!r} object has no attribute {name!r}": raise raise AttributeError( f"cannot set attribute {name!r} on a {type(self).__name__!r} object. Use __setitem__ style" "assignment (e.g., `ds['name'] = ...`) instead of assigning variables." ) from e def __dir__(self) -> list[str]: """Provide method name lookup and completion. Only provide 'public' methods. """ extra_attrs = { item for source in self._attr_sources for item in source if isinstance(item, str) } return sorted(set(dir(type(self))) | extra_attrs) def _ipython_key_completions_(self) -> list[str]: """Provide method for the key-autocompletions in IPython. See https://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion For the details. """ items = { item for source in self._item_sources for item in source if isinstance(item, str) } return list(items) class TreeAttrAccessMixin(AttrAccessMixin): """Mixin class that allows getting keys with attribute access""" # TODO: Ensure ipython tab completion can include both child datatrees and # variables from Dataset objects on relevant nodes. 
__slots__ = () def __init_subclass__(cls, **kwargs): """This method overrides the check from ``AttrAccessMixin`` that ensures ``__dict__`` is absent in a class, with ``__slots__`` used instead. ``DataTree`` has some dynamically defined attributes in addition to those defined in ``__slots__``. (GH9068) """ if not hasattr(object.__new__(cls), "__dict__"): pass def get_squeeze_dims( xarray_obj, dim: Hashable | Iterable[Hashable] | None = None, axis: int | Iterable[int] | None = None, ) -> list[Hashable]: """Get a list of dimensions to squeeze out.""" if dim is not None and axis is not None: raise ValueError("cannot use both parameters `axis` and `dim`") if dim is None and axis is None: return [d for d, s in xarray_obj.sizes.items() if s == 1] if isinstance(dim, Iterable) and not isinstance(dim, str): dim = list(dim) elif dim is not None: dim = [dim] else: assert axis is not None if isinstance(axis, int): axis = [axis] axis = list(axis) if any(not isinstance(a, int) for a in axis): raise TypeError("parameter `axis` must be int or iterable of int.") alldims = list(xarray_obj.sizes.keys()) dim = [alldims[a] for a in axis] if any(xarray_obj.sizes[k] > 1 for k in dim): raise ValueError( "cannot select a dimension to squeeze out which has length greater than one" ) return dim class DataWithCoords(AttrAccessMixin): """Shared base class for Dataset and DataArray.""" _close: Callable[[], None] | None _indexes: dict[Hashable, Index] __slots__ = ("_close",) def squeeze( self, dim: Hashable | Iterable[Hashable] | None = None, drop: bool = False, axis: int | Iterable[int] | None = None, ) -> Self: """Return a new object with squeezed data. Parameters ---------- dim : None or Hashable or iterable of Hashable, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. drop : bool, default: False If ``drop=True``, drop squeezed coordinates instead of making them scalar. axis : None or int or iterable of int, optional Like dim, but positional. Returns ------- squeezed : same type as caller This object, but with with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze """ dims = get_squeeze_dims(self, dim, axis) return self.isel(drop=drop, **dict.fromkeys(dims, 0)) def clip( self, min: ScalarOrArray | None = None, max: ScalarOrArray | None = None, *, keep_attrs: bool | None = None, ) -> Self: """ Return an array whose values are limited to ``[min, max]``. At least one of max or min must be given. Parameters ---------- min : None or Hashable, optional Minimum value. If None, no lower clipping is performed. max : None or Hashable, optional Maximum value. If None, no upper clipping is performed. keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- clipped : same type as caller This object, but with with values < min are replaced with min, and those > max with max. See Also -------- numpy.clip : equivalent function """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: # When this was a unary func, the default was True, so retaining the # default. 
keep_attrs = _get_keep_attrs(default=True) return apply_ufunc( duck_array_ops.clip, self, min, max, keep_attrs=keep_attrs, dask="allowed" ) def get_index(self, key: Hashable) -> pd.Index: """Get an index for a dimension, with fall-back to a default RangeIndex""" if key not in self.dims: raise KeyError(key) try: return self._indexes[key].to_pandas_index() except KeyError: return pd.Index(range(self.sizes[key]), name=key) def _calc_assign_results( self: C, kwargs: Mapping[Any, T | Callable[[C], T]] ) -> dict[Hashable, T]: return {k: v(self) if callable(v) else v for k, v in kwargs.items()} def assign_coords( self, coords: Mapping | None = None, **coords_kwargs: Any, ) -> Self: """Assign new coordinates to this object. Returns a new object with all the original data in addition to the new coordinates. Parameters ---------- coords : mapping of dim to coord, optional A mapping whose keys are the names of the coordinates and values are the coordinates to assign. The mapping will generally be a dict or :class:`Coordinates`. * If a value is a standard data value β€” for example, a ``DataArray``, scalar, or array β€” the data is simply assigned as a coordinate. * If a value is callable, it is called with this object as the only parameter, and the return value is used as new coordinate variables. * A coordinate can also be defined and attached to an existing dimension using a tuple with the first element the dimension name and the second element the values for this new coordinate. **coords_kwargs : optional The keyword arguments form of ``coords``. One of ``coords`` or ``coords_kwargs`` must be provided. Returns ------- assigned : same type as caller A new object with the new coordinates in addition to the existing data. Examples -------- Convert `DataArray` longitude coordinates from 0-359 to -180-179: >>> da = xr.DataArray( ... np.random.rand(4), ... coords=[np.array([358, 359, 0, 1])], ... dims="lon", ... ) >>> da Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B 358 359 0 1 >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180)) Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B -2 -1 0 1 The function also accepts dictionary arguments: >>> da.assign_coords({"lon": (((da.lon + 180) % 360) - 180)}) Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B -2 -1 0 1 New coordinate can also be attached to an existing dimension: >>> lon_2 = np.array([300, 289, 0, 1]) >>> da.assign_coords(lon_2=("lon", lon_2)) Size: 32B array([0.5488135 , 0.71518937, 0.60276338, 0.54488318]) Coordinates: * lon (lon) int64 32B 358 359 0 1 lon_2 (lon) int64 32B 300 289 0 1 Note that the same result can also be obtained with a dict e.g. >>> _ = da.assign_coords({"lon_2": ("lon", lon_2)}) Note the same method applies to `Dataset` objects. Convert `Dataset` longitude coordinates from 0-359 to -180-179: >>> temperature = np.linspace(20, 32, num=16).reshape(2, 2, 4) >>> precipitation = 2 * np.identity(4).reshape(2, 2, 4) >>> ds = xr.Dataset( ... data_vars=dict( ... temperature=(["x", "y", "time"], temperature), ... precipitation=(["x", "y", "time"], precipitation), ... ), ... coords=dict( ... lon=(["x", "y"], [[260.17, 260.68], [260.21, 260.77]]), ... lat=(["x", "y"], [[42.25, 42.21], [42.63, 42.59]]), ... time=pd.date_range("2014-09-06", periods=4), ... reference_time=pd.Timestamp("2014-09-05"), ... ), ... attrs=dict(description="Weather-related data"), ... 
) >>> ds Size: 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: lon (x, y) float64 32B 260.2 260.7 260.2 260.8 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 * time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0 precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 Attributes: description: Weather-related data >>> ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180)) Size: 360B Dimensions: (x: 2, y: 2, time: 4) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 * time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: temperature (x, y, time) float64 128B 20.0 20.8 21.6 ... 30.4 31.2 32.0 precipitation (x, y, time) float64 128B 2.0 0.0 0.0 0.0 ... 0.0 0.0 2.0 Attributes: description: Weather-related data See Also -------- Dataset.assign Dataset.swap_dims Dataset.set_coords """ from xarray.core.coordinates import Coordinates coords_combined = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords") data = self.copy(deep=False) results: Coordinates | dict[Hashable, Any] if isinstance(coords, Coordinates): results = coords else: results = self._calc_assign_results(coords_combined) data.coords.update(results) return data def assign_attrs(self, *args: Any, **kwargs: Any) -> Self: """Assign new attrs to this object. Returns a new object equivalent to ``self.attrs.update(*args, **kwargs)``. Parameters ---------- *args positional arguments passed into ``attrs.update``. **kwargs keyword arguments passed into ``attrs.update``. Examples -------- >>> dataset = xr.Dataset({"temperature": [25, 30, 27]}) >>> dataset Size: 24B Dimensions: (temperature: 3) Coordinates: * temperature (temperature) int64 24B 25 30 27 Data variables: *empty* >>> new_dataset = dataset.assign_attrs( ... units="Celsius", description="Temperature data" ... ) >>> new_dataset Size: 24B Dimensions: (temperature: 3) Coordinates: * temperature (temperature) int64 24B 25 30 27 Data variables: *empty* Attributes: units: Celsius description: Temperature data # Attributes of the new dataset >>> new_dataset.attrs {'units': 'Celsius', 'description': 'Temperature data'} Returns ------- assigned : same type as caller A new object with the new attrs in addition to the existing data. See Also -------- Dataset.assign """ out = self.copy(deep=False) out.attrs.update(*args, **kwargs) return out @overload def pipe( self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, ) -> T: ... @overload def pipe( self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: ... def pipe( self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[P, T], str], *args: P.args, **kwargs: P.kwargs, ) -> T: """ Apply ``func(self, *args, **kwargs)`` This method replicates the pandas method of the same name. Parameters ---------- func : callable function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. *args positional arguments passed into ``func``. **kwargs a dictionary of keyword arguments passed into ``func``. 
Returns ------- object : Any the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect xarray or pandas objects, e.g., instead of writing .. code:: python f(g(h(ds), arg1=a), arg2=b, arg3=c) You can write .. code:: python (ds.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)) If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: .. code:: python (ds.pipe(h).pipe(g, arg1=a).pipe((f, "arg2"), arg1=a, arg3=c)) Examples -------- >>> x = xr.Dataset( ... { ... "temperature_c": ( ... ("lat", "lon"), ... 20 * np.random.rand(4).reshape(2, 2), ... ), ... "precipitation": (("lat", "lon"), np.random.rand(4).reshape(2, 2)), ... }, ... coords={"lat": [10, 20], "lon": [150, 160]}, ... ) >>> x Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 >>> def adder(data, arg): ... return data + arg ... >>> def div(data, arg): ... return data / arg ... >>> def sub_mult(data, sub_arg, mult_arg): ... return (data * mult_arg) - sub_arg ... >>> x.pipe(adder, 2) Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 12.98 16.3 14.06 12.9 precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 >>> x.pipe(adder, arg=2) Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 12.98 16.3 14.06 12.9 precipitation (lat, lon) float64 32B 2.424 2.646 2.438 2.892 >>> ( ... x.pipe(adder, arg=2) ... .pipe(div, arg=2) ... .pipe(sub_mult, sub_arg=2, mult_arg=2) ... ) Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 See Also -------- pandas.DataFrame.pipe """ if isinstance(func, tuple): # Use different var when unpacking function from tuple because the type # signature of the unpacked function differs from the expected type # signature in the case where only a function is given, rather than a tuple. # This makes type checkers happy at both call sites below. f, target = func if target in kwargs: raise ValueError( f"{target} is both the pipe target and a keyword argument" ) kwargs[target] = self return f(*args, **kwargs) return func(self, *args, **kwargs) def rolling_exp( self: T_DataWithCoords, window: Mapping[Any, int] | None = None, window_type: str = "span", **window_kwargs, ) -> RollingExp[T_DataWithCoords]: """ Exponentially-weighted moving window. Similar to EWM in pandas Requires the optional Numbagg dependency. Parameters ---------- window : mapping of hashable to int, optional A mapping from the name of the dimension to create the rolling exponential window along (e.g. `time`) to the size of the moving window. window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html **window_kwargs : optional The keyword arguments form of ``window``. 
One of window or window_kwargs must be provided. See Also -------- core.rolling_exp.RollingExp """ if "keep_attrs" in window_kwargs: warnings.warn( "Passing ``keep_attrs`` to ``rolling_exp`` has no effect. Pass" " ``keep_attrs`` directly to the applied function, e.g." " ``rolling_exp(...).mean(keep_attrs=False)``.", stacklevel=2, ) window = either_dict_or_kwargs(window, window_kwargs, "rolling_exp") from xarray.computation.rolling_exp import RollingExp return RollingExp(self, window, window_type) def _resample( self, resample_cls: type[T_Resample], indexer: Mapping[Hashable, ResampleCompatible | Resampler] | None, skipna: bool | None, closed: SideOptions | None, label: SideOptions | None, offset: pd.Timedelta | datetime.timedelta | str | None, origin: str | DatetimeLike, restore_coord_dims: bool | None, **indexer_kwargs: ResampleCompatible | Resampler, ) -> T_Resample: """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. The resampled dimension must be a datetime-like coordinate. If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : {dim: freq}, optional Mapping from the dimension name to resample frequency [1]_. The dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. restore_coord_dims : bool, optional If True, also restore the dimension order of multi-dimensional coordinates. **indexer_kwargs : {dim: freq} The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : same type as caller This object resampled. Examples -------- Downsample monthly time-series data to seasonal data: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... ) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() Size: 32B array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 Upsample monthly time-series data to daily data: >>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS Size: 3kB array([ 0. 
, 0.03225806, 0.06451613, 0.09677419, 0.12903226, 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258, 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 , 0.48387097, 0.51612903, 0.5483871 , 0.58064516, 0.61290323, 0.64516129, 0.67741935, 0.70967742, 0.74193548, 0.77419355, 0.80645161, 0.83870968, 0.87096774, 0.90322581, 0.93548387, 0.96774194, 1. , 1.03225806, 1.06451613, 1.09677419, 1.12903226, 1.16129032, 1.19354839, 1.22580645, 1.25806452, 1.29032258, 1.32258065, 1.35483871, 1.38709677, 1.41935484, 1.4516129 , 1.48387097, 1.51612903, 1.5483871 , 1.58064516, 1.61290323, 1.64516129, 1.67741935, 1.70967742, 1.74193548, 1.77419355, 1.80645161, 1.83870968, 1.87096774, 1.90322581, 1.93548387, 1.96774194, 2. , 2.03448276, 2.06896552, 2.10344828, 2.13793103, 2.17241379, 2.20689655, 2.24137931, 2.27586207, 2.31034483, 2.34482759, 2.37931034, 2.4137931 , 2.44827586, 2.48275862, 2.51724138, 2.55172414, 2.5862069 , 2.62068966, 2.65517241, 2.68965517, 2.72413793, 2.75862069, 2.79310345, 2.82758621, 2.86206897, 2.89655172, 2.93103448, 2.96551724, 3. , 3.03225806, 3.06451613, 3.09677419, 3.12903226, 3.16129032, 3.19354839, 3.22580645, 3.25806452, ... 7.87096774, 7.90322581, 7.93548387, 7.96774194, 8. , 8.03225806, 8.06451613, 8.09677419, 8.12903226, 8.16129032, 8.19354839, 8.22580645, 8.25806452, 8.29032258, 8.32258065, 8.35483871, 8.38709677, 8.41935484, 8.4516129 , 8.48387097, 8.51612903, 8.5483871 , 8.58064516, 8.61290323, 8.64516129, 8.67741935, 8.70967742, 8.74193548, 8.77419355, 8.80645161, 8.83870968, 8.87096774, 8.90322581, 8.93548387, 8.96774194, 9. , 9.03333333, 9.06666667, 9.1 , 9.13333333, 9.16666667, 9.2 , 9.23333333, 9.26666667, 9.3 , 9.33333333, 9.36666667, 9.4 , 9.43333333, 9.46666667, 9.5 , 9.53333333, 9.56666667, 9.6 , 9.63333333, 9.66666667, 9.7 , 9.73333333, 9.76666667, 9.8 , 9.83333333, 9.86666667, 9.9 , 9.93333333, 9.96666667, 10. , 10.03225806, 10.06451613, 10.09677419, 10.12903226, 10.16129032, 10.19354839, 10.22580645, 10.25806452, 10.29032258, 10.32258065, 10.35483871, 10.38709677, 10.41935484, 10.4516129 , 10.48387097, 10.51612903, 10.5483871 , 10.58064516, 10.61290323, 10.64516129, 10.67741935, 10.70967742, 10.74193548, 10.77419355, 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387, 10.96774194, 11. ]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 
2000-11-15 Limit scope of upsampling method >>> da.resample(time="1D").nearest(tolerance="1D") Size: 3kB array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 2., 2., 2., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 3., 3., 3., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 4., 4., 4., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 5., 5., 5., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 6., 6., 6., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 7., 7., 7., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 8., 8., 8., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 9., 9., 9., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 10., 10., 10., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 See Also -------- pandas.Series.resample pandas.DataFrame.resample References ---------- .. [1] https://pandas.pydata.org/docs/user_guide/timeseries.html#dateoffset-objects """ # TODO support non-string indexer after removing the old API. from xarray.core.dataarray import DataArray from xarray.core.groupby import ResolvedGrouper from xarray.core.resample import RESAMPLE_DIM from xarray.groupers import Resampler, TimeResampler indexer = either_dict_or_kwargs(indexer, indexer_kwargs, "resample") if len(indexer) != 1: raise ValueError("Resampling only supported along single dimensions.") dim, freq = next(iter(indexer.items())) dim_name: Hashable = dim dim_coord = self[dim] group = DataArray( dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM ) grouper: Resampler if isinstance(freq, ResampleCompatible): grouper = TimeResampler( freq=freq, closed=closed, label=label, origin=origin, offset=offset ) elif isinstance(freq, Resampler): grouper = freq else: raise ValueError( "freq must be an object of type 'str', 'datetime.timedelta', " "'pandas.Timedelta', 'pandas.DateOffset', or 'TimeResampler'. " f"Received {type(freq)} instead." ) rgrouper = ResolvedGrouper(grouper, group, self) return resample_cls( self, (rgrouper,), dim=dim_name, resample_dim=RESAMPLE_DIM, restore_coord_dims=restore_coord_dims, ) def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self: """Filter elements from this object according to a condition. Returns elements from 'DataArray', where 'cond' is True, otherwise fill in 'other'. This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic. 
Parameters ---------- cond : DataArray, Dataset, or callable Locations at which to preserve this object's values. dtype must be `bool`. If a callable, the callable is passed this object, and the result is used as the value for cond. other : scalar, DataArray, Dataset, or callable, optional Value to use for locations in this object where ``cond`` is False. By default, these locations are filled with NA. If a callable, it must expect this object as its only parameter. drop : bool, default: False If True, coordinate labels that only correspond to False values of the condition are dropped from the result. Returns ------- DataArray or Dataset Same xarray type as caller, with dtype float64. Examples -------- >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y")) >>> a Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4) Size: 200B array([[ 0., 1., 2., 3., nan], [ 5., 6., 7., nan, nan], [10., 11., nan, nan, nan], [15., nan, nan, nan, nan], [nan, nan, nan, nan, nan]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 5, -1) Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, -1], [10, 11, 12, -1, -1], [15, 16, -1, -1, -1], [20, -1, -1, -1, -1]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4, drop=True) Size: 128B array([[ 0., 1., 2., 3.], [ 5., 6., 7., nan], [10., 11., nan, nan], [15., nan, nan, nan]]) Dimensions without coordinates: x, y >>> a.where(lambda x: x.x + x.y < 4, lambda x: -x) Size: 200B array([[ 0, 1, 2, 3, -4], [ 5, 6, 7, -8, -9], [ 10, 11, -12, -13, -14], [ 15, -16, -17, -18, -19], [-20, -21, -22, -23, -24]]) Dimensions without coordinates: x, y >>> a.where(a.x + a.y < 4, drop=True) Size: 128B array([[ 0., 1., 2., 3.], [ 5., 6., 7., nan], [10., 11., nan, nan], [15., nan, nan, nan]]) Dimensions without coordinates: x, y See Also -------- numpy.where : corresponding numpy function where : equivalent function """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.structure.alignment import align if callable(cond): cond = cond(self) if callable(other): other = other(self) if drop: if not isinstance(cond, Dataset | DataArray): raise TypeError( f"cond argument is {cond!r} but must be a {Dataset!r} or {DataArray!r} (or a callable than returns one)." ) self, cond = align(self, cond) def _dataarray_indexer(dim: Hashable) -> DataArray: return cond.any(dim=(d for d in cond.dims if d != dim)) def _dataset_indexer(dim: Hashable) -> DataArray: cond_wdim = cond.drop_vars( var for var in cond if dim not in cond[var].dims ) keepany = cond_wdim.any(dim=(d for d in cond.dims if d != dim)) return keepany.to_dataarray().any("variable") _get_indexer = ( _dataarray_indexer if isinstance(cond, DataArray) else _dataset_indexer ) indexers = {} for dim in cond.sizes.keys(): indexers[dim] = _get_indexer(dim) self = self.isel(**indexers) cond = cond.isel(**indexers) from xarray.computation import ops return ops.where_method(self, cond, other) def set_close(self, close: Callable[[], None] | None) -> None: """Register the function that releases any resources linked to this object. This method controls how xarray cleans up resources associated with this object when the ``.close()`` method is called. It is mostly intended for backend developers and it is rarely needed by regular end-users. 
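        A minimal illustrative sketch (the ``print`` callback below stands in for a
        real cleanup function, e.g. a file handle's ``close``):

        >>> ds = xr.Dataset()
        >>> ds.set_close(lambda: print("resources released"))
        >>> ds.close()
        resources released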
Parameters ---------- close : callable The function that when called like ``close()`` releases any resources linked to this object. """ self._close = close def close(self) -> None: """Release any resources linked to this object.""" if self._close is not None: self._close() self._close = None def isnull(self, keep_attrs: bool | None = None) -> Self: """Test each value in the array for whether it is a missing value. Parameters ---------- keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- isnull : DataArray or Dataset Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.isnull Examples -------- >>> array = xr.DataArray([1, np.nan, 3], dims="x") >>> array Size: 24B array([ 1., nan, 3.]) Dimensions without coordinates: x >>> array.isnull() Size: 3B array([False, True, False]) Dimensions without coordinates: x """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.isnull, self, dask="allowed", keep_attrs=keep_attrs, ) def notnull(self, keep_attrs: bool | None = None) -> Self: """Test each value in the array for whether it is not a missing value. Parameters ---------- keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- notnull : DataArray or Dataset Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> array = xr.DataArray([1, np.nan, 3], dims="x") >>> array Size: 24B array([ 1., nan, 3.]) Dimensions without coordinates: x >>> array.notnull() Size: 3B array([ True, False, True]) Dimensions without coordinates: x """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.notnull, self, dask="allowed", keep_attrs=keep_attrs, ) def isin(self, test_elements: Any) -> Self: """Tests each value in the array for whether it is in test elements. Parameters ---------- test_elements : array_like The values against which to test each value of `element`. This argument is flattened if an array or array_like. See numpy notes for behavior with non-array-like parameters. Returns ------- isin : DataArray or Dataset Has the same type and shape as this object, but with a bool dtype. 
Examples -------- >>> array = xr.DataArray([1, 2, 3], dims="x") >>> array.isin([1, 3]) Size: 3B array([ True, False, True]) Dimensions without coordinates: x See Also -------- numpy.isin """ from xarray.computation.apply_ufunc import apply_ufunc from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.variable import Variable if isinstance(test_elements, Dataset): raise TypeError( f"isin() argument must be convertible to an array: {test_elements}" ) elif isinstance(test_elements, Variable | DataArray): # need to explicitly pull out data to support dask arrays as the # second argument test_elements = test_elements.data return apply_ufunc( duck_array_ops.isin, self, kwargs=dict(test_elements=test_elements), dask="allowed", ) def astype( self, dtype, *, order=None, casting=None, subok=None, copy=None, keep_attrs=True, ) -> Self: """ Copy of the xarray object, with data cast to a specified type. Leaves coordinate dtype unchanged. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. β€˜C’ means C order, β€˜F’ means Fortran order, β€˜A’ means β€˜F’ order if all the arrays are Fortran contiguous, β€˜C’ order otherwise, and β€˜K’ means as close to the order the array elements appear in memory as possible. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See Also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype """ from xarray.computation.apply_ufunc import apply_ufunc kwargs = dict(order=order, casting=casting, subok=subok, copy=copy) kwargs = {k: v for k, v in kwargs.items() if v is not None} return apply_ufunc( duck_array_ops.astype, self, dtype, kwargs=kwargs, keep_attrs=keep_attrs, dask="allowed", ) def __enter__(self) -> Self: return self def __exit__(self, exc_type, exc_value, traceback) -> None: self.close() def __getitem__(self, value): # implementations of this class should implement this method raise NotImplementedError() @overload def full_like( other: DataArray, fill_value: Any, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> DataArray: ... 
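# A hedged usage sketch for the ``astype`` method documented above (the
# DataArray ``da`` is hypothetical, included only for illustration):
#
#     >>> da = xr.DataArray([1, 2, 3], dims="x", attrs={"units": "m"})
#     >>> da.astype(float).dtype
#     dtype('float64')
#     >>> da.astype(float).attrs  # attributes are kept by default
#     {'units': 'm'}
#     >>> da.astype(float, keep_attrs=False).attrs
#     {}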
@overload def full_like( other: Dataset, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset: ... @overload def full_like( other: Variable, fill_value: Any, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: ... @overload def full_like( other: Dataset | DataArray, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = {}, # noqa: B006 chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray: ... @overload def full_like( other: Dataset | DataArray | Variable, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: ... def full_like( other: Dataset | DataArray | Variable, fill_value: Any, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: """ Return a new object with the same shape and type as a given object. Returned object will be chunked if if the given object is chunked, or if chunks or chunked_array_type are specified. Parameters ---------- other : DataArray, Dataset or Variable The reference object in input fill_value : scalar or dict-like Value to fill the new object with before returning it. If other is a Dataset, may also be a dict-like mapping data variables to fill values. dtype : dtype or dict-like of dtype, optional dtype of the new array. If a dict-like, maps dtypes to variables. If omitted, it defaults to other.dtype. chunks : int, "auto", tuple of int or mapping of Hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}``. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. Returns ------- out : same as object New object with the same shape and type as other, with the data filled with fill_value. Coords will be copied from other. If other is based on dask, the new one will be as well, and will be split in the same chunks. Examples -------- >>> x = xr.DataArray( ... np.arange(6).reshape(2, 3), ... dims=["lat", "lon"], ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... 
) >>> x Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 1) Size: 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 0.5) Size: 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, 0.5, dtype=np.double) Size: 48B array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.full_like(x, np.nan, dtype=np.double) Size: 48B array([[nan, nan, nan], [nan, nan, nan]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> ds = xr.Dataset( ... {"a": ("x", [3, 5, 2]), "b": ("x", [9, 1, 0])}, coords={"x": [2, 4, 6]} ... ) >>> ds Size: 72B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 Data variables: a (x) int64 24B 3 5 2 b (x) int64 24B 9 1 0 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}) Size: 72B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 Data variables: a (x) int64 24B 1 1 1 b (x) int64 24B 2 2 2 >>> xr.full_like(ds, fill_value={"a": 1, "b": 2}, dtype={"a": bool, "b": float}) Size: 51B Dimensions: (x: 3) Coordinates: * x (x) int64 24B 2 4 6 Data variables: a (x) bool 3B True True True b (x) float64 24B 2.0 2.0 2.0 See Also -------- zeros_like ones_like """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.variable import Variable if not is_scalar(fill_value) and not ( isinstance(other, Dataset) and isinstance(fill_value, dict) ): raise ValueError( f"fill_value must be scalar or, for datasets, a dict-like. Received {fill_value} instead." ) if isinstance(other, Dataset): if not isinstance(fill_value, dict): fill_value = dict.fromkeys(other.data_vars.keys(), fill_value) dtype_: Mapping[Any, DTypeLikeSave] if not isinstance(dtype, Mapping): dtype_ = dict.fromkeys(other.data_vars.keys(), dtype) else: dtype_ = dtype data_vars = { k: _full_like_variable( v.variable, fill_value.get(k, dtypes.NA), dtype_.get(k, None), chunks, chunked_array_type, from_array_kwargs, ) for k, v in other.data_vars.items() } return Dataset(data_vars, coords=other.coords, attrs=other.attrs) elif isinstance(other, DataArray): if isinstance(dtype, Mapping): raise ValueError("'dtype' cannot be dict-like when passing a DataArray") return DataArray( _full_like_variable( other.variable, fill_value, dtype, chunks, chunked_array_type, from_array_kwargs, ), dims=other.dims, coords=other.coords, attrs=other.attrs, name=other.name, ) elif isinstance(other, Variable): if isinstance(dtype, Mapping): raise ValueError("'dtype' cannot be dict-like when passing a Variable") return _full_like_variable( other, fill_value, dtype, chunks, chunked_array_type, from_array_kwargs ) else: raise TypeError("Expected DataArray, Dataset, or Variable") def _full_like_variable( other: Variable, fill_value: Any, dtype: DTypeLike | None = None, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: """Inner function of full_like, where other must be a variable""" from xarray.core.variable import Variable if fill_value is dtypes.NA: fill_value = dtypes.get_fill_value(dtype if dtype is not None else other.dtype) if ( is_chunked_array(other.data) or chunked_array_type is not None or chunks is not None ): if chunked_array_type is None: chunkmanager = get_chunked_array_type(other.data) else: chunkmanager = 
guess_chunkmanager(chunked_array_type) if dtype is None: dtype = other.dtype if from_array_kwargs is None: from_array_kwargs = {} data = chunkmanager.array_api.full( other.shape, fill_value, dtype=dtype, chunks=chunks or other.data.chunks, **from_array_kwargs, ) else: data = duck_array_ops.full_like(other.data, fill_value, dtype=dtype) return Variable(dims=other.dims, data=data, attrs=other.attrs) @overload def zeros_like( other: DataArray, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> DataArray: ... @overload def zeros_like( other: Dataset, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset: ... @overload def zeros_like( other: Variable, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: ... @overload def zeros_like( other: Dataset | DataArray, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray: ... @overload def zeros_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: ... def zeros_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: """Return a new object of zeros with the same shape and type as a given dataarray or dataset. Parameters ---------- other : DataArray, Dataset or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. chunks : int, "auto", tuple of int or mapping of Hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}``. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. Returns ------- out : DataArray, Dataset or Variable New object of zeros with the same shape and type as other. Examples -------- >>> x = xr.DataArray( ... np.arange(6).reshape(2, 3), ... dims=["lat", "lon"], ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... 
) >>> x Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.zeros_like(x) Size: 48B array([[0, 0, 0], [0, 0, 0]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.zeros_like(x, dtype=float) Size: 48B array([[0., 0., 0.], [0., 0., 0.]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 See Also -------- ones_like full_like """ return full_like( other, 0, dtype, chunks=chunks, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) @overload def ones_like( other: DataArray, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> DataArray: ... @overload def ones_like( other: Dataset, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset: ... @overload def ones_like( other: Variable, dtype: DTypeLikeSave | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Variable: ... @overload def ones_like( other: Dataset | DataArray, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray: ... @overload def ones_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: ... def ones_like( other: Dataset | DataArray | Variable, dtype: DTypeMaybeMapping | None = None, *, chunks: T_Chunks = None, chunked_array_type: str | None = None, from_array_kwargs: dict[str, Any] | None = None, ) -> Dataset | DataArray | Variable: """Return a new object of ones with the same shape and type as a given dataarray or dataset. Parameters ---------- other : DataArray, Dataset, or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. chunks : int, "auto", tuple of int or mapping of Hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}``. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEnetryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. Returns ------- out : same as object New object of ones with the same shape and type as other. Examples -------- >>> x = xr.DataArray( ... np.arange(6).reshape(2, 3), ... dims=["lat", "lon"], ... coords={"lat": [1, 2], "lon": [0, 1, 2]}, ... 
) >>> x Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 >>> xr.ones_like(x) Size: 48B array([[1, 1, 1], [1, 1, 1]]) Coordinates: * lat (lat) int64 16B 1 2 * lon (lon) int64 24B 0 1 2 See Also -------- zeros_like full_like """ return full_like( other, 1, dtype, chunks=chunks, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) def get_chunksizes( variables: Iterable[Variable], ) -> Mapping[Any, tuple[int, ...]]: chunks: dict[Any, tuple[int, ...]] = {} for v in variables: if hasattr(v._data, "chunks"): for dim, c in v.chunksizes.items(): if dim in chunks and c != chunks[dim]: raise ValueError( f"Object has inconsistent chunks along dimension {dim}. " "This can be fixed by calling unify_chunks()." ) chunks[dim] = c return Frozen(chunks) def is_np_datetime_like(dtype: DTypeLike) -> bool: """Check if a dtype is a subclass of the numpy datetime types""" return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64) def is_np_timedelta_like(dtype: DTypeLike) -> bool: """Check whether dtype is of the timedelta64 dtype.""" return np.issubdtype(dtype, np.timedelta64) def _contains_cftime_datetimes(array: Any) -> bool: """Check if an array inside a Variable contains cftime.datetime objects""" if cftime is None: return False if array.dtype == np.dtype("O") and array.size > 0: first_idx = (0,) * array.ndim if isinstance(array, ExplicitlyIndexed): first_idx = BasicIndexer(first_idx) sample = array[first_idx] return isinstance(np.asarray(sample).item(), cftime.datetime) return False def contains_cftime_datetimes(var: T_Variable) -> bool: """Check if an xarray.Variable contains cftime.datetime objects""" return _contains_cftime_datetimes(var._data) def _contains_datetime_like_objects(var: T_Variable) -> bool: """Check if a variable contains datetime like objects (either np.datetime64, np.timedelta64, or cftime.datetime) """ return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var) xarray-2025.09.0/xarray/core/coordinate_transform.py000066400000000000000000000065701505620616400224060ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Hashable, Iterable, Mapping from typing import Any, overload import numpy as np class CoordinateTransform: """Abstract coordinate transform with dimension & coordinate names. .. caution:: This API is experimental and subject to change. Please report any bugs or surprising behaviour you encounter. """ coord_names: tuple[Hashable, ...] dims: tuple[str, ...] dim_size: dict[str, int] dtype: Any def __init__( self, coord_names: Iterable[Hashable], dim_size: Mapping[str, int], dtype: Any = None, ): self.coord_names = tuple(coord_names) self.dims = tuple(dim_size) self.dim_size = dict(dim_size) if dtype is None: dtype = np.dtype(np.float64) self.dtype = dtype def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]: """Perform grid -> world coordinate transformation. Parameters ---------- dim_positions : dict Grid location(s) along each dimension (axis). Returns ------- coord_labels : dict World coordinate labels. """ # TODO: cache the results in order to avoid re-computing # all labels when accessing the values of each coordinate one at a time raise NotImplementedError def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]: """Perform world -> grid coordinate reverse transformation. Parameters ---------- labels : dict World coordinate labels. 
Returns ------- dim_positions : dict Grid relative location(s) along each dimension (axis). """ raise NotImplementedError @overload def equals(self, other: CoordinateTransform) -> bool: ... @overload def equals( self, other: CoordinateTransform, *, exclude: frozenset[Hashable] | None = None ) -> bool: ... def equals(self, other: CoordinateTransform, **kwargs) -> bool: """Check equality with another CoordinateTransform of the same kind. Parameters ---------- other : CoordinateTransform The other CoordinateTransform object to compare with this object. exclude : frozenset of hashable, optional Dimensions excluded from checking. It is None by default, (i.e., when this method is not called in the context of alignment). For a n-dimensional transform this option allows a CoordinateTransform to optionally ignore any dimension in ``exclude`` when comparing ``self`` with ``other``. For a 1-dimensional transform this kwarg can be safely ignored, as this method is not called when all of the transform's dimensions are also excluded from alignment. """ raise NotImplementedError def generate_coords( self, dims: tuple[str, ...] | None = None ) -> dict[Hashable, Any]: """Compute all coordinate labels at once.""" if dims is None: dims = self.dims positions = np.meshgrid( *[np.arange(self.dim_size[d]) for d in dims], indexing="ij", ) dim_positions = {dim: positions[i] for i, dim in enumerate(dims)} return self.forward(dim_positions) xarray-2025.09.0/xarray/core/coordinates.py000066400000000000000000001353131505620616400204740ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence from contextlib import contextmanager from typing import ( TYPE_CHECKING, Any, Generic, cast, ) import numpy as np import pandas as pd from xarray.core import formatting from xarray.core.indexes import ( Index, Indexes, PandasIndex, PandasMultiIndex, assert_no_index_corrupted, create_default_index_implicit, ) from xarray.core.types import DataVars, ErrorOptions, Self, T_DataArray, T_Xarray from xarray.core.utils import ( Frozen, ReprObject, either_dict_or_kwargs, emit_user_level_warning, ) from xarray.core.variable import Variable, as_variable, calculate_dimensions from xarray.structure.alignment import Aligner from xarray.structure.merge import merge_coordinates_without_align, merge_coords if TYPE_CHECKING: from xarray.core.common import DataWithCoords from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree # Used as the key corresponding to a DataArray's variable when converting # arbitrary DataArray objects to datasets _THIS_ARRAY = ReprObject("") class AbstractCoordinates(Mapping[Hashable, "T_DataArray"]): _data: DataWithCoords __slots__ = ("_data",) def __getitem__(self, key: Hashable) -> T_DataArray: raise NotImplementedError() @property def _names(self) -> set[Hashable]: raise NotImplementedError() @property def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]: raise NotImplementedError() @property def dtypes(self) -> Frozen[Hashable, np.dtype]: raise NotImplementedError() @property def indexes(self) -> Indexes[pd.Index]: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this Coordinates object has indexes that cannot be coerced to pandas.Index objects. 
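# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the xarray sources): a minimal
# concrete subclass of the experimental ``CoordinateTransform`` base class
# defined in xarray/core/coordinate_transform.py above.  It maps grid
# positions to world labels with a 1-D affine rule; the class name and its
# ``offset``/``scale`` parameters are hypothetical.
import numpy as np

from xarray.core.coordinate_transform import CoordinateTransform


class _AffineTransform1D(CoordinateTransform):
    """Toy transform: label = offset + scale * position along one dimension."""

    def __init__(self, dim: str, size: int, offset: float = 0.0, scale: float = 1.0):
        super().__init__(coord_names=(dim,), dim_size={dim: size})
        self.offset = offset
        self.scale = scale

    def forward(self, dim_positions):
        # grid -> world
        positions = np.asarray(dim_positions[self.dims[0]])
        return {self.coord_names[0]: self.offset + self.scale * positions}

    def reverse(self, coord_labels):
        # world -> grid
        labels = np.asarray(coord_labels[self.coord_names[0]])
        return {self.dims[0]: (labels - self.offset) / self.scale}

    def equals(self, other, **kwargs):
        return (
            isinstance(other, _AffineTransform1D)
            and self.dim_size == other.dim_size
            and self.offset == other.offset
            and self.scale == other.scale
        )


# For example, ``_AffineTransform1D("x", size=4, offset=10.0, scale=0.5)``
# generates the labels [10.0, 10.5, 11.0, 11.5] via ``generate_coords()``.
# ---------------------------------------------------------------------------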
See Also -------- Coordinates.xindexes """ return self._data.indexes @property def xindexes(self) -> Indexes[Index]: """Mapping of :py:class:`~xarray.indexes.Index` objects used for label based indexing. """ return self._data.xindexes @property def variables(self): raise NotImplementedError() def _update_coords(self, coords, indexes): raise NotImplementedError() def _drop_coords(self, coord_names): raise NotImplementedError() def __iter__(self) -> Iterator[Hashable]: # needs to be in the same order as the dataset variables for k in self.variables: if k in self._names: yield k def __len__(self) -> int: return len(self._names) def __contains__(self, key: Hashable) -> bool: return key in self._names def __repr__(self) -> str: return formatting.coords_repr(self) def to_dataset(self) -> Dataset: raise NotImplementedError() def to_index(self, ordered_dims: Sequence[Hashable] | None = None) -> pd.Index: """Convert all index coordinates into a :py:class:`pandas.Index`. Parameters ---------- ordered_dims : sequence of hashable, optional Possibly reordered version of this object's dimensions indicating the order in which dimensions should appear on the result. Returns ------- pandas.Index Index subclass corresponding to the outer-product of all dimension coordinates. This will be a MultiIndex if this object is has more than more dimension. """ if ordered_dims is None: ordered_dims = list(self.dims) elif set(ordered_dims) != set(self.dims): raise ValueError( "ordered_dims must match dims, but does not: " f"{ordered_dims} vs {self.dims}" ) if len(ordered_dims) == 0: raise ValueError("no valid index for a 0-dimensional object") elif len(ordered_dims) == 1: (dim,) = ordered_dims return self._data.get_index(dim) else: indexes = [self._data.get_index(k) for k in ordered_dims] # compute the sizes of the repeat and tile for the cartesian product # (taken from pandas.core.reshape.util) index_lengths = np.fromiter( (len(index) for index in indexes), dtype=np.intp ) cumprod_lengths = np.cumprod(index_lengths) if cumprod_lengths[-1] == 0: # if any factor is empty, the cartesian product is empty repeat_counts = np.zeros_like(cumprod_lengths) else: # sizes of the repeats repeat_counts = cumprod_lengths[-1] / cumprod_lengths # sizes of the tiles tile_counts = np.roll(cumprod_lengths, 1) tile_counts[0] = 1 # loop over the indexes # for each MultiIndex or Index compute the cartesian product of the codes code_list = [] level_list = [] names = [] for i, index in enumerate(indexes): if isinstance(index, pd.MultiIndex): codes, levels = index.codes, index.levels else: code, level = pd.factorize(index) codes = [code] levels = [level] # compute the cartesian product code_list += [ np.tile(np.repeat(code, repeat_counts[i]), tile_counts[i]) for code in codes ] level_list += [list(level) for level in levels] names += index.names return pd.MultiIndex( levels=level_list, codes=[list(c) for c in code_list], names=names ) class Coordinates(AbstractCoordinates): """Dictionary like container for Xarray coordinates (variables + indexes). This collection is a mapping of coordinate names to :py:class:`~xarray.DataArray` objects. It can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. 
Coordinates are either: - returned via the :py:attr:`Dataset.coords`, :py:attr:`DataArray.coords`, and :py:attr:`DataTree.coords` properties, - built from Xarray or Pandas index objects (e.g., :py:meth:`Coordinates.from_xindex` or :py:meth:`Coordinates.from_pandas_multiindex`), - built manually from input coordinate data and Xarray ``Index`` objects via :py:meth:`Coordinates.__init__` (beware that no consistency check is done on those inputs). To create new coordinates from an existing Xarray ``Index`` object, use :py:meth:`Coordinates.from_xindex` instead of :py:meth:`Coordinates.__init__`. The latter is useful, e.g., for creating coordinates with no default index. Parameters ---------- coords: dict-like, optional Mapping where keys are coordinate names and values are objects that can be converted into a :py:class:`~xarray.Variable` object (see :py:func:`~xarray.as_variable`). If another :py:class:`~xarray.Coordinates` object is passed, its indexes will be added to the new created object. indexes: dict-like, optional Mapping where keys are coordinate names and values are :py:class:`~xarray.indexes.Index` objects. If None (default), pandas indexes will be created for each dimension coordinate. Passing an empty dictionary will skip this default behavior. Examples -------- Create a dimension coordinate with a default (pandas) index: >>> xr.Coordinates({"x": [1, 2]}) Coordinates: * x (x) int64 16B 1 2 Create a dimension coordinate with no index: >>> xr.Coordinates(coords={"x": [1, 2]}, indexes={}) Coordinates: x (x) int64 16B 1 2 Create a new Coordinates object from existing dataset coordinates (indexes are passed): >>> ds = xr.Dataset(coords={"x": [1, 2]}) >>> xr.Coordinates(ds.coords) Coordinates: * x (x) int64 16B 1 2 Create indexed coordinates from a ``pandas.MultiIndex`` object: >>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]]) >>> xr.Coordinates.from_pandas_multiindex(midx, "x") Coordinates: * x (x) object 32B MultiIndex * x_level_0 (x) object 32B 'a' 'a' 'b' 'b' * x_level_1 (x) int64 32B 0 1 0 1 Create a new Dataset object by passing a Coordinates object: >>> midx_coords = xr.Coordinates.from_pandas_multiindex(midx, "x") >>> xr.Dataset(coords=midx_coords) Size: 96B Dimensions: (x: 4) Coordinates: * x (x) object 32B MultiIndex * x_level_0 (x) object 32B 'a' 'a' 'b' 'b' * x_level_1 (x) int64 32B 0 1 0 1 Data variables: *empty* """ _data: DataWithCoords __slots__ = ("_data",) def __init__( self, coords: Mapping[Any, Any] | None = None, indexes: Mapping[Any, Index] | None = None, ) -> None: # When coordinates are constructed directly, an internal Dataset is # created so that it is compatible with the DatasetCoordinates and # DataArrayCoordinates classes serving as a proxy for the data. # TODO: refactor DataArray / Dataset so that Coordinates store the data. 
from xarray.core.dataset import Dataset if coords is None: coords = {} variables: dict[Hashable, Variable] default_indexes: dict[Hashable, PandasIndex] = {} coords_obj_indexes: dict[Hashable, Index] = {} if isinstance(coords, Coordinates): if indexes is not None: raise ValueError( "passing both a ``Coordinates`` object and a mapping of indexes " "to ``Coordinates.__init__`` is not allowed " "(this constructor does not support merging them)" ) variables = {k: v.copy() for k, v in coords.variables.items()} coords_obj_indexes = dict(coords.xindexes) else: variables = {} for name, data in coords.items(): var = as_variable(data, name=name, auto_convert=False) if var.dims == (name,) and indexes is None: index, index_vars = create_default_index_implicit(var, list(coords)) default_indexes.update(dict.fromkeys(index_vars, index)) variables.update(index_vars) else: variables[name] = var if indexes is None: indexes = {} else: indexes = dict(indexes) indexes.update(default_indexes) indexes.update(coords_obj_indexes) no_coord_index = set(indexes) - set(variables) if no_coord_index: raise ValueError( f"no coordinate variables found for these indexes: {no_coord_index}" ) for k, idx in indexes.items(): if not isinstance(idx, Index): raise TypeError(f"'{k}' is not an `xarray.indexes.Index` object") # maybe convert to base variable for k, v in variables.items(): if k not in indexes: variables[k] = v.to_base_variable() self._data = Dataset._construct_direct( coord_names=set(variables), variables=variables, indexes=indexes ) @classmethod def _construct_direct( cls, coords: dict[Any, Variable], indexes: dict[Any, Index], dims: dict[Any, int] | None = None, ) -> Self: from xarray.core.dataset import Dataset obj = object.__new__(cls) obj._data = Dataset._construct_direct( coord_names=set(coords), variables=coords, indexes=indexes, dims=dims, ) return obj @classmethod def from_xindex(cls, index: Index) -> Self: """Create Xarray coordinates from an existing Xarray index. Parameters ---------- index : Index Xarray index object. The index must support generating new coordinate variables from itself. Returns ------- coords : Coordinates A collection of Xarray indexed coordinates created from the index. """ variables = index.create_variables() if not variables: raise ValueError( "`Coordinates.from_xindex()` only supports index objects that can generate " "new coordinate variables from scratch. The given index (shown below) did not " f"create any coordinate.\n{index!r}" ) indexes = dict.fromkeys(variables, index) return cls(coords=variables, indexes=indexes) @classmethod def from_pandas_multiindex(cls, midx: pd.MultiIndex, dim: Hashable) -> Self: """Wrap a pandas multi-index as Xarray coordinates (dimension + levels). The returned coordinate variables can be directly assigned to a :py:class:`~xarray.Dataset` or :py:class:`~xarray.DataArray` via the ``coords`` argument of their constructor. Parameters ---------- midx : :py:class:`pandas.MultiIndex` Pandas multi-index object. dim : str Dimension name. Returns ------- coords : Coordinates A collection of Xarray indexed coordinates created from the multi-index. 
""" xr_idx = PandasMultiIndex(midx, dim) variables = xr_idx.create_variables() indexes = dict.fromkeys(variables, xr_idx) return cls(coords=variables, indexes=indexes) @property def _names(self) -> set[Hashable]: return self._data._coord_names @property def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]: """Mapping from dimension names to lengths or tuple of dimension names.""" return self._data.dims @property def sizes(self) -> Frozen[Hashable, int]: """Mapping from dimension names to lengths.""" return self._data.sizes @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly. See Also -------- Dataset.dtypes """ return Frozen({n: v.dtype for n, v in self._data.variables.items()}) @property def variables(self) -> Mapping[Hashable, Variable]: """Low level interface to Coordinates contents as dict of Variable objects. This dictionary is frozen to prevent mutation. """ return self._data.variables def to_dataset(self) -> Dataset: """Convert these coordinates into a new Dataset.""" names = [name for name in self._data._variables if name in self._names] return self._data._copy_listed(names) def __getitem__(self, key: Hashable) -> DataArray: return self._data[key] def __delitem__(self, key: Hashable) -> None: # redirect to DatasetCoordinates.__delitem__ del self._data.coords[key] def equals(self, other: Self) -> bool: """Two Coordinates objects are equal if they have matching variables, all of which are equal. See Also -------- Coordinates.identical """ if not isinstance(other, Coordinates): return False return self.to_dataset().equals(other.to_dataset()) def identical(self, other: Self) -> bool: """Like equals, but also checks all variable attributes. See Also -------- Coordinates.equals """ if not isinstance(other, Coordinates): return False return self.to_dataset().identical(other.to_dataset()) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: # redirect to DatasetCoordinates._update_coords self._data.coords._update_coords(coords, indexes) def _drop_coords(self, coord_names): # redirect to DatasetCoordinates._drop_coords self._data.coords._drop_coords(coord_names) def _merge_raw(self, other, reflexive): """For use with binary arithmetic.""" if other is None: variables = dict(self.variables) indexes = dict(self.xindexes) else: coord_list = [self, other] if not reflexive else [other, self] variables, indexes = merge_coordinates_without_align(coord_list) return variables, indexes @contextmanager def _merge_inplace(self, other): """For use with in-place binary arithmetic.""" if other is None: yield else: # don't include indexes in prioritized, because we didn't align # first and we want indexes to be checked prioritized = { k: (v, None) for k, v in self.variables.items() if k not in self.xindexes } variables, indexes = merge_coordinates_without_align( [self, other], prioritized ) yield self._update_coords(variables, indexes) def merge(self, other: Mapping[Any, Any] | None) -> Dataset: """Merge two sets of coordinates to create a new Dataset The method implements the logic used for joining coordinates in the result of a binary operation performed on xarray objects: - If two index coordinates conflict (are not equal), an exception is raised. You must align your data before passing it to this method. - If an index coordinate and a non-index coordinate conflict, the non- index coordinate is dropped. - If two non-index coordinates conflict, both are dropped. 
Parameters ---------- other : dict-like, optional A :py:class:`Coordinates` object or any mapping that can be turned into coordinates. Returns ------- merged : Dataset A new Dataset with merged coordinates. """ from xarray.core.dataset import Dataset if other is None: return self.to_dataset() if not isinstance(other, Coordinates): other = Dataset(coords=other).coords coords, indexes = merge_coordinates_without_align([self, other]) coord_names = set(coords) return Dataset._construct_direct( variables=coords, coord_names=coord_names, indexes=indexes ) def __or__(self, other: Mapping[Any, Any] | None) -> Coordinates: """Merge two sets of coordinates to create a new Coordinates object The method implements the logic used for joining coordinates in the result of a binary operation performed on xarray objects: - If two index coordinates conflict (are not equal), an exception is raised. You must align your data before passing it to this method. - If an index coordinate and a non-index coordinate conflict, the non- index coordinate is dropped. - If two non-index coordinates conflict, both are dropped. Parameters ---------- other : dict-like, optional A :py:class:`Coordinates` object or any mapping that can be turned into coordinates. Returns ------- merged : Coordinates A new Coordinates object with merged coordinates. See Also -------- Coordinates.merge """ return self.merge(other).coords def __setitem__(self, key: Hashable, value: Any) -> None: self.update({key: value}) def update(self, other: Mapping[Any, Any]) -> None: """Update this Coordinates variables with other coordinate variables.""" if not len(other): return other_coords: Coordinates if isinstance(other, Coordinates): # Coordinates object: just pass it (default indexes won't be created) other_coords = other else: other_coords = create_coords_with_default_indexes( getattr(other, "variables", other) ) # Discard original indexed coordinates prior to merge allows to: # - fail early if the new coordinates don't preserve the integrity of existing # multi-coordinate indexes # - drop & replace coordinates without alignment (note: we must keep indexed # coordinates extracted from the DataArray objects passed as values to # `other` - if any - as those are still used for aligning the old/new coordinates) coords_to_align = drop_indexed_coords(set(other_coords) & set(other), self) coords, indexes = merge_coords( [coords_to_align, other_coords], priority_arg=1, indexes=coords_to_align.xindexes, ) # special case for PandasMultiIndex: updating only its dimension coordinate # is still allowed but depreciated. # It is the only case where we need to actually drop coordinates here (multi-index levels) # TODO: remove when removing PandasMultiIndex's dimension coordinate. self._drop_coords(self._names - coords_to_align._names) self._update_coords(coords, indexes) def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self: """Assign new coordinates (and indexes) to a Coordinates object, returning a new object with all the original coordinates in addition to the new ones. Parameters ---------- coords : mapping of dim to coord, optional A mapping whose keys are the names of the coordinates and values are the coordinates to assign. The mapping will generally be a dict or :class:`Coordinates`. * If a value is a standard data value β€” for example, a ``DataArray``, scalar, or array β€” the data is simply assigned as a coordinate. 
* A coordinate can also be defined and attached to an existing dimension using a tuple with the first element the dimension name and the second element the values for this new coordinate. **coords_kwargs The keyword arguments form of ``coords``. One of ``coords`` or ``coords_kwargs`` must be provided. Returns ------- new_coords : Coordinates A new Coordinates object with the new coordinates (and indexes) in addition to all the existing coordinates. Examples -------- >>> coords = xr.Coordinates() >>> coords Coordinates: *empty* >>> coords.assign(x=[1, 2]) Coordinates: * x (x) int64 16B 1 2 >>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]]) >>> coords.assign(xr.Coordinates.from_pandas_multiindex(midx, "y")) Coordinates: * y (y) object 32B MultiIndex * y_level_0 (y) object 32B 'a' 'a' 'b' 'b' * y_level_1 (y) int64 32B 0 1 0 1 """ # TODO: this doesn't support a callable, which is inconsistent with `DataArray.assign_coords` coords = either_dict_or_kwargs(coords, coords_kwargs, "assign") new_coords = self.copy() new_coords.update(coords) return new_coords def _overwrite_indexes( self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, ) -> Self: results = self.to_dataset()._overwrite_indexes(indexes, variables) # TODO: remove cast once we get rid of DatasetCoordinates # and DataArrayCoordinates (i.e., Dataset and DataArray encapsulate Coordinates) return cast(Self, results.coords) def _reindex_callback( self, aligner: Aligner, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], indexes: dict[Hashable, Index], fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], ) -> Self: """Callback called from ``Aligner`` to create a new reindexed Coordinate.""" aligned = self.to_dataset()._reindex_callback( aligner, dim_pos_indexers, variables, indexes, fill_value, exclude_dims, exclude_vars, ) # TODO: remove cast once we get rid of DatasetCoordinates # and DataArrayCoordinates (i.e., Dataset and DataArray encapsulate Coordinates) return cast(Self, aligned.coords) def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return self._data._ipython_key_completions_() def copy( self, deep: bool = False, memo: dict[int, Any] | None = None, ) -> Self: """Return a copy of this Coordinates object.""" # do not copy indexes (may corrupt multi-coordinate indexes) # TODO: disable variables deepcopy? it may also be problematic when they # encapsulate index objects like pd.Index variables = { k: v._copy(deep=deep, memo=memo) for k, v in self.variables.items() } # TODO: getting an error with `self._construct_direct`, possibly because of how # a subclass implements `_construct_direct`. (This was originally the same # runtime code, but we switched the type definitions in #8216, which # necessitates the cast.) return cast( Self, Coordinates._construct_direct( coords=variables, indexes=dict(self.xindexes), dims=dict(self.sizes) ), ) def drop_vars( self, names: str | Iterable[Hashable] | Callable[ [Coordinates | Dataset | DataArray | DataTree], str | Iterable[Hashable], ], *, errors: ErrorOptions = "raise", ) -> Self: """Drop variables from this Coordinates object. Note that indexes that depend on these variables will also be dropped. Parameters ---------- names : hashable or iterable or callable Name(s) of variables to drop. If a callable, this is object is passed as its only argument and its result is used. errors : {"raise", "ignore"}, default: "raise" Error treatment. 
- ``'raise'``: raises a :py:class:`ValueError` error if any of the variable passed are not in the dataset - ``'ignore'``: any given names that are in the dataset are dropped and no error is raised. """ return cast(Self, self.to_dataset().drop_vars(names, errors=errors).coords) def drop_dims( self, drop_dims: str | Iterable[Hashable], *, errors: ErrorOptions = "raise", ) -> Self: """Drop dimensions and associated variables from this dataset. Parameters ---------- drop_dims : str or Iterable of Hashable Dimension or dimensions to drop. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given dimensions that are in the dataset are dropped and no error is raised. Returns ------- obj : Coordinates Coordinates object without the given dimensions (or any coordinates containing those dimensions). """ return cast(Self, self.to_dataset().drop_dims(drop_dims, errors=errors).coords) def rename_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims: Hashable, ) -> Self: """Returns a new object with renamed dimensions only. Parameters ---------- dims_dict : dict-like, optional Dictionary whose keys are current dimension names and whose values are the desired names. The desired names must not be the name of an existing dimension or Variable in the Coordinates. **dims : optional Keyword form of ``dims_dict``. One of dims_dict or dims must be provided. Returns ------- renamed : Coordinates Coordinates object with renamed dimensions. """ return cast(Self, self.to_dataset().rename_dims(dims_dict, **dims).coords) def rename_vars( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Coordinates: """Returns a new object with renamed variables. Parameters ---------- name_dict : dict-like, optional Dictionary whose keys are current variable or coordinate names and whose values are the desired names. **names : optional Keyword form of ``name_dict``. One of name_dict or names must be provided. Returns ------- renamed : Coordinates Coordinates object with renamed variables """ return cast(Self, self.to_dataset().rename_vars(name_dict, **names).coords) class DatasetCoordinates(Coordinates): """Dictionary like container for Dataset coordinates (variables + indexes). This collection can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. """ _data: Dataset __slots__ = ("_data",) def __init__(self, dataset: Dataset): self._data = dataset @property def _names(self) -> set[Hashable]: return self._data._coord_names @property def dims(self) -> Frozen[Hashable, int]: # deliberately display all dims, not just those on coordinate variables - see https://github.com/pydata/xarray/issues/9466 return self._data.dims @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- Dataset.dtypes """ return Frozen( { n: v.dtype for n, v in self._data._variables.items() if n in self._data._coord_names } ) @property def variables(self) -> Mapping[Hashable, Variable]: return Frozen( {k: v for k, v in self._data.variables.items() if k in self._names} ) def __getitem__(self, key: Hashable) -> DataArray: if key in self._data.data_vars: raise KeyError(key) return self._data[key] def to_dataset(self) -> Dataset: """Convert these coordinates into a new Dataset""" names = [name for name in self._data._variables if name in self._names] return self._data._copy_listed(names) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: variables = self._data._variables.copy() variables.update(coords) # check for inconsistent state *before* modifying anything in-place dims = calculate_dimensions(variables) new_coord_names = set(coords) for dim in dims: if dim in variables: new_coord_names.add(dim) self._data._variables = variables self._data._coord_names.update(new_coord_names) self._data._dims = dims # TODO(shoyer): once ._indexes is always populated by a dict, modify # it to update inplace instead. original_indexes = dict(self._data.xindexes) original_indexes.update(indexes) self._data._indexes = original_indexes def _drop_coords(self, coord_names): # should drop indexed coordinates only for name in coord_names: del self._data._variables[name] del self._data._indexes[name] self._data._coord_names.difference_update(coord_names) def __delitem__(self, key: Hashable) -> None: if key in self: del self._data[key] else: raise KeyError( f"{key!r} is not in coordinate variables {tuple(self.keys())}" ) def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return [ key for key in self._data._ipython_key_completions_() if key not in self._data.data_vars ] class DataTreeCoordinates(Coordinates): """ Dictionary like container for coordinates of a DataTree node (variables + indexes). This collection can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. """ # TODO: This only needs to be a separate class from `DatasetCoordinates` because DataTree nodes store their variables differently # internally than how Datasets do, see https://github.com/pydata/xarray/issues/9203. _data: DataTree # type: ignore[assignment] # complaining that DataTree is not a subclass of DataWithCoords - this can be fixed by refactoring, see #9203 __slots__ = ("_data",) def __init__(self, datatree: DataTree): self._data = datatree @property def _names(self) -> set[Hashable]: return set(self._data._coord_variables) @property def dims(self) -> Frozen[Hashable, int]: # deliberately display all dims, not just those on coordinate variables - see https://github.com/pydata/xarray/issues/9466 return Frozen(self._data.dims) @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- Dataset.dtypes """ return Frozen({n: v.dtype for n, v in self._data._coord_variables.items()}) @property def variables(self) -> Mapping[Hashable, Variable]: return Frozen(self._data._coord_variables) def __getitem__(self, key: Hashable) -> DataArray: if key not in self._data._coord_variables: raise KeyError(key) return self._data.dataset[key] def to_dataset(self) -> Dataset: """Convert these coordinates into a new Dataset""" return self._data.dataset._copy_listed(self._names) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: from xarray.core.datatree import check_alignment # create updated node (`.to_dataset` makes a copy so this doesn't modify in-place) node_ds = self._data.to_dataset(inherit=False) node_ds.coords._update_coords(coords, indexes) # check consistency *before* modifying anything in-place # TODO can we clean up the signature of check_alignment to make this less awkward? if self._data.parent is not None: parent_ds = self._data.parent._to_dataset_view( inherit=True, rebuild_dims=False ) else: parent_ds = None check_alignment(self._data.path, node_ds, parent_ds, self._data.children) # assign updated attributes coord_variables = dict(node_ds.coords.variables) self._data._node_coord_variables = coord_variables self._data._node_dims = node_ds._dims self._data._node_indexes = node_ds._indexes def _drop_coords(self, coord_names): # should drop indexed coordinates only for name in coord_names: del self._data._node_coord_variables[name] del self._data._node_indexes[name] def __delitem__(self, key: Hashable) -> None: if key in self: del self._data[key] # type: ignore[arg-type] # see https://github.com/pydata/xarray/issues/8836 else: raise KeyError(key) def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return [ key for key in self._data._ipython_key_completions_() if key in self._data._coord_variables ] class DataArrayCoordinates(Coordinates, Generic[T_DataArray]): """Dictionary like container for DataArray coordinates (variables + indexes). This collection can be passed directly to the :py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` constructors via their `coords` argument. This will add both the coordinates variables and their index. """ _data: T_DataArray __slots__ = ("_data",) def __init__(self, dataarray: T_DataArray) -> None: self._data = dataarray @property def dims(self) -> tuple[Hashable, ...]: return self._data.dims @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from coordinate names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- DataArray.dtype """ return Frozen({n: v.dtype for n, v in self._data._coords.items()}) @property def _names(self) -> set[Hashable]: return set(self._data._coords) def __getitem__(self, key: Hashable) -> T_DataArray: return self._data._getitem_coord(key) def _update_coords( self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index] ) -> None: validate_dataarray_coords( self._data.shape, Coordinates._construct_direct(coords, indexes), self.dims ) self._data._coords = coords self._data._indexes = indexes def _drop_coords(self, coord_names): # should drop indexed coordinates only for name in coord_names: del self._data._coords[name] del self._data._indexes[name] @property def variables(self): return Frozen(self._data._coords) def to_dataset(self) -> Dataset: from xarray.core.dataset import Dataset coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()} indexes = dict(self._data.xindexes) return Dataset._construct_direct(coords, set(coords), indexes=indexes) def __delitem__(self, key: Hashable) -> None: if key not in self: raise KeyError( f"{key!r} is not in coordinate variables {tuple(self.keys())}" ) assert_no_index_corrupted(self._data.xindexes, {key}) del self._data._coords[key] if key in self._data._indexes: del self._data._indexes[key] def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return self._data._ipython_key_completions_() def drop_indexed_coords( coords_to_drop: set[Hashable], coords: Coordinates ) -> Coordinates: """Drop indexed coordinates associated with coordinates in coords_to_drop. This will raise an error in case it corrupts any passed index and its coordinate variables. """ new_variables = dict(coords.variables) new_indexes = dict(coords.xindexes) for idx, idx_coords in coords.xindexes.group_by_index(): idx_drop_coords = set(idx_coords) & coords_to_drop # special case for pandas multi-index: still allow but deprecate # dropping only its dimension coordinate. # TODO: remove when removing PandasMultiIndex's dimension coordinate. if isinstance(idx, PandasMultiIndex) and idx_drop_coords == {idx.dim}: idx_drop_coords.update(idx.index.names) emit_user_level_warning( f"updating coordinate {idx.dim!r}, which is a PandasMultiIndex, would leave " f"the multi-index level coordinates {list(idx.index.names)!r} in an inconsistent state. " f"This will raise an error in the future. Use `.drop_vars({list(idx_coords)!r})` " "to drop the coordinates' values before assigning new coordinate values.", FutureWarning, ) elif idx_drop_coords and len(idx_drop_coords) != len(idx_coords): idx_drop_coords_str = ", ".join(f"{k!r}" for k in idx_drop_coords) idx_coords_str = ", ".join(f"{k!r}" for k in idx_coords) raise ValueError( f"cannot drop or update coordinate(s) {idx_drop_coords_str}, which would corrupt " f"the following index built from coordinates {idx_coords_str}:\n" f"{idx}" ) for k in idx_drop_coords: del new_variables[k] del new_indexes[k] return Coordinates._construct_direct(coords=new_variables, indexes=new_indexes) def assert_coordinate_consistent(obj: T_Xarray, coords: Mapping[Any, Variable]) -> None: """Make sure the dimension coordinate of obj is consistent with coords. 
obj: DataArray or Dataset coords: Dict-like of variables """ for k in obj.dims: # make sure there are no conflict in dimension coordinates if k in coords and k in obj.coords and not coords[k].equals(obj[k].variable): raise IndexError( f"dimension coordinate {k!r} conflicts between " f"indexed and indexing objects:\n{obj[k]}\nvs.\n{coords[k]}" ) def create_coords_with_default_indexes( coords: Mapping[Any, Any], data_vars: DataVars | None = None ) -> Coordinates: """Returns a Coordinates object from a mapping of coordinates (arbitrary objects). Create default (pandas) indexes for each of the input dimension coordinates. Extract coordinates from each input DataArray. """ # Note: data_vars is needed here only because a pd.MultiIndex object # can be promoted as coordinates. # TODO: It won't be relevant anymore when this behavior will be dropped # in favor of the more explicit ``Coordinates.from_pandas_multiindex()``. from xarray.core.dataarray import DataArray all_variables = dict(coords) if data_vars is not None: all_variables.update(data_vars) indexes: dict[Hashable, Index] = {} variables: dict[Hashable, Variable] = {} # promote any pandas multi-index in data_vars as coordinates coords_promoted: dict[Hashable, Any] = {} pd_mindex_keys: list[Hashable] = [] for k, v in all_variables.items(): if isinstance(v, pd.MultiIndex): coords_promoted[k] = v pd_mindex_keys.append(k) elif k in coords: coords_promoted[k] = v if pd_mindex_keys: pd_mindex_keys_fmt = ",".join([f"'{k}'" for k in pd_mindex_keys]) emit_user_level_warning( f"the `pandas.MultiIndex` object(s) passed as {pd_mindex_keys_fmt} coordinate(s) or " "data variable(s) will no longer be implicitly promoted and wrapped into " "multiple indexed coordinates in the future " "(i.e., one coordinate for each multi-index level + one dimension coordinate). " "If you want to keep this behavior, you need to first wrap it explicitly using " "`mindex_coords = xarray.Coordinates.from_pandas_multiindex(mindex_obj, 'dim')` " "and pass it as coordinates, e.g., `xarray.Dataset(coords=mindex_coords)`, " "`dataset.assign_coords(mindex_coords)` or `dataarray.assign_coords(mindex_coords)`.", FutureWarning, ) dataarray_coords: list[DataArrayCoordinates] = [] for name, obj in coords_promoted.items(): if isinstance(obj, DataArray): dataarray_coords.append(obj.coords) variable = as_variable(obj, name=name, auto_convert=False) if variable.dims == (name,): # still needed to convert to IndexVariable first due to some # pandas multi-index edge cases. variable = variable.to_index_variable() idx, idx_vars = create_default_index_implicit(variable, all_variables) indexes.update(dict.fromkeys(idx_vars, idx)) variables.update(idx_vars) all_variables.update(idx_vars) else: variables[name] = variable new_coords = Coordinates._construct_direct(coords=variables, indexes=indexes) # extract and merge coordinates and indexes from input DataArrays if dataarray_coords: prioritized = {k: (v, indexes.get(k)) for k, v in variables.items()} variables, indexes = merge_coordinates_without_align( dataarray_coords + [new_coords], prioritized=prioritized, ) new_coords = Coordinates._construct_direct(coords=variables, indexes=indexes) return new_coords class CoordinateValidationError(ValueError): """Error class for Xarray coordinate validation failures.""" def validate_dataarray_coords( shape: tuple[int, ...], coords: Coordinates | Mapping[Hashable, Variable], dim: tuple[Hashable, ...], ): """Validate coordinates ``coords`` to include in a DataArray defined by ``shape`` and dimensions ``dim``. 
If a coordinate is associated with an index, the validation is performed by the index. By default the coordinate dimensions must match (a subset of) the array dimensions (in any order) to conform to the DataArray model. The index may override this behavior with other validation rules, though. Non-index coordinates must all conform to the DataArray model. Scalar coordinates are always valid. """ sizes = dict(zip(dim, shape, strict=True)) dim_set = set(dim) indexes: Mapping[Hashable, Index] if isinstance(coords, Coordinates): indexes = coords.xindexes else: indexes = {} for k, v in coords.items(): if k in indexes: invalid = not indexes[k].should_add_coord_to_array(k, v, dim_set) else: invalid = any(d not in dim for d in v.dims) if invalid: raise CoordinateValidationError( f"coordinate {k} has dimensions {v.dims}, but these " "are not a subset of the DataArray " f"dimensions {dim}" ) for d, s in v.sizes.items(): if d in sizes and s != sizes[d]: raise CoordinateValidationError( f"conflicting sizes for dimension {d!r}: " f"length {sizes[d]} on the data but length {s} on " f"coordinate {k!r}" ) def coordinates_from_variable(variable: Variable) -> Coordinates: (name,) = variable.dims new_index, index_vars = create_default_index_implicit(variable) indexes = dict.fromkeys(index_vars, new_index) new_vars = new_index.create_variables() new_vars[name].attrs = variable.attrs return Coordinates(new_vars, indexes) xarray-2025.09.0/xarray/core/dataarray.py000066400000000000000000010730451505620616400201360ustar00rootroot00000000000000from __future__ import annotations import copy import datetime import warnings from collections.abc import ( Callable, Hashable, Iterable, Mapping, MutableMapping, Sequence, ) from functools import partial from os import PathLike from types import EllipsisType from typing import TYPE_CHECKING, Any, Generic, Literal, NoReturn, TypeVar, overload import numpy as np import pandas as pd from xarray.coding.calendar_ops import convert_calendar, interp_calendar from xarray.coding.cftimeindex import CFTimeIndex from xarray.computation import computation, ops from xarray.computation.arithmetic import DataArrayArithmetic from xarray.core import dtypes, indexing, utils from xarray.core._aggregations import DataArrayAggregations from xarray.core.accessor_dt import CombinedDatetimelikeAccessor from xarray.core.accessor_str import StringAccessor from xarray.core.common import AbstractArray, DataWithCoords, get_chunksizes from xarray.core.coordinates import ( Coordinates, DataArrayCoordinates, assert_coordinate_consistent, create_coords_with_default_indexes, validate_dataarray_coords, ) from xarray.core.dataset import Dataset from xarray.core.extension_array import PandasExtensionArray from xarray.core.formatting import format_item from xarray.core.indexes import ( Index, Indexes, PandasMultiIndex, filter_indexes_from_coords, isel_indexes, ) from xarray.core.indexing import is_fancy_indexer, map_index_queries from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ( Bins, DaCompatible, NetcdfWriteModes, T_Chunks, T_DataArray, T_DataArrayOrSet, ZarrWriteModes, ) from xarray.core.utils import ( Default, FilteredMapping, ReprObject, _default, either_dict_or_kwargs, hashable, infix_dims, result_name, ) from xarray.core.variable import ( IndexVariable, Variable, as_compatible_data, as_variable, ) from xarray.plot.accessor import DataArrayPlotAccessor from xarray.plot.utils import _get_units_from_attrs from xarray.structure import alignment from 
xarray.structure.alignment import ( _broadcast_helper, _get_broadcast_dims_map_common_coords, align, ) from xarray.structure.chunks import unify_chunks from xarray.structure.merge import PANDAS_TYPES, MergeError from xarray.util.deprecation_helpers import _deprecate_positional_args, deprecate_dims if TYPE_CHECKING: from dask.dataframe import DataFrame as DaskDataFrame from dask.delayed import Delayed from iris.cube import Cube as iris_Cube from numpy.typing import ArrayLike from xarray.backends import ZarrStore from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes from xarray.computation.rolling import DataArrayCoarsen, DataArrayRolling from xarray.computation.weighted import DataArrayWeighted from xarray.core.groupby import DataArrayGroupBy from xarray.core.resample import DataArrayResample from xarray.core.types import ( CoarsenBoundaryOptions, DatetimeLike, DatetimeUnitOptions, Dims, ErrorOptions, ErrorOptionsWithWarn, GroupIndices, GroupInput, InterpOptions, PadModeOptions, PadReflectOptions, QuantileMethods, QueryEngineOptions, QueryParserOptions, ReindexMethodOptions, ResampleCompatible, Self, SideOptions, T_ChunkDimFreq, T_ChunksFreq, T_Xarray, ) from xarray.groupers import Grouper, Resampler from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint T_XarrayOther = TypeVar("T_XarrayOther", bound="DataArray" | Dataset) def _infer_coords_and_dims( shape: tuple[int, ...], coords: ( Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray] | Mapping | None ), dims: str | Iterable[Hashable] | None, ) -> tuple[Mapping[Hashable, Any], tuple[Hashable, ...]]: """All the logic for creating a new DataArray""" if ( coords is not None and not utils.is_dict_like(coords) and len(coords) != len(shape) ): raise ValueError( f"coords is not dict-like, but it has {len(coords)} items, " f"which does not match the {len(shape)} dimensions of the " "data" ) if isinstance(dims, str): dims = (dims,) elif dims is None: dims = [f"dim_{n}" for n in range(len(shape))] if coords is not None and len(coords) == len(shape): # try to infer dimensions from coords if utils.is_dict_like(coords): dims = list(coords.keys()) else: for n, (dim, coord) in enumerate(zip(dims, coords, strict=True)): coord = as_variable( coord, name=dim, auto_convert=False ).to_index_variable() dims[n] = coord.name dims_tuple = tuple(dims) if len(dims_tuple) != len(shape): raise ValueError( "different number of dimensions on data " f"and dims: {len(shape)} vs {len(dims_tuple)}" ) for d in dims_tuple: if not hashable(d): raise TypeError(f"Dimension {d} is not hashable") new_coords: Mapping[Hashable, Any] if isinstance(coords, Coordinates): new_coords = coords else: new_coords = {} if utils.is_dict_like(coords): for k, v in coords.items(): new_coords[k] = as_variable(v, name=k, auto_convert=False) if new_coords[k].dims == (k,): new_coords[k] = new_coords[k].to_index_variable() elif coords is not None: for dim, coord in zip(dims_tuple, coords, strict=True): var = as_variable(coord, name=dim, auto_convert=False) var.dims = (dim,) new_coords[dim] = var.to_index_variable() validate_dataarray_coords(shape, new_coords, dims_tuple) return new_coords, dims_tuple def _check_data_shape( data: Any, coords: ( Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray] | Mapping | None ), dims: str | Iterable[Hashable] | None, ) -> Any: if data is dtypes.NA: data = np.nan if coords is not None and utils.is_scalar(data, include_0d=False): if utils.is_dict_like(coords): if dims is None: return data else: data_shape = tuple( ( 
as_variable(coords[k], k, auto_convert=False).size if k in coords.keys() else 1 ) for k in dims ) else: data_shape = tuple( as_variable(coord, "foo", auto_convert=False).size for coord in coords ) data = np.full(data_shape, data) return data class _LocIndexer(Generic[T_DataArray]): __slots__ = ("data_array",) def __init__(self, data_array: T_DataArray): self.data_array = data_array def __getitem__(self, key) -> T_DataArray: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) key = dict(zip(self.data_array.dims, labels, strict=True)) return self.data_array.sel(key) def __setitem__(self, key, value) -> None: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) key = dict(zip(self.data_array.dims, labels, strict=True)) dim_indexers = map_index_queries(self.data_array, key).dim_indexers self.data_array[dim_indexers] = value # Used as the key corresponding to a DataArray's variable when converting # arbitrary DataArray objects to datasets _THIS_ARRAY = ReprObject("") class DataArray( AbstractArray, DataWithCoords, DataArrayArithmetic, DataArrayAggregations, ): """N-dimensional array with labeled coordinates and dimensions. DataArray provides a wrapper around numpy ndarrays that uses labeled dimensions and coordinates to support metadata aware operations. The API is similar to that for the pandas Series or DataFrame, but DataArray objects can have any number of dimensions, and their contents have fixed data types. Additional features over raw numpy arrays: - Apply operations over dimensions by name: ``x.sum('time')``. - Select or assign values by integer location (like numpy): ``x[:10]`` or by label (like pandas): ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``. - Mathematical operations (e.g., ``x - y``) vectorize across multiple dimensions (known in numpy as "broadcasting") based on dimension names, regardless of their original order. - Keep track of arbitrary metadata in the form of a Python dictionary: ``x.attrs`` - Convert to a pandas Series: ``x.to_series()``. Getting items from or doing mathematical operations with a DataArray always returns another DataArray. Parameters ---------- data : array_like Values for this array. Must be an ``numpy.ndarray``, ndarray like, or castable to an ``ndarray``. If a self-described xarray or pandas object, attempts are made to use this array's metadata to fill in other unspecified arguments. A view of the array's data is used instead of a copy if possible. coords : sequence or dict of array_like or :py:class:`~xarray.Coordinates`, optional Coordinates (tick labels) to use for indexing along each dimension. 
The following notations are accepted: - mapping {dimension name: array-like} - sequence of tuples that are valid arguments for ``xarray.Variable()`` - (dims, data) - (dims, data, attrs) - (dims, data, attrs, encoding) Additionally, it is possible to define a coord whose name does not match the dimension name, or a coord based on multiple dimensions, with one of the following notations: - mapping {coord name: DataArray} - mapping {coord name: Variable} - mapping {coord name: (dimension name, array-like)} - mapping {coord name: (tuple of dimension names, array-like)} Alternatively, a :py:class:`~xarray.Coordinates` object may be used in order to explicitly pass indexes (e.g., a multi-index or any custom Xarray index) or to bypass the creation of a default index for any :term:`Dimension coordinate` included in that object. dims : Hashable or sequence of Hashable, optional Name(s) of the data dimension(s). Must be either a Hashable (only for 1D data) or a sequence of Hashables with length equal to the number of dimensions. If this argument is omitted, dimension names are taken from ``coords`` (if possible) and otherwise default to ``['dim_0', ... 'dim_n']``. name : str or None, optional Name of this array. attrs : dict_like or None, optional Attributes to assign to the new instance. By default, an empty attribute dictionary is initialized. (see FAQ, :ref:`approach to metadata`) indexes : :py:class:`~xarray.Indexes` or dict-like, optional For internal use only. For passing indexes objects to the new DataArray, use the ``coords`` argument instead with a :py:class:`~xarray.Coordinate` object (both coordinate variables and indexes will be extracted from the latter). Examples -------- Create data: >>> np.random.seed(0) >>> temperature = 15 + 8 * np.random.randn(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> time = pd.date_range("2014-09-06", periods=3) >>> reference_time = pd.Timestamp("2014-09-05") Initialize a dataarray with multiple dimensions: >>> da = xr.DataArray( ... data=temperature, ... dims=["x", "y", "time"], ... coords=dict( ... lon=(["x", "y"], lon), ... lat=(["x", "y"], lat), ... time=time, ... reference_time=reference_time, ... ), ... attrs=dict( ... description="Ambient temperature.", ... units="degC", ... ), ... ) >>> da Size: 96B array([[[29.11241877, 18.20125767, 22.82990387], [32.92714559, 29.94046392, 7.18177696]], [[22.60070734, 13.78914233, 14.17424919], [18.28478802, 16.15234857, 26.63418806]]]) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Attributes: description: Ambient temperature. units: degC Find out where the coldest temperature was: >>> da.isel(da.argmin(...)) Size: 8B array(7.18177696) Coordinates: lon float64 8B -99.32 lat float64 8B 42.21 time datetime64[ns] 8B 2014-09-08 reference_time datetime64[ns] 8B 2014-09-05 Attributes: description: Ambient temperature. 
units: degC """ _cache: dict[str, Any] _coords: dict[Any, Variable] _close: Callable[[], None] | None _indexes: dict[Hashable, Index] _name: Hashable | None _variable: Variable __slots__ = ( "__weakref__", "_cache", "_close", "_coords", "_indexes", "_name", "_variable", ) dt = utils.UncachedAccessor(CombinedDatetimelikeAccessor["DataArray"]) def __init__( self, data: Any = dtypes.NA, coords: ( Sequence[Sequence | pd.Index | DataArray | Variable | np.ndarray] | Mapping | None ) = None, dims: str | Iterable[Hashable] | None = None, name: Hashable | None = None, attrs: Mapping | None = None, # internal parameters indexes: Mapping[Hashable, Index] | None = None, fastpath: bool = False, ) -> None: if fastpath: variable = data assert dims is None assert attrs is None assert indexes is not None else: if indexes is not None: raise ValueError( "Explicitly passing indexes via the `indexes` argument is not supported " "when `fastpath=False`. Use the `coords` argument instead." ) # try to fill in arguments from data if they weren't supplied if coords is None: if isinstance(data, DataArray): coords = data.coords elif isinstance(data, pd.Series): coords = [data.index] elif isinstance(data, pd.DataFrame): coords = [data.index, data.columns] elif isinstance(data, pd.Index | IndexVariable): coords = [data] if dims is None: dims = getattr(data, "dims", getattr(coords, "dims", None)) if name is None: name = getattr(data, "name", None) if attrs is None and not isinstance(data, PANDAS_TYPES): attrs = getattr(data, "attrs", None) data = _check_data_shape(data, coords, dims) data = as_compatible_data(data) coords, dims = _infer_coords_and_dims(data.shape, coords, dims) variable = Variable(dims, data, attrs, fastpath=True) if not isinstance(coords, Coordinates): coords = create_coords_with_default_indexes(coords) indexes = dict(coords.xindexes) coords = {k: v.copy() for k, v in coords.variables.items()} # These fully describe a DataArray self._variable = variable assert isinstance(coords, dict) self._coords = coords self._name = name self._indexes = dict(indexes) self._close = None @classmethod def _construct_direct( cls, variable: Variable, coords: dict[Any, Variable], name: Hashable, indexes: dict[Hashable, Index], ) -> Self: """Shortcut around __init__ for internal use when we want to skip costly validation """ obj = object.__new__(cls) obj._variable = variable obj._coords = coords obj._name = name obj._indexes = indexes obj._close = None return obj def _replace( self, variable: Variable | None = None, coords=None, name: Hashable | Default | None = _default, attrs=_default, indexes=None, ) -> Self: if variable is None: variable = self.variable if coords is None: coords = self._coords if indexes is None: indexes = self._indexes if name is _default: name = self.name if attrs is _default: attrs = copy.copy(self.attrs) else: variable = variable.copy() variable.attrs = attrs return type(self)(variable, coords, name=name, indexes=indexes, fastpath=True) def _replace_maybe_drop_dims( self, variable: Variable, name: Hashable | Default | None = _default, ) -> Self: if self.sizes == variable.sizes: coords = self._coords.copy() indexes = self._indexes elif set(self.dims) == set(variable.dims): # Shape has changed (e.g. 
from reduce(..., keepdims=True) new_sizes = dict(zip(self.dims, variable.shape, strict=True)) coords = { k: v for k, v in self._coords.items() if v.shape == tuple(new_sizes[d] for d in v.dims) } indexes = filter_indexes_from_coords(self._indexes, set(coords)) else: allowed_dims = set(variable.dims) coords = { k: v for k, v in self._coords.items() if set(v.dims) <= allowed_dims } indexes = filter_indexes_from_coords(self._indexes, set(coords)) return self._replace(variable, coords, name, indexes=indexes) def _overwrite_indexes( self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, drop_coords: list[Hashable] | None = None, rename_dims: Mapping[Any, Any] | None = None, ) -> Self: """Maybe replace indexes and their corresponding coordinates.""" if not indexes: return self if variables is None: variables = {} if drop_coords is None: drop_coords = [] new_variable = self.variable.copy() new_coords = self._coords.copy() new_indexes = dict(self._indexes) for name in indexes: new_coords[name] = variables[name] new_indexes[name] = indexes[name] for name in drop_coords: new_coords.pop(name) new_indexes.pop(name) if rename_dims: new_variable.dims = tuple(rename_dims.get(d, d) for d in new_variable.dims) return self._replace( variable=new_variable, coords=new_coords, indexes=new_indexes ) def _to_temp_dataset(self) -> Dataset: return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False) def _from_temp_dataset( self, dataset: Dataset, name: Hashable | Default | None = _default ) -> Self: variable = dataset._variables.pop(_THIS_ARRAY) coords = dataset._variables indexes = dataset._indexes return self._replace(variable, coords, name, indexes=indexes) def _to_dataset_split(self, dim: Hashable) -> Dataset: """splits dataarray along dimension 'dim'""" def subset(dim, label): array = self.loc[{dim: label}] array.attrs = {} return as_variable(array) variables_from_split = { label: subset(dim, label) for label in self.get_index(dim) } coord_names = set(self._coords) - {dim} ambiguous_vars = set(variables_from_split) & coord_names if ambiguous_vars: rename_msg_fmt = ", ".join([f"{v}=..." for v in sorted(ambiguous_vars)]) raise ValueError( f"Splitting along the dimension {dim!r} would produce the variables " f"{tuple(sorted(ambiguous_vars))} which are also existing coordinate " f"variables. Use DataArray.rename({rename_msg_fmt}) or " f"DataArray.assign_coords({dim}=...) to resolve this ambiguity." 
) variables = variables_from_split | { k: v for k, v in self._coords.items() if k != dim } indexes = filter_indexes_from_coords(self._indexes, coord_names) dataset = Dataset._construct_direct( variables, coord_names, indexes=indexes, attrs=self.attrs ) return dataset def _to_dataset_whole( self, name: Hashable = None, shallow_copy: bool = True ) -> Dataset: if name is None: name = self.name if name is None: raise ValueError( "unable to convert unnamed DataArray to a " "Dataset without providing an explicit name" ) if name in self.coords: raise ValueError( "cannot create a Dataset from a DataArray with " "the same name as one of its coordinates" ) # use private APIs for speed: this is called by _to_temp_dataset(), # which is used in the guts of a lot of operations (e.g., reindex) variables = self._coords.copy() variables[name] = self.variable if shallow_copy: for k in variables: variables[k] = variables[k].copy(deep=False) indexes = self._indexes coord_names = set(self._coords) return Dataset._construct_direct(variables, coord_names, indexes=indexes) def to_dataset( self, dim: Hashable = None, *, name: Hashable = None, promote_attrs: bool = False, ) -> Dataset: """Convert a DataArray to a Dataset. Parameters ---------- dim : Hashable, optional Name of the dimension on this array along which to split this array into separate variables. If not provided, this array is converted into a Dataset of one variable. name : Hashable, optional Name to substitute for this array's name. Only valid if ``dim`` is not provided. promote_attrs : bool, default: False Set to True to shallow copy attrs of DataArray to returned Dataset. Returns ------- dataset : Dataset """ if dim is not None and dim not in self.dims: raise TypeError( f"{dim} is not a dim. If supplying a ``name``, pass as a kwarg." ) if dim is not None: if name is not None: raise TypeError("cannot supply both dim and name arguments") result = self._to_dataset_split(dim) else: result = self._to_dataset_whole(name) if promote_attrs: result.attrs = dict(self.attrs) return result @property def name(self) -> Hashable | None: """The name of this array.""" return self._name @name.setter def name(self, value: Hashable | None) -> None: self._name = value @property def variable(self) -> Variable: """Low level interface to the Variable object for this DataArray.""" return self._variable @property def dtype(self) -> np.dtype: """ Data-type of the array’s elements. See Also -------- ndarray.dtype numpy.dtype """ return self.variable.dtype @property def shape(self) -> tuple[int, ...]: """ Tuple of array dimensions. See Also -------- numpy.ndarray.shape """ return self.variable.shape @property def size(self) -> int: """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the array’s dimensions. See Also -------- numpy.ndarray.size """ return self.variable.size @property def nbytes(self) -> int: """ Total bytes consumed by the elements of this DataArray's data. If the underlying data array does not include ``nbytes``, estimates the bytes consumed based on the ``size`` and ``dtype``. """ return self.variable.nbytes @property def ndim(self) -> int: """ Number of array dimensions. See Also -------- numpy.ndarray.ndim """ return self.variable.ndim def __len__(self) -> int: return len(self.variable) @property def data(self) -> Any: """ The DataArray's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved. 
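        A minimal illustrative sketch (the concrete array type simply reflects
        whatever backs the variable; the chunked line assumes dask is installed):

        >>> da = xr.DataArray(np.arange(3), dims="x")
        >>> type(da.data)
        <class 'numpy.ndarray'>
        >>> type(da.chunk(x=1).data)  # doctest: +SKIP
        <class 'dask.array.core.Array'>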
See Also -------- DataArray.to_numpy DataArray.as_numpy DataArray.values """ return self.variable.data @data.setter def data(self, value: Any) -> None: self.variable.data = value @property def values(self) -> np.ndarray: """ The array's data converted to numpy.ndarray. This will attempt to convert the array naively using np.array(), which will raise an error if the array type does not support coercion like this (e.g. cupy). Note that this array is not copied; operations on it follow numpy's rules of what generates a view vs. a copy, and changes to this array may be reflected in the DataArray as well. """ return self.variable.values @values.setter def values(self, value: Any) -> None: self.variable.values = value def to_numpy(self) -> np.ndarray: """ Coerces wrapped data to numpy and returns a numpy.ndarray. See Also -------- DataArray.as_numpy : Same but returns the surrounding DataArray instead. Dataset.as_numpy DataArray.values DataArray.data """ return self.variable.to_numpy() def as_numpy(self) -> Self: """ Coerces wrapped data and coordinates into numpy arrays, returning a DataArray. See Also -------- DataArray.to_numpy : Same but returns only the data as a numpy.ndarray object. Dataset.as_numpy : Converts all variables in a Dataset. DataArray.values DataArray.data """ coords = {k: v.as_numpy() for k, v in self._coords.items()} return self._replace(self.variable.as_numpy(), coords, indexes=self._indexes) @property def _in_memory(self) -> bool: return self.variable._in_memory def _to_index(self) -> pd.Index: return self.variable._to_index() def to_index(self) -> pd.Index: """Convert this variable to a pandas.Index. Only possible for 1D arrays. """ return self.variable.to_index() @property def dims(self) -> tuple[Hashable, ...]: """Tuple of dimension names associated with this array. Note that the type of this property is inconsistent with `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for consistently named properties. See Also -------- DataArray.sizes Dataset.dims """ return self.variable.dims @dims.setter def dims(self, value: Any) -> NoReturn: raise AttributeError( "you cannot assign dims on a DataArray. Use " ".rename() or .swap_dims() instead." ) def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]: if utils.is_dict_like(key): return key key = indexing.expanded_indexer(key, self.ndim) return dict(zip(self.dims, key, strict=True)) def _getitem_coord(self, key: Any) -> Self: from xarray.core.dataset_utils import _get_virtual_variable try: var = self._coords[key] except KeyError: dim_sizes = dict(zip(self.dims, self.shape, strict=True)) _, key, var = _get_virtual_variable(self._coords, key, dim_sizes) return self._replace_maybe_drop_dims(var, name=key) def __getitem__(self, key: Any) -> Self: if isinstance(key, str): return self._getitem_coord(key) else: # xarray-style array indexing return self.isel(indexers=self._item_key_to_dict(key)) def __setitem__(self, key: Any, value: Any) -> None: if isinstance(key, str): self.coords[key] = value else: # Coordinates in key, value and self[key] should be consistent. # TODO Coordinate consistency in key is checked here, but it # causes unnecessary indexing. It should be optimized. 
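            # Illustrative sketch in comments only (not executed here): non-string
            # keys are expanded to a per-dimension mapping and assigned
            # positionally, e.g.
            #     da = xr.DataArray(np.zeros((2, 3)), dims=("x", "y"))
            #     da[{"x": 0}] = 1.0   # equivalent to da[0, :] = 1.0
            # When ``value`` is a DataArray, its coordinates must be consistent
            # with the selected region, which the check below enforces.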
obj = self[key] if isinstance(value, DataArray): assert_coordinate_consistent(value, obj.coords.variables) value = value.variable # DataArray key -> Variable key key = { k: v.variable if isinstance(v, DataArray) else v for k, v in self._item_key_to_dict(key).items() } self.variable[key] = value def __delitem__(self, key: Any) -> None: del self.coords[key] @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from self._item_sources yield self.attrs @property def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for key-completion""" yield FilteredMapping(keys=self._coords, mapping=self.coords) # virtual coordinates yield FilteredMapping(keys=self.dims, mapping=self.coords) def __contains__(self, key: Any) -> bool: return key in self.data @property def loc(self) -> _LocIndexer: """Attribute for location based indexing like pandas.""" return _LocIndexer(self) @property def attrs(self) -> dict[Any, Any]: """Dictionary storing arbitrary metadata with this array.""" return self.variable.attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self.variable.attrs = dict(value) @property def encoding(self) -> dict[Any, Any]: """Dictionary of format-specific settings for how this array should be serialized.""" return self.variable.encoding @encoding.setter def encoding(self, value: Mapping[Any, Any]) -> None: self.variable.encoding = dict(value) def reset_encoding(self) -> Self: warnings.warn( "reset_encoding is deprecated since 2023.11, use `drop_encoding` instead", stacklevel=2, ) return self.drop_encoding() def drop_encoding(self) -> Self: """Return a new DataArray without encoding on the array or any attached coords.""" ds = self._to_temp_dataset().drop_encoding() return self._from_temp_dataset(ds) @property def indexes(self) -> Indexes: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this Dataset has indexes that cannot be coerced to pandas.Index objects. See Also -------- DataArray.xindexes """ return self.xindexes.to_pandas_indexes() @property def xindexes(self) -> Indexes[Index]: """Mapping of :py:class:`~xarray.indexes.Index` objects used for label based indexing. """ return Indexes(self._indexes, {k: self._coords[k] for k in self._indexes}) @property def coords(self) -> DataArrayCoordinates: """Mapping of :py:class:`~xarray.DataArray` objects corresponding to coordinate variables. See Also -------- Coordinates """ return DataArrayCoordinates(self) @overload def reset_coords( self, names: Dims = None, *, drop: Literal[False] = False, ) -> Dataset: ... @overload def reset_coords( self, names: Dims = None, *, drop: Literal[True], ) -> Self: ... def reset_coords( self, names: Dims = None, *, drop: bool = False, ) -> Self | Dataset: """Given names of coordinates, reset them to become variables. Parameters ---------- names : str, Iterable of Hashable or None, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, default: False If True, remove coordinates instead of converting them into variables. Returns ------- Dataset, or DataArray if ``drop == True`` Examples -------- >>> temperature = np.arange(25).reshape(5, 5) >>> pressure = np.arange(50, 75).reshape(5, 5) >>> da = xr.DataArray( ... data=temperature, ... dims=["x", "y"], ... coords=dict( ... lon=("x", np.arange(10, 15)), ... lat=("y", np.arange(20, 25)), ... 
Pressure=(["x", "y"], pressure), ... ), ... name="Temperature", ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: lon (x) int64 40B 10 11 12 13 14 lat (y) int64 40B 20 21 22 23 24 Pressure (x, y) int64 200B 50 51 52 53 54 55 56 57 ... 68 69 70 71 72 73 74 Dimensions without coordinates: x, y Return Dataset with target coordinate as a data variable rather than a coordinate variable: >>> da.reset_coords(names="Pressure") Size: 480B Dimensions: (x: 5, y: 5) Coordinates: lon (x) int64 40B 10 11 12 13 14 lat (y) int64 40B 20 21 22 23 24 Dimensions without coordinates: x, y Data variables: Pressure (x, y) int64 200B 50 51 52 53 54 55 56 ... 68 69 70 71 72 73 74 Temperature (x, y) int64 200B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 Return DataArray without targeted coordinate: >>> da.reset_coords(names="Pressure", drop=True) Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: lon (x) int64 40B 10 11 12 13 14 lat (y) int64 40B 20 21 22 23 24 Dimensions without coordinates: x, y """ if names is None: names = set(self.coords) - set(self._indexes) dataset = self.coords.to_dataset().reset_coords(names, drop) if drop: return self._replace(coords=dataset._variables) if self.name is None: raise ValueError( "cannot reset_coords with drop=False on an unnamed DataArray" ) dataset[self.name] = self.variable return dataset def __dask_tokenize__(self) -> object: from dask.base import normalize_token return normalize_token((type(self), self._variable, self._coords, self._name)) def __dask_graph__(self): return self._to_temp_dataset().__dask_graph__() def __dask_keys__(self): return self._to_temp_dataset().__dask_keys__() def __dask_layers__(self): return self._to_temp_dataset().__dask_layers__() @property def __dask_optimize__(self): return self._to_temp_dataset().__dask_optimize__ @property def __dask_scheduler__(self): return self._to_temp_dataset().__dask_scheduler__ def __dask_postcompute__(self): func, args = self._to_temp_dataset().__dask_postcompute__() return self._dask_finalize, (self.name, func) + args def __dask_postpersist__(self): func, args = self._to_temp_dataset().__dask_postpersist__() return self._dask_finalize, (self.name, func) + args @classmethod def _dask_finalize(cls, results, name, func, *args, **kwargs) -> Self: ds = func(results, *args, **kwargs) variable = ds._variables.pop(_THIS_ARRAY) coords = ds._variables indexes = ds._indexes return cls(variable, coords, name=name, indexes=indexes, fastpath=True) def load(self, **kwargs) -> Self: """Trigger loading data into memory and return this dataarray. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataarray is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : DataArray Same object but with lazy data and coordinates as in-memory arrays. 
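        A brief usage sketch (``"data.nc"`` is a placeholder path, so the
        doctest is skipped):

        >>> da = xr.open_dataarray("data.nc", chunks={})  # doctest: +SKIP
        >>> da.load()  # doctest: +SKIP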
See Also -------- dask.compute DataArray.load_async DataArray.compute Dataset.load Variable.load """ ds = self._to_temp_dataset().load(**kwargs) new = self._from_temp_dataset(ds) self._variable = new._variable self._coords = new._coords return self async def load_async(self, **kwargs) -> Self: """Trigger and await asynchronous loading of data into memory and return this dataarray. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataarray is modified and returned. Only works when opening data lazily from IO storage backends which support lazy asynchronous loading. Otherwise will raise a NotImplementedError. Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataarray Same object but with lazy data and coordinates as in-memory arrays. See Also -------- dask.compute DataArray.compute DataArray.load Dataset.load_async Variable.load_async """ temp_ds = self._to_temp_dataset() ds = await temp_ds.load_async(**kwargs) new = self._from_temp_dataset(ds) self._variable = new._variable self._coords = new._coords return self def compute(self, **kwargs) -> Self: """Trigger loading data into memory and return a new dataarray. Data will be computed and/or loaded from disk or a remote source. Unlike ``.load``, the original dataarray is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : DataArray New object with the data and all coordinates as in-memory arrays. See Also -------- dask.compute DataArray.load DataArray.load_async Dataset.compute Variable.compute """ new = self.copy(deep=False) return new.load(**kwargs) def persist(self, **kwargs) -> Self: """Trigger computation in constituent dask arrays This keeps them as dask arrays but encourages them to keep data in memory. This is particularly useful when on a distributed machine. When on a single machine consider using ``.compute()`` instead. Like compute (but unlike load), the original dataset is left unaltered. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. Returns ------- object : DataArray New object with all dask-backed data and coordinates as persisted dask arrays. See Also -------- dask.persist """ ds = self._to_temp_dataset().persist(**kwargs) return self._from_temp_dataset(ds) def copy(self, deep: bool = True, data: Any = None) -> Self: """Returns a copy of this array. If `deep=True`, a deep copy is made of the data array. Otherwise, a shallow copy is made, and the returned data array's values are a new view of this data array's values. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, optional Whether the data array and its coordinates are loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored for all data variables, and only used for coords. 
Returns ------- copy : DataArray New object with dimensions, attributes, coordinates, name, encoding, and optionally data copied from original. Examples -------- Shallow versus deep copy >>> array = xr.DataArray([1, 2, 3], dims="x", coords={"x": ["a", "b", "c"]}) >>> array.copy() Size: 24B array([1, 2, 3]) Coordinates: * x (x) >> array_0 = array.copy(deep=False) >>> array_0[0] = 7 >>> array_0 Size: 24B array([7, 2, 3]) Coordinates: * x (x) >> array Size: 24B array([7, 2, 3]) Coordinates: * x (x) >> array.copy(data=[0.1, 0.2, 0.3]) Size: 24B array([0.1, 0.2, 0.3]) Coordinates: * x (x) >> array Size: 24B array([7, 2, 3]) Coordinates: * x (x) Self: variable = self.variable._copy(deep=deep, data=data, memo=memo) indexes, index_vars = self.xindexes.copy_indexes(deep=deep) coords = {} for k, v in self._coords.items(): if k in index_vars: coords[k] = index_vars[k] else: coords[k] = v._copy(deep=deep, memo=memo) return self._replace(variable, coords, indexes=indexes) def __copy__(self) -> Self: return self._copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) # mutable objects should not be Hashable # https://github.com/python/mypy/issues/4266 __hash__ = None # type: ignore[assignment] @property def chunks(self) -> tuple[tuple[int, ...], ...] | None: """ Tuple of block lengths for this dataarray's data, in order of dimensions, or None if the underlying data is not a dask array. See Also -------- DataArray.chunk DataArray.chunksizes xarray.unify_chunks """ return self.variable.chunks @property def chunksizes(self) -> Mapping[Any, tuple[int, ...]]: """ Mapping from dimension names to block lengths for this dataarray's data. If this dataarray does not contain chunked arrays, the mapping will be empty. Cannot be modified directly, but can be modified by calling .chunk(). Differs from DataArray.chunks because it returns a mapping of dimensions to chunk shapes instead of a tuple of chunk shapes. See Also -------- DataArray.chunk DataArray.chunks xarray.unify_chunks """ all_variables = [self.variable] + [c.variable for c in self.coords.values()] return get_chunksizes(all_variables) def chunk( self, chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667) *, name_prefix: str = "xarray-", token: str | None = None, lock: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: T_ChunkDimFreq, ) -> Self: """Coerce this array's data into a dask arrays with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Along datetime-like dimensions, a pandas frequency string is also accepted. Parameters ---------- chunks : int, "auto", tuple of int or mapping of hashable to int or a pandas frequency string, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, ``(5, 5)`` or ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": "YE"}``. name_prefix : str, optional Prefix for the name of the new dask array. token : str, optional Token uniquely identifying this array. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. 
inline_array: bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce the underlying data array to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided. Returns ------- chunked : xarray.DataArray See Also -------- DataArray.chunks DataArray.chunksizes xarray.unify_chunks dask.array.from_array """ chunk_mapping: T_ChunksFreq if chunks is None: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=FutureWarning, stacklevel=2, ) chunk_mapping = {} if isinstance(chunks, float | str | int): # ignoring type; unclear why it won't accept a Literal into the value. chunk_mapping = dict.fromkeys(self.dims, chunks) elif isinstance(chunks, tuple | list): utils.emit_user_level_warning( "Supplying chunks as dimension-order tuples is deprecated. " "It will raise an error in the future. Instead use a dict with dimension names as keys.", category=DeprecationWarning, ) if len(chunks) != len(self.dims): raise ValueError( f"chunks must have the same number of elements as dimensions. " f"Expected {len(self.dims)} elements, got {len(chunks)}." ) chunk_mapping = dict(zip(self.dims, chunks, strict=True)) else: chunk_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") ds = self._to_temp_dataset().chunk( chunk_mapping, name_prefix=name_prefix, token=token, lock=lock, inline_array=inline_array, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) return self._from_temp_dataset(ds) def isel( self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by selecting indexes along the specified dimension(s). Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by integers, slice objects or arrays. indexer can be a integer, slice, array-like or DataArray. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. drop : bool, default: False If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. 
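        A small sketch of the ``drop`` option (illustrative only; see the
        fuller examples below):

        >>> da = xr.DataArray([1, 2, 3], dims="x", coords={"x": [10, 20, 30]})
        >>> "x" in da.isel(x=0, drop=True).coords
        False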
Returns ------- indexed : xarray.DataArray See Also -------- :func:`Dataset.isel ` :func:`DataArray.sel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing Examples -------- >>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("x", "y")) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> tgt_x = xr.DataArray(np.arange(0, 5), dims="points") >>> tgt_y = xr.DataArray(np.arange(0, 5), dims="points") >>> da = da.isel(x=tgt_x, y=tgt_y) >>> da Size: 40B array([ 0, 6, 12, 18, 24]) Dimensions without coordinates: points """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") if any(is_fancy_indexer(idx) for idx in indexers.values()): ds = self._to_temp_dataset()._isel_fancy( indexers, drop=drop, missing_dims=missing_dims ) return self._from_temp_dataset(ds) # Much faster algorithm for when all indexers are ints, slices, one-dimensional # lists, or zero or one-dimensional np.ndarray's variable = self._variable.isel(indexers, missing_dims=missing_dims) indexes, index_variables = isel_indexes(self.xindexes, indexers) coords = {} for coord_name, coord_value in self._coords.items(): if coord_name in index_variables: coord_value = index_variables[coord_name] else: coord_indexers = { k: v for k, v in indexers.items() if k in coord_value.dims } if coord_indexers: coord_value = coord_value.isel(coord_indexers) if drop and coord_value.ndim == 0: continue coords[coord_name] = coord_value return self._replace(variable=variable, coords=coords, indexes=indexes) def sel( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance=None, drop: bool = False, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by selecting index labels along the specified dimension(s). In contrast to `DataArray.isel`, indexers for this method should use labels instead of integers. Under the hood, this method is powered by using pandas's powerful Index objects. This makes label based indexing essentially just as fast as using integer indexing. It also means this method uses pandas's (well documented) logic for indexing. This means you can use string shortcuts for datetime indexes (e.g., '2000-01' to select all values in January 2000). It also means that slices are treated as inclusive of both the start and stop values, unlike normal Python indexing. .. warning:: Do not try to assign values when using any of the indexing methods ``isel`` or ``sel``:: da = xr.DataArray([0, 1, 2, 3], dims=["x"]) # DO NOT do this da.isel(x=[0, 1, 2])[1] = -1 Assigning values with the chained indexing using ``.sel`` or ``.isel`` fails silently. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by scalars, slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be a dict-like object with keys matching index level names. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. 
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: - None (default): only exact matches - pad / ffill: propagate last valid index value forward - backfill / bfill: propagate next valid index value backward - nearest: use nearest valid index value tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. drop : bool, optional If ``drop=True``, drop coordinates variables in `indexers` instead of making them scalar. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : DataArray A new DataArray with the same contents as this DataArray, except the data and each dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this DataArray, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- :func:`Dataset.sel ` :func:`DataArray.isel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... coords={"x": np.arange(5), "y": np.arange(5)}, ... dims=("x", "y"), ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 40B 0 1 2 3 4 * y (y) int64 40B 0 1 2 3 4 >>> tgt_x = xr.DataArray(np.linspace(0, 4, num=5), dims="points") >>> tgt_y = xr.DataArray(np.linspace(0, 4, num=5), dims="points") >>> da = da.sel(x=tgt_x, y=tgt_y, method="nearest") >>> da Size: 40B array([ 0, 6, 12, 18, 24]) Coordinates: x (points) int64 40B 0 1 2 3 4 y (points) int64 40B 0 1 2 3 4 Dimensions without coordinates: points """ ds = self._to_temp_dataset().sel( indexers=indexers, drop=drop, method=method, tolerance=tolerance, **indexers_kwargs, ) return self._from_temp_dataset(ds) def _shuffle( self, dim: Hashable, *, indices: GroupIndices, chunks: T_Chunks ) -> Self: ds = self._to_temp_dataset()._shuffle(dim=dim, indices=indices, chunks=chunks) return self._from_temp_dataset(ds) def head( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by the the first `n` values along the specified dimension(s). Default `n` = 5 See Also -------- Dataset.head DataArray.tail DataArray.thin Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... dims=("x", "y"), ... 
) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> da.head(x=1) Size: 40B array([[0, 1, 2, 3, 4]]) Dimensions without coordinates: x, y >>> da.head({"x": 2, "y": 2}) Size: 32B array([[0, 1], [5, 6]]) Dimensions without coordinates: x, y """ ds = self._to_temp_dataset().head(indexers, **indexers_kwargs) return self._from_temp_dataset(ds) def tail( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by the the last `n` values along the specified dimension(s). Default `n` = 5 See Also -------- Dataset.tail DataArray.head DataArray.thin Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... dims=("x", "y"), ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: x, y >>> da.tail(y=1) Size: 40B array([[ 4], [ 9], [14], [19], [24]]) Dimensions without coordinates: x, y >>> da.tail({"x": 2, "y": 2}) Size: 32B array([[18, 19], [23, 24]]) Dimensions without coordinates: x, y """ ds = self._to_temp_dataset().tail(indexers, **indexers_kwargs) return self._from_temp_dataset(ds) def thin( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Return a new DataArray whose data is given by each `n` value along the specified dimension(s). Examples -------- >>> x_arr = np.arange(0, 26) >>> x_arr array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]) >>> x = xr.DataArray( ... np.reshape(x_arr, (2, 13)), ... dims=("x", "y"), ... coords={"x": [0, 1], "y": np.arange(0, 13)}, ... ) >>> x Size: 208B array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]]) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12 >>> >>> x.thin(3) Size: 40B array([[ 0, 3, 6, 9, 12]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 40B 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) Size: 24B array([[ 0, 5, 10]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 24B 0 5 10 See Also -------- Dataset.thin DataArray.head DataArray.tail """ ds = self._to_temp_dataset().thin(indexers, **indexers_kwargs) return self._from_temp_dataset(ds) def broadcast_like( self, other: T_DataArrayOrSet, *, exclude: Iterable[Hashable] | None = None, ) -> Self: """Broadcast this DataArray against another Dataset or DataArray. This is equivalent to xr.broadcast(other, self)[1] xarray objects are broadcast against each other in arithmetic operations, so this method is not be necessary for most uses. If no change is needed, the input data is returned to the output without being copied. If new coords are added by the broadcast, their values are NaN filled. Parameters ---------- other : Dataset or DataArray Object against which to broadcast this array. exclude : iterable of Hashable, optional Dimensions that must not be broadcasted Returns ------- new_da : DataArray The caller broadcasted against ``other``. Examples -------- >>> arr1 = xr.DataArray( ... np.random.randn(2, 3), ... dims=("x", "y"), ... coords={"x": ["a", "b"], "y": ["a", "b", "c"]}, ... ) >>> arr2 = xr.DataArray( ... np.random.randn(3, 2), ... dims=("x", "y"), ... coords={"x": ["a", "b", "c"], "y": ["a", "b"]}, ... 
) >>> arr1 Size: 48B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788]]) Coordinates: * x (x) >> arr2 Size: 48B array([[ 0.95008842, -0.15135721], [-0.10321885, 0.4105985 ], [ 0.14404357, 1.45427351]]) Coordinates: * x (x) >> arr1.broadcast_like(arr2) Size: 72B array([[ 1.76405235, 0.40015721, 0.97873798], [ 2.2408932 , 1.86755799, -0.97727788], [ nan, nan, nan]]) Coordinates: * x (x) Self: """Callback called from ``Aligner`` to create a new reindexed DataArray.""" if isinstance(fill_value, dict): fill_value = fill_value.copy() sentinel = object() value = fill_value.pop(self.name, sentinel) if value is not sentinel: fill_value[_THIS_ARRAY] = value ds = self._to_temp_dataset() reindexed = ds._reindex_callback( aligner, dim_pos_indexers, variables, indexes, fill_value, exclude_dims, exclude_vars, ) da = self._from_temp_dataset(reindexed) da.encoding = self.encoding return da def reindex_like( self, other: T_DataArrayOrSet, *, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value=dtypes.NA, ) -> Self: """ Conform this object onto the indexes of another object, for indexes which the objects share. Missing values are filled with ``fill_value``. The default fill value is NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mismatched index values will be filled in with NaN, and any mismatched dimension names will simply be ignored. method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for filling index values from other not found on this data array: - None (default): don't fill gaps - pad / ffill: propagate last valid index value forward - backfill / bfill: propagate next valid index value backward - nearest: use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the index’s type. copy : bool, default: True If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. Use this data array's name to refer to the data array's values. Returns ------- reindexed : DataArray Another dataset array, with this array's data but coordinates from the other object. Examples -------- >>> data = np.arange(12).reshape(4, 3) >>> da1 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da1 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 >>> da2 = xr.DataArray( ... 
data=data, ... dims=["x", "y"], ... coords={"x": [40, 30, 20, 10], "y": [90, 80, 70]}, ... ) >>> da2 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 40 30 20 10 * y (y) int64 24B 90 80 70 Reindexing with both DataArrays having the same coordinates set, but in different order: >>> da1.reindex_like(da2) Size: 96B array([[11, 10, 9], [ 8, 7, 6], [ 5, 4, 3], [ 2, 1, 0]]) Coordinates: * x (x) int64 32B 40 30 20 10 * y (y) int64 24B 90 80 70 Reindexing with the other array having additional coordinates: >>> da3 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [20, 10, 29, 39], "y": [70, 80, 90]}, ... ) >>> da1.reindex_like(da3) Size: 96B array([[ 3., 4., 5.], [ 0., 1., 2.], [nan, nan, nan], [nan, nan, nan]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Filling missing values with the previous valid index with respect to the coordinates' value: >>> da1.reindex_like(da3, method="ffill") Size: 96B array([[3, 4, 5], [0, 1, 2], [3, 4, 5], [6, 7, 8]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Filling missing values while tolerating specified error for inexact matches: >>> da1.reindex_like(da3, method="ffill", tolerance=5) Size: 96B array([[ 3., 4., 5.], [ 0., 1., 2.], [nan, nan, nan], [nan, nan, nan]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Filling missing values with manually specified values: >>> da1.reindex_like(da3, fill_value=19) Size: 96B array([[ 3, 4, 5], [ 0, 1, 2], [19, 19, 19], [19, 19, 19]]) Coordinates: * x (x) int64 32B 20 10 29 39 * y (y) int64 24B 70 80 90 Note that unlike ``broadcast_like``, ``reindex_like`` doesn't create new dimensions: >>> da1.sel(x=20) Size: 24B array([3, 4, 5]) Coordinates: x int64 8B 20 * y (y) int64 24B 70 80 90 ...so ``b`` in not added here: >>> da1.sel(x=20).reindex_like(da1) Size: 24B array([3, 4, 5]) Coordinates: x int64 8B 20 * y (y) int64 24B 70 80 90 See Also -------- DataArray.reindex DataArray.broadcast_like align """ return alignment.reindex_like( self, other=other, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def reindex( self, indexers: Mapping[Any, Any] | None = None, *, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value=dtypes.NA, **indexers_kwargs: Any, ) -> Self: """Conform this object onto the indexes of another object, filling in missing values with ``fill_value``. The default fill value is NaN. Parameters ---------- indexers : dict, optional Dictionary with keys given by dimension names and values given by arrays of coordinates tick labels. Any mismatched coordinate values will be filled in with NaN, and any mismatched dimension names will simply be ignored. One of indexers or indexers_kwargs must be provided. copy : bool, optional If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. 
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional Method to use for filling index values in ``indexers`` not found on this data array: - None (default): don't fill gaps - pad / ffill: propagate last valid index value forward - backfill / bfill: propagate next valid index value backward - nearest: use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the index’s type. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. Use this data array's name to refer to the data array's values. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- reindexed : DataArray Another dataset array, with this array's data but replaced coordinates. Examples -------- Reverse latitude: >>> da = xr.DataArray( ... np.arange(4), ... coords=[np.array([90, 89, 88, 87])], ... dims="lat", ... ) >>> da Size: 32B array([0, 1, 2, 3]) Coordinates: * lat (lat) int64 32B 90 89 88 87 >>> da.reindex(lat=da.lat[::-1]) Size: 32B array([3, 2, 1, 0]) Coordinates: * lat (lat) int64 32B 87 88 89 90 See Also -------- DataArray.reindex_like align """ indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex") return alignment.reindex( self, indexers=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def interp( self, coords: Mapping[Any, Any] | None = None, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, **coords_kwargs: Any, ) -> Self: """ Interpolate a DataArray onto new coordinates. Performs univariate or multivariate interpolation of a Dataset onto new coordinates, utilizing either NumPy or SciPy interpolation routines. Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant. Parameters ---------- coords : dict, optional Mapping from dimension names to the new coordinates. New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordinates, their dimensions are used for the broadcasting. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of x can be in any order and they are sorted first. If True, x has to be an array of monotonically increasing values. kwargs : dict-like or None, default: None Additional keyword arguments passed to scipy's interpolator. Valid options and their behavior depend whether ``interp1d`` or ``interpn`` is used. **coords_kwargs : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated : DataArray New dataarray on the new coordinates. Notes ----- - SciPy is required for certain interpolation methods. 
- When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. - The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :mod:`scipy.interpolate` :doc:`xarray-tutorial:fundamentals/02.2_manipulating_dimensions` Tutorial material on manipulating data resolution using :py:func:`~xarray.DataArray.interp` Examples -------- >>> da = xr.DataArray( ... data=[[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]], ... dims=("x", "y"), ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]}, ... ) >>> da Size: 96B array([[ 1., 4., 2., 9.], [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) Coordinates: * x (x) int64 24B 0 1 2 * y (y) int64 32B 10 12 14 16 1D linear interpolation (the default): >>> da.interp(x=[0, 0.75, 1.25, 1.75]) Size: 128B array([[1. , 4. , 2. , nan], [1.75, 6.25, 5. , nan], [3. , nan, 5.75, nan], [5. , nan, 5.25, nan]]) Coordinates: * y (y) int64 32B 10 12 14 16 * x (x) float64 32B 0.0 0.75 1.25 1.75 1D nearest interpolation: >>> da.interp(x=[0, 0.75, 1.25, 1.75], method="nearest") Size: 128B array([[ 1., 4., 2., 9.], [ 2., 7., 6., nan], [ 2., 7., 6., nan], [ 6., nan, 5., 8.]]) Coordinates: * y (y) int64 32B 10 12 14 16 * x (x) float64 32B 0.0 0.75 1.25 1.75 1D linear extrapolation: >>> da.interp( ... x=[1, 1.5, 2.5, 3.5], ... method="linear", ... kwargs={"fill_value": "extrapolate"}, ... ) Size: 128B array([[ 2. , 7. , 6. , nan], [ 4. , nan, 5.5, nan], [ 8. , nan, 4.5, nan], [12. , nan, 3.5, nan]]) Coordinates: * y (y) int64 32B 10 12 14 16 * x (x) float64 32B 1.0 1.5 2.5 3.5 2D linear interpolation: >>> da.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear") Size: 96B array([[2.5 , 3. , nan], [4. , 5.625, nan], [ nan, nan, nan], [ nan, nan, nan]]) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 24B 11 13 15 """ if self.dtype.kind not in "uifc": raise TypeError( f"interp only works for a numeric type array. Given {self.dtype}." 
) ds = self._to_temp_dataset().interp( coords, method=method, kwargs=kwargs, assume_sorted=assume_sorted, **coords_kwargs, ) return self._from_temp_dataset(ds) def interp_like( self, other: T_Xarray, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, ) -> Self: """Interpolate this object onto the coordinates of another object, filling out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs : dict, optional Additional keyword arguments passed to the interpolant. Returns ------- interpolated : DataArray Another dataarray by interpolating this dataarray's data along the coordinates of the other object. Notes ----- - scipy is required. - If the dataarray has object-type coordinates, reindex is used for these coordinates instead of the interpolation. - When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. - The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :func:`DataArray.interp` :func:`DataArray.reindex_like` :mod:`scipy.interpolate` Examples -------- >>> data = np.arange(12).reshape(4, 3) >>> da1 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da1 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 >>> da2 = xr.DataArray( ... data=data, ... dims=["x", "y"], ... 
coords={"x": [10, 20, 29, 39], "y": [70, 80, 90]}, ... ) >>> da2 Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 29 39 * y (y) int64 24B 70 80 90 Interpolate the values in the coordinates of the other DataArray with respect to the source's values: >>> da2.interp_like(da1) Size: 96B array([[0. , 1. , 2. ], [3. , 4. , 5. ], [6.3, 7.3, 8.3], [nan, nan, nan]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 Could also extrapolate missing values: >>> da2.interp_like(da1, kwargs={"fill_value": "extrapolate"}) Size: 96B array([[ 0. , 1. , 2. ], [ 3. , 4. , 5. ], [ 6.3, 7.3, 8.3], [ 9.3, 10.3, 11.3]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 """ if self.dtype.kind not in "uifc": raise TypeError( f"interp only works for a numeric type array. Given {self.dtype}." ) ds = self._to_temp_dataset().interp_like( other, method=method, kwargs=kwargs, assume_sorted=assume_sorted ) return self._from_temp_dataset(ds) def rename( self, new_name_or_name_dict: Hashable | Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Returns a new DataArray with renamed coordinates, dimensions or a new name. Parameters ---------- new_name_or_name_dict : str or dict-like, optional If the argument is dict-like, it used as a mapping from old names to new names for coordinates or dimensions. Otherwise, use the argument as the new name for this array. **names : Hashable, optional The keyword arguments form of a mapping from old names to new names for coordinates or dimensions. One of new_name_or_name_dict or names must be provided. Returns ------- renamed : DataArray Renamed array or array with renamed coordinates. See Also -------- Dataset.rename DataArray.swap_dims """ if new_name_or_name_dict is None and not names: # change name to None? return self._replace(name=None) if utils.is_dict_like(new_name_or_name_dict) or new_name_or_name_dict is None: # change dims/coords name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, "rename") dataset = self._to_temp_dataset()._rename(name_dict) return self._from_temp_dataset(dataset) if utils.hashable(new_name_or_name_dict) and names: # change name + dims/coords dataset = self._to_temp_dataset()._rename(names) dataarray = self._from_temp_dataset(dataset) return dataarray._replace(name=new_name_or_name_dict) # only change name return self._replace(name=new_name_or_name_dict) def swap_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs, ) -> Self: """Returns a new DataArray with swapped dimensions. Parameters ---------- dims_dict : dict-like Dictionary whose keys are current dimension names and whose values are new names. **dims_kwargs : {existing_dim: new_dim, ...}, optional The keyword arguments form of ``dims_dict``. One of dims_dict or dims_kwargs must be provided. Returns ------- swapped : DataArray DataArray with swapped dimensions. Examples -------- >>> arr = xr.DataArray( ... data=[0, 1], ... dims="x", ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... ) >>> arr Size: 16B array([0, 1]) Coordinates: * x (x) >> arr.swap_dims({"x": "y"}) Size: 16B array([0, 1]) Coordinates: x (y) >> arr.swap_dims({"x": "z"}) Size: 16B array([0, 1]) Coordinates: x (z) Self: """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. The new object is a view into the underlying array, not a copy. 
If dim is already a scalar coordinate, it will be promoted to a 1D coordinate consisting of a single value. The automatic creation of indexes to back new 1D coordinate variables is controlled by the create_index_for_new_dim kwarg. Parameters ---------- dim : Hashable, sequence of Hashable, dict, or None, optional Dimensions to include on the new variable. If provided as str or sequence of str, then dimensions are inserted with length 1. If provided as a dict, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or sequence/ndarray (giving the coordinates of the new dimensions). axis : int, sequence of int, or None, default: None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a sequence of integers is passed, multiple axes are inserted. In this case, the ``dim`` argument should be a list of the same length. If axis=None is passed, all the axes will be inserted at the start of the result array. create_index_for_new_dim : bool, default: True Whether to create new ``PandasIndex`` objects when the object being expanded contains scalar variables with names in ``dim``. **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the dim kwarg and will only be used if dim is None. Returns ------- expanded : DataArray This object, but with additional dimension(s). See Also -------- Dataset.expand_dims Examples -------- >>> da = xr.DataArray(np.arange(5), dims=("x")) >>> da Size: 40B array([0, 1, 2, 3, 4]) Dimensions without coordinates: x Add new dimension of length 2: >>> da.expand_dims(dim={"y": 2}) Size: 80B array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Dimensions without coordinates: y, x >>> da.expand_dims(dim={"y": 2}, axis=1) Size: 80B array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]) Dimensions without coordinates: x, y Add a new dimension with coordinates from array: >>> da.expand_dims(dim={"y": np.arange(5)}, axis=0) Size: 200B array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Coordinates: * y (y) int64 40B 0 1 2 3 4 Dimensions without coordinates: x """ if isinstance(dim, int): raise TypeError("dim should be Hashable or sequence/mapping of Hashables") elif isinstance(dim, Sequence) and not isinstance(dim, str): if len(dim) != len(set(dim)): raise ValueError("dims should not contain duplicate values.") dim = dict.fromkeys(dim, 1) elif dim is not None and not isinstance(dim, Mapping): dim = {dim: 1} dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims") ds = self._to_temp_dataset().expand_dims( dim, axis, create_index_for_new_dim=create_index_for_new_dim ) return self._from_temp_dataset(ds) def set_index( self, indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None, append: bool = False, **indexes_kwargs: Hashable | Sequence[Hashable], ) -> Self: """Set DataArray (multi-)indexes using one or more existing coordinates. This legacy method is limited to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See :py:meth:`~DataArray.set_xindex` for setting a pandas or a custom Xarray-compatible index from one or more arbitrary coordinates. Parameters ---------- indexes : {dim: index, ...} Mapping from names matching dimensions and values given by (lists of) the names of existing coordinates or variables to set as new (multi-)index.
append : bool, default: False If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es). **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. Returns ------- obj : DataArray Another DataArray, with this data but replaced coordinates. Examples -------- >>> arr = xr.DataArray( ... data=np.ones((2, 3)), ... dims=["x", "y"], ... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ... ) >>> arr Size: 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 24B 0 1 2 a (x) int64 16B 3 4 >>> arr.set_index(x="a") Size: 48B array([[1., 1., 1.], [1., 1., 1.]]) Coordinates: * x (x) int64 16B 3 4 * y (y) int64 24B 0 1 2 See Also -------- DataArray.reset_index DataArray.set_xindex """ ds = self._to_temp_dataset().set_index(indexes, append=append, **indexes_kwargs) return self._from_temp_dataset(ds) def reset_index( self, dims_or_levels: Hashable | Sequence[Hashable], drop: bool = False, ) -> Self: """Reset the specified index(es) or multi-index level(s). This legacy method is specific to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See the more generic :py:meth:`~DataArray.drop_indexes` and :py:meth:`~DataArray.set_xindex` method to respectively drop and set pandas or custom indexes for arbitrary coordinates. Parameters ---------- dims_or_levels : Hashable or sequence of Hashable Name(s) of the dimension(s) and/or multi-index level(s) that will be reset. drop : bool, default: False If True, remove the specified indexes and/or multi-index levels instead of extracting them as new coordinates (default: False). Returns ------- obj : DataArray Another dataarray, with this dataarray's data but replaced coordinates. See Also -------- DataArray.set_index DataArray.set_xindex DataArray.drop_indexes """ ds = self._to_temp_dataset().reset_index(dims_or_levels, drop=drop) return self._from_temp_dataset(ds) def set_xindex( self, coord_names: str | Sequence[Hashable], index_cls: type[Index] | None = None, **options, ) -> Self: """Set a new, Xarray-compatible index from one or more existing coordinate(s). Parameters ---------- coord_names : str or list Name(s) of the coordinate(s) used to build the index. If several names are given, their order matters. index_cls : subclass of :class:`~xarray.indexes.Index` The type of index to create. By default, try setting a pandas (multi-)index from the supplied coordinates. **options Options passed to the index constructor. Returns ------- obj : DataArray Another dataarray, with this dataarray's data and with a new index. """ ds = self._to_temp_dataset().set_xindex(coord_names, index_cls, **options) return self._from_temp_dataset(ds) def reorder_levels( self, dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None, **dim_order_kwargs: Sequence[int | Hashable], ) -> Self: """Rearrange index levels using input order. Parameters ---------- dim_order dict-like of Hashable to int or Hashable: optional Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. **dim_order_kwargs : optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. Returns ------- obj : DataArray Another dataarray, with this dataarray's data but replaced coordinates. 
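Examples
--------
A minimal sketch of reordering MultiIndex levels (illustrative only; doctest
output is omitted, and the coordinate names ``x1``/``x2`` are arbitrary
assumptions, not part of the API):

>>> arr = xr.DataArray(
...     np.ones((2, 2)),
...     dims=["x", "y"],
...     coords={"x1": ("x", ["a", "b"]), "x2": ("x", [0, 1])},
... )
>>> stacked = arr.set_index(x=["x1", "x2"])  # build a MultiIndex on "x"
>>> stacked.reorder_levels(x=["x2", "x1"])  # doctest: +SKIP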
""" ds = self._to_temp_dataset().reorder_levels(dim_order, **dim_order_kwargs) return self._from_temp_dataset(ds) @partial(deprecate_dims, old_name="dimensions") def stack( self, dim: Mapping[Any, Sequence[Hashable]] | None = None, create_index: bool | None = True, index_cls: type[Index] = PandasMultiIndex, **dim_kwargs: Sequence[Hashable | EllipsisType], ) -> Self: """ Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and the corresponding coordinate variables will be combined into a MultiIndex. Parameters ---------- dim : mapping of Hashable to sequence of Hashable Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new dimensions, and the existing dimensions that they replace. An ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. create_index : bool or None, default: True If True, create a multi-index for each of the stacked dimensions. If False, don't create any index. If None, create a multi-index only if exactly one single (1-d) coordinate index is found for every dimension to stack. index_cls: class, optional Can be used to pass a custom multi-index type. Must be an Xarray index that implements `.stack()`. By default, a pandas multi-index wrapper is used. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- stacked : DataArray DataArray with stacked data. Examples -------- >>> arr = xr.DataArray( ... np.arange(6).reshape(2, 3), ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> arr Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> stacked = arr.stack(z=("x", "y")) >>> stacked.indexes["z"] MultiIndex([('a', 0), ('a', 1), ('a', 2), ('b', 0), ('b', 1), ('b', 2)], name='z') See Also -------- DataArray.unstack """ ds = self._to_temp_dataset().stack( dim, create_index=create_index, index_cls=index_cls, **dim_kwargs, ) return self._from_temp_dataset(ds) def unstack( self, dim: Dims = None, *, fill_value: Any = dtypes.NA, sparse: bool = False, ) -> Self: """ Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions. New dimensions will be added at the end. Parameters ---------- dim : str, Iterable of Hashable or None, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. fill_value : scalar or dict-like, default: nan Value to be filled. If a dict-like, maps variable names to fill values. Use the data array's name to refer to its name. If not provided or if the dict-like does not contain all variables, the dtype's NA value will be used. sparse : bool, default: False Use sparse-array if True Returns ------- unstacked : DataArray Array with unstacked data. Examples -------- >>> arr = xr.DataArray( ... np.arange(6).reshape(2, 3), ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> arr Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> stacked = arr.stack(z=("x", "y")) >>> stacked.indexes["z"] MultiIndex([('a', 0), ('a', 1), ('a', 2), ('b', 0), ('b', 1), ('b', 2)], name='z') >>> roundtripped = stacked.unstack() >>> arr.identical(roundtripped) True See Also -------- DataArray.stack """ ds = self._to_temp_dataset().unstack(dim, fill_value=fill_value, sparse=sparse) return self._from_temp_dataset(ds) def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Dataset: """Unstack DataArray expanding to Dataset along a given level of a stacked coordinate. 
This is the inverse operation of Dataset.to_stacked_array. Parameters ---------- dim : Hashable Name of existing dimension to unstack level : int or Hashable, default: 0 The MultiIndex level to expand to a dataset along. Can either be the integer index of the level or its name. Returns ------- unstacked: Dataset Examples -------- >>> arr = xr.DataArray( ... np.arange(6).reshape(2, 3), ... coords=[("x", ["a", "b"]), ("y", [0, 1, 2])], ... ) >>> data = xr.Dataset({"a": arr, "b": arr.isel(y=0)}) >>> data Size: 96B Dimensions: (x: 2, y: 3) Coordinates: * x (x) >> stacked = data.to_stacked_array("z", ["x"]) >>> stacked.indexes["z"] MultiIndex([('a', 0), ('a', 1), ('a', 2), ('b', nan)], name='z') >>> roundtripped = stacked.to_unstacked_dataset(dim="z") >>> data.identical(roundtripped) True See Also -------- Dataset.to_stacked_array """ idx = self._indexes[dim].to_pandas_index() if not isinstance(idx, pd.MultiIndex): raise ValueError(f"'{dim}' is not a stacked coordinate") level_number = idx._get_level_number(level) # type: ignore[attr-defined] variables = idx.levels[level_number] variable_dim = idx.names[level_number] # pull variables out of datarray data_dict = {} for k in variables: data_dict[k] = self.sel({variable_dim: k}, drop=True).squeeze(drop=True) # unstacked dataset return Dataset(data_dict) @deprecate_dims def transpose( self, *dim: Hashable, transpose_coords: bool = True, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Return a new DataArray object with transposed dimensions. Parameters ---------- *dim : Hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. transpose_coords : bool, default: True If True, also transpose the coordinates of this DataArray. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- transposed : DataArray The returned DataArray's array is transposed. Notes ----- This operation returns a view of this array's data. It is lazy for dask-backed DataArrays but not for numpy-backed DataArrays -- the data will be fully loaded. See Also -------- numpy.transpose Dataset.transpose """ if dim: dim = tuple(infix_dims(dim, self.dims, missing_dims)) variable = self.variable.transpose(*dim) if transpose_coords: coords: dict[Hashable, Variable] = {} for name, coord in self.coords.items(): coord_dims = tuple(d for d in dim if d in coord.dims) coords[name] = coord.variable.transpose(*coord_dims) return self._replace(variable, coords) else: return self._replace(variable) @property def T(self) -> Self: return self.transpose() def drop_vars( self, names: str | Iterable[Hashable] | Callable[[Self], str | Iterable[Hashable]], *, errors: ErrorOptions = "raise", ) -> Self: """Returns an array with dropped variables. Parameters ---------- names : Hashable or iterable of Hashable or Callable Name(s) of variables to drop. If a Callable, this object is passed as its only argument and its result is used. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the DataArray are dropped and no error is raised. Returns ------- dropped : Dataset New Dataset copied from `self` with variables removed. 
Examples ------- >>> data = np.arange(12).reshape(4, 3) >>> da = xr.DataArray( ... data=data, ... dims=["x", "y"], ... coords={"x": [10, 20, 30, 40], "y": [70, 80, 90]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) int64 32B 10 20 30 40 * y (y) int64 24B 70 80 90 Removing a single variable: >>> da.drop_vars("x") Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * y (y) int64 24B 70 80 90 Dimensions without coordinates: x Removing a list of variables: >>> da.drop_vars(["x", "y"]) Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Dimensions without coordinates: x, y >>> da.drop_vars(lambda x: x.coords) Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Dimensions without coordinates: x, y """ if callable(names): names = names(self) ds = self._to_temp_dataset().drop_vars(names, errors=errors) return self._from_temp_dataset(ds) def drop_indexes( self, coord_names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", ) -> Self: """Drop the indexes assigned to the given coordinates. Parameters ---------- coord_names : hashable or iterable of hashable Name(s) of the coordinate(s) for which to drop the index. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the coordinates passed have no index or are not in the dataset. If 'ignore', no error is raised. Returns ------- dropped : DataArray A new dataarray with dropped indexes. """ ds = self._to_temp_dataset().drop_indexes(coord_names, errors=errors) return self._from_temp_dataset(ds) def drop( self, labels: Mapping[Any, Any] | None = None, dim: Hashable | None = None, *, errors: ErrorOptions = "raise", **labels_kwargs, ) -> Self: """Backward compatible method based on `drop_vars` and `drop_sel` Using either `drop_vars` or `drop_sel` is encouraged See Also -------- DataArray.drop_vars DataArray.drop_sel """ ds = self._to_temp_dataset().drop(labels, dim, errors=errors, **labels_kwargs) return self._from_temp_dataset(ds) def drop_sel( self, labels: Mapping[Any, Any] | None = None, *, errors: ErrorOptions = "raise", **labels_kwargs, ) -> Self: """Drop index labels from this DataArray. Parameters ---------- labels : mapping of Hashable to Any Index labels to drop errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the dataset are dropped and no error is raised. **labels_kwargs : {dim: label, ...}, optional The keyword arguments form of ``dim`` and ``labels`` Returns ------- dropped : DataArray Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... coords={"x": np.arange(0, 9, 2), "y": np.arange(0, 13, 3)}, ... dims=("x", "y"), ... 
) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 40B 0 2 4 6 8 * y (y) int64 40B 0 3 6 9 12 >>> da.drop_sel(x=[0, 2], y=9) Size: 96B array([[10, 11, 12, 14], [15, 16, 17, 19], [20, 21, 22, 24]]) Coordinates: * x (x) int64 24B 4 6 8 * y (y) int64 32B 0 3 6 12 >>> da.drop_sel({"x": 6, "y": [0, 3]}) Size: 96B array([[ 2, 3, 4], [ 7, 8, 9], [12, 13, 14], [22, 23, 24]]) Coordinates: * x (x) int64 32B 0 2 4 8 * y (y) int64 24B 6 9 12 """ if labels_kwargs or isinstance(labels, dict): labels = either_dict_or_kwargs(labels, labels_kwargs, "drop") ds = self._to_temp_dataset().drop_sel(labels, errors=errors) return self._from_temp_dataset(ds) def drop_isel( self, indexers: Mapping[Any, Any] | None = None, **indexers_kwargs ) -> Self: """Drop index positions from this DataArray. Parameters ---------- indexers : mapping of Hashable to Any or None, default: None Index locations to drop **indexers_kwargs : {dim: position, ...}, optional The keyword arguments form of ``dim`` and ``positions`` Returns ------- dropped : DataArray Raises ------ IndexError Examples -------- >>> da = xr.DataArray(np.arange(25).reshape(5, 5), dims=("X", "Y")) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Dimensions without coordinates: X, Y >>> da.drop_isel(X=[0, 4], Y=2) Size: 96B array([[ 5, 6, 8, 9], [10, 11, 13, 14], [15, 16, 18, 19]]) Dimensions without coordinates: X, Y >>> da.drop_isel({"X": 3, "Y": 3}) Size: 128B array([[ 0, 1, 2, 4], [ 5, 6, 7, 9], [10, 11, 12, 14], [20, 21, 22, 24]]) Dimensions without coordinates: X, Y """ dataset = self._to_temp_dataset() dataset = dataset.drop_isel(indexers=indexers, **indexers_kwargs) return self._from_temp_dataset(dataset) def dropna( self, dim: Hashable, *, how: Literal["any", "all"] = "any", thresh: int | None = None, ) -> Self: """Returns a new array with dropped labels for missing values along the provided dimension. Parameters ---------- dim : Hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. how : {"any", "all"}, default: "any" - any : if any NA values are present, drop that label - all : if all values are NA, drop that label thresh : int or None, default: None If supplied, require this many non-NA values. Returns ------- dropped : DataArray Examples -------- >>> temperature = [ ... [0, 4, 2, 9], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 4, 2, 0], ... [3, 1, 0, 0], ... ] >>> da = xr.DataArray( ... data=temperature, ... dims=["Y", "X"], ... coords=dict( ... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75])), ... lon=("X", np.array([10.0, 10.25, 10.5, 10.75])), ... ), ... 
) >>> da Size: 128B array([[ 0., 4., 2., 9.], [nan, nan, nan, nan], [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) Coordinates: lat (Y) float64 32B -20.0 -20.25 -20.5 -20.75 lon (X) float64 32B 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X >>> da.dropna(dim="Y", how="any") Size: 64B array([[0., 4., 2., 9.], [3., 1., 0., 0.]]) Coordinates: lat (Y) float64 16B -20.0 -20.75 lon (X) float64 32B 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X Drop values only if all values along the dimension are NaN: >>> da.dropna(dim="Y", how="all") Size: 96B array([[ 0., 4., 2., 9.], [nan, 4., 2., 0.], [ 3., 1., 0., 0.]]) Coordinates: lat (Y) float64 24B -20.0 -20.5 -20.75 lon (X) float64 32B 10.0 10.25 10.5 10.75 Dimensions without coordinates: Y, X """ ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh) return self._from_temp_dataset(ds) def fillna(self, value: Any) -> Self: """Fill missing values in this object. This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic, except the result is aligned to this object (``join='left'``) instead of aligned to the intersection of index coordinates (``join='inner'``). Parameters ---------- value : scalar, ndarray or DataArray Used to fill all matching missing values in this array. If the argument is a DataArray, it is first aligned with (reindexed to) this array. Returns ------- filled : DataArray Examples -------- >>> da = xr.DataArray( ... np.array([1, 4, np.nan, 0, 3, np.nan]), ... dims="Z", ... coords=dict( ... Z=("Z", np.arange(6)), ... height=("Z", np.array([0, 10, 20, 30, 40, 50])), ... ), ... ) >>> da Size: 48B array([ 1., 4., nan, 0., 3., nan]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 height (Z) int64 48B 0 10 20 30 40 50 Fill all NaN values with 0: >>> da.fillna(0) Size: 48B array([1., 4., 0., 0., 3., 0.]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 height (Z) int64 48B 0 10 20 30 40 50 Fill NaN values with corresponding values in array: >>> da.fillna(np.array([2, 9, 4, 2, 8, 9])) Size: 48B array([1., 4., 4., 0., 3., 9.]) Coordinates: * Z (Z) int64 48B 0 1 2 3 4 5 height (Z) int64 48B 0 10 20 30 40 50 """ if utils.is_dict_like(value): raise TypeError( "cannot provide fill value as a dictionary with fillna on a DataArray" ) out = ops.fillna(self, value) return out def interpolate_na( self, dim: Hashable | None = None, method: InterpOptions = "linear", limit: int | None = None, use_coordinate: bool | str = True, max_gap: ( None | int | float | str | pd.Timedelta | np.timedelta64 | datetime.timedelta ) = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """Fill in NaNs by interpolating according to different methods. Parameters ---------- dim : Hashable or None, optional Specifies the dimension along which to interpolate. method : {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", \ "barycentric", "krogh", "pchip", "spline", "akima"}, default: "linear" String indicating which method to use for interpolation: - 'linear': linear interpolation. Additional keyword arguments are passed to :py:func:`numpy.interp` - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial': are passed to :py:func:`scipy.interpolate.interp1d`. If ``method='polynomial'``, the ``order`` keyword argument must also be provided. - 'barycentric', 'krogh', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. 
use_coordinate : bool or str, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if equally-spaced along ``dim``. If True, the IndexVariable `dim` is used. If ``use_coordinate`` is a string, it specifies the name of a coordinate variable to use as the index. limit : int or None, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: - a string that is valid input for pandas.to_timedelta - a :py:class:`numpy.timedelta64` object - a :py:class:`pandas.Timedelta` object - a :py:class:`datetime.timedelta` object Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled dimensions has not been implemented yet. Gap length is defined as the difference between coordinate values at the first data point after a gap and the last value before a gap. For gaps at the beginning (end), gap length is defined as the difference between coordinate values at the first (last) valid data point and the first (last) NaN. For example, consider:: array([nan, nan, nan, 1., nan, nan, 4., nan, nan]) Coordinates: * x (x) int64 0 1 2 3 4 5 6 7 8 The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively keep_attrs : bool or None, default: None If True, the dataarray's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : dict, optional parameters passed verbatim to the underlying interpolation function Returns ------- interpolated: DataArray Filled in DataArray. See Also -------- numpy.interp scipy.interpolate Examples -------- >>> da = xr.DataArray( ... [np.nan, 2, 3, np.nan, 0], dims="x", coords={"x": [0, 1, 2, 3, 4]} ... ) >>> da Size: 40B array([nan, 2., 3., nan, 0.]) Coordinates: * x (x) int64 40B 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear") Size: 40B array([nan, 2. , 3. , 1.5, 0. ]) Coordinates: * x (x) int64 40B 0 1 2 3 4 >>> da.interpolate_na(dim="x", method="linear", fill_value="extrapolate") Size: 40B array([1. , 2. , 3. , 1.5, 0. ]) Coordinates: * x (x) int64 40B 0 1 2 3 4 """ from xarray.core.missing import interp_na return interp_na( self, dim=dim, method=method, limit=limit, use_coordinate=use_coordinate, max_gap=max_gap, keep_attrs=keep_attrs, **kwargs, ) def ffill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values forward *Requires bottleneck.* Parameters ---------- dim : Hashable Specifies the dimension along which to propagate values when filling. limit : int or None, default: None The maximum number of consecutive NaN values to forward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Returns ------- filled : DataArray Examples -------- >>> temperature = np.array( ... [ ... [np.nan, 1, 3], ... [0, np.nan, 5], ... 
[5, np.nan, np.nan], ... [3, np.nan, np.nan], ... [0, 2, 0], ... ] ... ) >>> da = xr.DataArray( ... data=temperature, ... dims=["Y", "X"], ... coords=dict( ... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75, -21.0])), ... lon=("X", np.array([10.0, 10.25, 10.5])), ... ), ... ) >>> da Size: 120B array([[nan, 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], [ 3., nan, nan], [ 0., 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill all NaN values: >>> da.ffill(dim="Y", limit=None) Size: 120B array([[nan, 1., 3.], [ 0., 1., 5.], [ 5., 1., 5.], [ 3., 1., 5.], [ 0., 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill only the first of consecutive NaN values: >>> da.ffill(dim="Y", limit=1) Size: 120B array([[nan, 1., 3.], [ 0., 1., 5.], [ 5., nan, 5.], [ 3., nan, nan], [ 0., 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X """ from xarray.core.missing import ffill return ffill(self, dim, limit=limit) def bfill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values backward *Requires bottleneck.* Parameters ---------- dim : str Specifies the dimension along which to propagate values when filling. limit : int or None, default: None The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Returns ------- filled : DataArray Examples -------- >>> temperature = np.array( ... [ ... [0, 1, 3], ... [0, np.nan, 5], ... [5, np.nan, np.nan], ... [3, np.nan, np.nan], ... [np.nan, 2, 0], ... ] ... ) >>> da = xr.DataArray( ... data=temperature, ... dims=["Y", "X"], ... coords=dict( ... lat=("Y", np.array([-20.0, -20.25, -20.50, -20.75, -21.0])), ... lon=("X", np.array([10.0, 10.25, 10.5])), ... ), ... ) >>> da Size: 120B array([[ 0., 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], [ 3., nan, nan], [nan, 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill all NaN values: >>> da.bfill(dim="Y", limit=None) Size: 120B array([[ 0., 1., 3.], [ 0., 2., 5.], [ 5., 2., 0.], [ 3., 2., 0.], [nan, 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X Fill only the first of consecutive NaN values: >>> da.bfill(dim="Y", limit=1) Size: 120B array([[ 0., 1., 3.], [ 0., nan, 5.], [ 5., nan, nan], [ 3., 2., 0.], [nan, 2., 0.]]) Coordinates: lat (Y) float64 40B -20.0 -20.25 -20.5 -20.75 -21.0 lon (X) float64 24B 10.0 10.25 10.5 Dimensions without coordinates: Y, X """ from xarray.core.missing import bfill return bfill(self, dim, limit=limit) def combine_first(self, other: Self) -> Self: """Combine two DataArray objects, with union of coordinates. This operation follows the normal broadcasting and alignment rules of ``join='outer'``. Default to non-null values of array calling the method. Use np.nan to fill in vacant cells after alignment. Parameters ---------- other : DataArray Used to fill all matching missing values in this array. 
Returns ------- DataArray """ return ops.fillna(self, other, join="outer") def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `f(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. axis : int or sequence of int, optional Axis(es) over which to repeatedly apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `f(x)` without an axis argument). keep_attrs : bool or None, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray DataArray with this object's array replaced with an array with summarized data and the indicated dimension(s) removed. """ var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs) return self._replace_maybe_drop_dims(var) def to_pandas(self) -> Self | pd.Series | pd.DataFrame: """Convert this array into a pandas object with the same shape. The type of the returned object depends on the number of DataArray dimensions: * 0D -> `xarray.DataArray` * 1D -> `pandas.Series` * 2D -> `pandas.DataFrame` Only works for arrays with 2 or fewer dimensions. The DataArray constructor performs the inverse transformation. Returns ------- result : DataArray | Series | DataFrame DataArray, pandas Series or pandas DataFrame. """ # TODO: consolidate the info about pandas constructors and the # attributes that correspond to their indexes into a separate module? constructors: dict[int, Callable] = { 0: lambda x: x, 1: pd.Series, 2: pd.DataFrame, } try: constructor = constructors[self.ndim] except KeyError as err: raise ValueError( f"Cannot convert arrays with {self.ndim} dimensions into " "pandas objects. Requires 2 or fewer dimensions." ) from err indexes = [self.get_index(dim) for dim in self.dims] if isinstance(self._variable._data, PandasExtensionArray): values = self._variable._data.array else: values = self.values pandas_object = constructor(values, *indexes) if isinstance(pandas_object, pd.Series): pandas_object.name = self.name return pandas_object def to_dataframe( self, name: Hashable | None = None, dim_order: Sequence[Hashable] | None = None ) -> pd.DataFrame: """Convert this array and its coordinates into a tidy pandas.DataFrame. The DataFrame is indexed by the Cartesian product of index coordinates (in the form of a :py:class:`pandas.MultiIndex`). Other coordinates are included as columns in the DataFrame. For 1D and 2D DataArrays, see also :py:func:`DataArray.to_pandas` which doesn't rely on a MultiIndex to build the DataFrame. Parameters ---------- name: Hashable or None, optional Name to give to this array (required if unnamed). 
dim_order: Sequence of Hashable or None, optional Hierarchical dimension order for the resulting dataframe. Array content is transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dataframe. If provided, must include all dimensions of this DataArray. By default, dimensions are sorted according to the DataArray dimensions order. Returns ------- result: DataFrame DataArray as a pandas DataFrame. See also -------- DataArray.to_pandas DataArray.to_series """ if name is None: name = self.name if name is None: raise ValueError( "cannot convert an unnamed DataArray to a " "DataFrame: use the ``name`` parameter" ) if self.ndim == 0: raise ValueError("cannot convert a scalar to a DataFrame") # By using a unique name, we can convert a DataArray into a DataFrame # even if it shares a name with one of its coordinates. # I would normally use unique_name = object() but that results in a # dataframe with columns in the wrong order, for reasons I have not # been able to debug (possibly a pandas bug?). unique_name = "__unique_name_identifier_z98xfz98xugfg73ho__" ds = self._to_dataset_whole(name=unique_name) if dim_order is None: ordered_dims = dict(zip(self.dims, self.shape, strict=True)) else: ordered_dims = ds._normalize_dim_order(dim_order=dim_order) df = ds._to_dataframe(ordered_dims) df.columns = [name if c == unique_name else c for c in df.columns] return df def to_series(self) -> pd.Series: """Convert this array into a pandas.Series. The Series is indexed by the Cartesian product of index coordinates (in the form of a :py:class:`pandas.MultiIndex`). Returns ------- result : Series DataArray as a pandas Series. See also -------- DataArray.to_pandas DataArray.to_dataframe """ index = self.coords.to_index() return pd.Series(self.values.reshape(-1), index=index, name=self.name) def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray: """Convert this array into a numpy.ma.MaskedArray Parameters ---------- copy : bool, default: True If True make a copy of the array in the result. If False, a MaskedArray view of DataArray.values is returned. Returns ------- result : MaskedArray Masked where invalid values (nan or inf) occur. """ values = self.to_numpy() # only compute lazy arrays once isnull = pd.isnull(values) return np.ma.MaskedArray(data=values, mask=isnull, copy=copy) # path=None writes to bytes @overload def to_netcdf( self, path: None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, *, compute: Literal[False], invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed: ... 
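    # Illustrative usage sketch of how the overloads above combine in practice
    # (the file name "out.nc" is an arbitrary example, not part of the API):
    #
    #     buf = da.to_netcdf()                             # path=None -> in-memory bytes (memoryview)
    #     da.to_netcdf("out.nc")                           # eager write to disk -> None
    #     delayed = da.to_netcdf("out.nc", compute=False)  # deferred write -> dask.delayed.Delayed (requires dask)
    #     delayed.compute()                                # perform the write later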
# default return None @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: Literal[True] = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None: ... # if compute cannot be evaluated at type check time # we may get back either Delayed or None @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed | None: ... def to_netcdf( self, path: str | PathLike | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Hashable, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview | Delayed | None: """Write DataArray contents to a netCDF file. Parameters ---------- path : str, path-like, file-like or None, optional Path to which to save this datatree, or a file-like object to write it to (which must support read and write and be seekable) or None (default) to return in-memory bytes as a memoryview. mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only netCDF 3 compatible API features. * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not handle 2+ GB files very well. All formats are supported by the netCDF4-python library. scipy.io.netcdf only supports the last two formats. The default format is NETCDF4 if you are saving a file to disk and have the netCDF4-python library available. Otherwise, xarray falls back to using scipy to write netCDF files and defaults to the NETCDF3_64BIT format (scipy does not support netCDF4). group : str, optional Path to the netCDF4 group in the given file to open (only works for format='NETCDF4'). The group(s) will be created if necessary. engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4' if writing to a file on disk. 
encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1, "zlib": True}, ...}`` The `h5netcdf` engine supports both the NetCDF4-style compression encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py ones ``{"compression": "gzip", "compression_opts": 9}``. This allows using any compression plugin installed in the HDF5 library, e.g. LZF. unlimited_dims : iterable of Hashable, optional Dimension(s) that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding["unlimited_dims"]``. compute: bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. invalid_netcdf: bool, default: False Only valid along with ``engine="h5netcdf"``. If True, allow writing hdf5 files which are invalid netcdf as described in https://github.com/h5netcdf/h5netcdf. Returns ------- * ``memoryview`` if path is None * ``dask.delayed.Delayed`` if compute is False * None otherwise Notes ----- Only xarray.Dataset objects can be written to netCDF files, so the xarray.DataArray is converted to a xarray.Dataset object containing a single variable. If the DataArray has no name, or if the name is the same as a coordinate name, then it is given the name ``"__xarray_dataarray_variable__"``. [netCDF4 backend only] netCDF4 enums are decoded into the dataarray dtype metadata. See Also -------- Dataset.to_netcdf """ from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE, to_netcdf if self.name is None: # If no name is set then use a generic xarray name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) elif self.name in self.coords or self.name in self.dims: # The name is the same as one of the coords names, which netCDF # doesn't support, so rename it but keep track of the old name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) dataset.attrs[DATAARRAY_NAME] = self.name else: # No problems with the name - so we're fine! dataset = self.to_dataset() return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:( dataset, path, mode=mode, format=format, group=group, engine=engine, encoding=encoding, unlimited_dims=unlimited_dims, compute=compute, multifile=False, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) # compute=True (default) returns ZarrStore @overload def to_zarr( self, store: MutableMapping | str | PathLike[str] | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, *, encoding: Mapping | None = None, compute: Literal[True] = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore: ... 
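    # Illustrative usage sketch of the overloads above (the store path "out.zarr"
    # is an arbitrary example, not part of the API):
    #
    #     zstore = da.to_zarr("out.zarr", mode="w")                  # eager write -> ZarrStore
    #     delayed = da.to_zarr("out.zarr", mode="w", compute=False)  # deferred write -> dask.delayed.Delayed
    #     delayed.compute()                                          # write the chunked array data later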
# compute=False returns dask.Delayed @overload def to_zarr( self, store: MutableMapping | str | PathLike[str] | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[False], consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> Delayed: ... def to_zarr( self, store: MutableMapping | str | PathLike[str] | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore | Delayed: """Write DataArray contents to a Zarr store Zarr chunks are determined in the following way: - From the ``chunks`` attribute in each variable's ``encoding`` (can be set via `DataArray.chunk`). - If the variable is a Dask array, from the dask chunks - If neither Dask chunks nor encoding chunks are present, chunks will be determined automatically by Zarr - If both Dask chunks and encoding chunks are present, encoding chunks will be used, provided that there is a many-to-one relationship between encoding chunks and dask chunks (i.e. Dask chunks are bigger than and evenly divide encoding chunks); otherwise raise a ``ValueError``. This restriction ensures that no synchronization / locks are required when writing. To disable this restriction, use ``safe_chunks=False``. Parameters ---------- store : MutableMapping, str or path-like, optional Store or path to directory in local or remote file system. chunk_store : MutableMapping, str or path-like, optional Store or path to directory in local or remote file system only for Zarr array chunks. Requires zarr-python v2.4.0 or later. mode : {"w", "w-", "a", "a-", r+", None}, optional Persistence mode: "w" means create (overwrite if exists); "w-" means create (fail if exists); "a" means override all existing variables including dimension coordinates (create if does not exist); "a-" means only append those variables that have ``append_dim``. "r+" means modify existing array *values* only (raise an error if any metadata or shapes would change). The default mode is "a" if ``append_dim`` is set. Otherwise, it is "r+" if ``region`` is set and ``w-`` otherwise. synchronizer : object, optional Zarr array synchronizer. group : str, optional Group path. (a.k.a. `path` in zarr terminology.) 
encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}`` compute : bool, default: True If True write array data immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed to write array data later. Metadata is always updated eagerly. consolidated : bool, optional If True, apply zarr's `consolidate_metadata` function to the store after writing metadata and read existing stores with consolidated metadata; if False, do not. The default (`consolidated=None`) means write consolidated metadata and attempt to read consolidated metadata for existing stores (falling back to non-consolidated). When the experimental ``zarr_version=3``, ``consolidated`` must be either be ``None`` or ``False``. append_dim : hashable, optional If set, the dimension along which the data will be appended. All other dimensions on overridden variables must remain the same size. region : dict, optional Optional mapping from dimension names to integer slices along dataarray dimensions to indicate the region of existing zarr array(s) in which to write this datarray's data. For example, ``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate that values should be written to the region ``0:1000`` along ``x`` and ``10000:11000`` along ``y``. Two restrictions apply to the use of ``region``: - If ``region`` is set, _all_ variables in a dataarray must have at least one dimension in common with the region. Other variables should be written in a separate call to ``to_zarr()``. - Dimensions cannot be included in both ``region`` and ``append_dim`` at the same time. To create empty arrays to fill in with ``region``, use a separate call to ``to_zarr()`` with ``compute=False``. See "Modifying existing Zarr stores" in the reference documentation for full details. Users are expected to ensure that the specified region aligns with Zarr chunk boundaries, and that dask chunks are also aligned. Xarray makes limited checks that these multiple chunk boundaries line up. It is possible to write incomplete chunks and corrupt the data with this option if you are not careful. safe_chunks : bool, default: True If True, only allow writes to when there is a many-to-one relationship between Zarr chunks (specified in encoding) and Dask chunks. Set False to override this restriction; however, data may become corrupted if Zarr arrays are written in parallel. This option may be useful in combination with ``compute=False`` to initialize a Zarr store from an existing DataArray with arbitrary chunk structure. In addition to the many-to-one relationship validation, it also detects partial chunks writes when using the region parameter, these partial chunks are considered unsafe in the mode "r+" but safe in the mode "a". Note: Even with these validations it can still be unsafe to write two or more chunked arrays in the same location in parallel if they are not writing in independent regions, for those cases it is better to use a synchronizer. align_chunks: bool, default False If True, rechunks the Dask array to align with Zarr chunks before writing. This ensures each Dask chunk maps to one or more contiguous Zarr chunks, which avoids race conditions. Internally, the process sets safe_chunks=False and tries to preserve the original Dask chunking as much as possible. 
Note: While this alignment avoids write conflicts stemming from chunk boundary misalignment, it does not protect against race conditions if multiple uncoordinated processes write to the same Zarr array concurrently. storage_options : dict, optional Any additional parameters for the storage backend (ignored for local paths). zarr_version : int or None, optional .. deprecated:: 2024.9.1 Use ``zarr_format`` instead. zarr_format : int or None, optional The desired zarr format to target (currently 2 or 3). The default of None will attempt to determine the zarr version from ``store`` when possible, otherwise defaulting to the default version used by the zarr-python library installed. write_empty_chunks : bool or None, optional If True, all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not be stored, and the store entry for that chunk's key is deleted. This setting enables sparser storage, as only chunks with non-fill-value data are stored, at the expense of overhead associated with checking the data of each chunk. If None (default) fall back to specification(s) in ``encoding`` or Zarr defaults. A ``ValueError`` will be raised if the value of this (if not None) differs with ``encoding``. chunkmanager_store_kwargs : dict, optional Additional keyword arguments passed on to the `ChunkManager.store` method used to store chunked arrays. For example for a dask array additional kwargs will be passed eventually to :py:func:`dask.array.store()`. Experimental API that should not be relied upon. Returns ------- * ``dask.delayed.Delayed`` if compute is False * ZarrStore otherwise References ---------- https://zarr.readthedocs.io/ Notes ----- Zarr chunking behavior: If chunks are found in the encoding argument or attribute corresponding to any DataArray, those chunks are used. If a DataArray is a dask array, it is written with those chunks. If not other chunks are found, Zarr uses its own heuristics to choose automatic chunk sizes. encoding: The encoding attribute (if exists) of the DataArray(s) will be used. Override any existing encodings by providing the ``encoding`` kwarg. ``fill_value`` handling: There exists a subtlety in interpreting zarr's ``fill_value`` property. For zarr v2 format arrays, ``fill_value`` is *always* interpreted as an invalid value similar to the ``_FillValue`` attribute in CF/netCDF. For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute will be used to mask the data if requested using ``mask_and_scale=True``. See this `Github issue `_ for more. See Also -------- Dataset.to_zarr :ref:`io.zarr` The I/O user guide, with more details and examples. """ from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE, to_zarr if self.name is None: # If no name is set then use a generic xarray name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) elif self.name in self.coords or self.name in self.dims: # The name is the same as one of the coords names, which the netCDF data model # does not support, so rename it but keep track of the old name dataset = self.to_dataset(name=DATAARRAY_VARIABLE) dataset.attrs[DATAARRAY_NAME] = self.name else: # No problems with the name - so we're fine! 
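        # Illustrative sketch, not part of the upstream file: the ``compute=False`` /
        # ``region`` workflow described in the ``to_zarr`` docstring above. The store
        # path "example.zarr", the array name "values" and the chunk size of 100 are
        # assumptions for the example; dask and zarr must be installed, and the
        # region slices must line up with the Zarr chunk boundaries.
        #
        # import numpy as np
        # import xarray as xr
        #
        # da = xr.DataArray(
        #     np.zeros(1000), dims="x", coords={"x": np.arange(1000)}, name="values"
        # ).chunk({"x": 100})
        # # Create the store and write metadata; chunked values are not computed yet.
        # da.to_zarr("example.zarr", mode="w", compute=False)
        # # Fill one slice of the existing store; 0:100 matches the 100-element chunks,
        # # so the write is safe under the default safe_chunks=True.
        # da.isel(x=slice(0, 100)).to_zarr("example.zarr", region={"x": slice(0, 100)})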
dataset = self.to_dataset() return to_zarr( # type: ignore[call-overload,misc] dataset, store=store, chunk_store=chunk_store, mode=mode, synchronizer=synchronizer, group=group, encoding=encoding, compute=compute, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, chunkmanager_store_kwargs=chunkmanager_store_kwargs, ) def to_dict( self, data: bool | Literal["list", "array"] = "list", encoding: bool = False ) -> dict[str, Any]: """ Convert this xarray.DataArray into a dictionary following xarray naming conventions. Converts all variables and attributes to native Python objects. Useful for converting to json. To avoid datetime incompatibility use decode_times=False kwarg in xarray.open_dataset. Parameters ---------- data : bool or {"list", "array"}, default: "list" Whether to include the actual data in the dictionary. When set to False, returns just the schema. If set to "array", returns data as underlying array type. If set to "list" (or True for backwards compatibility), returns data in lists of Python data types. Note that for obtaining the "list" output efficiently, use `da.compute().to_dict(data="list")`. encoding : bool, default: False Whether to include the Dataset's encoding in the dictionary. Returns ------- dict: dict See Also -------- DataArray.from_dict Dataset.to_dict """ d = self.variable.to_dict(data=data) d.update({"coords": {}, "name": self.name}) for k, coord in self.coords.items(): d["coords"][k] = coord.variable.to_dict(data=data) if encoding: d["encoding"] = dict(self.encoding) return d @classmethod def from_dict(cls, d: Mapping[str, Any]) -> Self: """Convert a dictionary into an xarray.DataArray Parameters ---------- d : dict Mapping with a minimum structure of {"dims": [...], "data": [...]} Returns ------- obj : xarray.DataArray See Also -------- DataArray.to_dict Dataset.from_dict Examples -------- >>> d = {"dims": "t", "data": [1, 2, 3]} >>> da = xr.DataArray.from_dict(d) >>> da Size: 24B array([1, 2, 3]) Dimensions without coordinates: t >>> d = { ... "coords": { ... "t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}} ... }, ... "attrs": {"title": "air temperature"}, ... "dims": "t", ... "data": [10, 20, 30], ... "name": "a", ... } >>> da = xr.DataArray.from_dict(d) >>> da Size: 24B array([10, 20, 30]) Coordinates: * t (t) int64 24B 0 1 2 Attributes: title: air temperature """ coords = None if "coords" in d: try: coords = { k: (v["dims"], v["data"], v.get("attrs")) for k, v in d["coords"].items() } except KeyError as e: raise ValueError( f"cannot convert dict when coords are missing the key '{e.args[0]}'" ) from e try: data = d["data"] except KeyError as err: raise ValueError("cannot convert dict without the key 'data''") from err else: obj = cls(data, coords, d.get("dims"), d.get("name"), d.get("attrs")) obj.encoding.update(d.get("encoding", {})) return obj @classmethod def from_series(cls, series: pd.Series, sparse: bool = False) -> DataArray: """Convert a pandas.Series into an xarray.DataArray. If the series's index is a MultiIndex, it will be expanded into a tensor product of one-dimensional coordinates (filling in missing values with NaN). Thus this operation should be the inverse of the `to_series` method. Parameters ---------- series : Series Pandas Series object to convert. 
sparse : bool, default: False If sparse=True, creates a sparse array instead of a dense NumPy array. Requires the pydata/sparse package. See Also -------- DataArray.to_series Dataset.from_dataframe """ temp_name = "__temporary_name" df = pd.DataFrame({temp_name: series}) ds = Dataset.from_dataframe(df, sparse=sparse) result = ds[temp_name] result.name = series.name return result def to_iris(self) -> iris_Cube: """Convert this array into a iris.cube.Cube""" from xarray.convert import to_iris return to_iris(self) @classmethod def from_iris(cls, cube: iris_Cube) -> Self: """Convert a iris.cube.Cube into an xarray.DataArray""" from xarray.convert import from_iris return from_iris(cube) def _all_compat(self, other: Self, compat_str: str) -> bool: """Helper function for equals, broadcast_equals, and identical""" def compat(x, y): return getattr(x.variable, compat_str)(y.variable) return utils.dict_equiv(self.coords, other.coords, compat=compat) and compat( self, other ) def broadcast_equals(self, other: Self) -> bool: """Two DataArrays are broadcast equal if they are equal after broadcasting them against each other such that they have the same dimensions. Parameters ---------- other : DataArray DataArray to compare to. Returns ---------- equal : bool True if the two DataArrays are broadcast equal. See Also -------- DataArray.equals DataArray.identical Examples -------- >>> a = xr.DataArray([1, 2], dims="X") >>> b = xr.DataArray([[1, 1], [2, 2]], dims=["X", "Y"]) >>> a Size: 16B array([1, 2]) Dimensions without coordinates: X >>> b Size: 32B array([[1, 1], [2, 2]]) Dimensions without coordinates: X, Y .equals returns True if two DataArrays have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two DataArrays against each other have the same values, dimensions, and coordinates. >>> a.equals(b) False >>> a2, b2 = xr.broadcast(a, b) >>> a2.equals(b2) True >>> a.broadcast_equals(b) True """ try: return self._all_compat(other, "broadcast_equals") except (TypeError, AttributeError): return False def equals(self, other: Self) -> bool: """True if two DataArrays have the same dimensions, coordinates and values; otherwise False. DataArrays can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for ``DataArray`` does element-wise comparisons (like numpy.ndarrays). Parameters ---------- other : DataArray DataArray to compare to. Returns ---------- equal : bool True if the two DataArrays are equal. See Also -------- DataArray.broadcast_equals DataArray.identical Examples -------- >>> a = xr.DataArray([1, 2, 3], dims="X") >>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m")) >>> c = xr.DataArray([1, 2, 3], dims="Y") >>> d = xr.DataArray([3, 2, 1], dims="X") >>> a Size: 24B array([1, 2, 3]) Dimensions without coordinates: X >>> b Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> c Size: 24B array([1, 2, 3]) Dimensions without coordinates: Y >>> d Size: 24B array([3, 2, 1]) Dimensions without coordinates: X >>> a.equals(b) True >>> a.equals(c) False >>> a.equals(d) False """ try: return self._all_compat(other, "equals") except (TypeError, AttributeError): return False def identical(self, other: Self) -> bool: """Like equals, but also checks the array name and attributes, and attributes on all coordinates. Parameters ---------- other : DataArray DataArray to compare to. 
Returns ---------- equal : bool True if the two DataArrays are identical. See Also -------- DataArray.broadcast_equals DataArray.equals Examples -------- >>> a = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width") >>> b = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="m"), name="Width") >>> c = xr.DataArray([1, 2, 3], dims="X", attrs=dict(units="ft"), name="Width") >>> a Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> b Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: m >>> c Size: 24B array([1, 2, 3]) Dimensions without coordinates: X Attributes: units: ft >>> a.equals(b) True >>> a.identical(b) True >>> a.equals(c) True >>> a.identical(c) False """ try: return self.name == other.name and self._all_compat(other, "identical") except (TypeError, AttributeError): return False def __array_wrap__(self, obj, context=None, return_scalar=False) -> Self: new_var = self.variable.__array_wrap__(obj, context, return_scalar) return self._replace(new_var) def __matmul__(self, obj: T_Xarray) -> T_Xarray: return self.dot(obj) def __rmatmul__(self, other: T_Xarray) -> T_Xarray: # currently somewhat duplicative, as only other DataArrays are # compatible with matmul return computation.dot(other, self) def _unary_op(self, f: Callable, *args, **kwargs) -> Self: keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered") warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) with np.errstate(all="ignore"): da = self.__array_wrap__(f(self.variable.data, *args, **kwargs)) if keep_attrs: da.attrs = self.attrs return da def _binary_op( self, other: DaCompatible, f: Callable, reflexive: bool = False ) -> Self: from xarray.core.datatree import DataTree from xarray.core.groupby import GroupBy if isinstance(other, DataTree | Dataset | GroupBy): return NotImplemented if isinstance(other, DataArray): align_type = OPTIONS["arithmetic_join"] self, other = align(self, other, join=align_type, copy=False) other_variable_or_arraylike: DaCompatible = getattr(other, "variable", other) other_coords = getattr(other, "coords", None) variable = ( f(self.variable, other_variable_or_arraylike) if not reflexive else f(other_variable_or_arraylike, self.variable) ) coords, indexes = self.coords._merge_raw(other_coords, reflexive) name = result_name([self, other]) return self._replace(variable, coords, name, indexes=indexes) def _inplace_binary_op(self, other: DaCompatible, f: Callable) -> Self: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): raise TypeError( "in-place operations between a DataArray and " "a grouped object are not permitted" ) # n.b. we can't align other to self (with other.reindex_like(self)) # because `other` may be converted into floats, which would cause # in-place arithmetic to fail unpredictably. Instead, we simply # don't support automatic alignment with in-place arithmetic. other_coords = getattr(other, "coords", None) other_variable = getattr(other, "variable", other) try: with self.coords._merge_inplace(other_coords): f(self.variable, other_variable) except MergeError as exc: raise MergeError( "Automatic alignment is not supported for in-place operations.\n" "Consider aligning the indices manually or using a not-in-place operation.\n" "See https://github.com/pydata/xarray/issues/3910 for more explanations." 
) from exc return self def _copy_attrs_from(self, other: DataArray | Dataset | Variable) -> None: self.attrs = other.attrs plot = utils.UncachedAccessor(DataArrayPlotAccessor) def _title_for_slice(self, truncate: int = 50) -> str: """ If the dataarray has 1 dimensional coordinates or comes from a slice we can show that info in the title Parameters ---------- truncate : int, default: 50 maximum number of characters for title Returns ------- title : string Can be used for plot titles """ one_dims = [] for dim, coord in self.coords.items(): if coord.size == 1: one_dims.append( f"{dim} = {format_item(coord.values)}{_get_units_from_attrs(coord)}" ) title = ", ".join(one_dims) if len(title) > truncate: title = title[: (truncate - 3)] + "..." return title def diff( self, dim: Hashable, n: int = 1, *, label: Literal["upper", "lower"] = "upper", ) -> Self: """Calculate the n-th order discrete difference along given axis. Parameters ---------- dim : Hashable Dimension over which to calculate the finite difference. n : int, default: 1 The number of times values are differenced. label : {"upper", "lower"}, default: "upper" The new coordinate in dimension ``dim`` will have the values of either the minuend's or subtrahend's coordinate for values 'upper' and 'lower', respectively. Returns ------- difference : DataArray The n-th order finite difference of this object. Notes ----- `n` matches numpy's behavior and is different from pandas' first argument named `periods`. Examples -------- >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ["x"]) >>> arr.diff("x") Size: 24B array([0, 1, 0]) Coordinates: * x (x) int64 24B 2 3 4 >>> arr.diff("x", 2) Size: 16B array([ 1, -1]) Coordinates: * x (x) int64 16B 3 4 See Also -------- DataArray.differentiate """ ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label) return self._from_temp_dataset(ds) def shift( self, shifts: Mapping[Any, int] | None = None, fill_value: Any = dtypes.NA, **shifts_kwargs: int, ) -> Self: """Shift this DataArray by an offset along one or more dimensions. Only the data is moved; coordinates stay in place. This is consistent with the behavior of ``shift`` in pandas. Values shifted from beyond array bounds will appear at one end of each dimension, which are filled according to `fill_value`. For periodic offsets instead see `roll`. Parameters ---------- shifts : mapping of Hashable to int or None, optional Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value : scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : DataArray DataArray with the same coordinates and attributes but shifted data. See Also -------- roll Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.shift(x=1) Size: 24B array([nan, 5., 6.]) Dimensions without coordinates: x """ variable = self.variable.shift( shifts=shifts, fill_value=fill_value, **shifts_kwargs ) return self._replace(variable=variable) def roll( self, shifts: Mapping[Hashable, int] | None = None, roll_coords: bool = False, **shifts_kwargs: int, ) -> Self: """Roll this array by an offset along one or more dimensions. Unlike shift, roll treats the given dimensions as periodic, so will not create any missing values to be filled. Unlike shift, roll may rotate all variables, including coordinates if specified. 
The direction of rotation is consistent with :py:func:`numpy.roll`. Parameters ---------- shifts : mapping of Hashable to int, optional Integer offset to rotate each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. roll_coords : bool, default: False Indicates whether to roll the coordinates by the offset too. **shifts_kwargs : {dim: offset, ...}, optional The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- rolled : DataArray DataArray with the same attributes but rolled data and coordinates. See Also -------- shift Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.roll(x=1) Size: 24B array([7, 5, 6]) Dimensions without coordinates: x """ ds = self._to_temp_dataset().roll( shifts=shifts, roll_coords=roll_coords, **shifts_kwargs ) return self._from_temp_dataset(ds) @property def real(self) -> Self: """ The real part of the array. See Also -------- numpy.ndarray.real """ return self._replace(self.variable.real) @property def imag(self) -> Self: """ The imaginary part of the array. See Also -------- numpy.ndarray.imag """ return self._replace(self.variable.imag) @deprecate_dims def dot( self, other: T_Xarray, dim: Dims = None, ) -> T_Xarray: """Perform dot product of two DataArrays along their shared dims. Equivalent to taking taking tensordot over all shared dims. Parameters ---------- other : DataArray The other array with which the dot product is performed. dim : ..., str, Iterable of Hashable or None, optional Which dimensions to sum over. Ellipsis (`...`) sums over all dimensions. If not specified, then all the common dimensions are summed over. Returns ------- result : DataArray Array resulting from the dot product over all shared dimensions. See Also -------- dot numpy.tensordot Examples -------- >>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4)) >>> da = xr.DataArray(da_vals, dims=["x", "y", "z"]) >>> dm_vals = np.arange(4) >>> dm = xr.DataArray(dm_vals, dims=["z"]) >>> dm.dims ('z',) >>> da.dims ('x', 'y', 'z') >>> dot_result = da.dot(dm) >>> dot_result.dims ('x', 'y') """ if isinstance(other, Dataset): raise NotImplementedError( "dot products are not yet supported with Dataset objects." ) if not isinstance(other, DataArray): raise TypeError("dot only operates on DataArrays.") return computation.dot(self, other, dim=dim) def sortby( self, variables: ( Hashable | DataArray | Sequence[Hashable | DataArray] | Callable[[Self], Hashable | DataArray | Sequence[Hashable | DataArray]] ), ascending: bool = True, ) -> Self: """Sort object by labels or values (along an axis). Sorts the dataarray, either along specified dimensions, or according to values of 1-D dataarrays that share dimension with calling object. If the input variables are dataarrays, then the dataarrays are aligned (via left-join) to the calling object prior to sorting by cell values. NaNs are sorted to the end, following Numpy convention. If multiple sorts along the same dimension is given, numpy's lexsort is performed along that dimension: https://numpy.org/doc/stable/reference/generated/numpy.lexsort.html and the FIRST key in the sequence is used as the primary sort key, followed by the 2nd key, etc. Parameters ---------- variables : Hashable, DataArray, sequence of Hashable or DataArray, or Callable 1D DataArray objects or name(s) of 1D variable(s) in coords whose values are used to sort this array. If a callable, the callable is passed this object, and the result is used as the value for cond. 
ascending : bool, default: True Whether to sort by ascending or descending order. Returns ------- sorted : DataArray A new dataarray where all the specified dims are sorted by dim labels. See Also -------- Dataset.sortby numpy.sort pandas.sort_values pandas.sort_index Examples -------- >>> da = xr.DataArray( ... np.arange(5, 0, -1), ... coords=[pd.date_range("1/1/2000", periods=5)], ... dims="time", ... ) >>> da Size: 40B array([5, 4, 3, 2, 1]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-01 2000-01-02 ... 2000-01-05 >>> da.sortby(da) Size: 40B array([1, 2, 3, 4, 5]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 >>> da.sortby(lambda x: x) Size: 40B array([1, 2, 3, 4, 5]) Coordinates: * time (time) datetime64[ns] 40B 2000-01-05 2000-01-04 ... 2000-01-01 """ # We need to convert the callable here rather than pass it through to the # dataset method, since otherwise the dataset method would try to call the # callable with the dataset as the object if callable(variables): variables = variables(self) ds = self._to_temp_dataset().sortby(variables, ascending=ascending) return self._from_temp_dataset(ds) def quantile( self, q: ArrayLike, dim: Dims = None, *, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, ) -> Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. Parameters ---------- q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or Iterable of Hashable, optional Dimension(s) over which to apply quantile. method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : DataArray If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile Examples -------- >>> da = xr.DataArray( ... data=[[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]], ... coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]}, ... dims=("x", "y"), ... 
) >>> da.quantile(0) # or da.quantile(0, dim=...) Size: 8B array(0.7) Coordinates: quantile float64 8B 0.0 >>> da.quantile(0, dim="x") Size: 32B array([0.7, 4.2, 2.6, 1.5]) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 quantile float64 8B 0.0 >>> da.quantile([0, 0.5, 1]) Size: 24B array([0.7, 3.4, 9.4]) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 >>> da.quantile([0, 0.5, 1], dim="x") Size: 96B array([[0.7 , 4.2 , 2.6 , 1.5 ], [3.6 , 5.75, 6. , 1.7 ], [6.5 , 7.3 , 9.4 , 1.9 ]]) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 * quantile (quantile) float64 24B 0.0 0.5 1.0 References ---------- .. [1] R. J. Hyndman and Y. Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ ds = self._to_temp_dataset().quantile( q, dim=dim, keep_attrs=keep_attrs, method=method, skipna=skipna, interpolation=interpolation, ) return self._from_temp_dataset(ds) def rank( self, dim: Hashable, *, pct: bool = False, keep_attrs: bool | None = None, ) -> Self: """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If pct, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : Hashable Dimension over which to compute rank. pct : bool, default: False If True, compute percentage ranks, otherwise compute integer ranks. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. Returns ------- ranked : DataArray DataArray with the same coordinates and dtype 'float64'. Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims="x") >>> arr.rank("x") Size: 24B array([1., 2., 3.]) Dimensions without coordinates: x """ ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs) return self._from_temp_dataset(ds) def differentiate( self, coord: Hashable, edge_order: Literal[1, 2] = 1, datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Differentiate the array with the second order accurate central differences. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : Hashable The coordinate to be used to compute the gradient. edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. datetime_unit : {"W", "D", "h", "m", "s", "ms", \ "us", "ns", "ps", "fs", "as", None}, optional Unit to compute gradient. Only valid for datetime coordinate. "Y" and "M" are not available as datetime_unit. Returns ------- differentiated: DataArray See also -------- numpy.gradient: corresponding numpy function Examples -------- >>> da = xr.DataArray( ... np.arange(12).reshape(4, 3), ... dims=["x", "y"], ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.differentiate("x") Size: 96B array([[30. , 30. , 30. ], [27.54545455, 27.54545455, 27.54545455], [27.54545455, 27.54545455, 27.54545455], [30. , 30. , 30. 
]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y """ ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit) return self._from_temp_dataset(ds) def integrate( self, coord: Hashable | Sequence[Hashable] = None, datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : Hashable, or sequence of Hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if a datetime coordinate is used. Returns ------- integrated : DataArray See also -------- Dataset.integrate numpy.trapz : corresponding numpy function Examples -------- >>> da = xr.DataArray( ... np.arange(12).reshape(4, 3), ... dims=["x", "y"], ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.integrate("x") Size: 24B array([5.4, 6.6, 7.8]) Dimensions without coordinates: y """ ds = self._to_temp_dataset().integrate(coord, datetime_unit) return self._from_temp_dataset(ds) def cumulative_integrate( self, coord: Hashable | Sequence[Hashable] = None, datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate cumulatively along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. The first entry of the cumulative integral is always 0, in order to keep the length of the dimension unchanged between input and output. Parameters ---------- coord : Hashable, or sequence of Hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if a datetime coordinate is used. Returns ------- integrated : DataArray See also -------- Dataset.cumulative_integrate scipy.integrate.cumulative_trapezoid : corresponding scipy function Examples -------- >>> da = xr.DataArray( ... np.arange(12).reshape(4, 3), ... dims=["x", "y"], ... coords={"x": [0, 0.1, 1.1, 1.2]}, ... ) >>> da Size: 96B array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y >>> >>> da.cumulative_integrate("x") Size: 96B array([[0. , 0. , 0. ], [0.15, 0.25, 0.35], [4.65, 5.75, 6.85], [5.4 , 6.6 , 7.8 ]]) Coordinates: * x (x) float64 32B 0.0 0.1 1.1 1.2 Dimensions without coordinates: y """ ds = self._to_temp_dataset().cumulative_integrate(coord, datetime_unit) return self._from_temp_dataset(ds) def unify_chunks(self) -> Self: """Unify chunk size along all chunked dimensions of this DataArray. Returns ------- DataArray with consistent chunk sizes for all dask-array variables See Also -------- dask.array.core.unify_chunks """ return unify_chunks(self)[0] def map_blocks( self, func: Callable[..., T_Xarray], args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, template: DataArray | Dataset | None = None, ) -> T_Xarray: """ Apply a function to each block of this DataArray. .. warning:: This method is experimental and its signature may change. Parameters ---------- func : callable User-provided function that accepts a DataArray as its first parameter. 
The function will receive a subset or 'block' of this DataArray (see below), corresponding to one chunk along each chunked dimension. ``func`` will be executed as ``func(subset_dataarray, *subset_args, **kwargs)``. This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with this object, otherwise an error is raised. kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like this object but has sizes 0, to determine properties of the returned object such as dtype, variable names, attributes, new dimensions and new indexes (if any). ``template`` must be provided if the function changes the size of existing dimensions. When provided, ``attrs`` on variables in `template` are copied over to the result. Any ``attrs`` set by ``func`` will be ignored. Returns ------- A single DataArray or Dataset with dask backend, reassembled from the outputs of the function. Notes ----- This function is designed for when ``func`` needs to manipulate a whole xarray object subset to each block. Each block is loaded into memory. In the more common case where ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``. If none of the variables in this object is backed by dask arrays, calling this function is equivalent to calling ``func(obj, *args, **kwargs)``. See Also -------- :func:`dask.array.map_blocks ` :func:`xarray.apply_ufunc ` :func:`xarray.Dataset.map_blocks ` :doc:`xarray-tutorial:advanced/map_blocks/map_blocks` Advanced Tutorial on map_blocks with dask Examples -------- Calculate an anomaly from climatology using ``.groupby()``. Using ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``, its indices, and its methods like ``.groupby()``. >>> def calculate_anomaly(da, groupby_type="time.month"): ... gb = da.groupby(groupby_type) ... clim = gb.mean(dim="time") ... return gb - clim ... >>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True) >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"]) >>> np.random.seed(123) >>> array = xr.DataArray( ... np.random.rand(len(time)), ... dims=["time"], ... coords={"time": time, "month": month}, ... ).chunk() >>> array.map_blocks(calculate_anomaly, template=array).compute() Size: 192B array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862, 0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714, -0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 , 0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108, 0.07673453, 0.22865714, 0.19063865, -0.0590131 ]) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: >>> array.map_blocks( ... calculate_anomaly, kwargs={"groupby_type": "time.year"}, template=array ... 
) # doctest: +ELLIPSIS Size: 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B dask.array """ from xarray.core.parallel import map_blocks return map_blocks(func, self, args, kwargs, template) def polyfit( self, dim: Hashable, deg: int, skipna: bool | None = None, rcond: float | None = None, w: Hashable | Any | None = None, full: bool = False, cov: bool | Literal["unscaled"] = False, ) -> Dataset: """ Least squares polynomial fit. This replicates the behaviour of `numpy.polyfit` but differs by skipping invalid values when `skipna = True`. Parameters ---------- dim : Hashable Coordinate along which to fit the polynomials. deg : int Degree of the fitting polynomial. skipna : bool or None, optional If True, removes all invalid values before fitting each 1D slices of the array. Default is True if data is stored in a dask.array or if there is any invalid values, False otherwise. rcond : float or None, optional Relative condition number to the fit. w : Hashable, array-like or None, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, default: False Whether to return the residuals, matrix rank and singular values in addition to the coefficients. cov : bool or "unscaled", default: False Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. Returns ------- polyfit_results : Dataset A single dataset which contains: polyfit_coefficients The coefficients of the best fit. polyfit_residuals The residuals of the least-square computation (only included if `full=True`). When the matrix rank is deficient, np.nan is returned. [dim]_matrix_rank The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`) [dim]_singular_value The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`) polyfit_covariance The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`) See Also -------- numpy.polyfit numpy.polyval xarray.polyval DataArray.curvefit """ # For DataArray, use the original implementation by converting to a dataset return self._to_temp_dataset().polyfit( dim, deg, skipna=skipna, rcond=rcond, w=w, full=full, cov=cov ) def pad( self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: ( int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None ) = None, constant_values: ( float | tuple[float, float] | Mapping[Any, tuple[float, float]] | None ) = None, end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None, reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, ) -> Self: """Pad this array along one or more dimensions. .. warning:: This function is experimental and its behaviour is likely to change especially regarding padding of dimension coordinates (or IndexVariables). When using one of the modes ("edge", "reflect", "symmetric", "wrap"), coordinates will be padded with the same mode, otherwise coordinates are padded using the "constant" mode with fill_value dtypes.NA. 
Parameters ---------- pad_width : mapping of Hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : {"constant", "edge", "linear_ramp", "maximum", "mean", "median", \ "minimum", "reflect", "symmetric", "wrap"}, default: "constant" How to pad the DataArray (taken from numpy docs): - "constant": Pads with a constant value. - "edge": Pads with the edge values of array. - "linear_ramp": Pads with the linear ramp between end_value and the array edge value. - "maximum": Pads with the maximum value of all or part of the vector along each axis. - "mean": Pads with the mean value of all or part of the vector along each axis. - "median": Pads with the median value of all or part of the vector along each axis. - "minimum": Pads with the minimum value of all or part of the vector along each axis. - "reflect": Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - "symmetric": Pads with the reflection of the vector mirrored along the edge of the array. - "wrap": Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. stat_length : int, tuple or mapping of Hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique statistic lengths along each dimension. ((before, after),) yields same before and after statistic lengths for each dimension. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : scalar, tuple or mapping of Hashable to tuple, default: 0 Used in 'constant'. The values to set the padded values for each axis. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique pad constants along each dimension. ``((before, after),)`` yields same before and after constants for each dimension. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is 0. end_values : scalar, tuple or mapping of Hashable to tuple, default: 0 Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique end values along each dimension. ``((before, after),)`` yields same before and after end values for each axis. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. reflect_type : {"even", "odd", None}, optional Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. Returns ------- padded : DataArray DataArray with the padded coordinates and data. 
See Also -------- DataArray.shift, DataArray.roll, DataArray.bfill, DataArray.ffill, numpy.pad, dask.array.pad Notes ----- For ``mode="constant"`` and ``constant_values=None``, integer types will be promoted to ``float`` and padded with ``np.nan``. Padding coordinates will drop their corresponding index (if any) and will reset default indexes for dimension coordinates. Examples -------- >>> arr = xr.DataArray([5, 6, 7], coords=[("x", [0, 1, 2])]) >>> arr.pad(x=(1, 2), constant_values=0) Size: 48B array([0, 5, 6, 7, 0, 0]) Coordinates: * x (x) float64 48B nan 0.0 1.0 2.0 nan nan >>> da = xr.DataArray( ... [[0, 1, 2, 3], [10, 11, 12, 13]], ... dims=["x", "y"], ... coords={"x": [0, 1], "y": [10, 20, 30, 40], "z": ("x", [100, 200])}, ... ) >>> da.pad(x=1) Size: 128B array([[nan, nan, nan, nan], [ 0., 1., 2., 3.], [10., 11., 12., 13.], [nan, nan, nan, nan]]) Coordinates: * x (x) float64 32B nan 0.0 1.0 nan * y (y) int64 32B 10 20 30 40 z (x) float64 32B nan 100.0 200.0 nan Careful, ``constant_values`` are coerced to the data type of the array which may lead to a loss of precision: >>> da.pad(x=1, constant_values=1.23456789) Size: 128B array([[ 1, 1, 1, 1], [ 0, 1, 2, 3], [10, 11, 12, 13], [ 1, 1, 1, 1]]) Coordinates: * x (x) float64 32B nan 0.0 1.0 nan * y (y) int64 32B 10 20 30 40 z (x) float64 32B nan 100.0 200.0 nan """ ds = self._to_temp_dataset().pad( pad_width=pad_width, mode=mode, stat_length=stat_length, constant_values=constant_values, end_values=end_values, reflect_type=reflect_type, keep_attrs=keep_attrs, **pad_width_kwargs, ) return self._from_temp_dataset(ds) def idxmin( self, dim: Hashable | None = None, *, skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, ) -> Self: """Return the coordinate label of the minimum value along a dimension. Returns a new `DataArray` named after the dimension with the values of the coordinate labels along that dimension corresponding to minimum values along that dimension. In comparison to :py:meth:`~DataArray.argmin`, this returns the coordinate label while :py:meth:`~DataArray.argmin` returns the index. Parameters ---------- dim : str, optional Dimension over which to apply `idxmin`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- reduced : DataArray New `DataArray` object with `idxmin` applied to its data and the indicated dimension removed. See Also -------- Dataset.idxmin, DataArray.idxmax, DataArray.min, DataArray.argmin Examples -------- >>> array = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array.min() Size: 8B array(-2) >>> array.argmin(...) {'x': Size: 8B array(4)} >>> array.idxmin() Size: 4B array('e', dtype='>> array = xr.DataArray( ... [ ... 
[2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2}, ... ) >>> array.min(dim="x") Size: 24B array([-2., -4., 1.]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.argmin(dim="x") Size: 24B array([4, 0, 2]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.idxmin(dim="x") Size: 24B array([16., 0., 4.]) Coordinates: * y (y) int64 24B -1 0 1 """ return computation._calc_idxminmax( array=self, func=lambda x, *args, **kwargs: x.argmin(*args, **kwargs), dim=dim, skipna=skipna, fill_value=fill_value, keep_attrs=keep_attrs, ) def idxmax( self, dim: Hashable = None, *, skipna: bool | None = None, fill_value: Any = dtypes.NA, keep_attrs: bool | None = None, ) -> Self: """Return the coordinate label of the maximum value along a dimension. Returns a new `DataArray` named after the dimension with the values of the coordinate labels along that dimension corresponding to maximum values along that dimension. In comparison to :py:meth:`~DataArray.argmax`, this returns the coordinate label while :py:meth:`~DataArray.argmax` returns the index. Parameters ---------- dim : Hashable, optional Dimension over which to apply `idxmax`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- reduced : DataArray New `DataArray` object with `idxmax` applied to its data and the indicated dimension removed. See Also -------- Dataset.idxmax, DataArray.idxmin, DataArray.max, DataArray.argmax Examples -------- >>> array = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array.max() Size: 8B array(2) >>> array.argmax(...) {'x': Size: 8B array(1)} >>> array.idxmax() Size: 4B array('b', dtype='>> array = xr.DataArray( ... [ ... [2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": np.arange(5.0) ** 2}, ... ) >>> array.max(dim="x") Size: 24B array([2., 2., 1.]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.argmax(dim="x") Size: 24B array([0, 2, 2]) Coordinates: * y (y) int64 24B -1 0 1 >>> array.idxmax(dim="x") Size: 24B array([0., 4., 4.]) Coordinates: * y (y) int64 24B -1 0 1 """ return computation._calc_idxminmax( array=self, func=lambda x, *args, **kwargs: x.argmax(*args, **kwargs), dim=dim, skipna=skipna, fill_value=fill_value, keep_attrs=keep_attrs, ) def argmin( self, dim: Dims = None, *, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Self | dict[Hashable, Self]: """Index or indices of the minimum of the DataArray over one or more dimensions. 
If a sequence is passed to 'dim', then result returned as dict of DataArrays, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a DataArray with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int or None, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : DataArray or dict of DataArray See Also -------- Variable.argmin, DataArray.idxmin Examples -------- >>> array = xr.DataArray([0, 2, -1, 3], dims="x") >>> array.min() Size: 8B array(-1) >>> array.argmin(...) {'x': Size: 8B array(2)} >>> array.isel(array.argmin(...)) Size: 8B array(-1) >>> array = xr.DataArray( ... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, -5, 1], [2, 3, 1]]], ... dims=("x", "y", "z"), ... ) >>> array.min(dim="x") Size: 72B array([[ 1, 2, 1], [ 2, -5, 1], [ 2, 1, 1]]) Dimensions without coordinates: y, z >>> array.argmin(dim="x") Size: 72B array([[1, 0, 0], [1, 1, 1], [0, 0, 1]]) Dimensions without coordinates: y, z >>> array.argmin(dim=["x"]) {'x': Size: 72B array([[1, 0, 0], [1, 1, 1], [0, 0, 1]]) Dimensions without coordinates: y, z} >>> array.min(dim=("x", "z")) Size: 24B array([ 1, -5, 1]) Dimensions without coordinates: y >>> array.argmin(dim=["x", "z"]) {'x': Size: 24B array([0, 1, 0]) Dimensions without coordinates: y, 'z': Size: 24B array([2, 1, 1]) Dimensions without coordinates: y} >>> array.isel(array.argmin(dim=["x", "z"])) Size: 24B array([ 1, -5, 1]) Dimensions without coordinates: y """ result = self.variable.argmin(dim, axis, keep_attrs, skipna) if isinstance(result, dict): return {k: self._replace_maybe_drop_dims(v) for k, v in result.items()} else: return self._replace_maybe_drop_dims(result) def argmax( self, dim: Dims = None, *, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Self | dict[Hashable, Self]: """Index or indices of the maximum of the DataArray over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of DataArrays, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a DataArray with dtype int. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. 
axis : int or None, optional Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool or None, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : DataArray or dict of DataArray See Also -------- Variable.argmax, DataArray.idxmax Examples -------- >>> array = xr.DataArray([0, 2, -1, 3], dims="x") >>> array.max() Size: 8B array(3) >>> array.argmax(...) {'x': Size: 8B array(3)} >>> array.isel(array.argmax(...)) Size: 8B array(3) >>> array = xr.DataArray( ... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, 5, 1], [2, 3, 1]]], ... dims=("x", "y", "z"), ... ) >>> array.max(dim="x") Size: 72B array([[3, 3, 2], [3, 5, 2], [2, 3, 3]]) Dimensions without coordinates: y, z >>> array.argmax(dim="x") Size: 72B array([[0, 1, 1], [0, 1, 0], [0, 1, 0]]) Dimensions without coordinates: y, z >>> array.argmax(dim=["x"]) {'x': Size: 72B array([[0, 1, 1], [0, 1, 0], [0, 1, 0]]) Dimensions without coordinates: y, z} >>> array.max(dim=("x", "z")) Size: 24B array([3, 5, 3]) Dimensions without coordinates: y >>> array.argmax(dim=["x", "z"]) {'x': Size: 24B array([0, 1, 0]) Dimensions without coordinates: y, 'z': Size: 24B array([0, 1, 2]) Dimensions without coordinates: y} >>> array.isel(array.argmax(dim=["x", "z"])) Size: 24B array([3, 5, 3]) Dimensions without coordinates: y """ result = self.variable.argmax(dim, axis, keep_attrs, skipna) if isinstance(result, dict): return {k: self._replace_maybe_drop_dims(v) for k, v in result.items()} else: return self._replace_maybe_drop_dims(result) def query( self, queries: Mapping[Any, Any] | None = None, parser: QueryParserOptions = "pandas", engine: QueryEngineOptions = None, missing_dims: ErrorOptionsWithWarn = "raise", **queries_kwargs: Any, ) -> DataArray: """Return a new data array indexed along the specified dimension(s), where the indexers are given as strings containing Python expressions to be evaluated against the values in the array. Parameters ---------- queries : dict-like or None, optional A dict-like with keys matching dimensions and values given by strings containing Python expressions to be evaluated against the data variables in the dataset. The expressions will be evaluated using the pandas eval() function, and can contain any valid Python expressions but cannot contain any Python statements. parser : {"pandas", "python"}, default: "pandas" The parser to use to construct the syntax tree from the expression. The default of 'pandas' parses code slightly different than standard Python. Alternatively, you can parse an expression using the 'python' parser to retain strict Python semantics. engine : {"python", "numexpr", None}, default: None The engine used to evaluate the expression. 
Supported engines are: - None: tries to use numexpr, falls back to python - "numexpr": evaluates expressions using numexpr - "python": performs operations as if you had eval’d in top level python missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **queries_kwargs : {dim: query, ...}, optional The keyword arguments form of ``queries``. One of queries or queries_kwargs must be provided. Returns ------- obj : DataArray A new DataArray with the same contents as this dataset, indexed by the results of the appropriate queries. See Also -------- DataArray.isel Dataset.query pandas.eval Examples -------- >>> da = xr.DataArray(np.arange(0, 5, 1), dims="x", name="a") >>> da Size: 40B array([0, 1, 2, 3, 4]) Dimensions without coordinates: x >>> da.query(x="a > 2") Size: 16B array([3, 4]) Dimensions without coordinates: x """ ds = self._to_dataset_whole(shallow_copy=True) ds = ds.query( queries=queries, parser=parser, engine=engine, missing_dims=missing_dims, **queries_kwargs, ) return ds[self.name] def curvefit( self, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, skipna: bool = True, p0: Mapping[str, float | DataArray] | None = None, bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None, param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, ) -> Dataset: """ Curve fitting optimization for arbitrary functions. Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`. Parameters ---------- coords : Hashable, DataArray, or sequence of DataArray or Hashable Independent coordinate(s) over which to perform the curve fitting. Must share at least one dimension with the calling object. When fitting multi-dimensional functions, supply `coords` as a sequence in the same order as arguments in `func`. To fit along existing dimensions of the calling object, `coords` can also be specified as a str or sequence of strs. func : callable User specified function in the form `f(x, *params)` which returns a numpy array of length `len(x)`. `params` are the fittable parameters which are optimized by scipy curve_fit. `x` can also be specified as a sequence containing multiple coordinates, e.g. `f((x0, x1), *params)`. reduce_dims : str, Iterable of Hashable or None, optional Additional dimension(s) over which to aggregate while fitting. For example, calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will aggregate all lat and lon points and fit the specified function along the time dimension. skipna : bool, default: True Whether to skip missing values when fitting. Default is True. p0 : dict-like or None, optional Optional dictionary of parameter names to initial guesses passed to the `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be assigned initial values following the default scipy behavior. bounds : dict-like, optional Optional dictionary of parameter names to tuples of bounding values passed to the `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be appropriately broadcast to the coordinates of the array. 
If none or only some parameters are passed, the rest will be unbounded following the default scipy behavior. param_names : sequence of Hashable or None, optional Sequence of names for the fittable parameters of `func`. If not supplied, this will be automatically determined by arguments of `func`. `param_names` should be manually supplied when fitting a function that takes a variable number of parameters. errors : {"raise", "ignore"}, default: "raise" If 'raise', any errors from the `scipy.optimize_curve_fit` optimization will raise an exception. If 'ignore', the coefficients and covariances for the coordinates where the fitting failed will be NaN. **kwargs : optional Additional keyword arguments to passed to scipy curve_fit. Returns ------- curvefit_results : Dataset A single dataset which contains: [var]_curvefit_coefficients The coefficients of the best fit. [var]_curvefit_covariance The covariance matrix of the coefficient estimates. Examples -------- Generate some exponentially decaying data, where the decay constant and amplitude are different for different values of the coordinate ``x``: >>> rng = np.random.default_rng(seed=0) >>> def exp_decay(t, time_constant, amplitude): ... return np.exp(-t / time_constant) * amplitude ... >>> t = np.arange(11) >>> da = xr.DataArray( ... np.stack( ... [ ... exp_decay(t, 1, 0.1), ... exp_decay(t, 2, 0.2), ... exp_decay(t, 3, 0.3), ... ] ... ) ... + rng.normal(size=(3, t.size)) * 0.01, ... coords={"x": [0, 1, 2], "time": t}, ... ) >>> da Size: 264B array([[ 0.1012573 , 0.0354669 , 0.01993775, 0.00602771, -0.00352513, 0.00428975, 0.01328788, 0.009562 , -0.00700381, -0.01264187, -0.0062282 ], [ 0.20041326, 0.09805582, 0.07138797, 0.03216692, 0.01974438, 0.01097441, 0.00679441, 0.01015578, 0.01408826, 0.00093645, 0.01501222], [ 0.29334805, 0.21847449, 0.16305984, 0.11130396, 0.07164415, 0.04744543, 0.03602333, 0.03129354, 0.01074885, 0.01284436, 0.00910995]]) Coordinates: * x (x) int64 24B 0 1 2 * time (time) int64 88B 0 1 2 3 4 5 6 7 8 9 10 Fit the exponential decay function to the data along the ``time`` dimension: >>> fit_result = da.curvefit("time", exp_decay) >>> fit_result["curvefit_coefficients"].sel( ... param="time_constant" ... ) # doctest: +NUMBER Size: 24B array([1.05692036, 1.73549638, 2.94215771]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result["curvefit_coefficients"].sel(param="amplitude") Size: 24B array([0.1005489 , 0.19631423, 0.30003579]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result = da.curvefit( ... "time", ... exp_decay, ... p0={ ... "amplitude": 0.2, ... "time_constant": xr.DataArray([1, 2, 3], coords=[da.x]), ... }, ... ) >>> fit_result["curvefit_coefficients"].sel(param="time_constant") Size: 24B array([1.0569213 , 1.73550052, 2.94215733]) Coordinates: * x (x) int64 24B 0 1 2 param >> fit_result["curvefit_coefficients"].sel(param="amplitude") Size: 24B array([0.10054889, 0.1963141 , 0.3000358 ]) Coordinates: * x (x) int64 24B 0 1 2 param `_ with more curve fitting functionality. """ # For DataArray, use the original implementation by converting to a dataset first return self._to_temp_dataset().curvefit( coords, func, reduce_dims=reduce_dims, skipna=skipna, p0=p0, bounds=bounds, param_names=param_names, errors=errors, kwargs=kwargs, ) def drop_duplicates( self, dim: Hashable | Iterable[Hashable], *, keep: Literal["first", "last", False] = "first", ) -> Self: """Returns a new DataArray with duplicate dimension values removed. 
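Duplicates are identified from the values of the dimension coordinate(s) along the given dimension(s); whether the first occurrence, the last occurrence, or none of the duplicates is kept is controlled by ``keep``.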
Parameters ---------- dim : dimension label or labels Pass `...` to drop duplicates along all dimensions. keep : {"first", "last", False}, default: "first" Determines which duplicates (if any) to keep. - ``"first"`` : Drop duplicates except for the first occurrence. - ``"last"`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. Returns ------- DataArray See Also -------- Dataset.drop_duplicates Examples -------- >>> da = xr.DataArray( ... np.arange(25).reshape(5, 5), ... dims=("x", "y"), ... coords={"x": np.array([0, 0, 1, 2, 3]), "y": np.array([0, 1, 2, 3, 3])}, ... ) >>> da Size: 200B array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 40B 0 0 1 2 3 * y (y) int64 40B 0 1 2 3 3 >>> da.drop_duplicates(dim="x") Size: 160B array([[ 0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 32B 0 1 2 3 * y (y) int64 40B 0 1 2 3 3 >>> da.drop_duplicates(dim="x", keep="last") Size: 160B array([[ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) Coordinates: * x (x) int64 32B 0 1 2 3 * y (y) int64 40B 0 1 2 3 3 Drop all duplicate dimension values: >>> da.drop_duplicates(dim=...) Size: 128B array([[ 0, 1, 2, 3], [10, 11, 12, 13], [15, 16, 17, 18], [20, 21, 22, 23]]) Coordinates: * x (x) int64 32B 0 1 2 3 * y (y) int64 32B 0 1 2 3 """ deduplicated = self._to_temp_dataset().drop_duplicates(dim, keep=keep) return self._from_temp_dataset(deduplicated) def convert_calendar( self, calendar: str, dim: str = "time", align_on: str | None = None, missing: Any | None = None, use_cftime: bool | None = None, ) -> Self: """Convert the DataArray to another calendar. Only converts the individual timestamps, does not modify any data except in dropping invalid/surplus dates or inserting missing dates. If the source and target calendars are either no_leap, all_leap or a standard type, only the type of the time array is modified. When converting to a leap year from a non-leap year, the 29th of February is removed from the array. In the other direction the 29th of February will be missing in the output, unless `missing` is specified, in which case that value is inserted. For conversions involving `360_day` calendars, see Notes. This method is safe to use with sub-daily data as it doesn't touch the time part of the timestamps. Parameters --------- calendar : str The target calendar name. dim : str Name of the time coordinate. align_on : {None, 'date', 'year'} Must be specified when either source or target is a `360_day` calendar, ignored otherwise. See Notes. missing : Optional[any] By default, i.e. if the value is None, this method will simply attempt to convert the dates in the source calendar to the same dates in the target calendar, and drop any of those that are not possible to represent. If a value is provided, a new time coordinate will be created in the target calendar with the same frequency as the original time coordinate; for any dates that are not present in the source, the data will be filled with this value. Note that using this mode requires that the source data have an inferable frequency; for more information see :py:func:`xarray.infer_freq`. For certain frequency, source, and target calendar combinations, this could result in many missing values, see notes. 
use_cftime : boolean, optional Whether to use cftime objects in the output, only used if `calendar` is one of {"proleptic_gregorian", "gregorian" or "standard"}. If True, the new time axis uses cftime objects. If None (default), it uses :py:class:`numpy.datetime64` values if the date range permits it, and :py:class:`cftime.datetime` objects if not. If False, it uses :py:class:`numpy.datetime64` or fails. Returns ------- DataArray Copy of the dataarray with the time coordinate converted to the target calendar. If 'missing' was None (default), invalid dates in the new calendar are dropped, but missing dates are not inserted. If `missing` was given, the new data is reindexed to have a time axis with the same frequency as the source, but in the new calendar; any missing datapoints are filled with `missing`. Notes ----- Passing a value to `missing` is only usable if the source's time coordinate has an inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate if the target coordinate, generated from this frequency, has dates equivalent to the source. It is usually **not** appropriate to use this mode with: - Period-end frequencies : 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS', 'QS' and 'MS' - Sub-monthly frequencies that do not divide a day evenly : 'W', 'nD' where `n != 1`, or 'mH' where `24 % m != 0`. If one of the source or target calendars is `"360_day"`, `align_on` must be specified and two options are offered. - "year" The dates are translated according to their relative position in the year, ignoring their original month and day information, meaning that the missing/surplus days are added/removed at regular intervals. From a `360_day` to a standard calendar, the output will be missing the following dates (day of year in parentheses): To a leap year: January 31st (31), March 31st (91), June 1st (153), July 31st (213), September 31st (275) and November 30th (335). To a non-leap year: February 6th (36), April 19th (109), July 2nd (183), September 12th (255), November 25th (329). From a standard calendar to a `"360_day"`, the following dates in the source array will be dropped: From a leap year: January 31st (31), April 1st (92), June 1st (153), August 1st (214), September 31st (275), December 1st (336) From a non-leap year: February 6th (37), April 20th (110), July 2nd (183), September 13th (256), November 25th (329) This option is best used on daily and subdaily data. - "date" The month/day information is conserved and invalid dates are dropped from the output. This means that when converting from a `"360_day"` to a standard calendar, all 31sts (Jan, March, May, July, August, October and December) will be missing as there are no equivalent dates in the `"360_day"` calendar and the 29th (on non-leap years) and 30th of February will be dropped as there are no equivalent dates in a standard calendar. This option is best used with data on a frequency coarser than daily. """ return convert_calendar( self, calendar, dim=dim, align_on=align_on, missing=missing, use_cftime=use_cftime, ) def interp_calendar( self, target: pd.DatetimeIndex | CFTimeIndex | DataArray, dim: str = "time", ) -> Self: """Interpolates the DataArray to another calendar based on a decimal year measure. Each timestamp in `source` and `target` is first converted to its decimal year equivalent, then `source` is interpolated on the target coordinate. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. 
For example "2000-03-01 12:00" is 2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar. This method should only be used when the time (HH:MM:SS) information of time coordinate is not important. Parameters ---------- target: DataArray or DatetimeIndex or CFTimeIndex The target time coordinate of a valid dtype (np.datetime64 or cftime objects) dim : str The time coordinate name. Return ------ DataArray The source interpolated on the decimal years of target, """ return interp_calendar(self, target, dim=dim) @_deprecate_positional_args("v2024.07.0") def groupby( self, group: GroupInput = None, *, squeeze: Literal[False] = False, restore_coord_dims: bool = False, eagerly_compute_group: Literal[False] | None = None, **groupers: Grouper, ) -> DataArrayGroupBy: """Returns a DataArrayGroupBy object for performing grouped operations. Parameters ---------- group : str or DataArray or IndexVariable or sequence of hashable or mapping of hashable to Grouper Array whose unique values should be used to group this array. If a Hashable, must be the name of a coordinate contained in this dataarray. If a dictionary, must map an existing variable name to a :py:class:`Grouper` instance. squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. eagerly_compute_group: bool, optional This argument is deprecated. **groupers : Mapping of str to Grouper or Resampler Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object. One of ``group`` or ``groupers`` must be provided. Only a single ``grouper`` is allowed at present. Returns ------- grouped : DataArrayGroupBy A `DataArrayGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. Examples -------- Calculate daily anomalies for daily data: >>> da = xr.DataArray( ... np.linspace(0, 1826, num=1827), ... coords=[pd.date_range("2000-01-01", "2004-12-31", freq="D")], ... dims="time", ... ) >>> da Size: 15kB array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03], shape=(1827,)) Coordinates: * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 >>> da.groupby("time.dayofyear") - da.groupby("time.dayofyear").mean("time") Size: 15kB array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5], shape=(1827,)) Coordinates: * time (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31 dayofyear (time) int64 15kB 1 2 3 4 5 6 7 8 ... 360 361 362 363 364 365 366 Use a ``Grouper`` object to be more explicit >>> da.coords["dayofyear"] = da.time.dt.dayofyear >>> da.groupby(dayofyear=xr.groupers.UniqueGrouper()).mean() Size: 3kB array([ 730.8, 731.8, 732.8, ..., 1093.8, 1094.8, 1095.5]) Coordinates: * dayofyear (dayofyear) int64 3kB 1 2 3 4 5 6 7 ... 361 362 363 364 365 366 >>> da = xr.DataArray( ... data=np.arange(12).reshape((4, 3)), ... dims=("x", "y"), ... coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ... 
) Grouping by a single variable is easy >>> da.groupby("letters") Execute a reduction >>> da.groupby("letters").sum() Size: 48B array([[ 9, 11, 13], [ 9, 11, 13]]) Coordinates: * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y Grouping by multiple variables >>> da.groupby(["letters", "x"]) Use Grouper objects to express more complicated GroupBy operations >>> from xarray.groupers import BinGrouper, UniqueGrouper >>> >>> da.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum() Size: 96B array([[[ 0., 1., 2.], [nan, nan, nan]], [[nan, nan, nan], [ 3., 4., 5.]]]) Coordinates: * x_bins (x_bins) interval[int64, right] 32B (5, 15] (15, 25] * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns` Tutorial on :py:func:`~xarray.DataArray.Groupby` for windowed computation :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray` Tutorial on :py:func:`~xarray.DataArray.Groupby` demonstrating reductions, transformation and comparison with :py:func:`~xarray.DataArray.resample` :external:py:meth:`pandas.DataFrame.groupby ` :func:`DataArray.groupby_bins ` :func:`Dataset.groupby ` :func:`core.groupby.DataArrayGroupBy ` :func:`DataArray.coarsen ` :func:`Dataset.resample ` :func:`DataArray.resample ` """ from xarray.core.groupby import ( DataArrayGroupBy, _parse_group_and_groupers, _validate_groupby_squeeze, ) _validate_groupby_squeeze(squeeze) rgroupers = _parse_group_and_groupers( self, group, groupers, eagerly_compute_group=eagerly_compute_group ) return DataArrayGroupBy(self, rgroupers, restore_coord_dims=restore_coord_dims) @_deprecate_positional_args("v2024.07.0") def groupby_bins( self, group: Hashable | DataArray | IndexVariable, bins: Bins, right: bool = True, labels: ArrayLike | Literal[False] | None = None, precision: int = 3, include_lowest: bool = False, squeeze: Literal[False] = False, restore_coord_dims: bool = False, duplicates: Literal["raise", "drop"] = "raise", eagerly_compute_group: Literal[False] | None = None, ) -> DataArrayGroupBy: """Returns a DataArrayGroupBy object for performing grouped operations. Rather than using all unique values of `group`, the values are discretized first by applying `pandas.cut` [1]_ to `group`. Parameters ---------- group : Hashable, DataArray or IndexVariable Array whose binned values should be used to group this array. If a Hashable, must be the name of a coordinate contained in this dataarray. bins : int or array-like If bins is an int, it defines the number of equal-width bins in the range of x. However, in this case, the range of x is extended by .1% on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. right : bool, default: True Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. labels : array-like, False or None, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. precision : int, default: 3 The precision at which to store and display the bins labels. include_lowest : bool, default: False Whether the first interval should be left-inclusive or not. 
squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. eagerly_compute_group: bool, optional This argument is deprecated. Returns ------- grouped : DataArrayGroupBy A `DataArrayGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. The name of the group has the added suffix `_bins` in order to distinguish it from the original variable. See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. DataArray.groupby Dataset.groupby_bins core.groupby.DataArrayGroupBy pandas.DataFrame.groupby References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html """ from xarray.core.groupby import ( DataArrayGroupBy, ResolvedGrouper, _validate_groupby_squeeze, ) from xarray.groupers import BinGrouper _validate_groupby_squeeze(squeeze) grouper = BinGrouper( bins=bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, ) rgrouper = ResolvedGrouper( grouper, group, self, eagerly_compute_group=eagerly_compute_group ) return DataArrayGroupBy( self, (rgrouper,), restore_coord_dims=restore_coord_dims, ) def weighted(self, weights: DataArray) -> DataArrayWeighted: """ Weighted DataArray operations. Parameters ---------- weights : DataArray An array of weights associated with the values in this Dataset. Each value in the data contributes to the reduction operation according to its associated weight. Notes ----- ``weights`` must be a DataArray and cannot contain missing values. Missing values can be replaced by ``weights.fillna(0)``. Returns ------- computation.weighted.DataArrayWeighted See Also -------- :func:`Dataset.weighted ` :ref:`compute.weighted` User guide on weighted array reduction using :py:func:`~xarray.DataArray.weighted` :doc:`xarray-tutorial:fundamentals/03.4_weighted` Tutorial on Weighted Reduction using :py:func:`~xarray.DataArray.weighted` """ from xarray.computation.weighted import DataArrayWeighted return DataArrayWeighted(self, weights) def rolling( self, dim: Mapping[Any, int] | None = None, min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, **window_kwargs: int, ) -> DataArrayRolling: """ Rolling window object for DataArrays. Parameters ---------- dim : dict, optional Mapping from the dimension name to create the rolling iterator along (e.g. `time`) to its moving window size. min_periods : int or None, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or Mapping to int, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. **window_kwargs : optional The keyword arguments form of ``dim``. One of dim or window_kwargs must be provided. Returns ------- computation.rolling.DataArrayRolling Examples -------- Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... 
) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.rolling(time=3, center=True).mean() Size: 96B array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 Remove the NaNs using ``dropna()``: >>> da.rolling(time=3, center=True).mean().dropna("time") Size: 80B array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]) Coordinates: * time (time) datetime64[ns] 80B 2000-01-15 2000-02-15 ... 2000-10-15 See Also -------- DataArray.cumulative Dataset.rolling computation.rolling.DataArrayRolling """ from xarray.computation.rolling import DataArrayRolling dim = either_dict_or_kwargs(dim, window_kwargs, "rolling") return DataArrayRolling(self, dim, min_periods=min_periods, center=center) def cumulative( self, dim: str | Iterable[Hashable], min_periods: int = 1, ) -> DataArrayRolling: """ Accumulating object for DataArrays. Parameters ---------- dims : iterable of hashable The name(s) of the dimensions to create the cumulative window along min_periods : int, default: 1 Minimum number of observations in window required to have a value (otherwise result is NA). The default is 1 (note this is different from ``Rolling``, whose default is the size of the window). Returns ------- computation.rolling.DataArrayRolling Examples -------- Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... ) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.cumulative("time").sum() Size: 96B array([ 0., 1., 3., 6., 10., 15., 21., 28., 36., 45., 55., 66.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 See Also -------- DataArray.rolling Dataset.cumulative computation.rolling.DataArrayRolling """ from xarray.computation.rolling import DataArrayRolling # Could we abstract this "normalize and check 'dim'" logic? It's currently shared # with the same method in Dataset. if isinstance(dim, str): if dim not in self.dims: raise ValueError( f"Dimension {dim} not found in data dimensions: {self.dims}" ) dim = {dim: self.sizes[dim]} else: missing_dims = set(dim) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {missing_dims} not found in data dimensions: {self.dims}" ) dim = {d: self.sizes[d] for d in dim} return DataArrayRolling(self, dim, min_periods=min_periods, center=False) def coarsen( self, dim: Mapping[Any, int] | None = None, boundary: CoarsenBoundaryOptions = "exact", side: SideOptions | Mapping[Any, SideOptions] = "left", coord_func: str | Callable | Mapping[Any, str | Callable] = "mean", **window_kwargs: int, ) -> DataArrayCoarsen: """ Coarsen object for DataArrays. Parameters ---------- dim : mapping of hashable to int, optional Mapping from the dimension name to the window size. boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. 
side : {"left", "right"} or mapping of str to {"left", "right"}, default: "left" coord_func : str or mapping of hashable to str, default: "mean" function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). Returns ------- computation.rolling.DataArrayCoarsen Examples -------- Coarsen the long time series by averaging over every three days. >>> da = xr.DataArray( ... np.linspace(0, 364, num=364), ... dims="time", ... coords={"time": pd.date_range("1999-12-15", periods=364)}, ... ) >>> da # +doctest: ELLIPSIS Size: 3kB array([ 0. , 1.00275482, 2.00550964, 3.00826446, 4.01101928, 5.0137741 , 6.01652893, 7.01928375, 8.02203857, 9.02479339, 10.02754821, 11.03030303, 12.03305785, 13.03581267, 14.03856749, 15.04132231, 16.04407713, 17.04683196, 18.04958678, 19.0523416 , 20.05509642, 21.05785124, 22.06060606, 23.06336088, 24.0661157 , 25.06887052, 26.07162534, 27.07438017, 28.07713499, 29.07988981, 30.08264463, 31.08539945, 32.08815427, 33.09090909, 34.09366391, 35.09641873, 36.09917355, 37.10192837, 38.1046832 , 39.10743802, 40.11019284, 41.11294766, 42.11570248, 43.1184573 , 44.12121212, 45.12396694, 46.12672176, 47.12947658, 48.1322314 , 49.13498623, 50.13774105, 51.14049587, 52.14325069, 53.14600551, 54.14876033, 55.15151515, 56.15426997, 57.15702479, 58.15977961, 59.16253444, 60.16528926, 61.16804408, 62.1707989 , 63.17355372, 64.17630854, 65.17906336, 66.18181818, 67.184573 , 68.18732782, 69.19008264, 70.19283747, 71.19559229, 72.19834711, 73.20110193, 74.20385675, 75.20661157, 76.20936639, 77.21212121, 78.21487603, 79.21763085, ... 284.78236915, 285.78512397, 286.78787879, 287.79063361, 288.79338843, 289.79614325, 290.79889807, 291.80165289, 292.80440771, 293.80716253, 294.80991736, 295.81267218, 296.815427 , 297.81818182, 298.82093664, 299.82369146, 300.82644628, 301.8292011 , 302.83195592, 303.83471074, 304.83746556, 305.84022039, 306.84297521, 307.84573003, 308.84848485, 309.85123967, 310.85399449, 311.85674931, 312.85950413, 313.86225895, 314.86501377, 315.8677686 , 316.87052342, 317.87327824, 318.87603306, 319.87878788, 320.8815427 , 321.88429752, 322.88705234, 323.88980716, 324.89256198, 325.8953168 , 326.89807163, 327.90082645, 328.90358127, 329.90633609, 330.90909091, 331.91184573, 332.91460055, 333.91735537, 334.92011019, 335.92286501, 336.92561983, 337.92837466, 338.93112948, 339.9338843 , 340.93663912, 341.93939394, 342.94214876, 343.94490358, 344.9476584 , 345.95041322, 346.95316804, 347.95592287, 348.95867769, 349.96143251, 350.96418733, 351.96694215, 352.96969697, 353.97245179, 354.97520661, 355.97796143, 356.98071625, 357.98347107, 358.9862259 , 359.98898072, 360.99173554, 361.99449036, 362.99724518, 364. ]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 
2000-12-12 >>> da.coarsen(time=3, boundary="trim").mean() # +doctest: ELLIPSIS Size: 968B array([ 1.00275482, 4.01101928, 7.01928375, 10.02754821, 13.03581267, 16.04407713, 19.0523416 , 22.06060606, 25.06887052, 28.07713499, 31.08539945, 34.09366391, 37.10192837, 40.11019284, 43.1184573 , 46.12672176, 49.13498623, 52.14325069, 55.15151515, 58.15977961, 61.16804408, 64.17630854, 67.184573 , 70.19283747, 73.20110193, 76.20936639, 79.21763085, 82.22589532, 85.23415978, 88.24242424, 91.25068871, 94.25895317, 97.26721763, 100.27548209, 103.28374656, 106.29201102, 109.30027548, 112.30853994, 115.31680441, 118.32506887, 121.33333333, 124.3415978 , 127.34986226, 130.35812672, 133.36639118, 136.37465565, 139.38292011, 142.39118457, 145.39944904, 148.4077135 , 151.41597796, 154.42424242, 157.43250689, 160.44077135, 163.44903581, 166.45730028, 169.46556474, 172.4738292 , 175.48209366, 178.49035813, 181.49862259, 184.50688705, 187.51515152, 190.52341598, 193.53168044, 196.5399449 , 199.54820937, 202.55647383, 205.56473829, 208.57300275, 211.58126722, 214.58953168, 217.59779614, 220.60606061, 223.61432507, 226.62258953, 229.63085399, 232.63911846, 235.64738292, 238.65564738, 241.66391185, 244.67217631, 247.68044077, 250.68870523, 253.6969697 , 256.70523416, 259.71349862, 262.72176309, 265.73002755, 268.73829201, 271.74655647, 274.75482094, 277.7630854 , 280.77134986, 283.77961433, 286.78787879, 289.79614325, 292.80440771, 295.81267218, 298.82093664, 301.8292011 , 304.83746556, 307.84573003, 310.85399449, 313.86225895, 316.87052342, 319.87878788, 322.88705234, 325.8953168 , 328.90358127, 331.91184573, 334.92011019, 337.92837466, 340.93663912, 343.94490358, 346.95316804, 349.96143251, 352.96969697, 355.97796143, 358.9862259 , 361.99449036]) Coordinates: * time (time) datetime64[ns] 968B 1999-12-16 1999-12-19 ... 2000-12-10 >>> See Also -------- :class:`computation.rolling.DataArrayCoarsen ` :func:`Dataset.coarsen ` :ref:`reshape.coarsen` User guide describing :py:func:`~xarray.DataArray.coarsen` :ref:`compute.coarsen` User guide on block aggregation :py:func:`~xarray.DataArray.coarsen` :doc:`xarray-tutorial:fundamentals/03.3_windowed` Tutorial on windowed computation using :py:func:`~xarray.DataArray.coarsen` """ from xarray.computation.rolling import DataArrayCoarsen dim = either_dict_or_kwargs(dim, window_kwargs, "coarsen") return DataArrayCoarsen( self, dim, boundary=boundary, side=side, coord_func=coord_func, ) @_deprecate_positional_args("v2024.07.0") def resample( self, indexer: Mapping[Hashable, ResampleCompatible | Resampler] | None = None, *, skipna: bool | None = None, closed: SideOptions | None = None, label: SideOptions | None = None, offset: pd.Timedelta | datetime.timedelta | str | None = None, origin: str | DatetimeLike = "start_day", restore_coord_dims: bool | None = None, **indexer_kwargs: ResampleCompatible | Resampler, ) -> DataArrayResample: """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. The resampled dimension must be a datetime-like coordinate. If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : Mapping of Hashable to str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler, optional Mapping from the dimension name to resample frequency [1]_. The dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. 
closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. restore_coord_dims : bool, optional If True, also restore the dimension order of multi-dimensional coordinates. **indexer_kwargs : str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : core.resample.DataArrayResample This object resampled. Examples -------- Downsample monthly time-series data to seasonal data: >>> da = xr.DataArray( ... np.linspace(0, 11, num=12), ... coords=[ ... pd.date_range( ... "1999-12-15", ... periods=12, ... freq=pd.DateOffset(months=1), ... ) ... ], ... dims="time", ... ) >>> da Size: 96B array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]) Coordinates: * time (time) datetime64[ns] 96B 1999-12-15 2000-01-15 ... 2000-11-15 >>> da.resample(time="QS-DEC").mean() Size: 32B array([ 1., 4., 7., 10.]) Coordinates: * time (time) datetime64[ns] 32B 1999-12-01 2000-03-01 ... 2000-09-01 Upsample monthly time-series data to daily data: >>> da.resample(time="1D").interpolate("linear") # +doctest: ELLIPSIS Size: 3kB array([ 0. , 0.03225806, 0.06451613, 0.09677419, 0.12903226, 0.16129032, 0.19354839, 0.22580645, 0.25806452, 0.29032258, 0.32258065, 0.35483871, 0.38709677, 0.41935484, 0.4516129 , 0.48387097, 0.51612903, 0.5483871 , 0.58064516, 0.61290323, 0.64516129, 0.67741935, 0.70967742, 0.74193548, 0.77419355, 0.80645161, 0.83870968, 0.87096774, 0.90322581, 0.93548387, 0.96774194, 1. , ..., 9. , 9.03333333, 9.06666667, 9.1 , 9.13333333, 9.16666667, 9.2 , 9.23333333, 9.26666667, 9.3 , 9.33333333, 9.36666667, 9.4 , 9.43333333, 9.46666667, 9.5 , 9.53333333, 9.56666667, 9.6 , 9.63333333, 9.66666667, 9.7 , 9.73333333, 9.76666667, 9.8 , 9.83333333, 9.86666667, 9.9 , 9.93333333, 9.96666667, 10. , 10.03225806, 10.06451613, 10.09677419, 10.12903226, 10.16129032, 10.19354839, 10.22580645, 10.25806452, 10.29032258, 10.32258065, 10.35483871, 10.38709677, 10.41935484, 10.4516129 , 10.48387097, 10.51612903, 10.5483871 , 10.58064516, 10.61290323, 10.64516129, 10.67741935, 10.70967742, 10.74193548, 10.77419355, 10.80645161, 10.83870968, 10.87096774, 10.90322581, 10.93548387, 10.96774194, 11. ]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 
2000-11-15 Limit scope of upsampling method >>> da.resample(time="1D").nearest(tolerance="1D") Size: 3kB array([ 0., 0., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 1., 1., 1., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 2., 2., 2., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 3., 3., 3., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 4., 4., 4., nan, nan, nan, nan, nan, ..., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 10., 10., 10., nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 11., 11.]) Coordinates: * time (time) datetime64[ns] 3kB 1999-12-15 1999-12-16 ... 2000-11-15 See Also -------- Dataset.resample pandas.Series.resample pandas.DataFrame.resample References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases """ from xarray.core.resample import DataArrayResample return self._resample( resample_cls=DataArrayResample, indexer=indexer, skipna=skipna, closed=closed, label=label, offset=offset, origin=origin, restore_coord_dims=restore_coord_dims, **indexer_kwargs, ) def to_dask_dataframe( self, dim_order: Sequence[Hashable] | None = None, set_index: bool = False, ) -> DaskDataFrame: """Convert this array into a dask.dataframe.DataFrame. Parameters ---------- dim_order : Sequence of Hashable or None , optional Hierarchical dimension order for the resulting dataframe. Array content is transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dask dataframe. set_index : bool, default: False If set_index=True, the dask DataFrame is indexed by this dataset's coordinate. Since dask DataFrames do not support multi-indexes, set_index only works if the dataset only contains one dimension. Returns ------- dask.dataframe.DataFrame Examples -------- >>> da = xr.DataArray( ... np.arange(4 * 2 * 2).reshape(4, 2, 2), ... dims=("time", "lat", "lon"), ... coords={ ... "time": np.arange(4), ... "lat": [-30, -20], ... "lon": [120, 130], ... }, ... name="eg_dataarray", ... attrs={"units": "Celsius", "description": "Random temperature data"}, ... ) >>> da.to_dask_dataframe(["lat", "lon", "time"]).compute() lat lon time eg_dataarray 0 -30 120 0 0 1 -30 120 1 4 2 -30 120 2 8 3 -30 120 3 12 4 -30 130 0 1 5 -30 130 1 5 6 -30 130 2 9 7 -30 130 3 13 8 -20 120 0 2 9 -20 120 1 6 10 -20 120 2 10 11 -20 120 3 14 12 -20 130 0 3 13 -20 130 1 7 14 -20 130 2 11 15 -20 130 3 15 """ if self.name is None: raise ValueError( "Cannot convert an unnamed DataArray to a " "dask dataframe : use the ``.rename`` method to assign a name." 
) name = self.name ds = self._to_dataset_whole(name, shallow_copy=False) return ds.to_dask_dataframe(dim_order, set_index) # this needs to be at the end, or mypy will confuse with `str` # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names str = utils.UncachedAccessor(StringAccessor["DataArray"]) def drop_attrs(self, *, deep: bool = True) -> Self: """ Removes all attributes from the DataArray. Parameters ---------- deep : bool, default True Removes attributes from coordinates. Returns ------- DataArray """ if not deep: return self._replace(attrs={}) else: return ( self._to_temp_dataset() .drop_attrs(deep=deep) .pipe(self._from_temp_dataset) ) xarray-2025.09.0/xarray/core/dataset.py000066400000000000000000014300451505620616400176100ustar00rootroot00000000000000from __future__ import annotations import asyncio import copy import datetime import io import math import sys import warnings from collections import defaultdict from collections.abc import ( Callable, Collection, Hashable, Iterable, Iterator, Mapping, MutableMapping, Sequence, ) from functools import partial from html import escape from numbers import Number from operator import methodcaller from os import PathLike from types import EllipsisType from typing import IO, TYPE_CHECKING, Any, Literal, cast, overload import numpy as np import pandas as pd from xarray.coding.calendar_ops import convert_calendar, interp_calendar from xarray.coding.cftimeindex import CFTimeIndex, _parse_array_of_cftime_strings from xarray.compat.array_api_compat import to_like_array from xarray.computation import ops from xarray.computation.arithmetic import DatasetArithmetic from xarray.core import dtypes as xrdtypes from xarray.core import duck_array_ops, formatting, formatting_html, utils from xarray.core._aggregations import DatasetAggregations from xarray.core.common import ( DataWithCoords, _contains_datetime_like_objects, get_chunksizes, ) from xarray.core.coordinates import ( Coordinates, DatasetCoordinates, assert_coordinate_consistent, ) from xarray.core.dataset_utils import _get_virtual_variable, _LocIndexer from xarray.core.dataset_variables import DataVariables from xarray.core.duck_array_ops import datetime_to_numeric from xarray.core.indexes import ( Index, Indexes, PandasIndex, PandasMultiIndex, assert_no_index_corrupted, create_default_index_implicit, filter_indexes_from_coords, isel_indexes, remove_unused_levels_categories, roll_indexes, ) from xarray.core.indexing import is_fancy_indexer, map_index_queries from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ( Bins, NetcdfWriteModes, QuantileMethods, Self, T_ChunkDim, T_ChunksFreq, T_DataArray, T_DataArrayOrSet, ZarrWriteModes, ) from xarray.core.utils import ( Default, FilteredMapping, Frozen, FrozenMappingWarningOnValuesAccess, OrderedSet, _default, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, emit_user_level_warning, infix_dims, is_allowed_extension_array, is_dict_like, is_duck_array, is_duck_dask_array, is_scalar, maybe_wrap_array, parse_dims_as_set, ) from xarray.core.variable import ( UNSUPPORTED_EXTENSION_ARRAY_TYPES, IndexVariable, Variable, as_variable, broadcast_variables, calculate_dimensions, ) from xarray.namedarray.parallelcompat import get_chunked_array_type, guess_chunkmanager from xarray.namedarray.pycompat import array_type, is_chunked_array, to_numpy from xarray.plot.accessor import DatasetPlotAccessor from xarray.structure import alignment from xarray.structure.alignment 
import ( _broadcast_helper, _get_broadcast_dims_map_common_coords, align, ) from xarray.structure.chunks import _maybe_chunk, unify_chunks from xarray.structure.merge import ( dataset_merge_method, dataset_update_method, merge_coordinates_without_align, merge_data_and_coords, ) from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, _deprecate_positional_args, deprecate_dims, ) if TYPE_CHECKING: from dask.dataframe import DataFrame as DaskDataFrame from dask.delayed import Delayed from numpy.typing import ArrayLike from xarray.backends import AbstractDataStore, ZarrStore from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes from xarray.computation.rolling import DatasetCoarsen, DatasetRolling from xarray.computation.weighted import DatasetWeighted from xarray.core.dataarray import DataArray from xarray.core.groupby import DatasetGroupBy from xarray.core.resample import DatasetResample from xarray.core.types import ( CFCalendar, CoarsenBoundaryOptions, CombineAttrsOptions, CompatOptions, DataVars, DatetimeLike, DatetimeUnitOptions, Dims, DsCompatible, ErrorOptions, ErrorOptionsWithWarn, GroupIndices, GroupInput, InterpOptions, JoinOptions, PadModeOptions, PadReflectOptions, QueryEngineOptions, QueryParserOptions, ReindexMethodOptions, ResampleCompatible, SideOptions, T_ChunkDimFreq, T_Chunks, T_DatasetPadConstantValues, T_Xarray, ) from xarray.groupers import Grouper, Resampler from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint from xarray.structure.merge import CoercibleMapping, CoercibleValue # list of attributes of pd.DatetimeIndex that are ndarrays of time info _DATETIMEINDEX_COMPONENTS = [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "date", "time", "dayofyear", "weekofyear", "dayofweek", "quarter", ] class Dataset( DataWithCoords, DatasetAggregations, DatasetArithmetic, Mapping[Hashable, "DataArray"], ): """A multi-dimensional, in memory, array database. A dataset resembles an in-memory representation of a NetCDF file, and consists of variables, coordinates and attributes which together form a self describing dataset. Dataset implements the mapping interface with keys given by variable names and values given by DataArray objects for each variable name. By default, pandas indexes are created for one dimensional variables with name equal to their dimension (i.e., :term:`Dimension coordinate`) so those variables can be readily used as coordinates for label based indexing. When a :py:class:`~xarray.Coordinates` object is passed to ``coords``, any existing index(es) built from those coordinates will be added to the Dataset. To load data from a file or file-like object, use the `open_dataset` function. Parameters ---------- data_vars : dict-like, optional A mapping from variable names to :py:class:`~xarray.DataArray` objects, :py:class:`~xarray.Variable` objects or to tuples of the form ``(dims, data[, attrs])`` which can be used as arguments to create a new ``Variable``. Each dimension must have the same length in all variables in which it appears. The following notations are accepted: - mapping {var name: DataArray} - mapping {var name: Variable} - mapping {var name: (dimension name, array-like)} - mapping {var name: (tuple of dimension names, array-like)} - mapping {dimension name: array-like} (if array-like is not a scalar it will be automatically moved to coords, see below) Each dimension must have the same length in all variables in which it appears. 
coords : :py:class:`~xarray.Coordinates` or dict-like, optional A :py:class:`~xarray.Coordinates` object or another mapping in a form similar to the `data_vars` argument, except that each item is saved on the dataset as a "coordinate". These variables have an associated meaning: they describe constant/fixed/independent quantities, unlike the varying/measured/dependent quantities that belong in `variables`. The following notations are accepted for arbitrary mappings: - mapping {coord name: DataArray} - mapping {coord name: Variable} - mapping {coord name: (dimension name, array-like)} - mapping {coord name: (tuple of dimension names, array-like)} - mapping {dimension name: array-like} (the dimension name is implicitly set to be the same as the coord name) The last notation implies either that the coordinate value is a scalar or that it is a 1-dimensional array and the coord name is the same as the dimension name (i.e., a :term:`Dimension coordinate`). In the latter case, the 1-dimensional array will be assumed to give index values along the dimension with the same name. Alternatively, a :py:class:`~xarray.Coordinates` object may be used in order to explicitly pass indexes (e.g., a multi-index or any custom Xarray index) or to bypass the creation of a default index for any :term:`Dimension coordinate` included in that object. attrs : dict-like, optional Global attributes to save on this dataset. (see FAQ, :ref:`approach to metadata`) Examples -------- In this example dataset, we will represent measurements of the temperature and precipitation that were made under various conditions: * the measurements were made on four different days; * they were made at two separate locations, which we will represent using their latitude and longitude; and * they were made using three instruments developed by three different manufacturers, which we will refer to using the strings `'manufac1'`, `'manufac2'`, and `'manufac3'`. >>> np.random.seed(0) >>> temperature = 15 + 8 * np.random.randn(2, 3, 4) >>> precipitation = 10 * np.random.rand(2, 3, 4) >>> lon = [-99.83, -99.32] >>> lat = [42.25, 42.21] >>> instruments = ["manufac1", "manufac2", "manufac3"] >>> time = pd.date_range("2014-09-06", periods=4) >>> reference_time = pd.Timestamp("2014-09-05") Here, we initialize the dataset with multiple dimensions. We use the string `"loc"` to represent the location dimension of the data, the string `"instrument"` to represent the instrument manufacturer dimension, and the string `"time"` for the time dimension. >>> ds = xr.Dataset( ... data_vars=dict( ... temperature=(["loc", "instrument", "time"], temperature), ... precipitation=(["loc", "instrument", "time"], precipitation), ... ), ... coords=dict( ... lon=("loc", lon), ... lat=("loc", lat), ... instrument=instruments, ... time=time, ... reference_time=reference_time, ... ), ... attrs=dict(description="Weather related data."), ... 
) >>> ds Size: 552B Dimensions: (loc: 2, instrument: 3, time: 4) Coordinates: lon (loc) float64 16B -99.83 -99.32 lat (loc) float64 16B 42.25 42.21 * instrument (instrument) >> ds.isel(ds.temperature.argmin(...)) Size: 80B Dimensions: () Coordinates: lon float64 8B -99.32 lat float64 8B 42.21 instrument None: if data_vars is None: data_vars = {} if coords is None: coords = {} both_data_and_coords = set(data_vars) & set(coords) if both_data_and_coords: raise ValueError( f"variables {both_data_and_coords!r} are found in both data_vars and coords" ) if isinstance(coords, Dataset): coords = coords._variables variables, coord_names, dims, indexes, _ = merge_data_and_coords( data_vars, coords ) self._attrs = dict(attrs) if attrs else None self._close = None self._encoding = None self._variables = variables self._coord_names = coord_names self._dims = dims self._indexes = indexes # TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. Mapping # related to https://github.com/python/mypy/issues/9319? def __eq__(self, other: DsCompatible) -> Self: # type: ignore[override] return super().__eq__(other) @classmethod def load_store(cls, store, decoder=None) -> Self: """Create a new dataset from the contents of a backends.*DataStore object """ variables, attributes = store.load() if decoder: variables, attributes = decoder(variables, attributes) obj = cls(variables, attrs=attributes) obj.set_close(store.close) return obj @property def variables(self) -> Frozen[Hashable, Variable]: """Low level interface to Dataset contents as dict of Variable objects. This ordered dictionary is frozen to prevent mutation that could violate Dataset invariants. It contains all variable objects constituting the Dataset, including both data variables and coordinates. """ return Frozen(self._variables) @property def attrs(self) -> dict[Any, Any]: """Dictionary of global attributes on this dataset""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self._attrs = dict(value) if value else None @property def encoding(self) -> dict[Any, Any]: """Dictionary of global encoding attributes on this dataset""" if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value: Mapping[Any, Any]) -> None: self._encoding = dict(value) def reset_encoding(self) -> Self: warnings.warn( "reset_encoding is deprecated since 2023.11, use `drop_encoding` instead", stacklevel=2, ) return self.drop_encoding() def drop_encoding(self) -> Self: """Return a new Dataset without encoding on the dataset or any of its variables/coords.""" variables = {k: v.drop_encoding() for k, v in self.variables.items()} return self._replace(variables=variables, encoding={}) @property def dims(self) -> Frozen[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. Note that type of this object differs from `DataArray.dims`. See `Dataset.sizes` and `DataArray.sizes` for consistently named properties. This property will be changed to return a type more consistent with `DataArray.dims` in the future, i.e. a set of dimension names. See Also -------- Dataset.sizes DataArray.dims """ return FrozenMappingWarningOnValuesAccess(self._dims) @property def sizes(self) -> Frozen[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. 
This is an alias for `Dataset.dims` provided for the benefit of consistency with `DataArray.sizes`. See Also -------- DataArray.sizes """ return Frozen(self._dims) @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from data variable names to dtypes. Cannot be modified directly, but is updated when adding new variables. See Also -------- DataArray.dtype """ return Frozen( { n: v.dtype for n, v in self._variables.items() if n not in self._coord_names } ) def load(self, **kwargs) -> Self: """Trigger loading data into memory and return this dataset. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataset is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataset Same object but with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute Dataset.compute Dataset.load_async DataArray.load Variable.load """ # access .data to coerce everything to numpy or dask arrays chunked_data = { k: v._data for k, v in self.variables.items() if is_chunked_array(v._data) } if chunked_data: chunkmanager = get_chunked_array_type(*chunked_data.values()) # evaluate all the chunked arrays simultaneously evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute( *chunked_data.values(), **kwargs ) for k, data in zip(chunked_data, evaluated_data, strict=False): self.variables[k].data = data # load everything else sequentially [v.load() for k, v in self.variables.items() if k not in chunked_data] return self async def load_async(self, **kwargs) -> Self: """Trigger and await asynchronous loading of data into memory and return this dataset. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original dataset is modified and returned. Only works when opening data lazily from IO storage backends which support lazy asynchronous loading. Otherwise will raise a NotImplementedError. Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataset Same object but with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute Dataset.compute Dataset.load DataArray.load_async Variable.load_async """ # TODO refactor this to pull out the common chunked_data codepath # this blocks on chunked arrays but not on lazily indexed arrays # access .data to coerce everything to numpy or dask arrays chunked_data = { k: v._data for k, v in self.variables.items() if is_chunked_array(v._data) } if chunked_data: chunkmanager = get_chunked_array_type(*chunked_data.values()) # evaluate all the chunked arrays simultaneously evaluated_data: tuple[np.ndarray[Any, Any], ...] 
= chunkmanager.compute( *chunked_data.values(), **kwargs ) for k, data in zip(chunked_data, evaluated_data, strict=False): self.variables[k].data = data # load everything else concurrently coros = [ v.load_async() for k, v in self.variables.items() if k not in chunked_data ] await asyncio.gather(*coros) return self def __dask_tokenize__(self) -> object: from dask.base import normalize_token return normalize_token( (type(self), self._variables, self._coord_names, self._attrs or None) ) def __dask_graph__(self): graphs = {k: v.__dask_graph__() for k, v in self.variables.items()} graphs = {k: v for k, v in graphs.items() if v is not None} if not graphs: return None else: try: from dask.highlevelgraph import HighLevelGraph return HighLevelGraph.merge(*graphs.values()) except ImportError: from dask import sharedict return sharedict.merge(*graphs.values()) def __dask_keys__(self): import dask return [ v.__dask_keys__() for v in self.variables.values() if dask.is_dask_collection(v) ] def __dask_layers__(self): import dask return sum( ( v.__dask_layers__() for v in self.variables.values() if dask.is_dask_collection(v) ), (), ) @property def __dask_optimize__(self): import dask.array as da return da.Array.__dask_optimize__ @property def __dask_scheduler__(self): import dask.array as da return da.Array.__dask_scheduler__ def __dask_postcompute__(self): return self._dask_postcompute, () def __dask_postpersist__(self): return self._dask_postpersist, () def _dask_postcompute(self, results: Iterable[Variable]) -> Self: import dask variables = {} results_iter = iter(results) for k, v in self._variables.items(): if dask.is_dask_collection(v): rebuild, args = v.__dask_postcompute__() v = rebuild(next(results_iter), *args) variables[k] = v return type(self)._construct_direct( variables, self._coord_names, self._dims, self._attrs, self._indexes, self._encoding, self._close, ) def _dask_postpersist( self, dsk: Mapping, *, rename: Mapping[str, str] | None = None ) -> Self: from dask import is_dask_collection from dask.highlevelgraph import HighLevelGraph from dask.optimization import cull variables = {} for k, v in self._variables.items(): if not is_dask_collection(v): variables[k] = v continue if isinstance(dsk, HighLevelGraph): # dask >= 2021.3 # __dask_postpersist__() was called by dask.highlevelgraph. # Don't use dsk.cull(), as we need to prevent partial layers: # https://github.com/dask/dask/issues/7137 layers = v.__dask_layers__() if rename: layers = [rename.get(k, k) for k in layers] dsk2 = dsk.cull_layers(layers) elif rename: # pragma: nocover # At the moment of writing, this is only for forward compatibility. # replace_name_in_key requires dask >= 2021.3. from dask.base import flatten, replace_name_in_key keys = [ replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__()) ] dsk2, _ = cull(dsk, keys) else: # __dask_postpersist__() was called by dask.optimize or dask.persist dsk2, _ = cull(dsk, v.__dask_keys__()) rebuild, args = v.__dask_postpersist__() # rename was added in dask 2021.3 kwargs = {"rename": rename} if rename else {} variables[k] = rebuild(dsk2, *args, **kwargs) return type(self)._construct_direct( variables, self._coord_names, self._dims, self._attrs, self._indexes, self._encoding, self._close, ) def compute(self, **kwargs) -> Self: """Trigger loading data into memory and return a new dataset. Data will be computed and/or loaded from disk or a remote source. Unlike ``.load``, the original dataset is left unaltered. 
Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : Dataset New object with lazy data variables and coordinates as in-memory arrays. See Also -------- dask.compute Dataset.load Dataset.load_async DataArray.compute Variable.compute """ new = self.copy(deep=False) return new.load(**kwargs) def _persist_inplace(self, **kwargs) -> Self: """Persist all chunked arrays in memory.""" # access .data to coerce everything to numpy or dask arrays lazy_data = { k: v._data for k, v in self.variables.items() if is_chunked_array(v._data) } if lazy_data: chunkmanager = get_chunked_array_type(*lazy_data.values()) # evaluate all the dask arrays simultaneously evaluated_data = chunkmanager.persist(*lazy_data.values(), **kwargs) for k, data in zip(lazy_data, evaluated_data, strict=False): self.variables[k].data = data return self def persist(self, **kwargs) -> Self: """Trigger computation, keeping data as chunked arrays. This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()`` or ``.load()``. However this operation keeps the data as dask arrays. This is particularly useful when using the dask.distributed scheduler and you want to load a large amount of data into distributed memory. Like compute (but unlike load), the original dataset is left unaltered. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. Returns ------- object : Dataset New object with all dask-backed coordinates and data variables as persisted dask arrays. See Also -------- dask.persist """ new = self.copy(deep=False) return new._persist_inplace(**kwargs) @classmethod def _construct_direct( cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int] | None = None, attrs: dict | None = None, indexes: dict[Any, Index] | None = None, encoding: dict | None = None, close: Callable[[], None] | None = None, ) -> Self: """Shortcut around __init__ for internal use when we want to skip costly validation """ if dims is None: dims = calculate_dimensions(variables) if indexes is None: indexes = {} obj = object.__new__(cls) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._close = close obj._encoding = encoding return obj def _replace( self, variables: dict[Hashable, Variable] | None = None, coord_names: set[Hashable] | None = None, dims: dict[Any, int] | None = None, attrs: dict[Hashable, Any] | Default | None = _default, indexes: dict[Hashable, Index] | None = None, encoding: dict | Default | None = _default, inplace: bool = False, ) -> Self: """Fastpath constructor for internal use. Returns an object with optionally with replaced attributes. Explicitly passed arguments are *not* copied when placed on the new dataset. It is up to the caller to ensure that they have the right type and are not used elsewhere. 
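In the same spirit, a hedged sketch of ``persist`` as described above (requires dask; data is computed but kept as chunked dask arrays, which is mostly useful with the distributed scheduler):

>>> ds = xr.Dataset({"a": ("x", np.arange(10))}).chunk({"x": 5})  # doctest: +SKIP
>>> persisted = ds.persist()  # triggers computation, keeps dask arrays  # doctest: +SKIP
>>> persisted.chunksizes  # chunk structure is preserved  # doctest: +SKIP
Frozen({'x': (5, 5)})
>>> isinstance(persisted["a"].data, np.ndarray)  # still not a plain numpy array  # doctest: +SKIP
False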
""" if inplace: if variables is not None: self._variables = variables if coord_names is not None: self._coord_names = coord_names if dims is not None: self._dims = dims if attrs is not _default: self._attrs = attrs if indexes is not None: self._indexes = indexes if encoding is not _default: self._encoding = encoding obj = self else: if variables is None: variables = self._variables.copy() if coord_names is None: coord_names = self._coord_names.copy() if dims is None: dims = self._dims.copy() if attrs is _default: attrs = copy.copy(self._attrs) if indexes is None: indexes = self._indexes.copy() if encoding is _default: encoding = copy.copy(self._encoding) obj = self._construct_direct( variables, coord_names, dims, attrs, indexes, encoding ) return obj def _replace_with_new_dims( self, variables: dict[Hashable, Variable], coord_names: set | None = None, attrs: dict[Hashable, Any] | Default | None = _default, indexes: dict[Hashable, Index] | None = None, inplace: bool = False, ) -> Self: """Replace variables with recalculated dimensions.""" dims = calculate_dimensions(variables) return self._replace( variables, coord_names, dims, attrs, indexes, inplace=inplace ) def _replace_vars_and_dims( self, variables: dict[Hashable, Variable], coord_names: set | None = None, dims: dict[Hashable, int] | None = None, attrs: dict[Hashable, Any] | Default | None = _default, inplace: bool = False, ) -> Self: """Deprecated version of _replace_with_new_dims(). Unlike _replace_with_new_dims(), this method always recalculates indexes from variables. """ if dims is None: dims = calculate_dimensions(variables) return self._replace( variables, coord_names, dims, attrs, indexes=None, inplace=inplace ) def _overwrite_indexes( self, indexes: Mapping[Hashable, Index], variables: Mapping[Hashable, Variable] | None = None, drop_variables: list[Hashable] | None = None, drop_indexes: list[Hashable] | None = None, rename_dims: Mapping[Hashable, Hashable] | None = None, ) -> Self: """Maybe replace indexes. This function may do a lot more depending on index query results. 
""" if not indexes: return self if variables is None: variables = {} if drop_variables is None: drop_variables = [] if drop_indexes is None: drop_indexes = [] new_variables = self._variables.copy() new_coord_names = self._coord_names.copy() new_indexes = dict(self._indexes) index_variables = {} no_index_variables = {} for name, var in variables.items(): old_var = self._variables.get(name) if old_var is not None: var.attrs.update(old_var.attrs) var.encoding.update(old_var.encoding) if name in indexes: index_variables[name] = var else: no_index_variables[name] = var for name in indexes: new_indexes[name] = indexes[name] for name, var in index_variables.items(): new_coord_names.add(name) new_variables[name] = var # append no-index variables at the end for k in no_index_variables: new_variables.pop(k) new_variables.update(no_index_variables) for name in drop_indexes: new_indexes.pop(name) for name in drop_variables: new_variables.pop(name) new_indexes.pop(name, None) new_coord_names.remove(name) replaced = self._replace( variables=new_variables, coord_names=new_coord_names, indexes=new_indexes ) if rename_dims: # skip rename indexes: they should already have the right name(s) dims = replaced._rename_dims(rename_dims) new_variables, new_coord_names = replaced._rename_vars({}, rename_dims) return replaced._replace( variables=new_variables, coord_names=new_coord_names, dims=dims ) else: return replaced def copy(self, deep: bool = False, data: DataVars | None = None) -> Self: """Returns a copy of this dataset. If `deep=True`, a deep copy is made of each of the component variables. Otherwise, a shallow copy of each of the component variable is made, so that the underlying memory region of the new dataset is the same as in the original dataset. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, default: False Whether each component variable is loaded into memory and copied onto the new object. Default is False. data : dict-like or None, optional Data to use in the new object. Each item in `data` must have same shape as corresponding data variable in original. When `data` is used, `deep` is ignored for the data variables and only used for coords. Returns ------- object : Dataset New object with dimensions, attributes, coordinates, name, encoding, and optionally data copied from original. Examples -------- Shallow copy versus deep copy >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset( ... {"foo": da, "bar": ("x", [-1, 2])}, ... coords={"x": ["one", "two"]}, ... ) >>> ds.copy() Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds_0 = ds.copy(deep=False) >>> ds_0["foo"][0, 0] = 7 >>> ds_0 Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds.copy(data={"foo": np.arange(6).reshape(2, 3), "bar": ["a", "b"]}) Size: 80B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) >> ds Size: 88B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Coordinates: * x (x) Self: if data is None: data = {} elif not utils.is_dict_like(data): raise ValueError("Data must be dict-like") if data: var_keys = set(self.data_vars.keys()) data_keys = set(data.keys()) keys_not_in_vars = data_keys - var_keys if keys_not_in_vars: raise ValueError( "Data must only contain variables in original " f"dataset. 
Extra variables: {keys_not_in_vars}" ) keys_missing_from_data = var_keys - data_keys if keys_missing_from_data: raise ValueError( "Data must contain all variables in original " f"dataset. Data is missing {keys_missing_from_data}" ) indexes, index_vars = self.xindexes.copy_indexes(deep=deep) variables = {} for k, v in self._variables.items(): if k in index_vars: variables[k] = index_vars[k] else: variables[k] = v._copy(deep=deep, data=data.get(k), memo=memo) attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs) encoding = ( copy.deepcopy(self._encoding, memo) if deep else copy.copy(self._encoding) ) return self._replace(variables, indexes=indexes, attrs=attrs, encoding=encoding) def __copy__(self) -> Self: return self._copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) def as_numpy(self) -> Self: """ Coerces wrapped data and coordinates into numpy arrays, returning a Dataset. See also -------- DataArray.as_numpy DataArray.to_numpy : Returns only the data as a numpy.ndarray object. """ numpy_variables = {k: v.as_numpy() for k, v in self.variables.items()} return self._replace(variables=numpy_variables) def _copy_listed(self, names: Iterable[Hashable]) -> Self: """Create a new Dataset with the listed variables from this dataset and the all relevant coordinates. Skips all validation. """ variables: dict[Hashable, Variable] = {} coord_names = set() indexes: dict[Hashable, Index] = {} for name in names: try: variables[name] = self._variables[name] except KeyError: ref_name, var_name, var = _get_virtual_variable( self._variables, name, self.sizes ) variables[var_name] = var if ref_name in self._coord_names or ref_name in self.dims: coord_names.add(var_name) if (var_name,) == var.dims: index, index_vars = create_default_index_implicit(var, names) indexes.update(dict.fromkeys(index_vars, index)) variables.update(index_vars) coord_names.update(index_vars) needed_dims: OrderedSet[Hashable] = OrderedSet() for v in variables.values(): needed_dims.update(v.dims) dims = {k: self.sizes[k] for k in needed_dims} # preserves ordering of coordinates for k in self._variables: if k not in self._coord_names: continue if set(self.variables[k].dims) <= needed_dims: variables[k] = self._variables[k] coord_names.add(k) indexes.update(filter_indexes_from_coords(self._indexes, coord_names)) return self._replace(variables, coord_names, dims, indexes=indexes) def _construct_dataarray(self, name: Hashable) -> DataArray: """Construct a DataArray by indexing this dataset""" from xarray.core.dataarray import DataArray try: variable = self._variables[name] except KeyError: _, name, variable = _get_virtual_variable(self._variables, name, self.sizes) needed_dims = set(variable.dims) coords: dict[Hashable, Variable] = {} # preserve ordering for k in self._variables: if k in self._indexes: add_coord = self._indexes[k].should_add_coord_to_array( k, self._variables[k], needed_dims ) else: var_dims = set(self._variables[k].dims) add_coord = k in self._coord_names and var_dims <= needed_dims if add_coord: coords[k] = self._variables[k] indexes = filter_indexes_from_coords(self._indexes, set(coords)) return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True) @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from self._item_sources yield self.attrs @property def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up 
items for key-completion""" yield self.data_vars yield FilteredMapping(keys=self._coord_names, mapping=self.coords) # virtual coordinates yield FilteredMapping(keys=self.sizes, mapping=self) def __contains__(self, key: object) -> bool: """The 'in' operator will return true or false depending on whether 'key' is an array in the dataset or not. """ return key in self._variables def __len__(self) -> int: return len(self.data_vars) def __bool__(self) -> bool: return bool(self.data_vars) def __iter__(self) -> Iterator[Hashable]: return iter(self.data_vars) if TYPE_CHECKING: # needed because __getattr__ is returning Any and otherwise # this class counts as part of the SupportsArray Protocol __array__ = None # type: ignore[var-annotated,unused-ignore] else: def __array__(self, dtype=None, copy=None): raise TypeError( "cannot directly convert an xarray.Dataset into a " "numpy array. Instead, create an xarray.DataArray " "first, either with indexing on the Dataset or by " "invoking the `to_dataarray()` method." ) @property def nbytes(self) -> int: """ Total bytes consumed by the data arrays of all variables in this dataset. If the backend array for any variable does not include ``nbytes``, estimates the total bytes for that array based on the ``size`` and ``dtype``. """ return sum(v.nbytes for v in self.variables.values()) @property def loc(self) -> _LocIndexer[Self]: """Attribute for location based indexing. Only supports __getitem__, and only when the key is a dict of the form {dim: labels}. """ return _LocIndexer(self) @overload def __getitem__(self, key: Hashable) -> DataArray: ... # Mapping is Iterable @overload def __getitem__(self, key: Iterable[Hashable]) -> Self: ... def __getitem__( self, key: Mapping[Any, Any] | Hashable | Iterable[Hashable] ) -> Self | DataArray: """Access variables or coordinates of this dataset as a :py:class:`~xarray.DataArray` or a subset of variables or a indexed dataset. Indexing with a list of names will return a new ``Dataset`` object. """ from xarray.core.formatting import shorten_list_repr if utils.is_dict_like(key): return self.isel(**key) if utils.hashable(key): try: return self._construct_dataarray(key) except KeyError as e: message = f"No variable named {key!r}." best_guess = utils.did_you_mean(key, self.variables.keys()) if best_guess: message += f" {best_guess}" else: message += f" Variables on the dataset include {shorten_list_repr(list(self.variables.keys()), max_items=10)}" # If someone attempts `ds['foo' , 'bar']` instead of `ds[['foo', 'bar']]` if isinstance(key, tuple): message += f"\nHint: use a list to select multiple variables, for example `ds[{list(key)}]`" raise KeyError(message) from e if utils.iterable_of_hashable(key): return self._copy_listed(key) raise ValueError(f"Unsupported key-type {type(key)}") def __setitem__( self, key: Hashable | Iterable[Hashable] | Mapping, value: Any ) -> None: """Add an array to this dataset. Multiple arrays can be added at the same time, in which case each of the following operations is applied to the respective value. If key is dict-like, update all variables in the dataset one by one with the given value at the given location. If the given value is also a dataset, select corresponding variables in the given value and in the dataset to be changed. If value is a ` from .dataarray import DataArray`, call its `select_vars()` method, rename it to `key` and merge the contents of the resulting dataset into this dataset. 
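A short sketch of the assignment forms described here (not from the original source; variable names are illustrative):

>>> ds = xr.Dataset(coords={"x": [0, 1, 2]})
>>> ds["foo"] = ("x", [10, 20, 30])  # tuple of (dims, data)
>>> ds["bar"] = xr.DataArray([1.0, 2.0, 3.0], dims="x")  # DataArray value
>>> ds[["u", "v"]] = xr.Dataset({"u": ("x", [0, 0, 0]), "v": ("x", [1, 1, 1])})  # several keys at once
>>> list(ds.data_vars)
['foo', 'bar', 'u', 'v']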
If value is a `Variable` object (or tuple of form ``(dims, data[, attrs])``), add it to this dataset as a new variable. """ from xarray.core.dataarray import DataArray if utils.is_dict_like(key): # check for consistency and convert value to dataset value = self._setitem_check(key, value) # loop over dataset variables and set new values processed = [] for name, var in self.items(): try: var[key] = value[name] processed.append(name) except Exception as e: if processed: raise RuntimeError( "An error occurred while setting values of the" f" variable '{name}'. The following variables have" f" been successfully updated:\n{processed}" ) from e else: raise e elif utils.hashable(key): if isinstance(value, Dataset): raise TypeError( "Cannot assign a Dataset to a single key - only a DataArray or Variable " "object can be stored under a single key." ) self.update({key: value}) elif utils.iterable_of_hashable(key): keylist = list(key) if len(keylist) == 0: raise ValueError("Empty list of variables to be set") if len(keylist) == 1: self.update({keylist[0]: value}) else: if len(keylist) != len(value): raise ValueError( f"Different lengths of variables to be set " f"({len(keylist)}) and data used as input for " f"setting ({len(value)})" ) if isinstance(value, Dataset): self.update( dict(zip(keylist, value.data_vars.values(), strict=True)) ) elif isinstance(value, DataArray): raise ValueError("Cannot assign single DataArray to multiple keys") else: self.update(dict(zip(keylist, value, strict=True))) else: raise ValueError(f"Unsupported key-type {type(key)}") def _setitem_check(self, key, value): """Consistency check for __setitem__ When assigning values to a subset of a Dataset, do consistency check beforehand to avoid leaving the dataset in a partially updated state when an error occurs. """ from xarray.core.dataarray import DataArray if isinstance(value, Dataset): missing_vars = [ name for name in value.data_vars if name not in self.data_vars ] if missing_vars: raise ValueError( f"Variables {missing_vars} in new values" f" not available in original dataset:\n{self}" ) elif not any(isinstance(value, t) for t in [DataArray, Number, str]): raise TypeError( "Dataset assignment only accepts DataArrays, Datasets, and scalars." 
) new_value = Dataset() for name, var in self.items(): # test indexing try: var_k = var[key] except Exception as e: raise ValueError( f"Variable '{name}': indexer {key} not available" ) from e if isinstance(value, Dataset): val = value[name] else: val = value if isinstance(val, DataArray): # check consistency of dimensions for dim in val.dims: if dim not in var_k.dims: raise KeyError( f"Variable '{name}': dimension '{dim}' appears in new values " f"but not in the indexed original data" ) dims = tuple(dim for dim in var_k.dims if dim in val.dims) if dims != val.dims: raise ValueError( f"Variable '{name}': dimension order differs between" f" original and new data:\n{dims}\nvs.\n{val.dims}" ) else: val = np.array(val) # type conversion new_value[name] = duck_array_ops.astype(val, dtype=var_k.dtype, copy=False) # check consistency of dimension sizes and dimension coordinates if isinstance(value, DataArray | Dataset): align(self[key], value, join="exact", copy=False) return new_value def __delitem__(self, key: Hashable) -> None: """Remove a variable from this dataset.""" assert_no_index_corrupted(self.xindexes, {key}) if key in self._indexes: del self._indexes[key] del self._variables[key] self._coord_names.discard(key) self._dims = calculate_dimensions(self._variables) # mutable objects should not be hashable # https://github.com/python/mypy/issues/4266 __hash__ = None # type: ignore[assignment] def _all_compat(self, other: Self, compat_str: str) -> bool: """Helper function for equals and identical""" # some stores (e.g., scipy) do not seem to preserve order, so don't # require matching order for equality def compat(x: Variable, y: Variable) -> bool: return getattr(x, compat_str)(y) return self._coord_names == other._coord_names and utils.dict_equiv( self._variables, other._variables, compat=compat ) def broadcast_equals(self, other: Self) -> bool: """Two Datasets are broadcast equal if they are equal after broadcasting all variables against each other. For example, variables that are scalar in one dataset but non-scalar in the other dataset can still be broadcast equal if the the non-scalar variable is a constant. Examples -------- # 2D array with shape (1, 3) >>> data = np.array([[1, 2, 3]]) >>> a = xr.Dataset( ... {"variable_name": (("space", "time"), data)}, ... coords={"space": [0], "time": [0, 1, 2]}, ... ) >>> a Size: 56B Dimensions: (space: 1, time: 3) Coordinates: * space (space) int64 8B 0 * time (time) int64 24B 0 1 2 Data variables: variable_name (space, time) int64 24B 1 2 3 # 2D array with shape (3, 1) >>> data = np.array([[1], [2], [3]]) >>> b = xr.Dataset( ... {"variable_name": (("time", "space"), data)}, ... coords={"time": [0, 1, 2], "space": [0]}, ... ) >>> b Size: 56B Dimensions: (time: 3, space: 1) Coordinates: * time (time) int64 24B 0 1 2 * space (space) int64 8B 0 Data variables: variable_name (time, space) int64 24B 1 2 3 .equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates. >>> a.equals(b) False >>> a.broadcast_equals(b) True >>> a2, b2 = xr.broadcast(a, b) >>> a2.equals(b2) True See Also -------- Dataset.equals Dataset.identical Dataset.broadcast """ try: return self._all_compat(other, "broadcast_equals") except (TypeError, AttributeError): return False def equals(self, other: Self) -> bool: """Two Datasets are equal if they have matching variables and coordinates, all of which are equal. 
Datasets can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for ``Dataset`` does element-wise comparisons (like numpy.ndarrays). Examples -------- # 2D array with shape (1, 3) >>> data = np.array([[1, 2, 3]]) >>> dataset1 = xr.Dataset( ... {"variable_name": (("space", "time"), data)}, ... coords={"space": [0], "time": [0, 1, 2]}, ... ) >>> dataset1 Size: 56B Dimensions: (space: 1, time: 3) Coordinates: * space (space) int64 8B 0 * time (time) int64 24B 0 1 2 Data variables: variable_name (space, time) int64 24B 1 2 3 # 2D array with shape (3, 1) >>> data = np.array([[1], [2], [3]]) >>> dataset2 = xr.Dataset( ... {"variable_name": (("time", "space"), data)}, ... coords={"time": [0, 1, 2], "space": [0]}, ... ) >>> dataset2 Size: 56B Dimensions: (time: 3, space: 1) Coordinates: * time (time) int64 24B 0 1 2 * space (space) int64 8B 0 Data variables: variable_name (time, space) int64 24B 1 2 3 >>> dataset1.equals(dataset2) False >>> dataset1.broadcast_equals(dataset2) True .equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates. Similar for missing values too: >>> ds1 = xr.Dataset( ... { ... "temperature": (["x", "y"], [[1, np.nan], [3, 4]]), ... }, ... coords={"x": [0, 1], "y": [0, 1]}, ... ) >>> ds2 = xr.Dataset( ... { ... "temperature": (["x", "y"], [[1, np.nan], [3, 4]]), ... }, ... coords={"x": [0, 1], "y": [0, 1]}, ... ) >>> ds1.equals(ds2) True See Also -------- Dataset.broadcast_equals Dataset.identical """ try: return self._all_compat(other, "equals") except (TypeError, AttributeError): return False def identical(self, other: Self) -> bool: """Like equals, but also checks all dataset attributes and the attributes on all variables and coordinates. Example ------- >>> a = xr.Dataset( ... {"Width": ("X", [1, 2, 3])}, ... coords={"X": [1, 2, 3]}, ... attrs={"units": "m"}, ... ) >>> b = xr.Dataset( ... {"Width": ("X", [1, 2, 3])}, ... coords={"X": [1, 2, 3]}, ... attrs={"units": "m"}, ... ) >>> c = xr.Dataset( ... {"Width": ("X", [1, 2, 3])}, ... coords={"X": [1, 2, 3]}, ... attrs={"units": "ft"}, ... ) >>> a Size: 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 Data variables: Width (X) int64 24B 1 2 3 Attributes: units: m >>> b Size: 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 Data variables: Width (X) int64 24B 1 2 3 Attributes: units: m >>> c Size: 48B Dimensions: (X: 3) Coordinates: * X (X) int64 24B 1 2 3 Data variables: Width (X) int64 24B 1 2 3 Attributes: units: ft >>> a.equals(b) True >>> a.identical(b) True >>> a.equals(c) True >>> a.identical(c) False See Also -------- Dataset.broadcast_equals Dataset.equals """ try: return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat( other, "identical" ) except (TypeError, AttributeError): return False @property def indexes(self) -> Indexes[pd.Index]: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this Dataset has indexes that cannot be coerced to pandas.Index objects. See Also -------- Dataset.xindexes """ return self.xindexes.to_pandas_indexes() @property def xindexes(self) -> Indexes[Index]: """Mapping of :py:class:`~xarray.indexes.Index` objects used for label based indexing. 
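A small illustrative contrast between the two index properties (not in the original source; assumes ``import pandas as pd``, and the exact repr depends on the installed pandas/xarray versions):

>>> ds = xr.Dataset(coords={"x": [10, 20, 30]})
>>> isinstance(ds.indexes["x"], pd.Index)  # coerced to pandas
True
>>> ds.xindexes["x"]  # the underlying xarray Index object  # doctest: +SKIP
PandasIndex(Index([10, 20, 30], dtype='int64', name='x'))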
""" return Indexes(self._indexes, {k: self._variables[k] for k in self._indexes}) @property def coords(self) -> DatasetCoordinates: """Mapping of :py:class:`~xarray.DataArray` objects corresponding to coordinate variables. See Also -------- Coordinates """ return DatasetCoordinates(self) @property def data_vars(self) -> DataVariables: """Dictionary of DataArray objects corresponding to data variables""" return DataVariables(self) def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self: """Given names of one or more variables, set them as coordinates Parameters ---------- names : hashable or iterable of hashable Name(s) of variables in this dataset to convert into coordinates. Examples -------- >>> dataset = xr.Dataset( ... { ... "pressure": ("time", [1.013, 1.2, 3.5]), ... "time": pd.date_range("2023-01-01", periods=3), ... } ... ) >>> dataset Size: 48B Dimensions: (time: 3) Coordinates: * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 Data variables: pressure (time) float64 24B 1.013 1.2 3.5 >>> dataset.set_coords("pressure") Size: 48B Dimensions: (time: 3) Coordinates: pressure (time) float64 24B 1.013 1.2 3.5 * time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03 Data variables: *empty* On calling ``set_coords`` , these data variables are converted to coordinates, as shown in the final dataset. Returns ------- Dataset See Also -------- Dataset.swap_dims Dataset.assign_coords """ # TODO: allow inserting new coordinates with this method, like # DataFrame.set_index? # nb. check in self._variables, not self.data_vars to insure that the # operation is idempotent if isinstance(names, str) or not isinstance(names, Iterable): names = [names] else: names = list(names) self._assert_all_in_dataset(names) obj = self.copy() obj._coord_names.update(names) return obj def reset_coords( self, names: Dims = None, drop: bool = False, ) -> Self: """Given names of coordinates, reset them to become variables Parameters ---------- names : str, Iterable of Hashable or None, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, default: False If True, remove coordinates instead of converting them into variables. Examples -------- >>> dataset = xr.Dataset( ... { ... "temperature": ( ... ["time", "lat", "lon"], ... [[[25, 26], [27, 28]], [[29, 30], [31, 32]]], ... ), ... "precipitation": ( ... ["time", "lat", "lon"], ... [[[0.5, 0.8], [0.2, 0.4]], [[0.3, 0.6], [0.7, 0.9]]], ... ), ... }, ... coords={ ... "time": pd.date_range(start="2023-01-01", periods=2), ... "lat": [40, 41], ... "lon": [-80, -79], ... "altitude": 1000, ... }, ... 
) # Dataset before resetting coordinates >>> dataset Size: 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 * lat (lat) int64 16B 40 41 * lon (lon) int64 16B -80 -79 altitude int64 8B 1000 Data variables: temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32 precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 # Reset the 'altitude' coordinate >>> dataset_reset = dataset.reset_coords("altitude") # Dataset after resetting coordinates >>> dataset_reset Size: 184B Dimensions: (time: 2, lat: 2, lon: 2) Coordinates: * time (time) datetime64[ns] 16B 2023-01-01 2023-01-02 * lat (lat) int64 16B 40 41 * lon (lon) int64 16B -80 -79 Data variables: temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32 precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9 altitude int64 8B 1000 Returns ------- Dataset See Also -------- Dataset.set_coords """ if names is None: names = self._coord_names - set(self._indexes) else: if isinstance(names, str) or not isinstance(names, Iterable): names = [names] else: names = list(names) self._assert_all_in_dataset(names) bad_coords = set(names) & set(self._indexes) if bad_coords: raise ValueError( f"cannot remove index coordinates with reset_coords: {bad_coords}" ) obj = self.copy() obj._coord_names.difference_update(names) if drop: for name in names: del obj._variables[name] return obj def dump_to_store(self, store: AbstractDataStore, **kwargs) -> None: """Store dataset contents to a backends.*DataStore object.""" from xarray.backends.api import dump_to_store # TODO: rename and/or cleanup this method to make it more consistent # with to_netcdf() dump_to_store(self, store, **kwargs) # path=None writes to bytes @overload def to_netcdf( self, path: None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview: ... # compute=False returns dask.Delayed @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, *, compute: Literal[False], invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed: ... # default return None @overload def to_netcdf( self, path: str | PathLike | io.IOBase, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: Literal[True] = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None: ... # if compute cannot be evaluated at type check time # we may get back either Delayed or None @overload def to_netcdf( self, path: str | PathLike, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> Delayed | None: ... 
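The overloads above encode how the return type follows from ``path`` and ``compute``; a hedged sketch (requires a netCDF backend such as netcdf4, h5netcdf or scipy, plus dask for the delayed case; the file name is hypothetical):

>>> ds = xr.Dataset({"a": ("x", [1, 2, 3])})
>>> buf = ds.to_netcdf()  # path=None -> in-memory bytes  # doctest: +SKIP
>>> isinstance(buf, memoryview)  # doctest: +SKIP
True
>>> ds.to_netcdf("example.nc")  # path given, compute=True -> None  # doctest: +SKIP
>>> delayed = ds.chunk().to_netcdf("example.nc", compute=False)  # -> dask.delayed.Delayed  # doctest: +SKIP
>>> delayed.compute()  # the write happens here  # doctest: +SKIP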
def to_netcdf( self, path: str | PathLike | io.IOBase | None = None, mode: NetcdfWriteModes = "w", format: T_NetcdfTypes | None = None, group: str | None = None, engine: T_NetcdfEngine | None = None, encoding: Mapping[Any, Mapping[str, Any]] | None = None, unlimited_dims: Iterable[Hashable] | None = None, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> memoryview | Delayed | None: """Write dataset contents to a netCDF file. Parameters ---------- path : str, path-like, file-like or None, optional Path to which to save this datatree, or a file-like object to write it to (which must support read and write and be seekable) or None (default) to return in-memory bytes as a memoryview. mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only netCDF 3 compatible API features. * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format, which fully supports 2+ GB files, but is only compatible with clients linked against netCDF version 3.6.0 or later. * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not handle 2+ GB files very well. All formats are supported by the netCDF4-python library. scipy.io.netcdf only supports the last two formats. The default format is NETCDF4 if you are saving a file to disk and have the netCDF4-python library available. Otherwise, xarray falls back to using scipy to write netCDF files and defaults to the NETCDF3_64BIT format (scipy does not support netCDF4). group : str, optional Path to the netCDF4 group in the given file to open (only works for format='NETCDF4'). The group(s) will be created if necessary. engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4' if writing to a file on disk. encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1, "zlib": True}, ...}``. If ``encoding`` is specified the original encoding of the variables of the dataset is ignored. The `h5netcdf` engine supports both the NetCDF4-style compression encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py ones ``{"compression": "gzip", "compression_opts": 9}``. This allows using any compression plugin installed in the HDF5 library, e.g. LZF. unlimited_dims : iterable of hashable, optional Dimension(s) that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding["unlimited_dims"]``. compute: bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. invalid_netcdf: bool, default: False Only valid along with ``engine="h5netcdf"``. If True, allow writing hdf5 files which are invalid netcdf as described in https://github.com/h5netcdf/h5netcdf. 
Returns ------- * ``memoryview`` if path is None * ``dask.delayed.Delayed`` if compute is False * ``None`` otherwise See Also -------- DataArray.to_netcdf """ if encoding is None: encoding = {} from xarray.backends.api import to_netcdf return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:( self, path, mode=mode, format=format, group=group, engine=engine, encoding=encoding, unlimited_dims=unlimited_dims, compute=compute, multifile=False, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) # compute=True (default) returns ZarrStore @overload def to_zarr( self, store: MutableMapping | str | PathLike[str] | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[True] = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore: ... # compute=False returns dask.Delayed @overload def to_zarr( self, store: MutableMapping | str | PathLike[str] | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: Literal[False], consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> Delayed: ... def to_zarr( self, store: MutableMapping | str | PathLike[str] | None = None, chunk_store: MutableMapping | str | PathLike | None = None, mode: ZarrWriteModes | None = None, synchronizer=None, group: str | None = None, encoding: Mapping | None = None, *, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore | Delayed: """Write dataset contents to a zarr group. Zarr chunks are determined in the following way: - From the ``chunks`` attribute in each variable's ``encoding`` (can be set via `Dataset.chunk`). - If the variable is a Dask array, from the dask chunks - If neither Dask chunks nor encoding chunks are present, chunks will be determined automatically by Zarr - If both Dask chunks and encoding chunks are present, encoding chunks will be used, provided that there is a many-to-one relationship between encoding chunks and dask chunks (i.e. Dask chunks are bigger than and evenly divide encoding chunks); otherwise raise a ``ValueError``. This restriction ensures that no synchronization / locks are required when writing. To disable this restriction, use ``safe_chunks=False``. 
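As a hedged illustration of the encoding-chunk rule above (requires zarr and dask; the store path is hypothetical): each dask chunk of 4 below is an exact multiple of the requested zarr chunk of 2, so the write is accepted without ``safe_chunks=False``:

>>> ds = xr.Dataset({"a": ("x", np.arange(8))}).chunk({"x": 4})  # doctest: +SKIP
>>> store = ds.to_zarr(
...     "example.zarr", mode="w", encoding={"a": {"chunks": (2,)}}
... )  # doctest: +SKIP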
Parameters ---------- store : MutableMapping, str or path-like, optional Store or path to directory in local or remote file system. chunk_store : MutableMapping, str or path-like, optional Store or path to directory in local or remote file system only for Zarr array chunks. Requires zarr-python v2.4.0 or later. mode : {"w", "w-", "a", "a-", r+", None}, optional Persistence mode: "w" means create (overwrite if exists); "w-" means create (fail if exists); "a" means override all existing variables including dimension coordinates (create if does not exist); "a-" means only append those variables that have ``append_dim``. "r+" means modify existing array *values* only (raise an error if any metadata or shapes would change). The default mode is "a" if ``append_dim`` is set. Otherwise, it is "r+" if ``region`` is set and ``w-`` otherwise. synchronizer : object, optional Zarr array synchronizer. group : str, optional Group path. (a.k.a. `path` in zarr terminology.) encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}`` compute : bool, default: True If True write array data immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed to write array data later. Metadata is always updated eagerly. consolidated : bool, optional If True, apply :func:`zarr.convenience.consolidate_metadata` after writing metadata and read existing stores with consolidated metadata; if False, do not. The default (`consolidated=None`) means write consolidated metadata and attempt to read consolidated metadata for existing stores (falling back to non-consolidated). When the experimental ``zarr_version=3``, ``consolidated`` must be either be ``None`` or ``False``. append_dim : hashable, optional If set, the dimension along which the data will be appended. All other dimensions on overridden variables must remain the same size. region : dict or "auto", optional Optional mapping from dimension names to either a) ``"auto"``, or b) integer slices, indicating the region of existing zarr array(s) in which to write this dataset's data. If ``"auto"`` is provided the existing store will be opened and the region inferred by matching indexes. ``"auto"`` can be used as a single string, which will automatically infer the region for all dimensions, or as dictionary values for specific dimensions mixed together with explicit slices for other dimensions. Alternatively integer slices can be provided; for example, ``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate that values should be written to the region ``0:1000`` along ``x`` and ``10000:11000`` along ``y``. Two restrictions apply to the use of ``region``: - If ``region`` is set, _all_ variables in a dataset must have at least one dimension in common with the region. Other variables should be written in a separate single call to ``to_zarr()``. - Dimensions cannot be included in both ``region`` and ``append_dim`` at the same time. To create empty arrays to fill in with ``region``, use a separate call to ``to_zarr()`` with ``compute=False``. See "Modifying existing Zarr stores" in the reference documentation for full details. Users are expected to ensure that the specified region aligns with Zarr chunk boundaries, and that dask chunks are also aligned. Xarray makes limited checks that these multiple chunk boundaries line up. 
It is possible to write incomplete chunks and corrupt the data with this option if you are not careful. safe_chunks : bool, default: True If True, only allow writes to when there is a many-to-one relationship between Zarr chunks (specified in encoding) and Dask chunks. Set False to override this restriction; however, data may become corrupted if Zarr arrays are written in parallel. This option may be useful in combination with ``compute=False`` to initialize a Zarr from an existing Dataset with arbitrary chunk structure. In addition to the many-to-one relationship validation, it also detects partial chunks writes when using the region parameter, these partial chunks are considered unsafe in the mode "r+" but safe in the mode "a". Note: Even with these validations it can still be unsafe to write two or more chunked arrays in the same location in parallel if they are not writing in independent regions, for those cases it is better to use a synchronizer. align_chunks: bool, default False If True, rechunks the Dask array to align with Zarr chunks before writing. This ensures each Dask chunk maps to one or more contiguous Zarr chunks, which avoids race conditions. Internally, the process sets safe_chunks=False and tries to preserve the original Dask chunking as much as possible. Note: While this alignment avoids write conflicts stemming from chunk boundary misalignment, it does not protect against race conditions if multiple uncoordinated processes write to the same Zarr array concurrently. storage_options : dict, optional Any additional parameters for the storage backend (ignored for local paths). zarr_version : int or None, optional .. deprecated:: 2024.9.1 Use ``zarr_format`` instead. zarr_format : int or None, optional The desired zarr format to target (currently 2 or 3). The default of None will attempt to determine the zarr version from ``store`` when possible, otherwise defaulting to the default version used by the zarr-python library installed. write_empty_chunks : bool or None, optional If True, all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not be stored, and the store entry for that chunk's key is deleted. This setting enables sparser storage, as only chunks with non-fill-value data are stored, at the expense of overhead associated with checking the data of each chunk. If None (default) fall back to specification(s) in ``encoding`` or Zarr defaults. A ``ValueError`` will be raised if the value of this (if not None) differs with ``encoding``. chunkmanager_store_kwargs : dict, optional Additional keyword arguments passed on to the `ChunkManager.store` method used to store chunked arrays. For example for a dask array additional kwargs will be passed eventually to :py:func:`dask.array.store()`. Experimental API that should not be relied upon. Returns ------- * ``dask.delayed.Delayed`` if compute is False * ZarrStore otherwise References ---------- https://zarr.readthedocs.io/ Notes ----- Zarr chunking behavior: If chunks are found in the encoding argument or attribute corresponding to any DataArray, those chunks are used. If a DataArray is a dask array, it is written with those chunks. If not other chunks are found, Zarr uses its own heuristics to choose automatic chunk sizes. encoding: The encoding attribute (if exists) of the DataArray(s) will be used. Override any existing encodings by providing the ``encoding`` kwarg. 
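Combining the ``region`` and ``compute`` notes above, a hypothetical two-step pattern (requires zarr and dask; names are illustrative) first lays out metadata and then fills one slice of the store:

>>> ds = xr.Dataset({"a": ("x", np.zeros(1000))}).chunk({"x": 100})  # doctest: +SKIP
>>> _ = ds.to_zarr("example.zarr", mode="w", compute=False)  # metadata only  # doctest: +SKIP
>>> _ = ds.isel(x=slice(0, 100)).to_zarr(
...     "example.zarr", region={"x": slice(0, 100)}
... )  # doctest: +SKIP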
``fill_value`` handling: There exists a subtlety in interpreting zarr's ``fill_value`` property. For zarr v2 format arrays, ``fill_value`` is *always* interpreted as an invalid value similar to the ``_FillValue`` attribute in CF/netCDF. For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute will be used to mask the data if requested using ``mask_and_scale=True``. See this `Github issue `_ for more. See Also -------- :ref:`io.zarr` The I/O user guide, with more details and examples. """ from xarray.backends.api import to_zarr return to_zarr( # type: ignore[call-overload,misc] self, store=store, chunk_store=chunk_store, storage_options=storage_options, mode=mode, synchronizer=synchronizer, group=group, encoding=encoding, compute=compute, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, chunkmanager_store_kwargs=chunkmanager_store_kwargs, ) def __repr__(self) -> str: return formatting.dataset_repr(self) def _repr_html_(self) -> str: if OPTIONS["display_style"] == "text": return f"
<pre>{escape(repr(self))}</pre>
" return formatting_html.dataset_repr(self) def info(self, buf: IO | None = None) -> None: """ Concise summary of a Dataset variables and attributes. Parameters ---------- buf : file-like, default: sys.stdout writable buffer See Also -------- pandas.DataFrame.assign ncdump : netCDF's ncdump """ if buf is None: # pragma: no cover buf = sys.stdout lines = [ "xarray.Dataset {", "dimensions:", ] for name, size in self.sizes.items(): lines.append(f"\t{name} = {size} ;") lines.append("\nvariables:") for name, da in self.variables.items(): dims = ", ".join(map(str, da.dims)) lines.append(f"\t{da.dtype} {name}({dims}) ;") for k, v in da.attrs.items(): lines.append(f"\t\t{name}:{k} = {v} ;") lines.append("\n// global attributes:") for k, v in self.attrs.items(): lines.append(f"\t:{k} = {v} ;") lines.append("}") buf.write("\n".join(lines)) @property def chunks(self) -> Mapping[Hashable, tuple[int, ...]]: """ Mapping from dimension names to block lengths for this dataset's data. If this dataset does not contain chunked arrays, the mapping will be empty. Cannot be modified directly, but can be modified by calling .chunk(). Same as Dataset.chunksizes, but maintained for backwards compatibility. See Also -------- Dataset.chunk Dataset.chunksizes xarray.unify_chunks """ return get_chunksizes(self.variables.values()) @property def chunksizes(self) -> Mapping[Hashable, tuple[int, ...]]: """ Mapping from dimension names to block lengths for this dataset's data. If this dataset does not contain chunked arrays, the mapping will be empty. Cannot be modified directly, but can be modified by calling .chunk(). Same as Dataset.chunks. See Also -------- Dataset.chunk Dataset.chunks xarray.unify_chunks """ return get_chunksizes(self.variables.values()) def chunk( self, chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667) name_prefix: str = "xarray-", token: str | None = None, lock: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: T_ChunkDimFreq, ) -> Self: """Coerce all arrays in this dataset into dask arrays with the given chunks. Non-dask arrays in this dataset will be converted to dask arrays. Dask arrays will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Along datetime-like dimensions, a :py:class:`Resampler` object (e.g. :py:class:`groupers.TimeResampler` or :py:class:`groupers.SeasonResampler`) is also accepted. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or a Resampler, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, or ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}`` or ``{"time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])}``. name_prefix : str, default: "xarray-" Prefix for the name of any new dask arrays. token : str, optional Token uniquely identifying this dataset. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. inline_array: bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. 
Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided Returns ------- chunked : xarray.Dataset See Also -------- Dataset.chunks Dataset.chunksizes xarray.unify_chunks dask.array.from_array """ from xarray.groupers import Resampler if chunks is None and not chunks_kwargs: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=DeprecationWarning, stacklevel=2, ) chunks = {} chunks_mapping: Mapping[Any, Any] if not isinstance(chunks, Mapping) and chunks is not None: if isinstance(chunks, tuple | list): utils.emit_user_level_warning( "Supplying chunks as dimension-order tuples is deprecated. " "It will raise an error in the future. Instead use a dict with dimensions as keys.", category=DeprecationWarning, ) chunks_mapping = dict.fromkeys(self.dims, chunks) else: chunks_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") bad_dims = chunks_mapping.keys() - self.sizes.keys() if bad_dims: raise ValueError( f"chunks keys {tuple(bad_dims)} not found in data dimensions {tuple(self.sizes.keys())}" ) def _resolve_resampler(name: Hashable, resampler: Resampler) -> tuple[int, ...]: variable = self._variables.get(name, None) if variable is None: raise ValueError( f"Cannot chunk by resampler {resampler!r} for virtual variable {name!r}." ) if variable.ndim != 1: raise ValueError( f"chunks={resampler!r} only supported for 1D variables. " f"Received variable {name!r} with {variable.ndim} dimensions instead." ) newchunks = resampler.compute_chunks(variable, dim=name) if sum(newchunks) != variable.shape[0]: raise ValueError( f"Logic bug in rechunking variable {name!r} using {resampler!r}. " "New chunks tuple does not match size of data. Please open an issue." 
) return newchunks chunks_mapping_ints: Mapping[Any, T_ChunkDim] = { name: ( _resolve_resampler(name, chunks) if isinstance(chunks, Resampler) else chunks ) for name, chunks in chunks_mapping.items() } chunkmanager = guess_chunkmanager(chunked_array_type) if from_array_kwargs is None: from_array_kwargs = {} variables = { k: _maybe_chunk( k, v, chunks_mapping_ints, token, lock, name_prefix, inline_array=inline_array, chunked_array_type=chunkmanager, from_array_kwargs=from_array_kwargs.copy(), ) for k, v in self.variables.items() } return self._replace(variables) def _validate_indexers( self, indexers: Mapping[Any, Any], missing_dims: ErrorOptionsWithWarn = "raise" ) -> Iterator[tuple[Hashable, int | slice | np.ndarray | Variable]]: """Here we make sure + indexer has a valid keys + indexer is in a valid data type + string indexers are cast to the appropriate date type if the associated index is a DatetimeIndex or CFTimeIndex """ from xarray.core.dataarray import DataArray indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) # all indexers should be int, slice, np.ndarrays, or Variable for k, v in indexers.items(): if isinstance(v, int | slice | Variable) and not isinstance(v, bool): yield k, v elif isinstance(v, DataArray): yield k, v.variable elif isinstance(v, tuple): yield k, as_variable(v) elif isinstance(v, Dataset): raise TypeError("cannot use a Dataset as an indexer") elif isinstance(v, Sequence) and len(v) == 0: yield k, np.empty((0,), dtype="int64") else: if not is_duck_array(v): v = np.asarray(v) if v.dtype.kind in "US": index = self._indexes[k].to_pandas_index() if isinstance(index, pd.DatetimeIndex): v = duck_array_ops.astype(v, dtype="datetime64[ns]") elif isinstance(index, CFTimeIndex): v = _parse_array_of_cftime_strings(v, index.date_type) if v.ndim > 1: raise IndexError( "Unlabeled multi-dimensional array cannot be " f"used for indexing: {k}" ) yield k, v def _validate_interp_indexers( self, indexers: Mapping[Any, Any] ) -> Iterator[tuple[Hashable, Variable]]: """Variant of _validate_indexers to be used for interpolation""" for k, v in self._validate_indexers(indexers): if isinstance(v, Variable): yield k, v elif is_scalar(v): yield k, Variable((), v, attrs=self.coords[k].attrs) elif isinstance(v, np.ndarray): yield k, Variable(dims=(k,), data=v, attrs=self.coords[k].attrs) else: raise TypeError(type(v)) def _get_indexers_coords_and_indexes(self, indexers): """Extract coordinates and indexes from indexers. Only coordinate with a name different from any of self.variables will be attached. """ from xarray.core.dataarray import DataArray coords_list = [] for k, v in indexers.items(): if isinstance(v, DataArray): if v.dtype.kind == "b": if v.ndim != 1: # we only support 1-d boolean array raise ValueError( f"{v.ndim:d}d-boolean array is used for indexing along " f"dimension {k!r}, but only 1d boolean arrays are " "supported." ) # Make sure in case of boolean DataArray, its # coordinate also should be indexed. v_coords = v[v.values.nonzero()[0]].coords else: v_coords = v.coords coords_list.append(v_coords) # we don't need to call align() explicitly or check indexes for # alignment, because merge_variables already checks for exact alignment # between dimension coordinates coords, indexes = merge_coordinates_without_align(coords_list) assert_coordinate_consistent(self, coords) # silently drop the conflicted variables. 
attached_coords = {k: v for k, v in coords.items() if k not in self._variables} attached_indexes = { k: v for k, v in indexes.items() if k not in self._variables } return attached_coords, attached_indexes def isel( self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with each array indexed along the specified dimension(s). This method selects values from each array using its `__getitem__` method, except this method does not require knowing the order of each array's dimensions. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by integers, slice objects or arrays. indexer can be a integer, slice, array-like or DataArray. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. drop : bool, default: False If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : Dataset A new Dataset with the same contents as this dataset, except each array and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 92], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [93, 96, 91]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... 
) # A specific element from the dataset is selected >>> dataset.isel(student=1, test=0) Size: 68B Dimensions: () Coordinates: student >> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2)) >>> slice_of_data Size: 168B Dimensions: (student: 2, test: 2) Coordinates: * student (student) >> index_array = xr.DataArray([0, 2], dims="student") >>> indexed_data = dataset.isel(student=index_array) >>> indexed_data Size: 224B Dimensions: (student: 2, test: 3) Coordinates: * student (student) ` :func:`DataArray.isel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") if any(is_fancy_indexer(idx) for idx in indexers.values()): return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims) # Much faster algorithm for when all indexers are ints, slices, one-dimensional # lists, or zero or one-dimensional np.ndarray's indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) variables = {} dims: dict[Hashable, int] = {} coord_names = self._coord_names.copy() indexes, index_variables = isel_indexes(self.xindexes, indexers) for name, var in self._variables.items(): # preserve variable order if name in index_variables: var = index_variables[name] else: var_indexers = {k: v for k, v in indexers.items() if k in var.dims} if var_indexers: var = var.isel(var_indexers) if drop and var.ndim == 0 and name in coord_names: coord_names.remove(name) continue variables[name] = var dims.update(zip(var.dims, var.shape, strict=True)) return self._construct_direct( variables=variables, coord_names=coord_names, dims=dims, attrs=self._attrs, indexes=indexes, encoding=self._encoding, close=self._close, ) def _isel_fancy( self, indexers: Mapping[Any, Any], *, drop: bool, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: valid_indexers = dict(self._validate_indexers(indexers, missing_dims)) variables: dict[Hashable, Variable] = {} indexes, index_variables = isel_indexes(self.xindexes, valid_indexers) for name, var in self.variables.items(): if name in index_variables: new_var = index_variables[name] else: var_indexers = { k: v for k, v in valid_indexers.items() if k in var.dims } if var_indexers: new_var = var.isel(indexers=var_indexers) # drop scalar coordinates # https://github.com/pydata/xarray/issues/6554 if name in self.coords and drop and new_var.ndim == 0: continue else: new_var = var.copy(deep=False) if name not in indexes: new_var = new_var.to_base_variable() variables[name] = new_var coord_names = self._coord_names & variables.keys() selected = self._replace_with_new_dims(variables, coord_names, indexes) # Extract coordinates from indexers coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers) variables.update(coord_vars) indexes.update(new_indexes) coord_names = self._coord_names & variables.keys() | coord_vars.keys() return self._replace_with_new_dims(variables, coord_names, indexes=indexes) def sel( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, drop: bool = False, **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with each array indexed by tick labels along the specified dimension(s). In contrast to `Dataset.isel`, indexers for this method should use labels instead of integers. 
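
        As a minimal sketch (the variable and coordinate names here are purely
        illustrative), selecting by label rather than by position looks like:

        >>> ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [10, 20, 30]})
        >>> point = ds.sel(x=20)  # the label 20, not the third position
        >>> subset = ds.sel(x=slice(10, 20))  # label slices include both endpoints
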
Under the hood, this method is powered by using pandas's powerful Index objects. This makes label based indexing essentially just as fast as using integer indexing. It also means this method uses pandas's (well documented) logic for indexing. This means you can use string shortcuts for datetime indexes (e.g., '2000-01' to select all values in January 2000). It also means that slices are treated as inclusive of both the start and stop values, unlike normal Python indexing. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by scalars, slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be a dict-like object with keys matching index level names. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. drop : bool, optional If ``drop=True``, drop coordinates variables in `indexers` instead of making them scalar. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : Dataset A new Dataset with the same contents as this dataset, except each variable and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- :func:`Dataset.isel ` :func:`DataArray.sel ` :doc:`xarray-tutorial:intermediate/indexing/indexing` Tutorial material on indexing with Xarray objects :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic` Tutorial material on basics of indexing """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel") query_results = map_index_queries( self, indexers=indexers, method=method, tolerance=tolerance ) if drop: no_scalar_variables = {} for k, v in query_results.variables.items(): if v.dims: no_scalar_variables[k] = v elif k in self._coord_names: query_results.drop_coords.append(k) query_results.variables = no_scalar_variables result = self.isel(indexers=query_results.dim_indexers, drop=drop) return result._overwrite_indexes(*query_results.as_tuple()[1:]) def _shuffle(self, dim, *, indices: GroupIndices, chunks: T_Chunks) -> Self: # Shuffling is only different from `isel` for chunked arrays. # Extract them out, and treat them specially. The rest, we route through isel. # This makes it easy to ensure correct handling of indexes. 
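        # For example, with one dask-backed data variable and several in-memory
        # variables: the in-memory subset takes the plain isel path below, while
        # each chunked variable is shuffled with chunk-aware logic afterwards.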
is_chunked = { name: var for name, var in self._variables.items() if is_chunked_array(var._data) } subset = self[[name for name in self._variables if name not in is_chunked]] no_slices: list[list[int]] = [ ( list(range(*idx.indices(self.sizes[dim]))) if isinstance(idx, slice) else idx ) for idx in indices ] no_slices = [idx for idx in no_slices if idx] shuffled = ( subset if dim not in subset.dims else subset.isel({dim: np.concatenate(no_slices)}) ) for name, var in is_chunked.items(): shuffled[name] = var._shuffle( indices=no_slices, dim=dim, chunks=chunks, ) return shuffled def head( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with the first `n` values of each array for the specified dimension(s). Parameters ---------- indexers : dict or int, default: 5 A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. One of indexers or indexers_kwargs must be provided. **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Examples -------- >>> dates = pd.date_range(start="2023-01-01", periods=5) >>> pageviews = [1200, 1500, 900, 1800, 2000] >>> visitors = [800, 1000, 600, 1200, 1500] >>> dataset = xr.Dataset( ... { ... "pageviews": (("date"), pageviews), ... "visitors": (("date"), visitors), ... }, ... coords={"date": dates}, ... ) >>> busiest_days = dataset.sortby("pageviews", ascending=False) >>> busiest_days.head() Size: 120B Dimensions: (date: 5) Coordinates: * date (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 2023-01-03 Data variables: pageviews (date) int64 40B 2000 1800 1500 1200 900 visitors (date) int64 40B 1500 1200 1000 800 600 # Retrieve the 3 most busiest days in terms of pageviews >>> busiest_days.head(3) Size: 72B Dimensions: (date: 3) Coordinates: * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 Data variables: pageviews (date) int64 24B 2000 1800 1500 visitors (date) int64 24B 1500 1200 1000 # Using a dictionary to specify the number of elements for specific dimensions >>> busiest_days.head({"date": 3}) Size: 72B Dimensions: (date: 3) Coordinates: * date (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02 Data variables: pageviews (date) int64 24B 2000 1800 1500 visitors (date) int64 24B 1500 1200 1000 See Also -------- Dataset.tail Dataset.thin DataArray.head """ if not indexers_kwargs: if indexers is None: indexers = 5 if not isinstance(indexers, int) and not is_dict_like(indexers): raise TypeError("indexers must be either dict-like or a single integer") if isinstance(indexers, int): indexers = dict.fromkeys(self.dims, indexers) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "head") for k, v in indexers.items(): if not isinstance(v, int): raise TypeError( "expected integer type indexer for " f"dimension {k!r}, found {type(v)!r}" ) elif v < 0: raise ValueError( "expected positive integer as indexer " f"for dimension {k!r}, found {v}" ) indexers_slices = {k: slice(val) for k, val in indexers.items()} return self.isel(indexers_slices) def tail( self, indexers: Mapping[Any, int] | int | None = None, **indexers_kwargs: Any, ) -> Self: """Returns a new dataset with the last `n` values of each array for the specified dimension(s). Parameters ---------- indexers : dict or int, default: 5 A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. 
One of indexers or indexers_kwargs must be provided. **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Examples -------- >>> activity_names = ["Walking", "Running", "Cycling", "Swimming", "Yoga"] >>> durations = [30, 45, 60, 45, 60] # in minutes >>> energies = [150, 300, 250, 400, 100] # in calories >>> dataset = xr.Dataset( ... { ... "duration": (["activity"], durations), ... "energy_expenditure": (["activity"], energies), ... }, ... coords={"activity": activity_names}, ... ) >>> sorted_dataset = dataset.sortby("energy_expenditure", ascending=False) >>> sorted_dataset Size: 240B Dimensions: (activity: 5) Coordinates: * activity (activity) >> sorted_dataset.tail(3) Size: 144B Dimensions: (activity: 3) Coordinates: * activity (activity) >> sorted_dataset.tail({"activity": 3}) Size: 144B Dimensions: (activity: 3) Coordinates: * activity (activity) Self: """Returns a new dataset with each array indexed along every `n`-th value for the specified dimension(s) Parameters ---------- indexers : dict or int A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. One of indexers or indexers_kwargs must be provided. **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Examples -------- >>> x_arr = np.arange(0, 26) >>> x_arr array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]) >>> x = xr.DataArray( ... np.reshape(x_arr, (2, 13)), ... dims=("x", "y"), ... coords={"x": [0, 1], "y": np.arange(0, 13)}, ... ) >>> x_ds = xr.Dataset({"foo": x}) >>> x_ds Size: 328B Dimensions: (x: 2, y: 13) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12 Data variables: foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25 >>> x_ds.thin(3) Size: 88B Dimensions: (x: 1, y: 5) Coordinates: * x (x) int64 8B 0 * y (y) int64 40B 0 3 6 9 12 Data variables: foo (x, y) int64 40B 0 3 6 9 12 >>> x.thin({"x": 2, "y": 5}) Size: 24B array([[ 0, 5, 10]]) Coordinates: * x (x) int64 8B 0 * y (y) int64 24B 0 5 10 See Also -------- Dataset.head Dataset.tail DataArray.thin """ if ( not indexers_kwargs and not isinstance(indexers, int) and not is_dict_like(indexers) ): raise TypeError("indexers must be either dict-like or a single integer") if isinstance(indexers, int): indexers = dict.fromkeys(self.dims, indexers) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin") for k, v in indexers.items(): if not isinstance(v, int): raise TypeError( "expected integer type indexer for " f"dimension {k!r}, found {type(v)!r}" ) elif v < 0: raise ValueError( "expected positive integer as indexer " f"for dimension {k!r}, found {v}" ) elif v == 0: raise ValueError("step cannot be zero") indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()} return self.isel(indexers_slices) def broadcast_like( self, other: T_DataArrayOrSet, exclude: Iterable[Hashable] | None = None, ) -> Self: """Broadcast this DataArray against another Dataset or DataArray. This is equivalent to xr.broadcast(other, self)[1] Parameters ---------- other : Dataset or DataArray Object against which to broadcast this array. 
exclude : iterable of hashable, optional Dimensions that must not be broadcasted """ if exclude is None: exclude = set() else: exclude = set(exclude) args = align(other, self, join="outer", copy=False, exclude=exclude) dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude) return _broadcast_helper(args[1], exclude, dims_map, common_coords) def _reindex_callback( self, aligner: alignment.Aligner, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], indexes: dict[Hashable, Index], fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], ) -> Self: """Callback called from ``Aligner`` to create a new reindexed Dataset.""" new_variables = variables.copy() new_indexes = indexes.copy() # re-assign variable metadata for name, new_var in new_variables.items(): var = self._variables.get(name) if var is not None: new_var.attrs = var.attrs new_var.encoding = var.encoding # pass through indexes from excluded dimensions # no extra check needed for multi-coordinate indexes, potential conflicts # should already have been detected when aligning the indexes for name, idx in self._indexes.items(): var = self._variables[name] if set(var.dims) <= exclude_dims: new_indexes[name] = idx new_variables[name] = var if not dim_pos_indexers: # fast path for no reindexing necessary if set(new_indexes) - set(self._indexes): # this only adds new indexes and their coordinate variables reindexed = self._overwrite_indexes(new_indexes, new_variables) else: reindexed = self.copy(deep=aligner.copy) else: to_reindex = { k: v for k, v in self.variables.items() if k not in variables and k not in exclude_vars } reindexed_vars = alignment.reindex_variables( to_reindex, dim_pos_indexers, copy=aligner.copy, fill_value=fill_value, sparse=aligner.sparse, ) new_variables.update(reindexed_vars) new_coord_names = self._coord_names | set(new_indexes) reindexed = self._replace_with_new_dims( new_variables, new_coord_names, indexes=new_indexes ) reindexed.encoding = self.encoding return reindexed def reindex_like( self, other: T_Xarray, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, ) -> Self: """ Conform this object onto the indexes of another object, for indexes which the objects share. Missing values are filled with ``fill_value``. The default fill value is NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mismatched index values will be filled in with NaN, and any mismatched dimension names will simply be ignored. method : {None, "nearest", "pad", "ffill", "backfill", "bfill", None}, optional Method to use for filling index values from other not found in this dataset: - None (default): don't fill gaps - "pad" / "ffill": propagate last valid index value forward - "backfill" / "bfill": propagate next valid index value backward - "nearest": use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. 
Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the index’s type. copy : bool, default: True If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like maps variable names to fill values. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but coordinates from the other object. See Also -------- Dataset.reindex DataArray.reindex_like align """ return alignment.reindex_like( self, other=other, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def reindex( self, indexers: Mapping[Any, Any] | None = None, method: ReindexMethodOptions = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, **indexers_kwargs: Any, ) -> Self: """Conform this object onto a new set of indexes, filling in missing values with ``fill_value``. The default fill value is NaN. Parameters ---------- indexers : dict, optional Dictionary with keys given by dimension names and values given by arrays of coordinates tick labels. Any mismatched coordinate values will be filled in with NaN, and any mismatched dimension names will simply be ignored. One of indexers or indexers_kwargs must be provided. method : {None, "nearest", "pad", "ffill", "backfill", "bfill", None}, optional Method to use for filling index values in ``indexers`` not found in this dataset: - None (default): don't fill gaps - "pad" / "ffill": propagate last valid index value forward - "backfill" / "bfill": propagate next valid index value backward - "nearest": use nearest valid index value tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like must be the same size as the index and its dtype must exactly match the index’s type. copy : bool, default: True If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. sparse : bool, default: False use sparse-array. **indexers_kwargs : {dim: indexer, ...}, optional Keyword arguments in the same form as ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but replaced coordinates. See Also -------- Dataset.reindex_like align pandas.Index.get_indexer Examples -------- Create a dataset with some fictional data. >>> x = xr.Dataset( ... { ... "temperature": ("station", 20 * np.random.rand(4)), ... "pressure": ("station", 500 * np.random.rand(4)), ... }, ... 
coords={"station": ["boston", "nyc", "seattle", "denver"]}, ... ) >>> x Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.indexes Indexes: station Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station') Create a new index and reindex the dataset. By default values in the new index that do not have corresponding records in the dataset are assigned `NaN`. >>> new_index = ["boston", "austin", "seattle", "lincoln"] >>> x.reindex({"station": new_index}) Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex({"station": new_index}, fill_value=0) Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex( ... {"station": new_index}, fill_value={"temperature": 0, "pressure": 100} ... ) Size: 176B Dimensions: (station: 4) Coordinates: * station (station) >> x.reindex({"station": new_index}, method="nearest") Traceback (most recent call last): ... raise ValueError('index must be monotonic increasing or decreasing') ValueError: index must be monotonic increasing or decreasing To further illustrate the filling functionality in reindex, we will create a dataset with a monotonically increasing index (for example, a sequence of dates). >>> x2 = xr.Dataset( ... { ... "temperature": ( ... "time", ... [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12], ... ), ... "pressure": ("time", 500 * np.random.rand(6)), ... }, ... coords={"time": pd.date_range("01/01/2019", periods=6, freq="D")}, ... ) >>> x2 Size: 144B Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2019-01-01 2019-01-02 ... 2019-01-06 Data variables: temperature (time) float64 48B 15.57 12.77 nan 0.3081 16.59 15.12 pressure (time) float64 48B 481.8 191.7 395.9 264.4 284.0 462.8 Suppose we decide to expand the dataset to cover a wider date range. >>> time_index2 = pd.date_range("12/29/2018", periods=10, freq="D") >>> x2.reindex({"time": time_index2}) Size: 240B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 Data variables: temperature (time) float64 80B nan nan nan 15.57 ... 0.3081 16.59 15.12 nan pressure (time) float64 80B nan nan nan 481.8 ... 264.4 284.0 462.8 nan The index entries that did not have a value in the original data frame (for example, `2018-12-29`) are by default filled with NaN. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the `NaN` values, pass `bfill` as an argument to the `method` keyword. >>> x3 = x2.reindex({"time": time_index2}, method="bfill") >>> x3 Size: 240B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07 Data variables: temperature (time) float64 80B 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan pressure (time) float64 80B 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`) will not be filled by any of the value propagation schemes. 
>>> x2.where(x2.temperature.isnull(), drop=True) Size: 24B Dimensions: (time: 1) Coordinates: * time (time) datetime64[ns] 8B 2019-01-03 Data variables: temperature (time) float64 8B nan pressure (time) float64 8B 395.9 >>> x3.where(x3.temperature.isnull(), drop=True) Size: 48B Dimensions: (time: 2) Coordinates: * time (time) datetime64[ns] 16B 2019-01-03 2019-01-07 Data variables: temperature (time) float64 16B nan nan pressure (time) float64 16B 395.9 nan This is because filling while reindexing does not look at dataset values, but only compares the original and desired indexes. If you do want to fill in the `NaN` values present in the original dataset, use the :py:meth:`~Dataset.fillna()` method. """ indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex") return alignment.reindex( self, indexers=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def _reindex( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, copy: bool = True, fill_value: Any = xrdtypes.NA, sparse: bool = False, **indexers_kwargs: Any, ) -> Self: """ Same as reindex but supports sparse option. """ indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex") return alignment.reindex( self, indexers=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, sparse=sparse, ) def interp( self, coords: Mapping[Any, Any] | None = None, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, method_non_numeric: str = "nearest", **coords_kwargs: Any, ) -> Self: """ Interpolate a Dataset onto new coordinates. Performs univariate or multivariate interpolation of a Dataset onto new coordinates, utilizing either NumPy or SciPy interpolation routines. Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant. Parameters ---------- coords : dict, optional Mapping from dimension names to the new coordinates. New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordinates, their dimensions are used for the broadcasting. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs : dict, optional Additional keyword arguments passed to the interpolator. Valid options and their behavior depend which interpolant is used. method_non_numeric : {"nearest", "pad", "ffill", "backfill", "bfill"}, optional Method for non-numeric types. Passed on to :py:meth:`Dataset.reindex`. ``"nearest"`` is used by default. **coords_kwargs : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated : Dataset New dataset on the new coordinates. Notes ----- - SciPy is required for certain interpolation methods. - When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. 
- The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :mod:`scipy.interpolate` :doc:`xarray-tutorial:fundamentals/02.2_manipulating_dimensions` Tutorial material on manipulating data resolution using :py:func:`~xarray.Dataset.interp` Examples -------- >>> ds = xr.Dataset( ... data_vars={ ... "a": ("x", [5, 7, 4]), ... "b": ( ... ("x", "y"), ... [[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]], ... ), ... }, ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]}, ... ) >>> ds Size: 176B Dimensions: (x: 3, y: 4) Coordinates: * x (x) int64 24B 0 1 2 * y (y) int64 32B 10 12 14 16 Data variables: a (x) int64 24B 5 7 4 b (x, y) float64 96B 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0 1D interpolation with the default method (linear): >>> ds.interp(x=[0, 0.75, 1.25, 1.75]) Size: 224B Dimensions: (x: 4, y: 4) Coordinates: * y (y) int64 32B 10 12 14 16 * x (x) float64 32B 0.0 0.75 1.25 1.75 Data variables: a (x) float64 32B 5.0 6.5 6.25 4.75 b (x, y) float64 128B 1.0 4.0 2.0 nan 1.75 ... nan 5.0 nan 5.25 nan 1D interpolation with a different method: >>> ds.interp(x=[0, 0.75, 1.25, 1.75], method="nearest") Size: 224B Dimensions: (x: 4, y: 4) Coordinates: * y (y) int64 32B 10 12 14 16 * x (x) float64 32B 0.0 0.75 1.25 1.75 Data variables: a (x) float64 32B 5.0 7.0 7.0 4.0 b (x, y) float64 128B 1.0 4.0 2.0 9.0 2.0 7.0 ... nan 6.0 nan 5.0 8.0 1D extrapolation: >>> ds.interp( ... x=[1, 1.5, 2.5, 3.5], ... method="linear", ... kwargs={"fill_value": "extrapolate"}, ... ) Size: 224B Dimensions: (x: 4, y: 4) Coordinates: * y (y) int64 32B 10 12 14 16 * x (x) float64 32B 1.0 1.5 2.5 3.5 Data variables: a (x) float64 32B 7.0 5.5 2.5 -0.5 b (x, y) float64 128B 2.0 7.0 6.0 nan 4.0 ... nan 12.0 nan 3.5 nan 2D interpolation: >>> ds.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear") Size: 184B Dimensions: (x: 4, y: 3) Coordinates: * x (x) float64 32B 0.0 0.75 1.25 1.75 * y (y) int64 24B 11 13 15 Data variables: a (x) float64 32B 5.0 6.5 6.25 4.75 b (x, y) float64 96B 2.5 3.0 nan 4.0 5.625 ... 
nan nan nan nan nan """ from xarray.core import missing if kwargs is None: kwargs = {} coords = either_dict_or_kwargs(coords, coords_kwargs, "interp") indexers = dict(self._validate_interp_indexers(coords)) obj = self if assume_sorted else self.sortby(list(coords)) def maybe_variable(obj, k): # workaround to get variable for dimension without coordinate. try: return obj._variables[k] except KeyError: return as_variable((k, range(obj.sizes[k]))) def _validate_interp_indexer(x, new_x): # In the case of datetimes, the restrictions placed on indexers # used with interp are stronger than those which are placed on # isel, so we need an additional check after _validate_indexers. if _contains_datetime_like_objects( x ) and not _contains_datetime_like_objects(new_x): raise TypeError( "When interpolating over a datetime-like " "coordinate, the coordinates to " "interpolate to must be either datetime " "strings or datetimes. " f"Instead got\n{new_x}" ) return x, new_x validated_indexers = { k: _validate_interp_indexer(maybe_variable(obj, k), v) for k, v in indexers.items() } # optimization: subset to coordinate range of the target index if method in ["linear", "nearest"]: for k, v in validated_indexers.items(): obj, newidx = missing._localize(obj, {k: v}) validated_indexers[k] = newidx[k] has_chunked_array = bool( any(is_chunked_array(v._data) for v in obj._variables.values()) ) if has_chunked_array: # optimization: create dask coordinate arrays once per Dataset # rather than once per Variable when dask.array.unify_chunks is called later # GH4739 dask_indexers = { k: (index.to_base_variable().chunk(), dest.to_base_variable().chunk()) for k, (index, dest) in validated_indexers.items() } variables: dict[Hashable, Variable] = {} reindex_vars: list[Hashable] = [] for name, var in obj._variables.items(): if name in indexers: continue use_indexers = ( dask_indexers if is_duck_dask_array(var._data) else validated_indexers ) dtype_kind = var.dtype.kind if dtype_kind in "uifc": # For normal number types do the interpolation: var_indexers = {k: v for k, v in use_indexers.items() if k in var.dims} variables[name] = missing.interp(var, var_indexers, method, **kwargs) elif dtype_kind in "ObU" and (use_indexers.keys() & var.dims): if all(var.sizes[d] == 1 for d in (use_indexers.keys() & var.dims)): # Broadcastable, can be handled quickly without reindex: to_broadcast = (var.squeeze(),) + tuple( dest for _, dest in use_indexers.values() ) variables[name] = broadcast_variables(*to_broadcast)[0].copy( deep=True ) else: # For types that we do not understand do stepwise # interpolation to avoid modifying the elements. 
# reindex the variable instead because it supports # booleans and objects and retains the dtype but inside # this loop there might be some duplicate code that slows it # down, therefore collect these signals and run it later: reindex_vars.append(name) elif all(d not in indexers for d in var.dims): # For anything else we can only keep variables if they # are not dependent on any coords that are being # interpolated along: variables[name] = var if reindex_vars and ( reindex_indexers := { k: v for k, (_, v) in validated_indexers.items() if v.dims == (k,) } ): reindexed = alignment.reindex( obj[reindex_vars], indexers=reindex_indexers, method=method_non_numeric, exclude_vars=variables.keys(), ) indexes = dict(reindexed._indexes) variables.update(reindexed.variables) else: # Get the indexes that are not being interpolated along indexes = {k: v for k, v in obj._indexes.items() if k not in indexers} # Get the coords that also exist in the variables: coord_names = obj._coord_names & variables.keys() selected = self._replace_with_new_dims( variables.copy(), coord_names, indexes=indexes ) # Attach indexer as coordinate for k, v in indexers.items(): assert isinstance(v, Variable) if v.dims == (k,): index = PandasIndex(v, k, coord_dtype=v.dtype) index_vars = index.create_variables({k: v}) indexes[k] = index variables.update(index_vars) else: variables[k] = v # Extract coordinates from indexers coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords) variables.update(coord_vars) indexes.update(new_indexes) coord_names = obj._coord_names & variables.keys() | coord_vars.keys() return self._replace_with_new_dims(variables, coord_names, indexes=indexes) def interp_like( self, other: T_Xarray, method: InterpOptions = "linear", assume_sorted: bool = False, kwargs: Mapping[str, Any] | None = None, method_non_numeric: str = "nearest", ) -> Self: """Interpolate this object onto the coordinates of another object. Performs univariate or multivariate interpolation of a Dataset onto new coordinates, utilizing either NumPy or SciPy interpolation routines. Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. Missing values are skipped. method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \ "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" } Interpolation method to use (see descriptions above). assume_sorted : bool, default: False If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs : dict, optional Additional keyword arguments passed to the interpolator. Valid options and their behavior depend which interpolant is use method_non_numeric : {"nearest", "pad", "ffill", "backfill", "bfill"}, optional Method for non-numeric types. Passed on to :py:meth:`Dataset.reindex`. ``"nearest"`` is used by default. Returns ------- interpolated : Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. Notes ----- - scipy is required. - If the dataset has object-type coordinates, reindex is used for these coordinates instead of the interpolation. 
- When interpolating along multiple dimensions with methods `linear` and `nearest`, the process attempts to decompose the interpolation into independent interpolations along one dimension at a time. - The specific interpolation method and dimensionality determine which interpolant is used: 1. **Interpolation along one dimension of 1D data (`method='linear'`)** - Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`. 2. **Interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"} use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp` (as in the case of `method='linear'` for 1D data). - If `method='polynomial'`, the `order` keyword argument must also be provided. 3. **Special interpolants for interpolation along one dimension of N-dimensional data (N β‰₯ 1)** - Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used: - `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator` - `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator` - `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator` - `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator` (`makima` is handled by passing the `makima` flag). 4. **Interpolation along multiple dimensions of multi-dimensional data** - Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear", "cubic", "quintic", "pchip"}. See Also -------- :func:`Dataset.interp` :func:`Dataset.reindex_like` :mod:`scipy.interpolate` """ if kwargs is None: kwargs = {} # pick only dimension coordinates with a single index coords: dict[Hashable, Variable] = {} other_indexes = other.xindexes for dim in self.dims: other_dim_coords = other_indexes.get_all_coords(dim, errors="ignore") if len(other_dim_coords) == 1: coords[dim] = other_dim_coords[dim] numeric_coords: dict[Hashable, Variable] = {} object_coords: dict[Hashable, Variable] = {} for k, v in coords.items(): if v.dtype.kind in "uifcMm": numeric_coords[k] = v else: object_coords[k] = v ds = self if object_coords: # We do not support interpolation along object coordinate. # reindex instead. 
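            # For example, a string-valued "station" coordinate on the other
            # object is matched exactly via reindex here, while the numeric
            # coordinates are interpolated in the interp() call below.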
ds = self.reindex(object_coords) return ds.interp( coords=numeric_coords, method=method, assume_sorted=assume_sorted, kwargs=kwargs, method_non_numeric=method_non_numeric, ) # Helper methods for rename() def _rename_vars( self, name_dict, dims_dict ) -> tuple[dict[Hashable, Variable], set[Hashable]]: variables = {} coord_names = set() for k, v in self.variables.items(): var = v.copy(deep=False) var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims) name = name_dict.get(k, k) if name in variables: raise ValueError(f"the new name {name!r} conflicts") variables[name] = var if k in self._coord_names: coord_names.add(name) return variables, coord_names def _rename_dims(self, name_dict: Mapping[Any, Hashable]) -> dict[Hashable, int]: return {name_dict.get(k, k): v for k, v in self.sizes.items()} def _rename_indexes( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable] ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: if not self._indexes: return {}, {} indexes = {} variables = {} for index, coord_names in self.xindexes.group_by_index(): new_index = index.rename(name_dict, dims_dict) new_coord_names = [name_dict.get(k, k) for k in coord_names] indexes.update(dict.fromkeys(new_coord_names, new_index)) new_index_vars = new_index.create_variables( { new: self._variables[old] for old, new in zip(coord_names, new_coord_names, strict=True) } ) variables.update(new_index_vars) return indexes, variables def _rename_all( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable] ) -> tuple[ dict[Hashable, Variable], set[Hashable], dict[Hashable, int], dict[Hashable, Index], ]: variables, coord_names = self._rename_vars(name_dict, dims_dict) dims = self._rename_dims(dims_dict) indexes, index_vars = self._rename_indexes(name_dict, dims_dict) variables = {k: index_vars.get(k, v) for k, v in variables.items()} return variables, coord_names, dims, indexes def _rename( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Also used internally by DataArray so that the warning (if any) is raised at the right stack level. """ name_dict = either_dict_or_kwargs(name_dict, names, "rename") for k in name_dict.keys(): if k not in self and k not in self.dims: raise ValueError( f"cannot rename {k!r} because it is not a " "variable or dimension in this dataset" ) create_dim_coord = False new_k = name_dict[k] if k == new_k: continue # Same name, nothing to do if k in self.dims and new_k in self._coord_names: coord_dims = self._variables[name_dict[k]].dims if coord_dims == (k,): create_dim_coord = True elif k in self._coord_names and new_k in self.dims: coord_dims = self._variables[k].dims if coord_dims == (new_k,): create_dim_coord = True if create_dim_coord: warnings.warn( f"rename {k!r} to {name_dict[k]!r} does not create an index " "anymore. Try using swap_dims instead or use set_index " "after rename to create an indexed coordinate.", UserWarning, stacklevel=3, ) variables, coord_names, dims, indexes = self._rename_all( name_dict=name_dict, dims_dict=name_dict ) return self._replace(variables, coord_names, dims=dims, indexes=indexes) def rename( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Returns a new object with renamed variables, coordinates and dimensions. Parameters ---------- name_dict : dict-like, optional Dictionary whose keys are current variable, coordinate or dimension names and whose values are the desired names. **names : optional Keyword form of ``name_dict``. 
One of name_dict or names must be provided. Returns ------- renamed : Dataset Dataset with renamed variables, coordinates and dimensions. See Also -------- Dataset.swap_dims Dataset.rename_vars Dataset.rename_dims DataArray.rename """ return self._rename(name_dict=name_dict, **names) def rename_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims: Hashable, ) -> Self: """Returns a new object with renamed dimensions only. Parameters ---------- dims_dict : dict-like, optional Dictionary whose keys are current dimension names and whose values are the desired names. The desired names must not be the name of an existing dimension or Variable in the Dataset. **dims : optional Keyword form of ``dims_dict``. One of dims_dict or dims must be provided. Returns ------- renamed : Dataset Dataset with renamed dimensions. See Also -------- Dataset.swap_dims Dataset.rename Dataset.rename_vars DataArray.rename """ dims_dict = either_dict_or_kwargs(dims_dict, dims, "rename_dims") for k, v in dims_dict.items(): if k not in self.dims: raise ValueError( f"cannot rename {k!r} because it is not found " f"in the dimensions of this dataset {tuple(self.dims)}" ) if v in self.dims or v in self: raise ValueError( f"Cannot rename {k} to {v} because {v} already exists. " "Try using swap_dims instead." ) variables, coord_names, sizes, indexes = self._rename_all( name_dict={}, dims_dict=dims_dict ) return self._replace(variables, coord_names, dims=sizes, indexes=indexes) def rename_vars( self, name_dict: Mapping[Any, Hashable] | None = None, **names: Hashable, ) -> Self: """Returns a new object with renamed variables including coordinates Parameters ---------- name_dict : dict-like, optional Dictionary whose keys are current variable or coordinate names and whose values are the desired names. **names : optional Keyword form of ``name_dict``. One of name_dict or names must be provided. Returns ------- renamed : Dataset Dataset with renamed variables including coordinates See Also -------- Dataset.swap_dims Dataset.rename Dataset.rename_dims DataArray.rename """ name_dict = either_dict_or_kwargs(name_dict, names, "rename_vars") for k in name_dict: if k not in self: raise ValueError( f"cannot rename {k!r} because it is not a " "variable or coordinate in this dataset" ) variables, coord_names, dims, indexes = self._rename_all( name_dict=name_dict, dims_dict={} ) return self._replace(variables, coord_names, dims=dims, indexes=indexes) def swap_dims( self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs ) -> Self: """Returns a new object with swapped dimensions. Parameters ---------- dims_dict : dict-like Dictionary whose keys are current dimension names and whose values are new names. **dims_kwargs : {existing_dim: new_dim, ...}, optional The keyword arguments form of ``dims_dict``. One of dims_dict or dims_kwargs must be provided. Returns ------- swapped : Dataset Dataset with swapped dimensions. Examples -------- >>> ds = xr.Dataset( ... data_vars={"a": ("x", [5, 7]), "b": ("x", [0.1, 2.4])}, ... coords={"x": ["a", "b"], "y": ("x", [0, 1])}, ... ) >>> ds Size: 56B Dimensions: (x: 2) Coordinates: * x (x) >> ds.swap_dims({"x": "y"}) Size: 56B Dimensions: (y: 2) Coordinates: x (y) >> ds.swap_dims({"x": "z"}) Size: 56B Dimensions: (z: 2) Coordinates: x (z) Self: """Return a new object with an additional axis (or axes) inserted at the corresponding position in the array shape. The new object is a view into the underlying array, not a copy. 
If dim is already a scalar coordinate, it will be promoted to a 1D coordinate consisting of a single value. The automatic creation of indexes to back new 1D coordinate variables controlled by the create_index_for_new_dim kwarg. Parameters ---------- dim : hashable, sequence of hashable, mapping, or None Dimensions to include on the new variable. If provided as hashable or sequence of hashable, then dimensions are inserted with length 1. If provided as a mapping, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or array-like (giving the coordinates of the new dimensions). axis : int, sequence of int, or None, default: None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a sequence of integers is passed, multiple axes are inserted. In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. create_index_for_new_dim : bool, default: True Whether to create new ``PandasIndex`` objects when the object being expanded contains scalar variables with names in ``dim``. **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the dim kwarg and will only be used if dim is None. Returns ------- expanded : Dataset This object, but with additional dimension(s). Examples -------- >>> dataset = xr.Dataset({"temperature": ([], 25.0)}) >>> dataset Size: 8B Dimensions: () Data variables: temperature float64 8B 25.0 # Expand the dataset with a new dimension called "time" >>> dataset.expand_dims(dim="time") Size: 8B Dimensions: (time: 1) Dimensions without coordinates: time Data variables: temperature (time) float64 8B 25.0 # 1D data >>> temperature_1d = xr.DataArray([25.0, 26.5, 24.8], dims="x") >>> dataset_1d = xr.Dataset({"temperature": temperature_1d}) >>> dataset_1d Size: 24B Dimensions: (x: 3) Dimensions without coordinates: x Data variables: temperature (x) float64 24B 25.0 26.5 24.8 # Expand the dataset with a new dimension called "time" using axis argument >>> dataset_1d.expand_dims(dim="time", axis=0) Size: 24B Dimensions: (time: 1, x: 3) Dimensions without coordinates: time, x Data variables: temperature (time, x) float64 24B 25.0 26.5 24.8 # 2D data >>> temperature_2d = xr.DataArray(np.random.rand(3, 4), dims=("y", "x")) >>> dataset_2d = xr.Dataset({"temperature": temperature_2d}) >>> dataset_2d Size: 96B Dimensions: (y: 3, x: 4) Dimensions without coordinates: y, x Data variables: temperature (y, x) float64 96B 0.5488 0.7152 0.6028 ... 0.7917 0.5289 # Expand the dataset with a new dimension called "time" using axis argument >>> dataset_2d.expand_dims(dim="time", axis=2) Size: 96B Dimensions: (y: 3, x: 4, time: 1) Dimensions without coordinates: y, x, time Data variables: temperature (y, x, time) float64 96B 0.5488 0.7152 0.6028 ... 
0.7917 0.5289 # Expand a scalar variable along a new dimension of the same name with and without creating a new index >>> ds = xr.Dataset(coords={"x": 0}) >>> ds Size: 8B Dimensions: () Coordinates: x int64 8B 0 Data variables: *empty* >>> ds.expand_dims("x") Size: 8B Dimensions: (x: 1) Coordinates: * x (x) int64 8B 0 Data variables: *empty* >>> ds.expand_dims("x").indexes Indexes: x Index([0], dtype='int64', name='x') >>> ds.expand_dims("x", create_index_for_new_dim=False).indexes Indexes: *empty* See Also -------- DataArray.expand_dims """ if dim is None: pass elif isinstance(dim, Mapping): # We're later going to modify dim in place; don't tamper with # the input dim = dict(dim) elif isinstance(dim, int): raise TypeError( "dim should be hashable or sequence of hashables or mapping" ) elif isinstance(dim, str) or not isinstance(dim, Sequence): dim = {dim: 1} elif isinstance(dim, Sequence): if len(dim) != len(set(dim)): raise ValueError("dims should not contain duplicate values.") dim = dict.fromkeys(dim, 1) dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims") assert isinstance(dim, MutableMapping) if axis is None: axis = list(range(len(dim))) elif not isinstance(axis, Sequence): axis = [axis] if len(dim) != len(axis): raise ValueError("lengths of dim and axis should be identical.") for d in dim: if d in self.dims: raise ValueError(f"Dimension {d} already exists.") if d in self._variables and not utils.is_scalar(self._variables[d]): raise ValueError(f"{d} already exists as coordinate or variable name.") variables: dict[Hashable, Variable] = {} indexes: dict[Hashable, Index] = dict(self._indexes) coord_names = self._coord_names.copy() # If dim is a dict, then ensure that the values are either integers # or iterables. for k, v in dim.items(): if hasattr(v, "__iter__"): # If the value for the new dimension is an iterable, then # save the coordinates to the variables dict, and set the # value within the dim dict to the length of the iterable # for later use. if create_index_for_new_dim: index = PandasIndex(v, k) indexes[k] = index name_and_new_1d_var = index.create_variables() else: name_and_new_1d_var = {k: Variable(data=v, dims=k)} variables.update(name_and_new_1d_var) coord_names.add(k) dim[k] = variables[k].size elif isinstance(v, int): pass # Do nothing if the dimensions value is just an int else: raise TypeError( f"The value of new dimension {k} must be an iterable or an int" ) for k, v in self._variables.items(): if k not in dim: if k in coord_names: # Do not change coordinates variables[k] = v else: result_ndim = len(v.dims) + len(axis) for a in axis: if a < -result_ndim or result_ndim - 1 < a: raise IndexError( f"Axis {a} of variable {k} is out of bounds of the " f"expanded dimension size {result_ndim}" ) axis_pos = [a if a >= 0 else result_ndim + a for a in axis] if len(axis_pos) != len(set(axis_pos)): raise ValueError("axis should not contain duplicate values") # We need to sort them to make sure `axis` equals to the # axis positions of the result array. zip_axis_dim = sorted(zip(axis_pos, dim.items(), strict=True)) all_dims = list(zip(v.dims, v.shape, strict=True)) for d, c in zip_axis_dim: all_dims.insert(d, c) variables[k] = v.set_dims(dict(all_dims)) elif k not in variables: if k in coord_names and create_index_for_new_dim: # If dims includes a label of a non-dimension coordinate, # it will be promoted to a 1D coordinate with a single value. 
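                    # For example, ds.expand_dims("station") on a dataset whose
                    # "station" coordinate is scalar promotes it to a length-1
                    # dimension coordinate backed by a new default index.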
index, index_vars = create_default_index_implicit(v.set_dims(k)) indexes[k] = index variables.update(index_vars) else: if create_index_for_new_dim: warnings.warn( f"No index created for dimension {k} because variable {k} is not a coordinate. " f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.", UserWarning, stacklevel=2, ) # create 1D variable without creating a new index new_1d_var = v.set_dims(k) variables.update({k: new_1d_var}) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def set_index( self, indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None, append: bool = False, **indexes_kwargs: Hashable | Sequence[Hashable], ) -> Self: """Set Dataset (multi-)indexes using one or more existing coordinates or variables. This legacy method is limited to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See :py:meth:`~Dataset.set_xindex` for setting a pandas or a custom Xarray-compatible index from one or more arbitrary coordinates. Parameters ---------- indexes : {dim: index, ...} Mapping from names matching dimensions and values given by (lists of) the names of existing coordinates or variables to set as new (multi-)index. append : bool, default: False If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es) (default). **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. Returns ------- obj : Dataset Another dataset, with this dataset's data but replaced coordinates. Examples -------- >>> arr = xr.DataArray( ... data=np.ones((2, 3)), ... dims=["x", "y"], ... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ... ) >>> ds = xr.Dataset({"v": arr}) >>> ds Size: 104B Dimensions: (x: 2, y: 3) Coordinates: * x (x) int64 16B 0 1 * y (y) int64 24B 0 1 2 a (x) int64 16B 3 4 Data variables: v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 >>> ds.set_index(x="a") Size: 88B Dimensions: (x: 2, y: 3) Coordinates: * x (x) int64 16B 3 4 * y (y) int64 24B 0 1 2 Data variables: v (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0 See Also -------- Dataset.reset_index Dataset.set_xindex Dataset.swap_dims """ dim_coords = either_dict_or_kwargs(indexes, indexes_kwargs, "set_index") new_indexes: dict[Hashable, Index] = {} new_variables: dict[Hashable, Variable] = {} drop_indexes: set[Hashable] = set() drop_variables: set[Hashable] = set() replace_dims: dict[Hashable, Hashable] = {} all_var_names: set[Hashable] = set() for dim, _var_names in dim_coords.items(): if isinstance(_var_names, str) or not isinstance(_var_names, Sequence): var_names = [_var_names] else: var_names = list(_var_names) invalid_vars = set(var_names) - set(self._variables) if invalid_vars: raise ValueError( ", ".join([str(v) for v in invalid_vars]) + " variable(s) do not exist" ) all_var_names.update(var_names) drop_variables.update(var_names) # drop any pre-existing index involved and its corresponding coordinates index_coord_names = self.xindexes.get_all_coords(dim, errors="ignore") all_index_coord_names = set(index_coord_names) for k in var_names: all_index_coord_names.update( self.xindexes.get_all_coords(k, errors="ignore") ) drop_indexes.update(all_index_coord_names) drop_variables.update(all_index_coord_names) if len(var_names) == 1 and (not append or dim not in self._indexes): var_name = var_names[0] var = self._variables[var_name] # an error with a better message will be raised for scalar variables # when creating the 
PandasIndex if var.ndim > 0 and var.dims != (dim,): raise ValueError( f"dimension mismatch: try setting an index for dimension {dim!r} with " f"variable {var_name!r} that has dimensions {var.dims}" ) idx = PandasIndex.from_variables({dim: var}, options={}) idx_vars = idx.create_variables({var_name: var}) # trick to preserve coordinate order in this case if dim in self._coord_names: drop_variables.remove(dim) else: if append: current_variables = { k: self._variables[k] for k in index_coord_names } else: current_variables = {} idx, idx_vars = PandasMultiIndex.from_variables_maybe_expand( dim, current_variables, {k: self._variables[k] for k in var_names}, ) for n in idx.index.names: replace_dims[n] = dim new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) # re-add deindexed coordinates (convert to base variables) for k in drop_variables: if ( k not in new_variables and k not in all_var_names and k in self._coord_names ): new_variables[k] = self._variables[k].to_base_variable() indexes_: dict[Any, Index] = { k: v for k, v in self._indexes.items() if k not in drop_indexes } indexes_.update(new_indexes) variables = { k: v for k, v in self._variables.items() if k not in drop_variables } variables.update(new_variables) # update dimensions if necessary, GH: 3512 for k, v in variables.items(): if any(d in replace_dims for d in v.dims): new_dims = [replace_dims.get(d, d) for d in v.dims] variables[k] = v._replace(dims=new_dims) coord_names = self._coord_names - drop_variables | set(new_variables) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes_ ) def reset_index( self, dims_or_levels: Hashable | Sequence[Hashable], *, drop: bool = False, ) -> Self: """Reset the specified index(es) or multi-index level(s). This legacy method is specific to pandas (multi-)indexes and 1-dimensional "dimension" coordinates. See the more generic :py:meth:`~Dataset.drop_indexes` and :py:meth:`~Dataset.set_xindex` method to respectively drop and set pandas or custom indexes for arbitrary coordinates. Parameters ---------- dims_or_levels : Hashable or Sequence of Hashable Name(s) of the dimension(s) and/or multi-index level(s) that will be reset. drop : bool, default: False If True, remove the specified indexes and/or multi-index levels instead of extracting them as new coordinates (default: False). Returns ------- obj : Dataset Another dataset, with this dataset's data but replaced coordinates. 
See Also -------- Dataset.set_index Dataset.set_xindex Dataset.drop_indexes """ if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence): dims_or_levels = [dims_or_levels] invalid_coords = set(dims_or_levels) - set(self._indexes) if invalid_coords: raise ValueError( f"{tuple(invalid_coords)} are not coordinates with an index" ) drop_indexes: set[Hashable] = set() drop_variables: set[Hashable] = set() seen: set[Index] = set() new_indexes: dict[Hashable, Index] = {} new_variables: dict[Hashable, Variable] = {} def drop_or_convert(var_names): if drop: drop_variables.update(var_names) else: base_vars = { k: self._variables[k].to_base_variable() for k in var_names } new_variables.update(base_vars) for name in dims_or_levels: index = self._indexes[name] if index in seen: continue seen.add(index) idx_var_names = set(self.xindexes.get_all_coords(name)) drop_indexes.update(idx_var_names) if isinstance(index, PandasMultiIndex): # special case for pd.MultiIndex level_names = index.index.names keep_level_vars = { k: self._variables[k] for k in level_names if k not in dims_or_levels } if index.dim not in dims_or_levels and keep_level_vars: # do not drop the multi-index completely # instead replace it by a new (multi-)index with dropped level(s) idx = index.keep_levels(keep_level_vars) idx_vars = idx.create_variables(keep_level_vars) new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) if not isinstance(idx, PandasMultiIndex): # multi-index reduced to single index # backward compatibility: unique level coordinate renamed to dimension drop_variables.update(keep_level_vars) drop_or_convert( [k for k in level_names if k not in keep_level_vars] ) else: # always drop the multi-index dimension variable drop_variables.add(index.dim) drop_or_convert(level_names) else: drop_or_convert(idx_var_names) indexes = {k: v for k, v in self._indexes.items() if k not in drop_indexes} indexes.update(new_indexes) variables = { k: v for k, v in self._variables.items() if k not in drop_variables } variables.update(new_variables) coord_names = self._coord_names - drop_variables return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def set_xindex( self, coord_names: str | Sequence[Hashable], index_cls: type[Index] | None = None, **options, ) -> Self: """Set a new, Xarray-compatible index from one or more existing coordinate(s). Parameters ---------- coord_names : str or list Name(s) of the coordinate(s) used to build the index. If several names are given, their order matters. index_cls : subclass of :class:`~xarray.indexes.Index`, optional The type of index to create. By default, try setting a ``PandasIndex`` if ``len(coord_names) == 1``, otherwise a ``PandasMultiIndex``. **options Options passed to the index constructor. Returns ------- obj : Dataset Another dataset, with this dataset's data and with a new index. 
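        Examples
        --------
        A minimal sketch (the small dataset below is illustrative)::

            ds = xr.Dataset({"v": ("x", [1, 2, 3])}, coords={"a": ("x", [10, 20, 30])})
            indexed = ds.set_xindex("a")  # builds a default PandasIndex over coordinate "a"
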
""" # the Sequence check is required for mypy if is_scalar(coord_names) or not isinstance(coord_names, Sequence): coord_names = [coord_names] if index_cls is None: if len(coord_names) == 1: index_cls = PandasIndex else: index_cls = PandasMultiIndex elif not issubclass(index_cls, Index): raise TypeError(f"{index_cls} is not a subclass of xarray.Index") invalid_coords = set(coord_names) - self._coord_names if invalid_coords: msg = ["invalid coordinate(s)"] no_vars = invalid_coords - set(self._variables) data_vars = invalid_coords - no_vars if no_vars: msg.append(f"those variables don't exist: {no_vars}") if data_vars: msg.append( f"those variables are data variables: {data_vars}, use `set_coords` first" ) raise ValueError("\n".join(msg)) # we could be more clever here (e.g., drop-in index replacement if index # coordinates do not conflict), but let's not allow this for now indexed_coords = set(coord_names) & set(self._indexes) if indexed_coords: raise ValueError( f"those coordinates already have an index: {indexed_coords}" ) coord_vars = {name: self._variables[name] for name in coord_names} index = index_cls.from_variables(coord_vars, options=options) new_coord_vars = index.create_variables(coord_vars) # special case for setting a pandas multi-index from level coordinates # TODO: remove it once we depreciate pandas multi-index dimension (tuple # elements) coordinate if isinstance(index, PandasMultiIndex): coord_names = [index.dim] + list(coord_names) # Check for extra variables that don't match the coordinate names extra_vars = set(new_coord_vars) - set(coord_names) if extra_vars: extra_vars_str = ", ".join(f"'{name}'" for name in extra_vars) coord_names_str = ", ".join(f"'{name}'" for name in coord_names) raise ValueError( f"The index created extra variables {extra_vars_str} that are not " f"in the list of coordinates {coord_names_str}. " f"Use a factory method pattern instead:\n" f" index = {index_cls.__name__}.from_variables(ds, {list(coord_names)!r})\n" f" coords = xr.Coordinates.from_xindex(index)\n" f" ds = ds.assign_coords(coords)" ) variables: dict[Hashable, Variable] indexes: dict[Hashable, Index] if len(coord_names) == 1: variables = self._variables.copy() indexes = self._indexes.copy() name = list(coord_names).pop() if name in new_coord_vars: variables[name] = new_coord_vars[name] indexes[name] = index else: # reorder variables and indexes so that coordinates having the same # index are next to each other variables = {} for name, var in self._variables.items(): if name not in coord_names: variables[name] = var indexes = {} for name, idx in self._indexes.items(): if name not in coord_names: indexes[name] = idx for name in coord_names: try: variables[name] = new_coord_vars[name] except KeyError: variables[name] = self._variables[name] indexes[name] = index return self._replace( variables=variables, coord_names=self._coord_names | set(coord_names), indexes=indexes, ) def reorder_levels( self, dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None, **dim_order_kwargs: Sequence[int | Hashable], ) -> Self: """Rearrange index levels using input order. Parameters ---------- dim_order : dict-like of Hashable to Sequence of int or Hashable, optional Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. **dim_order_kwargs : Sequence of int or Hashable, optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. 
Returns ------- obj : Dataset Another dataset, with this dataset's data but replaced coordinates. """ dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, "reorder_levels") variables = self._variables.copy() indexes = dict(self._indexes) new_indexes: dict[Hashable, Index] = {} new_variables: dict[Hashable, IndexVariable] = {} for dim, order in dim_order.items(): index = self._indexes[dim] if not isinstance(index, PandasMultiIndex): raise ValueError(f"coordinate {dim} has no MultiIndex") level_vars = {k: self._variables[k] for k in order} idx = index.reorder_levels(level_vars) idx_vars = idx.create_variables(level_vars) new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) indexes = {k: v for k, v in self._indexes.items() if k not in new_indexes} indexes.update(new_indexes) variables = {k: v for k, v in self._variables.items() if k not in new_variables} variables.update(new_variables) return self._replace(variables, indexes=indexes) def _get_stack_index( self, dim, multi=False, create_index=False, ) -> tuple[Index | None, dict[Hashable, Variable]]: """Used by stack and unstack to get one pandas (multi-)index among the indexed coordinates along dimension `dim`. If exactly one index is found, return it with its corresponding coordinate variables(s), otherwise return None and an empty dict. If `create_index=True`, create a new index if none is found or raise an error if multiple indexes are found. """ stack_index: Index | None = None stack_coords: dict[Hashable, Variable] = {} for name, index in self._indexes.items(): var = self._variables[name] if ( var.ndim == 1 and var.dims[0] == dim and ( # stack: must be a single coordinate index (not multi and not self.xindexes.is_multi(name)) # unstack: must be an index that implements .unstack or (multi and type(index).unstack is not Index.unstack) ) ): if stack_index is not None and index is not stack_index: # more than one index found, stop if create_index: raise ValueError( f"cannot stack dimension {dim!r} with `create_index=True` " "and with more than one index found along that dimension" ) return None, {} stack_index = index stack_coords[name] = var if create_index and stack_index is None: if dim in self._variables: var = self._variables[dim] else: _, _, var = _get_virtual_variable(self._variables, dim, self.sizes) # dummy index (only `stack_coords` will be used to construct the multi-index) stack_index = PandasIndex([0], dim) stack_coords = {dim: var} return stack_index, stack_coords def _stack_once( self, dims: Sequence[Hashable | EllipsisType], new_dim: Hashable, index_cls: type[Index], create_index: bool | None = True, ) -> Self: if dims == ...: raise ValueError("Please use [...] for dims, rather than just ...") if ... 
in dims: dims = list(infix_dims(dims, self.dims)) new_variables: dict[Hashable, Variable] = {} stacked_var_names: list[Hashable] = [] drop_indexes: list[Hashable] = [] for name, var in self.variables.items(): if any(d in var.dims for d in dims): add_dims = [d for d in dims if d not in var.dims] vdims = list(var.dims) + add_dims shape = [self.sizes[d] for d in vdims] exp_var = var.set_dims(vdims, shape) stacked_var = exp_var.stack(**{new_dim: dims}) new_variables[name] = stacked_var stacked_var_names.append(name) else: new_variables[name] = var.copy(deep=False) # drop indexes of stacked coordinates (if any) for name in stacked_var_names: drop_indexes += list(self.xindexes.get_all_coords(name, errors="ignore")) new_indexes = {} new_coord_names = set(self._coord_names) if create_index or create_index is None: product_vars: dict[Any, Variable] = {} for dim in dims: idx, idx_vars = self._get_stack_index(dim, create_index=create_index) if idx is not None: product_vars.update(idx_vars) if len(product_vars) == len(dims): idx = index_cls.stack(product_vars, new_dim) new_indexes[new_dim] = idx new_indexes.update(dict.fromkeys(product_vars, idx)) idx_vars = idx.create_variables(product_vars) # keep consistent multi-index coordinate order for k in idx_vars: new_variables.pop(k, None) new_variables.update(idx_vars) new_coord_names.update(idx_vars) indexes = {k: v for k, v in self._indexes.items() if k not in drop_indexes} indexes.update(new_indexes) return self._replace_with_new_dims( new_variables, coord_names=new_coord_names, indexes=indexes ) @partial(deprecate_dims, old_name="dimensions") def stack( self, dim: Mapping[Any, Sequence[Hashable | EllipsisType]] | None = None, create_index: bool | None = True, index_cls: type[Index] = PandasMultiIndex, **dim_kwargs: Sequence[Hashable | EllipsisType], ) -> Self: """ Stack any number of existing dimensions into a single new dimension. New dimensions will be added at the end, and by default the corresponding coordinate variables will be combined into a MultiIndex. Parameters ---------- dim : mapping of hashable to sequence of hashable Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new dimensions, and the existing dimensions that they replace. An ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. create_index : bool or None, default: True - True: create a multi-index for each of the stacked dimensions. - False: don't create any index. - None. create a multi-index only if exactly one single (1-d) coordinate index is found for every dimension to stack. index_cls: Index-class, default: PandasMultiIndex Can be used to pass a custom multi-index type (must be an Xarray index that implements `.stack()`). By default, a pandas multi-index wrapper is used. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- stacked : Dataset Dataset with stacked data. See Also -------- Dataset.unstack """ dim = either_dict_or_kwargs(dim, dim_kwargs, "stack") result = self for new_dim, dims in dim.items(): result = result._stack_once(dims, new_dim, index_cls, create_index) return result def to_stacked_array( self, new_dim: Hashable, sample_dims: Collection[Hashable], variable_dim: Hashable = "variable", name: Hashable | None = None, ) -> DataArray: """Combine variables of differing dimensionality into a DataArray without broadcasting. 
This method is similar to Dataset.to_dataarray but does not broadcast the variables. Parameters ---------- new_dim : hashable Name of the new stacked coordinate sample_dims : Collection of hashables List of dimensions that **will not** be stacked. Each array in the dataset must share these dimensions. For machine learning applications, these define the dimensions over which samples are drawn. variable_dim : hashable, default: "variable" Name of the level in the stacked coordinate which corresponds to the variables. name : hashable, optional Name of the new data array. Returns ------- stacked : DataArray DataArray with the specified dimensions and data variables stacked together. The stacked coordinate is named ``new_dim`` and represented by a MultiIndex object with a level containing the data variable names. The name of this level is controlled using the ``variable_dim`` argument. See Also -------- Dataset.to_dataarray Dataset.stack DataArray.to_unstacked_dataset Examples -------- >>> data = xr.Dataset( ... data_vars={ ... "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), ... "b": ("x", [6, 7]), ... }, ... coords={"y": ["u", "v", "w"]}, ... ) >>> data Size: 76B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> data.to_stacked_array("z", sample_dims=["x"]) Size: 64B array([[0, 1, 2, 6], [3, 4, 5, 7]]) Coordinates: * z (z) object 32B MultiIndex * variable (z) Self: index, index_vars = index_and_vars variables: dict[Hashable, Variable] = {} indexes = {k: v for k, v in self._indexes.items() if k != dim} new_indexes, clean_index = index.unstack() indexes.update(new_indexes) for idx in new_indexes.values(): variables.update(idx.create_variables(index_vars)) for name, var in self.variables.items(): if name not in index_vars: if dim in var.dims: if isinstance(fill_value, Mapping): fill_value_ = fill_value[name] else: fill_value_ = fill_value variables[name] = var._unstack_once( index=clean_index, dim=dim, fill_value=fill_value_, sparse=sparse, ) else: variables[name] = var coord_names = set(self._coord_names) - {dim} | set(new_indexes) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def _unstack_full_reindex( self, dim: Hashable, index_and_vars: tuple[Index, dict[Hashable, Variable]], fill_value, sparse: bool, ) -> Self: index, index_vars = index_and_vars variables: dict[Hashable, Variable] = {} indexes = {k: v for k, v in self._indexes.items() if k != dim} new_indexes, clean_index = index.unstack() indexes.update(new_indexes) new_index_variables = {} for idx in new_indexes.values(): new_index_variables.update(idx.create_variables(index_vars)) new_dim_sizes = {k: v.size for k, v in new_index_variables.items()} variables.update(new_index_variables) # take a shortcut in case the MultiIndex was not modified. 
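        # The shortcut works as follows: rebuild the full Cartesian product of the
        # remaining levels and compare it with the cleaned MultiIndex. If they are
        # equal there are no missing entries to fill, so reindexing can be skipped
        # and ``self`` is used directly; otherwise the dataset is reindexed onto the
        # full product index (filling with ``fill_value``) before unstacking.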
full_idx = pd.MultiIndex.from_product( clean_index.levels, names=clean_index.names ) if clean_index.equals(full_idx): obj = self else: # TODO: we may depreciate implicit re-indexing with a pandas.MultiIndex xr_full_idx = PandasMultiIndex(full_idx, dim) indexers = Indexes( dict.fromkeys(index_vars, xr_full_idx), xr_full_idx.create_variables(index_vars), ) obj = self._reindex( indexers, copy=False, fill_value=fill_value, sparse=sparse ) for name, var in obj.variables.items(): if name not in index_vars: if dim in var.dims: variables[name] = var.unstack({dim: new_dim_sizes}) else: variables[name] = var coord_names = set(self._coord_names) - {dim} | set(new_dim_sizes) return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def unstack( self, dim: Dims = None, *, fill_value: Any = xrdtypes.NA, sparse: bool = False, ) -> Self: """ Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions. New dimensions will be added at the end. Parameters ---------- dim : str, Iterable of Hashable or None, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. fill_value : scalar or dict-like, default: nan value to be filled. If a dict-like, maps variable names to fill values. If not provided or if the dict-like does not contain all variables, the dtype's NA value will be used. sparse : bool, default: False use sparse-array if True Returns ------- unstacked : Dataset Dataset with unstacked data. See Also -------- Dataset.stack """ if dim is None: dims = list(self.dims) else: if isinstance(dim, str) or not isinstance(dim, Iterable): dims = [dim] else: dims = list(dim) missing_dims = set(dims) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}" ) # each specified dimension must have exactly one multi-index stacked_indexes: dict[Any, tuple[Index, dict[Hashable, Variable]]] = {} for d in dims: idx, idx_vars = self._get_stack_index(d, multi=True) if idx is not None: stacked_indexes[d] = idx, idx_vars if dim is None: dims = list(stacked_indexes) else: non_multi_dims = set(dims) - set(stacked_indexes) if non_multi_dims: raise ValueError( "cannot unstack dimensions that do not " f"have exactly one multi-index: {tuple(non_multi_dims)}" ) result = self.copy(deep=False) # we want to avoid allocating an object-dtype ndarray for a MultiIndex, # so we can't just access self.variables[v].data for every variable. # We only check the non-index variables. # https://github.com/pydata/xarray/issues/5902 nonindexes = [ self.variables[k] for k in set(self.variables) - set(self._indexes) ] # Notes for each of these cases: # 1. Dask arrays don't support assignment by index, which the fast unstack # function requires. # https://github.com/pydata/xarray/pull/4746#issuecomment-753282125 # 2. Sparse doesn't currently support (though we could special-case it) # https://github.com/pydata/sparse/issues/422 # 3. pint requires checking if it's a NumPy array until # https://github.com/pydata/xarray/pull/4751 is resolved, # Once that is resolved, explicitly exclude pint arrays. # pint doesn't implement `np.full_like` in a way that's # currently compatible. 
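        # In short: if any non-index variable is dask-backed, sparse, or otherwise
        # not a plain numpy array, fall back to the reindex-based unstack
        # (_unstack_full_reindex); only pure numpy data takes the fast path below.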
sparse_array_type = array_type("sparse") needs_full_reindex = any( is_duck_dask_array(v.data) or isinstance(v.data, sparse_array_type) or not isinstance(v.data, np.ndarray) for v in nonindexes ) for d in dims: if needs_full_reindex: result = result._unstack_full_reindex( d, stacked_indexes[d], fill_value, sparse ) else: result = result._unstack_once(d, stacked_indexes[d], fill_value, sparse) return result def update(self, other: CoercibleMapping) -> Self: """Update this dataset's variables with those from another dataset. Just like :py:meth:`dict.update` this is a in-place operation. For a non-inplace version, see :py:meth:`Dataset.merge`. Parameters ---------- other : Dataset or mapping Variables with which to update this dataset. One of: - Dataset - mapping {var name: DataArray} - mapping {var name: Variable} - mapping {var name: (dimension name, array-like)} - mapping {var name: (tuple of dimension names, array-like)} Returns ------- updated : Dataset Updated dataset. Note that since the update is in-place this is the input dataset. It is deprecated since version 0.17 and scheduled to be removed in 0.21. Raises ------ ValueError If any dimensions would have inconsistent sizes in the updated dataset. See Also -------- Dataset.assign Dataset.merge """ merge_result = dataset_update_method(self, other) return self._replace(inplace=True, **merge_result._asdict()) def merge( self, other: CoercibleMapping | DataArray, overwrite_vars: Hashable | Iterable[Hashable] = frozenset(), compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, fill_value: Any = xrdtypes.NA, combine_attrs: CombineAttrsOptions = "override", ) -> Self: """Merge the arrays of two datasets into a single dataset. This method generally does not allow for overriding data, with the exception of attributes, which are ignored on the second dataset. Variables with the same name are checked for conflicts via the equals or identical methods. Parameters ---------- other : Dataset or mapping Dataset or variables to merge with this dataset. overwrite_vars : hashable or iterable of hashable, optional If provided, update variables of these name(s) without checking for conflicts in this dataset. compat : {"identical", "equals", "broadcast_equals", \ "no_conflicts", "override", "minimal"}, default: "no_conflicts" String indicating how to compare variables of the same name for potential conflicts: - 'identical': all values, dimensions and attributes must be the same. - 'equals': all values and dimensions must be the same. - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - 'override': skip comparing and pick variable from first dataset - 'minimal': drop conflicting coordinates join : {"outer", "inner", "left", "right", "exact", "override"}, \ default: "outer" Method for joining ``self`` and ``other`` along shared dimensions: - 'outer': use the union of the indexes - 'inner': use the intersection of the indexes - 'left': use indexes from ``self`` - 'right': use indexes from ``other`` - 'exact': error instead of aligning non-equal indexes - 'override': use indexes from ``self`` that are the same size as those of ``other`` in that dimension fill_value : scalar or dict-like, optional Value to use for newly missing values. 
If a dict-like, maps variable names (including coordinates) to fill values. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- merged : Dataset Merged dataset. Raises ------ MergeError If any variables conflict (see ``compat``). See Also -------- Dataset.update """ from xarray.core.dataarray import DataArray other = other.to_dataset() if isinstance(other, DataArray) else other merge_result = dataset_merge_method( self, other, overwrite_vars=overwrite_vars, compat=compat, join=join, fill_value=fill_value, combine_attrs=combine_attrs, ) return self._replace(**merge_result._asdict()) def _assert_all_in_dataset( self, names: Iterable[Hashable], virtual_okay: bool = False ) -> None: bad_names = set(names) - set(self._variables) if virtual_okay: bad_names -= self.virtual_variables if bad_names: ordered_bad_names = [name for name in names if name in bad_names] raise ValueError( f"These variables cannot be found in this dataset: {ordered_bad_names}" ) def drop_vars( self, names: str | Iterable[Hashable] | Callable[[Self], str | Iterable[Hashable]], *, errors: ErrorOptions = "raise", ) -> Self: """Drop variables from this dataset. Parameters ---------- names : Hashable or iterable of Hashable or Callable Name(s) of variables to drop. If a Callable, this object is passed as its only argument and its result is used. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the dataset are dropped and no error is raised. Examples -------- >>> dataset = xr.Dataset( ... { ... "temperature": ( ... ["time", "latitude", "longitude"], ... [[[25.5, 26.3], [27.1, 28.0]]], ... ), ... "humidity": ( ... ["time", "latitude", "longitude"], ... [[[65.0, 63.8], [58.2, 59.6]]], ... ), ... "wind_speed": ( ... ["time", "latitude", "longitude"], ... [[[10.2, 8.5], [12.1, 9.8]]], ... ), ... }, ... coords={ ... "time": pd.date_range("2023-07-01", periods=1), ... "latitude": [40.0, 40.2], ... "longitude": [-75.0, -74.8], ... }, ... 
) >>> dataset Size: 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Drop the 'humidity' variable >>> dataset.drop_vars(["humidity"]) Size: 104B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Drop the 'humidity', 'temperature' variables >>> dataset.drop_vars(["humidity", "temperature"]) Size: 72B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Drop all indexes >>> dataset.drop_vars(lambda x: x.indexes) Size: 96B Dimensions: (time: 1, latitude: 2, longitude: 2) Dimensions without coordinates: time, latitude, longitude Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Attempt to drop non-existent variable with errors="ignore" >>> dataset.drop_vars(["pressure"], errors="ignore") Size: 136B Dimensions: (time: 1, latitude: 2, longitude: 2) Coordinates: * time (time) datetime64[ns] 8B 2023-07-01 * latitude (latitude) float64 16B 40.0 40.2 * longitude (longitude) float64 16B -75.0 -74.8 Data variables: temperature (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0 humidity (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6 wind_speed (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8 Attempt to drop non-existent variable with errors="raise" >>> dataset.drop_vars(["pressure"], errors="raise") Traceback (most recent call last): ValueError: These variables cannot be found in this dataset: ['pressure'] Raises ------ ValueError Raised if you attempt to drop a variable which is not present, and the kwarg ``errors='raise'``. Returns ------- dropped : Dataset See Also -------- DataArray.drop_vars """ if callable(names): names = names(self) # the Iterable check is required for mypy if is_scalar(names) or not isinstance(names, Iterable): names_set = {names} else: names_set = set(names) if errors == "raise": self._assert_all_in_dataset(names_set) # GH6505 other_names = set() for var in names_set: maybe_midx = self._indexes.get(var, None) if isinstance(maybe_midx, PandasMultiIndex): idx_coord_names = set(list(maybe_midx.index.names) + [maybe_midx.dim]) idx_other_names = idx_coord_names - set(names_set) other_names.update(idx_other_names) if other_names: names_set |= set(other_names) emit_user_level_warning( f"Deleting a single level of a MultiIndex is deprecated. Previously, this deleted all levels of a MultiIndex. 
" f"Please also drop the following variables: {other_names!r} to avoid an error in the future.", DeprecationWarning, ) assert_no_index_corrupted(self.xindexes, names_set) variables = {k: v for k, v in self._variables.items() if k not in names_set} coord_names = {k for k in self._coord_names if k in variables} indexes = {k: v for k, v in self._indexes.items() if k not in names_set} return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def drop_indexes( self, coord_names: Hashable | Iterable[Hashable], *, errors: ErrorOptions = "raise", ) -> Self: """Drop the indexes assigned to the given coordinates. Parameters ---------- coord_names : hashable or iterable of hashable Name(s) of the coordinate(s) for which to drop the index. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the coordinates passed have no index or are not in the dataset. If 'ignore', no error is raised. Returns ------- dropped : Dataset A new dataset with dropped indexes. """ # the Iterable check is required for mypy if is_scalar(coord_names) or not isinstance(coord_names, Iterable): coord_names = {coord_names} else: coord_names = set(coord_names) if errors == "raise": invalid_coords = coord_names - self._coord_names if invalid_coords: raise ValueError( f"The coordinates {tuple(invalid_coords)} are not found in the " f"dataset coordinates {tuple(self.coords.keys())}" ) unindexed_coords = set(coord_names) - set(self._indexes) if unindexed_coords: raise ValueError( f"those coordinates do not have an index: {unindexed_coords}" ) assert_no_index_corrupted(self.xindexes, coord_names, action="remove index(es)") variables = {} for name, var in self._variables.items(): if name in coord_names: variables[name] = var.to_base_variable() else: variables[name] = var indexes = {k: v for k, v in self._indexes.items() if k not in coord_names} return self._replace(variables=variables, indexes=indexes) def drop( self, labels=None, dim=None, *, errors: ErrorOptions = "raise", **labels_kwargs, ) -> Self: """Backward compatible method based on `drop_vars` and `drop_sel` Using either `drop_vars` or `drop_sel` is encouraged See Also -------- Dataset.drop_vars Dataset.drop_sel """ if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') if is_dict_like(labels) and not isinstance(labels, dict): emit_user_level_warning( "dropping coordinates using `drop` is deprecated; use drop_vars.", DeprecationWarning, ) return self.drop_vars(labels, errors=errors) if labels_kwargs or isinstance(labels, dict): if dim is not None: raise ValueError("cannot specify dim and dict-like arguments.") labels = either_dict_or_kwargs(labels, labels_kwargs, "drop") if dim is None and (is_scalar(labels) or isinstance(labels, Iterable)): emit_user_level_warning( "dropping variables using `drop` is deprecated; use drop_vars.", DeprecationWarning, ) # for mypy if is_scalar(labels): labels = [labels] return self.drop_vars(labels, errors=errors) if dim is not None: warnings.warn( "dropping labels using list-like labels is deprecated; using " "dict-like arguments with `drop_sel`, e.g. 
`ds.drop_sel(dim=[labels]).", DeprecationWarning, stacklevel=2, ) return self.drop_sel({dim: labels}, errors=errors, **labels_kwargs) emit_user_level_warning( "dropping labels using `drop` is deprecated; use `drop_sel` instead.", DeprecationWarning, ) return self.drop_sel(labels, errors=errors) def drop_sel( self, labels=None, *, errors: ErrorOptions = "raise", **labels_kwargs ) -> Self: """Drop index labels from this dataset. Parameters ---------- labels : mapping of hashable to Any Index labels to drop errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the dataset are dropped and no error is raised. **labels_kwargs : {dim: label, ...}, optional The keyword arguments form of ``dim`` and ``labels`` Returns ------- dropped : Dataset Examples -------- >>> data = np.arange(6).reshape(2, 3) >>> labels = ["a", "b", "c"] >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels}) >>> ds Size: 60B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> ds.drop_sel(y=["a", "c"]) Size: 20B Dimensions: (x: 2, y: 1) Coordinates: * y (y) >> ds.drop_sel(y="b") Size: 40B Dimensions: (x: 2, y: 2) Coordinates: * y (y) Self: """Drop index positions from this Dataset. Parameters ---------- indexers : mapping of hashable to Any Index locations to drop **indexers_kwargs : {dim: position, ...}, optional The keyword arguments form of ``dim`` and ``positions`` Returns ------- dropped : Dataset Raises ------ IndexError Examples -------- >>> data = np.arange(6).reshape(2, 3) >>> labels = ["a", "b", "c"] >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels}) >>> ds Size: 60B Dimensions: (x: 2, y: 3) Coordinates: * y (y) >> ds.drop_isel(y=[0, 2]) Size: 20B Dimensions: (x: 2, y: 1) Coordinates: * y (y) >> ds.drop_isel(y=1) Size: 40B Dimensions: (x: 2, y: 2) Coordinates: * y (y) Self: """Drop dimensions and associated variables from this dataset. Parameters ---------- drop_dims : str or Iterable of Hashable Dimension or dimensions to drop. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given dimensions that are in the dataset are dropped and no error is raised. Returns ------- obj : Dataset The dataset without the given dimensions (or any variables containing those dimensions). """ if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable): drop_dims = {drop_dims} else: drop_dims = set(drop_dims) if errors == "raise": missing_dims = drop_dims - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}" ) drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims} return self.drop_vars(drop_vars) @deprecate_dims def transpose( self, *dim: Hashable, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Return a new Dataset object with all array dimensions transposed. Although the order of dimensions on each array will change, the dataset dimensions themselves will remain in fixed (sorted) order. Parameters ---------- *dim : hashable, optional By default, reverse the dimensions on each array. Otherwise, reorder the dimensions to this order. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- transposed : Dataset Each array in the dataset (including) coordinates will be transposed to the given order. Notes ----- This operation returns a view of each array's data. It is lazy for dask-backed DataArrays but not for numpy-backed DataArrays -- the data will be fully loaded into memory. See Also -------- numpy.transpose DataArray.transpose """ # Raise error if list is passed as dim if (len(dim) > 0) and (isinstance(dim[0], list)): list_fix = [f"{x!r}" if isinstance(x, str) else f"{x}" for x in dim[0]] raise TypeError( f"transpose requires dim to be passed as multiple arguments. Expected `{', '.join(list_fix)}`. Received `{dim[0]}` instead" ) # Use infix_dims to check once for missing dimensions if len(dim) != 0: _ = list(infix_dims(dim, self.dims, missing_dims)) ds = self.copy() for name, var in self._variables.items(): var_dims = tuple(d for d in dim if d in (var.dims + (...,))) ds._variables[name] = var.transpose(*var_dims) return ds def dropna( self, dim: Hashable, *, how: Literal["any", "all"] = "any", thresh: int | None = None, subset: Iterable[Hashable] | None = None, ) -> Self: """Returns a new dataset with dropped labels for missing values along the provided dimension. Parameters ---------- dim : hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. how : {"any", "all"}, default: "any" - any : if any NA values are present, drop that label - all : if all values are NA, drop that label thresh : int or None, optional If supplied, require this many non-NA values (summed over all the subset variables). subset : iterable of hashable or None, optional Which variables to check for missing values. By default, all variables in the dataset are checked. Examples -------- >>> dataset = xr.Dataset( ... { ... "temperature": ( ... ["time", "location"], ... [[23.4, 24.1], [np.nan, 22.1], [21.8, 24.2], [20.5, 25.3]], ... ) ... }, ... coords={"time": [1, 2, 3, 4], "location": ["A", "B"]}, ... ) >>> dataset Size: 104B Dimensions: (time: 4, location: 2) Coordinates: * time (time) int64 32B 1 2 3 4 * location (location) >> dataset.dropna(dim="time") Size: 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 * location (location) >> dataset.dropna(dim="time", how="any") Size: 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 * location (location) >> dataset.dropna(dim="time", how="all") Size: 104B Dimensions: (time: 4, location: 2) Coordinates: * time (time) int64 32B 1 2 3 4 * location (location) >> dataset.dropna(dim="time", thresh=2) Size: 80B Dimensions: (time: 3, location: 2) Coordinates: * time (time) int64 24B 1 3 4 * location (location) = thresh elif how == "any": mask = count == size elif how == "all": mask = count > 0 elif how is not None: raise ValueError(f"invalid how option: {how}") else: raise TypeError("must specify how or thresh") return self.isel({dim: mask}) def fillna(self, value: Any) -> Self: """Fill missing values in this object. 
This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic, except the result is aligned to this object (``join='left'``) instead of aligned to the intersection of index coordinates (``join='inner'``). Parameters ---------- value : scalar, ndarray, DataArray, dict or Dataset Used to fill all matching missing values in this dataset's data variables. Scalars, ndarrays or DataArrays arguments are used to fill all data with aligned coordinates (for DataArrays). Dictionaries or datasets match data variables and then align coordinates if necessary. Returns ------- Dataset Examples -------- >>> ds = xr.Dataset( ... { ... "A": ("x", [np.nan, 2, np.nan, 0]), ... "B": ("x", [3, 4, np.nan, 1]), ... "C": ("x", [np.nan, np.nan, np.nan, 5]), ... "D": ("x", [np.nan, 3, np.nan, 4]), ... }, ... coords={"x": [0, 1, 2, 3]}, ... ) >>> ds Size: 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: A (x) float64 32B nan 2.0 nan 0.0 B (x) float64 32B 3.0 4.0 nan 1.0 C (x) float64 32B nan nan nan 5.0 D (x) float64 32B nan 3.0 nan 4.0 Replace all `NaN` values with 0s. >>> ds.fillna(0) Size: 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: A (x) float64 32B 0.0 2.0 0.0 0.0 B (x) float64 32B 3.0 4.0 0.0 1.0 C (x) float64 32B 0.0 0.0 0.0 5.0 D (x) float64 32B 0.0 3.0 0.0 4.0 Replace all `NaN` elements in column β€˜A’, β€˜B’, β€˜C’, and β€˜D’, with 0, 1, 2, and 3 respectively. >>> values = {"A": 0, "B": 1, "C": 2, "D": 3} >>> ds.fillna(value=values) Size: 160B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: A (x) float64 32B 0.0 2.0 0.0 0.0 B (x) float64 32B 3.0 4.0 1.0 1.0 C (x) float64 32B 2.0 2.0 2.0 5.0 D (x) float64 32B 3.0 3.0 3.0 4.0 """ if utils.is_dict_like(value): value_keys = getattr(value, "data_vars", value).keys() if not set(value_keys) <= set(self.data_vars.keys()): raise ValueError( "all variables in the argument to `fillna` " "must be contained in the original dataset" ) out = ops.fillna(self, value) return out def interpolate_na( self, dim: Hashable | None = None, method: InterpOptions = "linear", limit: int | None = None, use_coordinate: bool | Hashable = True, max_gap: ( int | float | str | pd.Timedelta | np.timedelta64 | datetime.timedelta | None ) = None, **kwargs: Any, ) -> Self: """Fill in NaNs by interpolating according to different methods. Parameters ---------- dim : Hashable or None, optional Specifies the dimension along which to interpolate. method : {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", \ "barycentric", "krogh", "pchip", "spline", "akima"}, default: "linear" String indicating which method to use for interpolation: - 'linear': linear interpolation. Additional keyword arguments are passed to :py:func:`numpy.interp` - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial': are passed to :py:func:`scipy.interpolate.interp1d`. If ``method='polynomial'``, the ``order`` keyword argument must also be provided. - 'barycentric', 'krogh', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. use_coordinate : bool or Hashable, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if equally-spaced along ``dim``. If True, the IndexVariable `dim` is used. If ``use_coordinate`` is a string, it specifies the name of a coordinate variable to use as the index. 
limit : int, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta \ or None, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: - a string that is valid input for pandas.to_timedelta - a :py:class:`numpy.timedelta64` object - a :py:class:`pandas.Timedelta` object - a :py:class:`datetime.timedelta` object Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled dimensions has not been implemented yet. Gap length is defined as the difference between coordinate values at the first data point after a gap and the last value before a gap. For gaps at the beginning (end), gap length is defined as the difference between coordinate values at the first (last) valid data point and the first (last) NaN. For example, consider:: array([nan, nan, nan, 1., nan, nan, 4., nan, nan]) Coordinates: * x (x) int64 0 1 2 3 4 5 6 7 8 The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively **kwargs : dict, optional parameters passed verbatim to the underlying interpolation function Returns ------- interpolated: Dataset Filled in Dataset. Warning -------- When passing fill_value as a keyword argument with method="linear", it does not use ``numpy.interp`` but it uses ``scipy.interpolate.interp1d``, which provides the fill_value parameter. See Also -------- numpy.interp scipy.interpolate Examples -------- >>> ds = xr.Dataset( ... { ... "A": ("x", [np.nan, 2, 3, np.nan, 0]), ... "B": ("x", [3, 4, np.nan, 1, 7]), ... "C": ("x", [np.nan, np.nan, np.nan, 5, 0]), ... "D": ("x", [np.nan, 3, np.nan, -1, 4]), ... }, ... coords={"x": [0, 1, 2, 3, 4]}, ... ) >>> ds Size: 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: A (x) float64 40B nan 2.0 3.0 nan 0.0 B (x) float64 40B 3.0 4.0 nan 1.0 7.0 C (x) float64 40B nan nan nan 5.0 0.0 D (x) float64 40B nan 3.0 nan -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear") Size: 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: A (x) float64 40B nan 2.0 3.0 1.5 0.0 B (x) float64 40B 3.0 4.0 2.5 1.0 7.0 C (x) float64 40B nan nan nan 5.0 0.0 D (x) float64 40B nan 3.0 1.0 -1.0 4.0 >>> ds.interpolate_na(dim="x", method="linear", fill_value="extrapolate") Size: 200B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: A (x) float64 40B 1.0 2.0 3.0 1.5 0.0 B (x) float64 40B 3.0 4.0 2.5 1.0 7.0 C (x) float64 40B 20.0 15.0 10.0 5.0 0.0 D (x) float64 40B 5.0 3.0 1.0 -1.0 4.0 """ from xarray.core.missing import _apply_over_vars_with_dim, interp_na new = _apply_over_vars_with_dim( interp_na, self, dim=dim, method=method, limit=limit, use_coordinate=use_coordinate, max_gap=max_gap, **kwargs, ) return new def ffill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values forward *Requires bottleneck.* Parameters ---------- dim : Hashable Specifies the dimension along which to propagate values when filling. limit : int or None, optional The maximum number of consecutive NaN values to forward fill. 
In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Examples -------- >>> time = pd.date_range("2023-01-01", periods=10, freq="D") >>> data = np.array( ... [1, np.nan, np.nan, np.nan, 5, np.nan, np.nan, 8, np.nan, 10] ... ) >>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time}) >>> dataset Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 # Perform forward fill (ffill) on the dataset >>> dataset.ffill(dim="time") Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 1.0 1.0 1.0 5.0 5.0 5.0 8.0 8.0 10.0 # Limit the forward filling to a maximum of 2 consecutive NaN values >>> dataset.ffill(dim="time", limit=2) Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 1.0 1.0 nan 5.0 5.0 5.0 8.0 8.0 10.0 Returns ------- Dataset See Also -------- Dataset.bfill """ from xarray.core.missing import _apply_over_vars_with_dim, ffill new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit) return new def bfill(self, dim: Hashable, limit: int | None = None) -> Self: """Fill NaN values by propagating values backward *Requires bottleneck.* Parameters ---------- dim : Hashable Specifies the dimension along which to propagate values when filling. limit : int or None, optional The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater than 0 or None for no limit. Must be None or greater than or equal to axis length if filling along chunked axes (dimensions). Examples -------- >>> time = pd.date_range("2023-01-01", periods=10, freq="D") >>> data = np.array( ... [1, np.nan, np.nan, np.nan, 5, np.nan, np.nan, 8, np.nan, 10] ... ) >>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time}) >>> dataset Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0 # filled dataset, fills NaN values by propagating values backward >>> dataset.bfill(dim="time") Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 5.0 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 # Limit the backward filling to a maximum of 2 consecutive NaN values >>> dataset.bfill(dim="time", limit=2) Size: 160B Dimensions: (time: 10) Coordinates: * time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10 Data variables: data (time) float64 80B 1.0 nan 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0 Returns ------- Dataset See Also -------- Dataset.ffill """ from xarray.core.missing import _apply_over_vars_with_dim, bfill new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit) return new def combine_first(self, other: Self) -> Self: """Combine two Datasets, default to data_vars of self. The new coordinates follow the normal broadcasting and alignment rules of ``join='outer'``. 
Vacant cells in the expanded coordinates are filled with np.nan. Parameters ---------- other : Dataset Used to fill all matching missing values in this array. Returns ------- Dataset """ out = ops.fillna(self, other, join="outer", dataset_join="outer") return out def reduce( self, func: Callable, dim: Dims = None, *, keep_attrs: bool | None = None, keepdims: bool = False, numeric_only: bool = False, **kwargs: Any, ) -> Self: """Reduce this dataset by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `f(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. numeric_only : bool, default: False If True, only apply ``func`` to variables with a numeric dtype. **kwargs : Any Additional keyword arguments passed on to ``func``. Returns ------- reduced : Dataset Dataset with this object's DataArrays replaced with new DataArrays of summarized data and the indicated dimension(s) removed. Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 92], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [93, 96, 91]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... ) # Calculate the 75th percentile of math scores for each student using np.percentile >>> percentile_scores = dataset.reduce(np.percentile, q=75, dim="test") >>> percentile_scores Size: 132B Dimensions: (student: 3) Coordinates: * student (student) Self: """Apply a function to each data variable in this dataset Parameters ---------- func : callable Function which can be called in the form `func(x, *args, **kwargs)` to transform each DataArray `x` in this dataset into another DataArray. keep_attrs : bool or None, optional If True, both the dataset's and variables' attributes (`attrs`) will be copied from the original objects to the new ones. If False, the new dataset and variables will be returned without copying the attributes. args : iterable, optional Positional arguments passed on to `func`. **kwargs : Any Keyword arguments passed on to `func`. Returns ------- applied : Dataset Resulting dataset from applying ``func`` to each data variable. 
Examples -------- >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])}) >>> ds Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 bar (x) int64 16B -1 2 >>> ds.map(np.fabs) Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773 bar (x) float64 16B 1.0 2.0 """ if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) variables = { k: maybe_wrap_array(v, func(v, *args, **kwargs)) for k, v in self.data_vars.items() } if keep_attrs: for k, v in variables.items(): v._copy_attrs_from(self.data_vars[k]) attrs = self.attrs if keep_attrs else None return type(self)(variables, attrs=attrs) def apply( self, func: Callable, keep_attrs: bool | None = None, args: Iterable[Any] = (), **kwargs: Any, ) -> Self: """ Backward compatible implementation of ``map`` See Also -------- Dataset.map """ warnings.warn( "Dataset.apply may be deprecated in the future. Using Dataset.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, keep_attrs, args, **kwargs) def assign( self, variables: Mapping[Any, Any] | None = None, **variables_kwargs: Any, ) -> Self: """Assign new data variables to a Dataset, returning a new object with all the original variables in addition to the new ones. Parameters ---------- variables : mapping of hashable to Any Mapping from variables names to the new values. If the new values are callable, they are computed on the Dataset and assigned to new data variables. If the values are not callable, (e.g. a DataArray, scalar, or array), they are simply assigned. **variables_kwargs The keyword arguments form of ``variables``. One of variables or variables_kwargs must be provided. Returns ------- ds : Dataset A new Dataset with the new variables in addition to all the existing variables. Notes ----- Since ``kwargs`` is a dictionary, the order of your arguments may not be preserved, and so the order of the new variables is not well defined. Assigning multiple variables within the same ``assign`` is possible, but you cannot reference other variables created within the same ``assign`` call. The new assigned variables that replace existing coordinates in the original dataset are still listed as coordinates in the returned Dataset. See Also -------- pandas.DataFrame.assign Examples -------- >>> x = xr.Dataset( ... { ... "temperature_c": ( ... ("lat", "lon"), ... 20 * np.random.rand(4).reshape(2, 2), ... ), ... "precipitation": (("lat", "lon"), np.random.rand(4).reshape(2, 2)), ... }, ... coords={"lat": [10, 20], "lon": [150, 160]}, ... 
) >>> x Size: 96B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 Where the value is a callable, evaluated on dataset: >>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32) Size: 128B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62 Alternatively, the same behavior can be achieved by directly referencing an existing dataarray: >>> x.assign(temperature_f=x["temperature_c"] * 9 / 5 + 32) Size: 128B Dimensions: (lat: 2, lon: 2) Coordinates: * lat (lat) int64 16B 10 20 * lon (lon) int64 16B 150 160 Data variables: temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9 precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918 temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62 """ variables = either_dict_or_kwargs(variables, variables_kwargs, "assign") data = self.copy() # do all calculations first... results: CoercibleMapping = data._calc_assign_results(variables) # split data variables to add/replace vs. coordinates to replace results_data_vars: dict[Hashable, CoercibleValue] = {} results_coords: dict[Hashable, CoercibleValue] = {} for k, v in results.items(): if k in data._coord_names: results_coords[k] = v else: results_data_vars[k] = v # ... and then assign data.coords.update(results_coords) data.update(results_data_vars) return data def to_dataarray( self, dim: Hashable = "variable", name: Hashable | None = None ) -> DataArray: """Convert this dataset into an xarray.DataArray The data variables of this dataset will be broadcast against each other and stacked along the first axis of the new array. All coordinates of this dataset will remain coordinates. Parameters ---------- dim : Hashable, default: "variable" Name of the new dimension. name : Hashable or None, optional Name of the new data array. Returns ------- array : xarray.DataArray """ from xarray.core.dataarray import DataArray data_vars = [self.variables[k] for k in self.data_vars] broadcast_vars = broadcast_variables(*data_vars) data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0) dims = (dim,) + broadcast_vars[0].dims variable = Variable(dims, data, self.attrs, fastpath=True) coords = {k: v.variable for k, v in self.coords.items()} indexes = filter_indexes_from_coords(self._indexes, set(coords)) new_dim_index = PandasIndex(list(self.data_vars), dim) indexes[dim] = new_dim_index coords.update(new_dim_index.create_variables()) return DataArray._construct_direct(variable, coords, name, indexes) def to_array( self, dim: Hashable = "variable", name: Hashable | None = None ) -> DataArray: """Deprecated version of to_dataarray""" return self.to_dataarray(dim=dim, name=name) def _normalize_dim_order( self, dim_order: Sequence[Hashable] | None = None ) -> dict[Hashable, int]: """ Check the validity of the provided dimensions if any and return the mapping between dimension name and their size. Parameters ---------- dim_order: Sequence of Hashable or None, optional Dimension order to validate (default to the alphabetical order if None). Returns ------- result : dict[Hashable, int] Validated dimensions mapping. 
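        For example (a sketch; ``ds`` is any dataset with dimensions ``x`` and ``y``)::

            ds._normalize_dim_order(["y", "x"])   # -> {"y": ds.sizes["y"], "x": ds.sizes["x"]}
            ds._normalize_dim_order(["y"])        # raises ValueError: "x" is missing
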
""" if dim_order is None: dim_order = list(self.dims) elif set(dim_order) != set(self.dims): raise ValueError( f"dim_order {dim_order} does not match the set of dimensions of this " f"Dataset: {list(self.dims)}" ) ordered_dims = {k: self.sizes[k] for k in dim_order} return ordered_dims def to_pandas(self) -> pd.Series | pd.DataFrame: """Convert this dataset into a pandas object without changing the number of dimensions. The type of the returned object depends on the number of Dataset dimensions: * 0D -> `pandas.Series` * 1D -> `pandas.DataFrame` Only works for Datasets with 1 or fewer dimensions. """ if len(self.dims) == 0: return pd.Series({k: v.item() for k, v in self.items()}) if len(self.dims) == 1: return self.to_dataframe() raise ValueError( f"cannot convert Datasets with {len(self.dims)} dimensions into " "pandas objects without changing the number of dimensions. " "Please use Dataset.to_dataframe() instead." ) def _to_dataframe(self, ordered_dims: Mapping[Any, int]): from xarray.core.extension_array import PandasExtensionArray columns_in_order = [k for k in self.variables if k not in self.dims] non_extension_array_columns = [ k for k in columns_in_order if not pd.api.types.is_extension_array_dtype(self.variables[k].data) # noqa: TID251 ] extension_array_columns = [ k for k in columns_in_order if pd.api.types.is_extension_array_dtype(self.variables[k].data) # noqa: TID251 ] extension_array_columns_different_index = [ k for k in extension_array_columns if set(self.variables[k].dims) != set(ordered_dims.keys()) ] extension_array_columns_same_index = [ k for k in extension_array_columns if k not in extension_array_columns_different_index ] data = [ self._variables[k].set_dims(ordered_dims).values.reshape(-1) for k in non_extension_array_columns ] index = self.coords.to_index([*ordered_dims]) broadcasted_df = pd.DataFrame( { **dict(zip(non_extension_array_columns, data, strict=True)), **{ c: self.variables[c].data for c in extension_array_columns_same_index }, }, index=index, ) for extension_array_column in extension_array_columns_different_index: extension_array = self.variables[extension_array_column].data index = self[ self.variables[extension_array_column].dims[0] ].coords.to_index() extension_array_df = pd.DataFrame( {extension_array_column: extension_array}, index=pd.Index(index.array) if isinstance(index, PandasExtensionArray) # type: ignore[redundant-expr] else index, ) extension_array_df.index.name = self.variables[extension_array_column].dims[ 0 ] broadcasted_df = broadcasted_df.join(extension_array_df) return broadcasted_df[columns_in_order] def to_dataframe(self, dim_order: Sequence[Hashable] | None = None) -> pd.DataFrame: """Convert this dataset into a pandas.DataFrame. Non-index variables in this dataset form the columns of the DataFrame. The DataFrame is indexed by the Cartesian product of this dataset's indices. Parameters ---------- dim_order: Sequence of Hashable or None, optional Hierarchical dimension order for the resulting dataframe. All arrays are transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dataframe. If provided, must include all dimensions of this dataset. By default, dimensions are in the same order as in `Dataset.sizes`. Returns ------- result : DataFrame Dataset as a pandas DataFrame. 
""" ordered_dims = self._normalize_dim_order(dim_order=dim_order) return self._to_dataframe(ordered_dims=ordered_dims) def _set_sparse_data_from_dataframe( self, idx: pd.Index, arrays: list[tuple[Hashable, np.ndarray]], dims: tuple ) -> None: from sparse import COO if isinstance(idx, pd.MultiIndex): coords = np.stack([np.asarray(code) for code in idx.codes], axis=0) is_sorted = idx.is_monotonic_increasing shape = tuple(lev.size for lev in idx.levels) else: coords = np.arange(idx.size).reshape(1, -1) is_sorted = True shape = (idx.size,) for name, values in arrays: # In virtually all real use cases, the sparse array will now have # missing values and needs a fill_value. For consistency, don't # special case the rare exceptions (e.g., dtype=int without a # MultiIndex). dtype, fill_value = xrdtypes.maybe_promote(values.dtype) values = np.asarray(values, dtype=dtype) data = COO( coords, values, shape, has_duplicates=False, sorted=is_sorted, fill_value=fill_value, ) self[name] = (dims, data) def _set_numpy_data_from_dataframe( self, idx: pd.Index, arrays: list[tuple[Hashable, np.ndarray]], dims: tuple ) -> None: if not isinstance(idx, pd.MultiIndex): for name, values in arrays: self[name] = (dims, values) return # NB: similar, more general logic, now exists in # variable.unstack_once; we could consider combining them at some # point. shape = tuple(lev.size for lev in idx.levels) indexer = tuple(idx.codes) # We already verified that the MultiIndex has all unique values, so # there are missing values if and only if the size of output arrays is # larger that the index. missing_values = math.prod(shape) > idx.shape[0] for name, values in arrays: # NumPy indexing is much faster than using DataFrame.reindex() to # fill in missing values: # https://stackoverflow.com/a/35049899/809705 if missing_values: dtype, fill_value = xrdtypes.maybe_promote(values.dtype) data = np.full(shape, fill_value, dtype) else: # If there are no missing values, keep the existing dtype # instead of promoting to support NA, e.g., keep integer # columns as integers. # TODO: consider removing this special case, which doesn't # exist for sparse=True. data = np.zeros(shape, values.dtype) data[indexer] = values self[name] = (dims, data) @classmethod def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self: """Convert a pandas.DataFrame into an xarray.Dataset Each column will be converted into an independent variable in the Dataset. If the dataframe's index is a MultiIndex, it will be expanded into a tensor product of one-dimensional indices (filling in missing values with NaN). If you rather preserve the MultiIndex use `xr.Dataset(df)`. This method will produce a Dataset very similar to that on which the 'to_dataframe' method was called, except with possibly redundant dimensions (since all dataset variables will have the same dimensionality). Parameters ---------- dataframe : DataFrame DataFrame from which to copy data and indices. sparse : bool, default: False If true, create a sparse arrays instead of dense numpy arrays. This can potentially save a large amount of memory if the DataFrame has a MultiIndex. Requires the sparse package (sparse.pydata.org). Returns ------- New Dataset. See Also -------- xarray.DataArray.from_series pandas.DataFrame.to_xarray """ # TODO: Add an option to remove dimensions along which the variables # are constant, to enable consistent serialization to/from a dataframe, # even if some variables have different dimensionality. 
if not dataframe.columns.is_unique: raise ValueError("cannot convert DataFrame with non-unique columns") idx = remove_unused_levels_categories(dataframe.index) if isinstance(idx, pd.MultiIndex) and not idx.is_unique: raise ValueError( "cannot convert a DataFrame with a non-unique MultiIndex into xarray" ) arrays = [] extension_arrays = [] for k, v in dataframe.items(): if not is_allowed_extension_array(v) or isinstance( v.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES ): arrays.append((k, np.asarray(v))) else: extension_arrays.append((k, v)) indexes: dict[Hashable, Index] = {} index_vars: dict[Hashable, Variable] = {} if isinstance(idx, pd.MultiIndex): dims = tuple( name if name is not None else f"level_{n}" # type: ignore[redundant-expr,unused-ignore] for n, name in enumerate(idx.names) ) for dim, lev in zip(dims, idx.levels, strict=True): xr_idx = PandasIndex(lev, dim) indexes[dim] = xr_idx index_vars.update(xr_idx.create_variables()) arrays += [(k, np.asarray(v)) for k, v in extension_arrays] extension_arrays = [] else: index_name = idx.name if idx.name is not None else "index" dims = (index_name,) xr_idx = PandasIndex(idx, index_name) indexes[index_name] = xr_idx index_vars.update(xr_idx.create_variables()) obj = cls._construct_direct(index_vars, set(index_vars), indexes=indexes) if sparse: obj._set_sparse_data_from_dataframe(idx, arrays, dims) else: obj._set_numpy_data_from_dataframe(idx, arrays, dims) for name, extension_array in extension_arrays: obj[name] = (dims, extension_array) return obj[dataframe.columns] if len(dataframe.columns) else obj def to_dask_dataframe( self, dim_order: Sequence[Hashable] | None = None, set_index: bool = False ) -> DaskDataFrame: """ Convert this dataset into a dask.dataframe.DataFrame. The dimensions, coordinates and data variables in this dataset form the columns of the DataFrame. Parameters ---------- dim_order : list, optional Hierarchical dimension order for the resulting dataframe. All arrays are transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dask dataframe. If provided, must include all dimensions of this dataset. By default, dimensions are sorted alphabetically. set_index : bool, default: False If set_index=True, the dask DataFrame is indexed by this dataset's coordinate. Since dask DataFrames do not support multi-indexes, set_index only works if the dataset only contains one dimension. 
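# --- Illustrative sketch (not part of the library source): round-tripping a
# MultiIndex DataFrame through ``Dataset.from_dataframe``. Missing level
# combinations are filled with NaN in the dense case; ``sparse=True`` instead
# stores each variable as a sparse.COO array and requires the optional
# ``sparse`` package.
import pandas as pd
import xarray as xr

_df = pd.DataFrame(
    {"v": [1.0, 2.0, 3.0]},
    index=pd.MultiIndex.from_tuples(
        [("a", 0), ("a", 1), ("b", 0)], names=["letter", "num"]
    ),
)
_dense = xr.Dataset.from_dataframe(_df)                # ("b", 1) filled with NaN
_sparse = xr.Dataset.from_dataframe(_df, sparse=True)  # stored as sparse.COO
assert _dense["v"].shape == (2, 2)
# --- end sketch ---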
Returns ------- dask.dataframe.DataFrame """ import dask.array as da import dask.dataframe as dd ordered_dims = self._normalize_dim_order(dim_order=dim_order) columns = list(ordered_dims) columns.extend(k for k in self.coords if k not in self.dims) columns.extend(self.data_vars) ds_chunks = self.chunks series_list = [] df_meta = pd.DataFrame() for name in columns: try: var = self.variables[name] except KeyError: # dimension without a matching coordinate size = self.sizes[name] data = da.arange(size, chunks=size, dtype=np.int64) var = Variable((name,), data) # IndexVariable objects have a dummy .chunk() method if isinstance(var, IndexVariable): var = var.to_base_variable() # Make sure var is a dask array, otherwise the array can become too large # when it is broadcasted to several dimensions: if not is_duck_dask_array(var._data): var = var.chunk() # Broadcast then flatten the array: var_new_dims = var.set_dims(ordered_dims).chunk(ds_chunks) dask_array = var_new_dims._data.reshape(-1) series = dd.from_dask_array(dask_array, columns=name, meta=df_meta) series_list.append(series) df = dd.concat(series_list, axis=1) if set_index: dim_order = [*ordered_dims] if len(dim_order) == 1: (dim,) = dim_order df = df.set_index(dim) else: # triggers an error about multi-indexes, even if only one # dimension is passed df = df.set_index(dim_order) return df def to_dict( self, data: bool | Literal["list", "array"] = "list", encoding: bool = False ) -> dict[str, Any]: """ Convert this dataset to a dictionary following xarray naming conventions. Converts all variables and attributes to native Python objects Useful for converting to json. To avoid datetime incompatibility use decode_times=False kwarg in xarrray.open_dataset. Parameters ---------- data : bool or {"list", "array"}, default: "list" Whether to include the actual data in the dictionary. When set to False, returns just the schema. If set to "array", returns data as underlying array type. If set to "list" (or True for backwards compatibility), returns data in lists of Python data types. Note that for obtaining the "list" output efficiently, use `ds.compute().to_dict(data="list")`. encoding : bool, default: False Whether to include the Dataset's encoding in the dictionary. Returns ------- d : dict Dict with keys: "coords", "attrs", "dims", "data_vars" and optionally "encoding". See Also -------- Dataset.from_dict DataArray.to_dict """ d: dict = { "coords": {}, "attrs": decode_numpy_dict_values(self.attrs), "dims": dict(self.sizes), "data_vars": {}, } for k in self.coords: d["coords"].update( {k: self[k].variable.to_dict(data=data, encoding=encoding)} ) for k in self.data_vars: d["data_vars"].update( {k: self[k].variable.to_dict(data=data, encoding=encoding)} ) if encoding: d["encoding"] = dict(self.encoding) return d @classmethod def from_dict(cls, d: Mapping[Any, Any]) -> Self: """Convert a dictionary into an xarray.Dataset. Parameters ---------- d : dict-like Mapping with a minimum structure of ``{"var_0": {"dims": [..], "data": [..]}, \ ...}`` Returns ------- obj : Dataset See also -------- Dataset.to_dict DataArray.from_dict Examples -------- >>> d = { ... "t": {"dims": ("t"), "data": [0, 1, 2]}, ... "a": {"dims": ("t"), "data": ["a", "b", "c"]}, ... "b": {"dims": ("t"), "data": [10, 20, 30]}, ... } >>> ds = xr.Dataset.from_dict(d) >>> ds Size: 60B Dimensions: (t: 3) Coordinates: * t (t) int64 24B 0 1 2 Data variables: a (t) >> d = { ... "coords": { ... "t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}} ... }, ... 
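# --- Illustrative sketch (not part of the library source): minimal use of
# ``Dataset.to_dask_dataframe`` (requires dask). Every dimension, coordinate
# and data variable becomes a lazily evaluated column; ``set_index=True`` is
# only valid when the dataset has a single dimension.
import numpy as np
import xarray as xr

_ds = xr.Dataset(
    {"a": ("x", np.arange(4.0))},
    coords={"x": [0, 1, 2, 3]},
).chunk({"x": 2})
_ddf = _ds.to_dask_dataframe(set_index=True)  # indexed by the single dim "x"
print(_ddf.compute())
# --- end sketch ---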
"attrs": {"title": "air temperature"}, ... "dims": "t", ... "data_vars": { ... "a": {"dims": "t", "data": [10, 20, 30]}, ... "b": {"dims": "t", "data": ["a", "b", "c"]}, ... }, ... } >>> ds = xr.Dataset.from_dict(d) >>> ds Size: 60B Dimensions: (t: 3) Coordinates: * t (t) int64 24B 0 1 2 Data variables: a (t) int64 24B 10 20 30 b (t) Self: variables = {} keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) for k, v in self._variables.items(): if k in self._coord_names: variables[k] = v else: variables[k] = f(v, *args, **kwargs) if keep_attrs: variables[k]._attrs = v._attrs attrs = self._attrs if keep_attrs else None return self._replace_with_new_dims(variables, attrs=attrs) def _binary_op(self, other, f, reflexive=False, join=None) -> Dataset: from xarray.core.dataarray import DataArray from xarray.core.datatree import DataTree from xarray.core.groupby import GroupBy if isinstance(other, DataTree | GroupBy): return NotImplemented align_type = OPTIONS["arithmetic_join"] if join is None else join if isinstance(other, DataArray | Dataset): self, other = align(self, other, join=align_type, copy=False) g = f if not reflexive else lambda x, y: f(y, x) ds = self._calculate_binary_op(g, other, join=align_type) keep_attrs = _get_keep_attrs(default=False) if keep_attrs: ds.attrs = self.attrs return ds def _inplace_binary_op(self, other, f) -> Self: from xarray.core.dataarray import DataArray from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): raise TypeError( "in-place operations between a Dataset and " "a grouped object are not permitted" ) # we don't actually modify arrays in-place with in-place Dataset # arithmetic -- this lets us automatically align things if isinstance(other, DataArray | Dataset): other = other.reindex_like(self, copy=False) g = ops.inplace_to_noninplace_op(f) ds = self._calculate_binary_op(g, other, inplace=True) self._replace_with_new_dims( ds._variables, ds._coord_names, attrs=ds._attrs, indexes=ds._indexes, inplace=True, ) return self def _calculate_binary_op( self, f, other, join="inner", inplace: bool = False ) -> Dataset: def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars): if inplace and set(lhs_data_vars) != set(rhs_data_vars): raise ValueError( "datasets must have the same data variables " f"for in-place arithmetic operations: {list(lhs_data_vars)}, {list(rhs_data_vars)}" ) dest_vars = {} for k in lhs_data_vars: if k in rhs_data_vars: dest_vars[k] = f(lhs_vars[k], rhs_vars[k]) elif join in ["left", "outer"]: dest_vars[k] = f(lhs_vars[k], np.nan) for k in rhs_data_vars: if k not in dest_vars and join in ["right", "outer"]: dest_vars[k] = f(rhs_vars[k], np.nan) return dest_vars if utils.is_dict_like(other) and not isinstance(other, Dataset): # can't use our shortcut of doing the binary operation with # Variable objects, so apply over our data vars instead. 
new_data_vars = apply_over_both( self.data_vars, other, self.data_vars, other ) return type(self)(new_data_vars) other_coords: Coordinates | None = getattr(other, "coords", None) ds = self.coords.merge(other_coords) if isinstance(other, Dataset): new_vars = apply_over_both( self.data_vars, other.data_vars, self.variables, other.variables ) else: other_variable = getattr(other, "variable", other) new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars} ds._variables.update(new_vars) ds._dims = calculate_dimensions(ds._variables) return ds def _copy_attrs_from(self, other): self.attrs = other.attrs for v in other.variables: if v in self.variables: self.variables[v].attrs = other.variables[v].attrs def diff( self, dim: Hashable, n: int = 1, *, label: Literal["upper", "lower"] = "upper", ) -> Self: """Calculate the n-th order discrete difference along given axis. Parameters ---------- dim : Hashable Dimension over which to calculate the finite difference. n : int, default: 1 The number of times values are differenced. label : {"upper", "lower"}, default: "upper" The new coordinate in dimension ``dim`` will have the values of either the minuend's or subtrahend's coordinate for values 'upper' and 'lower', respectively. Returns ------- difference : Dataset The n-th order finite difference of this object. Notes ----- `n` matches numpy's behavior and is different from pandas' first argument named `periods`. Examples -------- >>> ds = xr.Dataset({"foo": ("x", [5, 5, 6, 6])}) >>> ds.diff("x") Size: 24B Dimensions: (x: 3) Dimensions without coordinates: x Data variables: foo (x) int64 24B 0 1 0 >>> ds.diff("x", 2) Size: 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: foo (x) int64 16B 1 -1 See Also -------- Dataset.differentiate """ if n == 0: return self if n < 0: raise ValueError(f"order `n` must be non-negative but got {n}") # prepare slices slice_start = {dim: slice(None, -1)} slice_end = {dim: slice(1, None)} # prepare new coordinate if label == "upper": slice_new = slice_end elif label == "lower": slice_new = slice_start else: raise ValueError("The 'label' argument has to be either 'upper' or 'lower'") indexes, index_vars = isel_indexes(self.xindexes, slice_new) variables = {} for name, var in self.variables.items(): if name in index_vars: variables[name] = index_vars[name] elif dim in var.dims: if name in self.data_vars: variables[name] = var.isel(slice_end) - var.isel(slice_start) else: variables[name] = var.isel(slice_new) else: variables[name] = var difference = self._replace_with_new_dims(variables, indexes=indexes) if n > 1: return difference.diff(dim, n - 1) else: return difference def shift( self, shifts: Mapping[Any, int] | None = None, fill_value: Any = xrdtypes.NA, **shifts_kwargs: int, ) -> Self: """Shift this dataset by an offset along one or more dimensions. Only data variables are moved; coordinates stay in place. This is consistent with the behavior of ``shift`` in pandas. Values shifted from beyond array bounds will appear at one end of each dimension, which are filled according to `fill_value`. For periodic offsets instead see `roll`. Parameters ---------- shifts : mapping of hashable to int Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill values. **shifts_kwargs The keyword arguments form of ``shifts``. 
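# --- Illustrative sketch (not part of the library source): how Dataset
# binary arithmetic aligns its operands. The default "inner" join drops
# non-matching index labels; ``xr.set_options(arithmetic_join=...)`` changes
# that behaviour.
import xarray as xr

_a = xr.Dataset({"v": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
_b = xr.Dataset({"v": ("x", [10, 20, 30])}, coords={"x": [1, 2, 3]})

_inner = _a + _b                      # only x=1 and x=2 survive
assert list(_inner.x.values) == [1, 2]

with xr.set_options(arithmetic_join="outer"):
    _outer = _a + _b                  # x=0 and x=3 are kept, filled with NaN
assert len(_outer.x) == 4
# --- end sketch ---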
One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Dataset Dataset with the same coordinates and attributes but shifted data variables. See Also -------- roll Examples -------- >>> ds = xr.Dataset({"foo": ("x", list("abcde"))}) >>> ds.shift(x=2) Size: 40B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: foo (x) object 40B nan nan 'a' 'b' 'c' """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") invalid = tuple(k for k in shifts if k not in self.dims) if invalid: raise ValueError( f"Dimensions {invalid} not found in data dimensions {tuple(self.dims)}" ) variables = {} for name, var in self.variables.items(): if name in self.data_vars: fill_value_ = ( fill_value.get(name, xrdtypes.NA) if isinstance(fill_value, dict) else fill_value ) var_shifts = {k: v for k, v in shifts.items() if k in var.dims} variables[name] = var.shift(fill_value=fill_value_, shifts=var_shifts) else: variables[name] = var return self._replace(variables) def roll( self, shifts: Mapping[Any, int] | None = None, roll_coords: bool = False, **shifts_kwargs: int, ) -> Self: """Roll this dataset by an offset along one or more dimensions. Unlike shift, roll treats the given dimensions as periodic, so will not create any missing values to be filled. Also unlike shift, roll may rotate all variables, including coordinates if specified. The direction of rotation is consistent with :py:func:`numpy.roll`. Parameters ---------- shifts : mapping of hashable to int, optional A dict with keys matching dimensions and values given by integers to rotate each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. roll_coords : bool, default: False Indicates whether to roll the coordinates by the offset too. **shifts_kwargs : {dim: offset, ...}, optional The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- rolled : Dataset Dataset with the same attributes but rolled data and coordinates. See Also -------- shift Examples -------- >>> ds = xr.Dataset({"foo": ("x", list("abcde"))}, coords={"x": np.arange(5)}) >>> ds.roll(x=2) Size: 60B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 0 1 2 3 4 Data variables: foo (x) >> ds.roll(x=2, roll_coords=True) Size: 60B Dimensions: (x: 5) Coordinates: * x (x) int64 40B 3 4 0 1 2 Data variables: foo (x) Self: """ Sort object by labels or values (along an axis). Sorts the dataset, either along specified dimensions, or according to values of 1-D dataarrays that share dimension with calling object. If the input variables are dataarrays, then the dataarrays are aligned (via left-join) to the calling object prior to sorting by cell values. NaNs are sorted to the end, following Numpy convention. If multiple sorts along the same dimension is given, numpy's lexsort is performed along that dimension: https://numpy.org/doc/stable/reference/generated/numpy.lexsort.html and the FIRST key in the sequence is used as the primary sort key, followed by the 2nd key, etc. Parameters ---------- variables : Hashable, DataArray, sequence of Hashable or DataArray, or Callable 1D DataArray objects or name(s) of 1D variable(s) in coords whose values are used to sort this array. If a callable, the callable is passed this object, and the result is used as the value for cond. ascending : bool, default: True Whether to sort by ascending or descending order. Returns ------- sorted : Dataset A new dataset where all the specified dims are sorted by dim labels. 
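# --- Illustrative sketch (not part of the library source): ``Dataset.shift``
# with a dict-like ``fill_value`` so each data variable gets its own padding
# value; variable names are made up.
import xarray as xr

_ds = xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("x", [10.0, 20.0, 30.0])})
_shifted = _ds.shift(x=1, fill_value={"a": 0, "b": -999.0})
assert int(_shifted["a"][0]) == 0
assert float(_shifted["b"][0]) == -999.0
# --- end sketch ---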
See Also -------- DataArray.sortby numpy.sort pandas.sort_values pandas.sort_index Examples -------- >>> ds = xr.Dataset( ... { ... "A": (("x", "y"), [[1, 2], [3, 4]]), ... "B": (("x", "y"), [[5, 6], [7, 8]]), ... }, ... coords={"x": ["b", "a"], "y": [1, 0]}, ... ) >>> ds.sortby("x") Size: 88B Dimensions: (x: 2, y: 2) Coordinates: * x (x) >> ds.sortby(lambda x: -x["y"]) Size: 88B Dimensions: (x: 2, y: 2) Coordinates: * x (x) Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements for each variable in the Dataset. Parameters ---------- q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or Iterable of Hashable, optional Dimension(s) over which to apply quantile. method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. numeric_only : bool, optional If True, only apply ``func`` to variables with a numeric dtype. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : Dataset If `q` is a single quantile, then the result is a scalar for each variable in data_vars. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return Dataset. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, numpy.quantile, pandas.Series.quantile, DataArray.quantile Examples -------- >>> ds = xr.Dataset( ... {"a": (("x", "y"), [[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]])}, ... coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]}, ... ) >>> ds.quantile(0) # or ds.quantile(0, dim=...) Size: 16B Dimensions: () Coordinates: quantile float64 8B 0.0 Data variables: a float64 8B 0.7 >>> ds.quantile(0, dim="x") Size: 72B Dimensions: (y: 4) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 quantile float64 8B 0.0 Data variables: a (y) float64 32B 0.7 4.2 2.6 1.5 >>> ds.quantile([0, 0.5, 1]) Size: 48B Dimensions: (quantile: 3) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 Data variables: a (quantile) float64 24B 0.7 3.4 9.4 >>> ds.quantile([0, 0.5, 1], dim="x") Size: 152B Dimensions: (quantile: 3, y: 4) Coordinates: * y (y) float64 32B 1.0 1.5 2.0 2.5 * quantile (quantile) float64 24B 0.0 0.5 1.0 Data variables: a (quantile, y) float64 96B 0.7 4.2 2.6 1.5 3.6 ... 6.5 7.3 9.4 1.9 References ---------- .. [1] R. J. Hyndman and Y. 
Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ # interpolation renamed to method in version 0.21.0 # check here and in variable to avoid repeated warnings if interpolation is not None: warnings.warn( "The `interpolation` argument to quantile was renamed to `method`.", FutureWarning, stacklevel=2, ) if method != "linear": raise TypeError("Cannot pass interpolation and method keywords!") method = interpolation dims: set[Hashable] if isinstance(dim, str): dims = {dim} elif dim is None or dim is ...: dims = set(self.dims) else: dims = set(dim) invalid_dims = set(dims) - set(self.dims) if invalid_dims: raise ValueError( f"Dimensions {tuple(invalid_dims)} not found in data dimensions {tuple(self.dims)}" ) q = np.asarray(q, dtype=np.float64) variables = {} for name, var in self.variables.items(): reduce_dims = [d for d in var.dims if d in dims] if reduce_dims or not var.dims: if name not in self.coords and ( not numeric_only or np.issubdtype(var.dtype, np.number) or var.dtype == np.bool_ ): variables[name] = var.quantile( q, dim=reduce_dims, method=method, keep_attrs=keep_attrs, skipna=skipna, ) else: variables[name] = var # construct the new dataset coord_names = {k for k in self.coords if k in variables} indexes = {k: v for k, v in self._indexes.items() if k in variables} if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) attrs = self.attrs if keep_attrs else None new = self._replace_with_new_dims( variables, coord_names=coord_names, attrs=attrs, indexes=indexes ) return new.assign_coords(quantile=q) def rank( self, dim: Hashable, *, pct: bool = False, keep_attrs: bool | None = None, ) -> Self: """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If pct is True, computes percentage ranks. NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : Hashable Dimension over which to compute rank. pct : bool, default: False If True, compute percentage ranks, otherwise compute integer ranks. keep_attrs : bool or None, optional If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- ranked : Dataset Variables that do not depend on `dim` are dropped. """ if not OPTIONS["use_bottleneck"]: raise RuntimeError( "rank requires bottleneck to be enabled." " Call `xr.set_options(use_bottleneck=True)` to enable it." ) if dim not in self.dims: raise ValueError( f"Dimension {dim!r} not found in data dimensions {tuple(self.dims)}" ) variables = {} for name, var in self.variables.items(): if name in self.data_vars: if dim in var.dims: variables[name] = var.rank(dim, pct=pct) else: variables[name] = var coord_names = set(self.coords) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) attrs = self.attrs if keep_attrs else None return self._replace(variables, coord_names, attrs=attrs) def differentiate( self, coord: Hashable, edge_order: Literal[1, 2] = 1, datetime_unit: DatetimeUnitOptions | None = None, ) -> Self: """Differentiate with the second order accurate central differences. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : Hashable The coordinate to be used to compute the gradient. 
edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. datetime_unit : None or {"W", "D", "h", "m", "s", "ms", \ "us", "ns", "ps", "fs", "as", None}, default: None Unit to compute gradient. Only valid for datetime coordinate. Returns ------- differentiated: Dataset See also -------- numpy.gradient: corresponding numpy function """ if coord not in self.variables and coord not in self.dims: variables_and_dims = tuple(set(self.variables.keys()).union(self.dims)) raise ValueError( f"Coordinate {coord!r} not found in variables or dimensions {variables_and_dims}." ) coord_var = self[coord].variable if coord_var.ndim != 1: raise ValueError( f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}" " dimensional" ) dim = coord_var.dims[0] if _contains_datetime_like_objects(coord_var): if coord_var.dtype.kind in "mM" and datetime_unit is None: datetime_unit = cast( "DatetimeUnitOptions", np.datetime_data(coord_var.dtype)[0] ) elif datetime_unit is None: datetime_unit = "s" # Default to seconds for cftime objects coord_var = coord_var._to_numeric(datetime_unit=datetime_unit) variables = {} for k, v in self.variables.items(): if k in self.data_vars and dim in v.dims and k not in self.coords: if _contains_datetime_like_objects(v): v = v._to_numeric(datetime_unit=datetime_unit) grad = duck_array_ops.gradient( v.data, coord_var.data, edge_order=edge_order, axis=v.get_axis_num(dim), ) variables[k] = Variable(v.dims, grad) else: variables[k] = v return self._replace(variables) def integrate( self, coord: Hashable | Sequence[Hashable], datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. Parameters ---------- coord : hashable, or sequence of hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if datetime coordinate is used. Returns ------- integrated : Dataset See also -------- DataArray.integrate numpy.trapz : corresponding numpy function Examples -------- >>> ds = xr.Dataset( ... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])}, ... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])}, ... ) >>> ds Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) int64 32B 5 5 6 6 b (x) int64 32B 1 2 1 0 >>> ds.integrate("x") Size: 16B Dimensions: () Data variables: a float64 8B 16.5 b float64 8B 3.5 >>> ds.integrate("y") Size: 16B Dimensions: () Data variables: a float64 8B 20.0 b float64 8B 4.0 """ if not isinstance(coord, list | tuple): coord = (coord,) result = self for c in coord: result = result._integrate_one(c, datetime_unit=datetime_unit) return result def _integrate_one(self, coord, datetime_unit=None, cumulative=False): from xarray.core.variable import Variable if coord not in self.variables and coord not in self.dims: variables_and_dims = tuple(set(self.variables.keys()).union(self.dims)) raise ValueError( f"Coordinate {coord!r} not found in variables or dimensions {variables_and_dims}." 
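# --- Illustrative sketch (not part of the library source):
# ``Dataset.differentiate`` computes a central-difference gradient of each
# data variable with respect to a 1-D coordinate.
import numpy as np
import xarray as xr

_x = np.array([0.0, 1.0, 2.0, 4.0])
_ds = xr.Dataset({"f": ("x", _x**2)}, coords={"x": _x})
_dfdx = _ds.differentiate("x")   # approximately 2*x away from the edges
print(_dfdx["f"].values)
# --- end sketch ---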
) coord_var = self[coord].variable if coord_var.ndim != 1: raise ValueError( f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}" " dimensional" ) dim = coord_var.dims[0] if _contains_datetime_like_objects(coord_var): if coord_var.dtype.kind in "mM" and datetime_unit is None: datetime_unit, _ = np.datetime_data(coord_var.dtype) elif datetime_unit is None: datetime_unit = "s" # Default to seconds for cftime objects coord_var = coord_var._replace( data=datetime_to_numeric(coord_var.data, datetime_unit=datetime_unit) ) variables = {} coord_names = set() for k, v in self.variables.items(): if k in self.coords: if dim not in v.dims or cumulative: variables[k] = v coord_names.add(k) elif k in self.data_vars and dim in v.dims: coord_data = to_like_array(coord_var.data, like=v.data) if _contains_datetime_like_objects(v): v = datetime_to_numeric(v, datetime_unit=datetime_unit) if cumulative: integ = duck_array_ops.cumulative_trapezoid( v.data, coord_data, axis=v.get_axis_num(dim) ) v_dims = v.dims else: integ = duck_array_ops.trapz( v.data, coord_data, axis=v.get_axis_num(dim) ) v_dims = list(v.dims) v_dims.remove(dim) variables[k] = Variable(v_dims, integ) else: variables[k] = v indexes = {k: v for k, v in self._indexes.items() if k in variables} return self._replace_with_new_dims( variables, coord_names=coord_names, indexes=indexes ) def cumulative_integrate( self, coord: Hashable | Sequence[Hashable], datetime_unit: DatetimeUnitOptions = None, ) -> Self: """Integrate along the given coordinate using the trapezoidal rule. .. note:: This feature is limited to simple cartesian geometry, i.e. coord must be one dimensional. The first entry of the cumulative integral of each variable is always 0, in order to keep the length of the dimension unchanged between input and output. Parameters ---------- coord : hashable, or sequence of hashable Coordinate(s) used for the integration. datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as', None}, optional Specify the unit if datetime coordinate is used. Returns ------- integrated : Dataset See also -------- DataArray.cumulative_integrate scipy.integrate.cumulative_trapezoid : corresponding scipy function Examples -------- >>> ds = xr.Dataset( ... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])}, ... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])}, ... ) >>> ds Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) int64 32B 5 5 6 6 b (x) int64 32B 1 2 1 0 >>> ds.cumulative_integrate("x") Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) float64 32B 0.0 5.0 10.5 16.5 b (x) float64 32B 0.0 1.5 3.0 3.5 >>> ds.cumulative_integrate("y") Size: 128B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 y (x) int64 32B 1 7 3 5 Data variables: a (x) float64 32B 0.0 30.0 8.0 20.0 b (x) float64 32B 0.0 9.0 3.0 4.0 """ if not isinstance(coord, list | tuple): coord = (coord,) result = self for c in coord: result = result._integrate_one( c, datetime_unit=datetime_unit, cumulative=True ) return result @property def real(self) -> Self: """ The real part of each data variable. See Also -------- numpy.ndarray.real """ return self.map(lambda x: x.real, keep_attrs=True) @property def imag(self) -> Self: """ The imaginary part of each data variable. 
See Also -------- numpy.ndarray.imag """ return self.map(lambda x: x.imag, keep_attrs=True) plot = utils.UncachedAccessor(DatasetPlotAccessor) def filter_by_attrs(self, **kwargs) -> Self: """Returns a ``Dataset`` with variables that match specific conditions. Can pass in ``key=value`` or ``key=callable``. A Dataset is returned containing only the variables for which all the filter tests pass. These tests are either ``key=value`` for which the attribute ``key`` has the exact value ``value`` or the callable passed into ``key=callable`` returns True. The callable will be passed a single value, either the value of the attribute ``key`` or ``None`` if the DataArray does not have an attribute with the name ``key``. Parameters ---------- **kwargs key : str Attribute name. value : callable or obj If value is a callable, it should return a boolean in the form of bool = func(attr) where attr is da.attrs[key]. Otherwise, value will be compared to the each DataArray's attrs[key]. Returns ------- new : Dataset New dataset with variables filtered by attribute. Examples -------- >>> temp = 15 + 8 * np.random.randn(2, 2, 3) >>> precip = 10 * np.random.rand(2, 2, 3) >>> lon = [[-99.83, -99.32], [-99.79, -99.23]] >>> lat = [[42.25, 42.21], [42.63, 42.59]] >>> dims = ["x", "y", "time"] >>> temp_attr = dict(standard_name="air_potential_temperature") >>> precip_attr = dict(standard_name="convective_precipitation_flux") >>> ds = xr.Dataset( ... dict( ... temperature=(dims, temp, temp_attr), ... precipitation=(dims, precip, precip_attr), ... ), ... coords=dict( ... lon=(["x", "y"], lon), ... lat=(["x", "y"], lat), ... time=pd.date_range("2014-09-06", periods=3), ... reference_time=pd.Timestamp("2014-09-05"), ... ), ... ) Get variables matching a specific standard_name: >>> ds.filter_by_attrs(standard_name="convective_precipitation_flux") Size: 192B Dimensions: (x: 2, y: 2, time: 3) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805 Get all variables that have a standard_name attribute: >>> standard_name = lambda v: v is not None >>> ds.filter_by_attrs(standard_name=standard_name) Size: 288B Dimensions: (x: 2, y: 2, time: 3) Coordinates: lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23 lat (x, y) float64 32B 42.25 42.21 42.63 42.59 * time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08 reference_time datetime64[ns] 8B 2014-09-05 Dimensions without coordinates: x, y Data variables: temperature (x, y, time) float64 96B 29.11 18.2 22.83 ... 16.15 26.63 precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805 """ selection = [] for var_name, variable in self.variables.items(): has_value_flag = False for attr_name, pattern in kwargs.items(): attr_value = variable.attrs.get(attr_name) if (callable(pattern) and pattern(attr_value)) or attr_value == pattern: has_value_flag = True else: has_value_flag = False break if has_value_flag is True: selection.append(var_name) return self[selection] def unify_chunks(self) -> Self: """Unify chunk size along all chunked dimensions of this Dataset. 
Returns ------- Dataset with consistent chunk sizes for all dask-array variables See Also -------- dask.array.core.unify_chunks """ return unify_chunks(self)[0] def map_blocks( self, func: Callable[..., T_Xarray], args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, template: DataArray | Dataset | None = None, ) -> T_Xarray: """ Apply a function to each block of this Dataset. .. warning:: This method is experimental and its signature may change. Parameters ---------- func : callable User-provided function that accepts a Dataset as its first parameter. The function will receive a subset or 'block' of this Dataset (see below), corresponding to one chunk along each chunked dimension. ``func`` will be executed as ``func(subset_dataset, *subset_args, **kwargs)``. This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. kwargs : Mapping or None Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. template : DataArray, Dataset or None, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like this object but has sizes 0, to determine properties of the returned object such as dtype, variable names, attributes, new dimensions and new indexes (if any). ``template`` must be provided if the function changes the size of existing dimensions. When provided, ``attrs`` on variables in `template` are copied over to the result. Any ``attrs`` set by ``func`` will be ignored. Returns ------- A single DataArray or Dataset with dask backend, reassembled from the outputs of the function. Notes ----- This function is designed for when ``func`` needs to manipulate a whole xarray object subset to each block. Each block is loaded into memory. In the more common case where ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``. If none of the variables in this object is backed by dask arrays, calling this function is equivalent to calling ``func(obj, *args, **kwargs)``. See Also -------- :func:`dask.array.map_blocks ` :func:`xarray.apply_ufunc ` :func:`xarray.DataArray.map_blocks ` :doc:`xarray-tutorial:advanced/map_blocks/map_blocks` Advanced Tutorial on map_blocks with dask Examples -------- Calculate an anomaly from climatology using ``.groupby()``. Using ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``, its indices, and its methods like ``.groupby()``. >>> def calculate_anomaly(da, groupby_type="time.month"): ... gb = da.groupby(groupby_type) ... clim = gb.mean(dim="time") ... return gb - clim ... >>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True) >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"]) >>> np.random.seed(123) >>> array = xr.DataArray( ... np.random.rand(len(time)), ... dims=["time"], ... coords={"time": time, "month": month}, ... ).chunk() >>> ds = xr.Dataset({"a": array}) >>> ds.map_blocks(calculate_anomaly, template=ds).compute() Size: 576B Dimensions: (time: 24) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 
3 4 5 6 7 8 9 10 11 12 Data variables: a (time) float64 192B 0.1289 0.1132 -0.0856 ... 0.1906 -0.05901 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: >>> ds.map_blocks( ... calculate_anomaly, ... kwargs={"groupby_type": "time.year"}, ... template=ds, ... ) Size: 576B Dimensions: (time: 24) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B dask.array Data variables: a (time) float64 192B dask.array """ from xarray.core.parallel import map_blocks return map_blocks(func, self, args, kwargs, template) def polyfit( self, dim: Hashable, deg: int, skipna: bool | None = None, rcond: float | None = None, w: Hashable | Any = None, full: bool = False, cov: bool | Literal["unscaled"] = False, ) -> Self: """ Least squares polynomial fit. This replicates the behaviour of `numpy.polyfit` but differs by skipping invalid values when `skipna = True`. Parameters ---------- dim : hashable Coordinate along which to fit the polynomials. deg : int Degree of the fitting polynomial. skipna : bool or None, optional If True, removes all invalid values before fitting each 1D slices of the array. Default is True if data is stored in a dask.array or if there is any invalid values, False otherwise. rcond : float or None, optional Relative condition number to the fit. w : hashable or Any, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, default: False Whether to return the residuals, matrix rank and singular values in addition to the coefficients. cov : bool or "unscaled", default: False Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. Returns ------- polyfit_results : Dataset A single dataset which contains (for each "var" in the input dataset): [var]_polyfit_coefficients The coefficients of the best fit for each variable in this dataset. [var]_polyfit_residuals The residuals of the least-square computation for each variable (only included if `full=True`) When the matrix rank is deficient, np.nan is returned. [dim]_matrix_rank The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`) The rank is computed ignoring the NaN values that might be skipped. [dim]_singular_values The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`) [var]_polyfit_covariance The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`) Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is not raised with in-memory (not dask) data and `full=True`. See Also -------- numpy.polyfit numpy.polyval xarray.polyval """ from xarray.computation.fit import polyfit as polyfit_impl return polyfit_impl(self, dim, deg, skipna, rcond, w, full, cov) def pad( self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: ( int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None ) = None, constant_values: T_DatasetPadConstantValues | None = None, end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None, reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, ) -> Self: """Pad this dataset along one or more dimensions. .. 
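# --- Illustrative sketch (not part of the library source): fit a degree-1
# polynomial to each data variable with ``Dataset.polyfit`` along "x" and
# evaluate it again with ``xr.polyval``.
import numpy as np
import xarray as xr

_x = np.arange(5.0)
_ds = xr.Dataset({"y": ("x", 2.0 * _x + 1.0)}, coords={"x": _x})
_fit = _ds.polyfit("x", deg=1)                    # -> y_polyfit_coefficients
_coeffs = _fit["y_polyfit_coefficients"]
_reconstructed = xr.polyval(_ds["x"], _coeffs)
np.testing.assert_allclose(_reconstructed, _ds["y"])
# --- end sketch ---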
warning:: This function is experimental and its behaviour is likely to change especially regarding padding of dimension coordinates (or IndexVariables). When using one of the modes ("edge", "reflect", "symmetric", "wrap"), coordinates will be padded with the same mode, otherwise coordinates are padded using the "constant" mode with fill_value dtypes.NA. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : {"constant", "edge", "linear_ramp", "maximum", "mean", "median", \ "minimum", "reflect", "symmetric", "wrap"}, default: "constant" How to pad the DataArray (taken from numpy docs): - "constant": Pads with a constant value. - "edge": Pads with the edge values of array. - "linear_ramp": Pads with the linear ramp between end_value and the array edge value. - "maximum": Pads with the maximum value of all or part of the vector along each axis. - "mean": Pads with the mean value of all or part of the vector along each axis. - "median": Pads with the median value of all or part of the vector along each axis. - "minimum": Pads with the minimum value of all or part of the vector along each axis. - "reflect": Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - "symmetric": Pads with the reflection of the vector mirrored along the edge of the array. - "wrap": Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. stat_length : int, tuple or mapping of hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique statistic lengths along each dimension. ((before, after),) yields same before and after statistic lengths for each dimension. (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. constant_values : scalar, tuple, mapping of dim name to scalar or tuple, or \ mapping of var name to scalar, tuple or to mapping of dim name to scalar or tuple, default: None Used in 'constant'. The values to set the padded values for each data variable / axis. ``{var_1: {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}, ... var_M: (before, after)}`` unique pad constants per data variable. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique pad constants along each dimension. ``((before, after),)`` yields same before and after constants for each dimension. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is ``None``, pads with ``np.nan``. end_values : scalar, tuple or mapping of hashable to tuple, default: None Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique end values along each dimension. ``((before, after),)`` yields same before and after end values for each axis. ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is None. reflect_type : {"even", "odd", None}, optional Used in "reflect", and "symmetric". 
The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. Returns ------- padded : Dataset Dataset with the padded coordinates and data. See Also -------- Dataset.shift, Dataset.roll, Dataset.bfill, Dataset.ffill, numpy.pad, dask.array.pad Notes ----- By default when ``mode="constant"`` and ``constant_values=None``, integer types will be promoted to ``float`` and padded with ``np.nan``. To avoid type promotion specify ``constant_values=np.nan`` Padding coordinates will drop their corresponding index (if any) and will reset default indexes for dimension coordinates. Examples -------- >>> ds = xr.Dataset({"foo": ("x", range(5))}) >>> ds.pad(x=(1, 2)) Size: 64B Dimensions: (x: 8) Dimensions without coordinates: x Data variables: foo (x) float64 64B nan 0.0 1.0 2.0 3.0 4.0 nan nan """ pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") if mode in ("edge", "reflect", "symmetric", "wrap"): coord_pad_mode = mode coord_pad_options = { "stat_length": stat_length, "constant_values": constant_values, "end_values": end_values, "reflect_type": reflect_type, } else: coord_pad_mode = "constant" coord_pad_options = {} if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) variables = {} # keep indexes that won't be affected by pad and drop all other indexes xindexes = self.xindexes pad_dims = set(pad_width) indexes = { k: idx for k, idx in xindexes.items() if not pad_dims.intersection(xindexes.get_all_dims(k)) } for name, var in self.variables.items(): var_pad_width = {k: v for k, v in pad_width.items() if k in var.dims} if not var_pad_width: variables[name] = var elif name in self.data_vars: if utils.is_dict_like(constant_values): if name in constant_values.keys(): filtered_constant_values = constant_values[name] elif not set(var.dims).isdisjoint(constant_values.keys()): filtered_constant_values = { k: v for k, v in constant_values.items() if k in var.dims } else: filtered_constant_values = 0 # TODO: https://github.com/pydata/xarray/pull/9353#discussion_r1724018352 else: filtered_constant_values = constant_values variables[name] = var.pad( pad_width=var_pad_width, mode=mode, stat_length=stat_length, constant_values=filtered_constant_values, end_values=end_values, reflect_type=reflect_type, keep_attrs=keep_attrs, ) else: variables[name] = var.pad( pad_width=var_pad_width, mode=coord_pad_mode, keep_attrs=keep_attrs, **coord_pad_options, # type: ignore[arg-type] ) # reset default index of dimension coordinates if (name,) == var.dims: dim_var = {name: variables[name]} index = PandasIndex.from_variables(dim_var, options={}) index_vars = index.create_variables(dim_var) indexes[name] = index variables[name] = index_vars[name] attrs = self._attrs if keep_attrs else None return self._replace_with_new_dims(variables, indexes=indexes, attrs=attrs) def idxmin( self, dim: Hashable | None = None, *, skipna: bool | None = None, fill_value: Any = xrdtypes.NA, keep_attrs: bool | None = None, ) -> Self: """Return the coordinate label of the minimum value along a dimension. 
Returns a new `Dataset` named after the dimension with the values of the coordinate labels along that dimension corresponding to minimum values along that dimension. In comparison to :py:meth:`~Dataset.argmin`, this returns the coordinate label while :py:meth:`~Dataset.argmin` returns the index. Parameters ---------- dim : Hashable, optional Dimension over which to apply `idxmin`. This is optional for 1D variables, but required for variables with 2 or more dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. If False, the new object will be returned without attributes. Returns ------- reduced : Dataset New `Dataset` object with `idxmin` applied to its data and the indicated dimension removed. See Also -------- DataArray.idxmin, Dataset.idxmax, Dataset.min, Dataset.argmin Examples -------- >>> array1 = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array2 = xr.DataArray( ... [ ... [2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]}, ... ) >>> ds = xr.Dataset({"int": array1, "float": array2}) >>> ds.min(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B -2 float (y) float64 24B -2.0 -4.0 1.0 >>> ds.argmin(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B 4 float (y) int64 24B 4 0 2 >>> ds.idxmin(dim="x") Size: 52B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int Self: """Return the coordinate label of the maximum value along a dimension. Returns a new `Dataset` named after the dimension with the values of the coordinate labels along that dimension corresponding to maximum values along that dimension. In comparison to :py:meth:`~Dataset.argmax`, this returns the coordinate label while :py:meth:`~Dataset.argmax` returns the index. Parameters ---------- dim : str, optional Dimension over which to apply `idxmax`. This is optional for 1D variables, but required for variables with 2 or more dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. keep_attrs : bool or None, optional If True, the attributes (``attrs``) will be copied from the original object to the new one. 
If False, the new object will be returned without attributes. Returns ------- reduced : Dataset New `Dataset` object with `idxmax` applied to its data and the indicated dimension removed. See Also -------- DataArray.idxmax, Dataset.idxmin, Dataset.max, Dataset.argmax Examples -------- >>> array1 = xr.DataArray( ... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]} ... ) >>> array2 = xr.DataArray( ... [ ... [2.0, 1.0, 2.0, 0.0, -2.0], ... [-4.0, np.nan, 2.0, np.nan, -2.0], ... [np.nan, np.nan, 1.0, np.nan, np.nan], ... ], ... dims=["y", "x"], ... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]}, ... ) >>> ds = xr.Dataset({"int": array1, "float": array2}) >>> ds.max(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B 2 float (y) float64 24B 2.0 2.0 1.0 >>> ds.argmax(dim="x") Size: 56B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int int64 8B 1 float (y) int64 24B 0 2 2 >>> ds.idxmax(dim="x") Size: 52B Dimensions: (y: 3) Coordinates: * y (y) int64 24B -1 0 1 Data variables: int Self: """Indices of the minima of the member variables. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : Hashable, optional The dimension over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will be an error, since DataArray.argmin will return a dict with indices for all dimensions, which does not make sense for a Dataset. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Dataset Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 79], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [39, 96, 78]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... ) # Indices of the minimum values along the 'student' dimension are calculated >>> argmin_indices = dataset.argmin(dim="student") >>> min_score_in_math = dataset["student"].isel( ... student=argmin_indices["math_scores"] ... ) >>> min_score_in_math Size: 84B array(['Bob', 'Bob', 'Alice'], dtype='>> min_score_in_english = dataset["student"].isel( ... student=argmin_indices["english_scores"] ... ) >>> min_score_in_english Size: 84B array(['Charlie', 'Bob', 'Charlie'], dtype=' Self: """Indices of the maxima of the member variables. If there are multiple maxima, the indices of the first one found will be returned. Parameters ---------- dim : str, optional The dimension over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will be an error, since DataArray.argmax will return a dict with indices for all dimensions, which does not make sense for a Dataset. 
keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Dataset Examples -------- >>> dataset = xr.Dataset( ... { ... "math_scores": ( ... ["student", "test"], ... [[90, 85, 92], [78, 80, 85], [95, 92, 98]], ... ), ... "english_scores": ( ... ["student", "test"], ... [[88, 90, 92], [75, 82, 79], [93, 96, 91]], ... ), ... }, ... coords={ ... "student": ["Alice", "Bob", "Charlie"], ... "test": ["Test 1", "Test 2", "Test 3"], ... }, ... ) # Indices of the maximum values along the 'student' dimension are calculated >>> argmax_indices = dataset.argmax(dim="test") >>> argmax_indices Size: 132B Dimensions: (student: 3) Coordinates: * student (student) Self | T_DataArray: """ Calculate an expression supplied as a string in the context of the dataset. This is currently experimental; the API may change particularly around assignments, which currently return a ``Dataset`` with the additional variable. Currently only the ``python`` engine is supported, which has the same performance as executing in python. Parameters ---------- statement : str String containing the Python-like expression to evaluate. Returns ------- result : Dataset or DataArray, depending on whether ``statement`` contains an assignment. Examples -------- >>> ds = xr.Dataset( ... {"a": ("x", np.arange(0, 5, 1)), "b": ("x", np.linspace(0, 1, 5))} ... ) >>> ds Size: 80B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 >>> ds.eval("a + b") Size: 40B array([0. , 1.25, 2.5 , 3.75, 5. ]) Dimensions without coordinates: x >>> ds.eval("c = a + b") Size: 120B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 c (x) float64 40B 0.0 1.25 2.5 3.75 5.0 """ return pd.eval( # type: ignore[return-value] statement, resolvers=[self], target=self, parser=parser, # Because numexpr returns a numpy array, using that engine results in # different behavior. We'd be very open to a contribution handling this. engine="python", ) def query( self, queries: Mapping[Any, Any] | None = None, parser: QueryParserOptions = "pandas", engine: QueryEngineOptions = None, missing_dims: ErrorOptionsWithWarn = "raise", **queries_kwargs: Any, ) -> Self: """Return a new dataset with each array indexed along the specified dimension(s), where the indexers are given as strings containing Python expressions to be evaluated against the data variables in the dataset. Parameters ---------- queries : dict-like, optional A dict-like with keys matching dimensions and values given by strings containing Python expressions to be evaluated against the data variables in the dataset. The expressions will be evaluated using the pandas eval() function, and can contain any valid Python expressions but cannot contain any Python statements. parser : {"pandas", "python"}, default: "pandas" The parser to use to construct the syntax tree from the expression. The default of 'pandas' parses code slightly different than standard Python. 
Alternatively, you can parse an expression using the 'python' parser to retain strict Python semantics. engine : {"python", "numexpr", None}, default: None The engine used to evaluate the expression. Supported engines are: - None: tries to use numexpr, falls back to python - "numexpr": evaluates expressions using numexpr - "python": performs operations as if you had eval’d in top level python missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **queries_kwargs : {dim: query, ...}, optional The keyword arguments form of ``queries``. One of queries or queries_kwargs must be provided. Returns ------- obj : Dataset A new Dataset with the same contents as this dataset, except each array and dimension is indexed by the results of the appropriate queries. See Also -------- Dataset.isel pandas.eval Examples -------- >>> a = np.arange(0, 5, 1) >>> b = np.linspace(0, 1, 5) >>> ds = xr.Dataset({"a": ("x", a), "b": ("x", b)}) >>> ds Size: 80B Dimensions: (x: 5) Dimensions without coordinates: x Data variables: a (x) int64 40B 0 1 2 3 4 b (x) float64 40B 0.0 0.25 0.5 0.75 1.0 >>> ds.query(x="a > 2") Size: 32B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: a (x) int64 16B 3 4 b (x) float64 16B 0.75 1.0 """ # allow queries to be given either as a dict or as kwargs queries = either_dict_or_kwargs(queries, queries_kwargs, "query") # check queries for dim, expr in queries.items(): if not isinstance(expr, str): msg = f"expr for dim {dim} must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) # evaluate the queries to create the indexers indexers = { dim: pd.eval(expr, resolvers=[self], parser=parser, engine=engine) for dim, expr in queries.items() } # apply the selection return self.isel(indexers, missing_dims=missing_dims) def curvefit( self, coords: str | DataArray | Iterable[str | DataArray], func: Callable[..., Any], reduce_dims: Dims = None, skipna: bool = True, p0: Mapping[str, float | DataArray] | None = None, bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None, param_names: Sequence[str] | None = None, errors: ErrorOptions = "raise", kwargs: dict[str, Any] | None = None, ) -> Self: """ Curve fitting optimization for arbitrary functions. Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`. Parameters ---------- coords : hashable, DataArray, or sequence of hashable or DataArray Independent coordinate(s) over which to perform the curve fitting. Must share at least one dimension with the calling object. When fitting multi-dimensional functions, supply `coords` as a sequence in the same order as arguments in `func`. To fit along existing dimensions of the calling object, `coords` can also be specified as a str or sequence of strs. func : callable User specified function in the form `f(x, *params)` which returns a numpy array of length `len(x)`. `params` are the fittable parameters which are optimized by scipy curve_fit. `x` can also be specified as a sequence containing multiple coordinates, e.g. `f((x0, x1), *params)`. reduce_dims : str, Iterable of Hashable or None, optional Additional dimension(s) over which to aggregate while fitting. 
For example, calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will aggregate all lat and lon points and fit the specified function along the time dimension. skipna : bool, default: True Whether to skip missing values when fitting. Default is True. p0 : dict-like, optional Optional dictionary of parameter names to initial guesses passed to the `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be assigned initial values following the default scipy behavior. bounds : dict-like, optional Optional dictionary of parameter names to tuples of bounding values passed to the `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be appropriately broadcast to the coordinates of the array. If none or only some parameters are passed, the rest will be unbounded following the default scipy behavior. param_names : sequence of hashable, optional Sequence of names for the fittable parameters of `func`. If not supplied, this will be automatically determined by arguments of `func`. `param_names` should be manually supplied when fitting a function that takes a variable number of parameters. errors : {"raise", "ignore"}, default: "raise" If 'raise', any errors from the `scipy.optimize_curve_fit` optimization will raise an exception. If 'ignore', the coefficients and covariances for the coordinates where the fitting failed will be NaN. **kwargs : optional Additional keyword arguments to passed to scipy curve_fit. Returns ------- curvefit_results : Dataset A single dataset which contains: [var]_curvefit_coefficients The coefficients of the best fit. [var]_curvefit_covariance The covariance matrix of the coefficient estimates. See Also -------- Dataset.polyfit scipy.optimize.curve_fit xarray.Dataset.xlm.modelfit External method from `xarray-lmfit `_ with more curve fitting functionality. """ from xarray.computation.fit import curvefit as curvefit_impl return curvefit_impl( self, coords, func, reduce_dims, skipna, p0, bounds, param_names, errors, kwargs, ) def drop_duplicates( self, dim: Hashable | Iterable[Hashable], *, keep: Literal["first", "last", False] = "first", ) -> Self: """Returns a new Dataset with duplicate dimension values removed. Parameters ---------- dim : dimension label or labels Pass `...` to drop duplicates along all dimensions. keep : {"first", "last", False}, default: "first" Determines which duplicates (if any) to keep. - ``"first"`` : Drop duplicates except for the first occurrence. - ``"last"`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. Returns ------- Dataset See Also -------- DataArray.drop_duplicates """ if isinstance(dim, str): dims: Iterable = (dim,) elif dim is ...: dims = self.dims elif not isinstance(dim, Iterable): dims = [dim] else: dims = dim missing_dims = set(dims) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}" ) indexes = {dim: ~self.get_index(dim).duplicated(keep=keep) for dim in dims} return self.isel(indexes) def convert_calendar( self, calendar: CFCalendar, dim: Hashable = "time", align_on: Literal["date", "year"] | None = None, missing: Any | None = None, use_cftime: bool | None = None, ) -> Self: """Convert the Dataset to another calendar. Only converts the individual timestamps, does not modify any data except in dropping invalid/surplus dates or inserting missing dates. 
If the source and target calendars are either no_leap, all_leap or a standard type, only the type of the time array is modified. When converting to a leap year from a non-leap year, the 29th of February is removed from the array. In the other direction the 29th of February will be missing in the output, unless `missing` is specified, in which case that value is inserted. For conversions involving `360_day` calendars, see Notes. This method is safe to use with sub-daily data as it doesn't touch the time part of the timestamps. Parameters --------- calendar : str The target calendar name. dim : Hashable, default: "time" Name of the time coordinate. align_on : {None, 'date', 'year'}, optional Must be specified when either source or target is a `360_day` calendar, ignored otherwise. See Notes. missing : Any or None, optional By default, i.e. if the value is None, this method will simply attempt to convert the dates in the source calendar to the same dates in the target calendar, and drop any of those that are not possible to represent. If a value is provided, a new time coordinate will be created in the target calendar with the same frequency as the original time coordinate; for any dates that are not present in the source, the data will be filled with this value. Note that using this mode requires that the source data have an inferable frequency; for more information see :py:func:`xarray.infer_freq`. For certain frequency, source, and target calendar combinations, this could result in many missing values, see notes. use_cftime : bool or None, optional Whether to use cftime objects in the output, only used if `calendar` is one of {"proleptic_gregorian", "gregorian" or "standard"}. If True, the new time axis uses cftime objects. If None (default), it uses :py:class:`numpy.datetime64` values if the date range permits it, and :py:class:`cftime.datetime` objects if not. If False, it uses :py:class:`numpy.datetime64` or fails. Returns ------- Dataset Copy of the dataarray with the time coordinate converted to the target calendar. If 'missing' was None (default), invalid dates in the new calendar are dropped, but missing dates are not inserted. If `missing` was given, the new data is reindexed to have a time axis with the same frequency as the source, but in the new calendar; any missing datapoints are filled with `missing`. Notes ----- Passing a value to `missing` is only usable if the source's time coordinate as an inferable frequencies (see :py:func:`~xarray.infer_freq`) and is only appropriate if the target coordinate, generated from this frequency, has dates equivalent to the source. It is usually **not** appropriate to use this mode with: - Period-end frequencies : 'A', 'Y', 'Q' or 'M', in opposition to 'AS' 'YS', 'QS' and 'MS' - Sub-monthly frequencies that do not divide a day evenly : 'W', 'nD' where `N != 1` or 'mH' where 24 % m != 0). If one of the source or target calendars is `"360_day"`, `align_on` must be specified and two options are offered. - "year" The dates are translated according to their relative position in the year, ignoring their original month and day information, meaning that the missing/surplus days are added/removed at regular intervals. From a `360_day` to a standard calendar, the output will be missing the following dates (day of year in parentheses): To a leap year: January 31st (31), March 31st (91), June 1st (153), July 31st (213), September 31st (275) and November 30th (335). 
To a non-leap year: February 6th (36), April 19th (109), July 2nd (183), September 12th (255), November 25th (329). From a standard calendar to a `"360_day"`, the following dates in the source array will be dropped: From a leap year: January 31st (31), April 1st (92), June 1st (153), August 1st (214), September 31st (275), December 1st (336) From a non-leap year: February 6th (37), April 20th (110), July 2nd (183), September 13th (256), November 25th (329) This option is best used on daily and subdaily data. - "date" The month/day information is conserved and invalid dates are dropped from the output. This means that when converting from a `"360_day"` to a standard calendar, all 31st (Jan, March, May, July, August, October and December) will be missing as there is no equivalent dates in the `"360_day"` calendar and the 29th (on non-leap years) and 30th of February will be dropped as there are no equivalent dates in a standard calendar. This option is best used with data on a frequency coarser than daily. """ return convert_calendar( self, calendar, dim=dim, align_on=align_on, missing=missing, use_cftime=use_cftime, ) def interp_calendar( self, target: pd.DatetimeIndex | CFTimeIndex | DataArray, dim: Hashable = "time", ) -> Self: """Interpolates the Dataset to another calendar based on decimal year measure. Each timestamp in `source` and `target` are first converted to their decimal year equivalent then `source` is interpolated on the target coordinate. The decimal year of a timestamp is its year plus its sub-year component converted to the fraction of its year. For example "2000-03-01 12:00" is 2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar. This method should only be used when the time (HH:MM:SS) information of time coordinate is not important. Parameters ---------- target: DataArray or DatetimeIndex or CFTimeIndex The target time coordinate of a valid dtype (np.datetime64 or cftime objects) dim : Hashable, default: "time" The time coordinate name. Return ------ DataArray The source interpolated on the decimal years of target, """ return interp_calendar(self, target, dim=dim) @_deprecate_positional_args("v2024.07.0") def groupby( self, group: GroupInput = None, *, squeeze: Literal[False] = False, restore_coord_dims: bool = False, eagerly_compute_group: Literal[False] | None = None, **groupers: Grouper, ) -> DatasetGroupBy: """Returns a DatasetGroupBy object for performing grouped operations. Parameters ---------- group : str or DataArray or IndexVariable or sequence of hashable or mapping of hashable to Grouper Array whose unique values should be used to group this array. If a Hashable, must be the name of a coordinate contained in this dataarray. If a dictionary, must map an existing variable name to a :py:class:`Grouper` instance. squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. eagerly_compute_group: False, optional This argument is deprecated. **groupers : Mapping of str to Grouper or Resampler Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object. One of ``group`` or ``groupers`` must be provided. Only a single ``grouper`` is allowed at present. Returns ------- grouped : DatasetGroupBy A `DatasetGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. Examples -------- >>> ds = xr.Dataset( ... 
{"foo": (("x", "y"), np.arange(12).reshape((4, 3)))}, ... coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ... ) Grouping by a single variable is easy >>> ds.groupby("letters") Execute a reduction >>> ds.groupby("letters").sum() Size: 64B Dimensions: (letters: 2, y: 3) Coordinates: * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y Data variables: foo (letters, y) int64 48B 9 11 13 9 11 13 Grouping by multiple variables >>> ds.groupby(["letters", "x"]) Use Grouper objects to express more complicated GroupBy operations >>> from xarray.groupers import BinGrouper, UniqueGrouper >>> >>> ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum() Size: 144B Dimensions: (y: 3, x_bins: 2, letters: 2) Coordinates: * x_bins (x_bins) interval[int64, right] 32B (5, 15] (15, 25] * letters (letters) object 16B 'a' 'b' Dimensions without coordinates: y Data variables: foo (y, x_bins, letters) float64 96B 0.0 nan nan 3.0 ... nan nan 5.0 See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns` Tutorial on :py:func:`~xarray.Dataset.Groupby` for windowed computation. :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray` Tutorial on :py:func:`~xarray.Dataset.Groupby` demonstrating reductions, transformation and comparison with :py:func:`~xarray.Dataset.resample`. :external:py:meth:`pandas.DataFrame.groupby ` :func:`Dataset.groupby_bins ` :func:`DataArray.groupby ` :class:`core.groupby.DatasetGroupBy` :func:`Dataset.coarsen ` :func:`Dataset.resample ` :func:`DataArray.resample ` """ from xarray.core.groupby import ( DatasetGroupBy, _parse_group_and_groupers, _validate_groupby_squeeze, ) _validate_groupby_squeeze(squeeze) rgroupers = _parse_group_and_groupers( self, group, groupers, eagerly_compute_group=eagerly_compute_group ) return DatasetGroupBy(self, rgroupers, restore_coord_dims=restore_coord_dims) @_deprecate_positional_args("v2024.07.0") def groupby_bins( self, group: Hashable | DataArray | IndexVariable, bins: Bins, right: bool = True, labels: ArrayLike | None = None, precision: int = 3, include_lowest: bool = False, squeeze: Literal[False] = False, restore_coord_dims: bool = False, duplicates: Literal["raise", "drop"] = "raise", eagerly_compute_group: Literal[False] | None = None, ) -> DatasetGroupBy: """Returns a DatasetGroupBy object for performing grouped operations. Rather than using all unique values of `group`, the values are discretized first by applying `pandas.cut` [1]_ to `group`. Parameters ---------- group : Hashable, DataArray or IndexVariable Array whose binned values should be used to group this array. If a string, must be the name of a variable contained in this dataset. bins : int or array-like If bins is an int, it defines the number of equal-width bins in the range of x. However, in this case, the range of x is extended by .1% on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. right : bool, default: True Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. labels : array-like or bool, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. 
precision : int, default: 3 The precision at which to store and display the bins labels. include_lowest : bool, default: False Whether the first interval should be left-inclusive or not. squeeze : False This argument is deprecated. restore_coord_dims : bool, default: False If True, also restore the dimension order of multi-dimensional coordinates. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. eagerly_compute_group: False, optional This argument is deprecated. Returns ------- grouped : DatasetGroupBy A `DatasetGroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. The name of the group has the added suffix `_bins` in order to distinguish it from the original variable. See Also -------- :ref:`groupby` Users guide explanation of how to group and bin data. Dataset.groupby DataArray.groupby_bins core.groupby.DatasetGroupBy pandas.DataFrame.groupby References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html """ from xarray.core.groupby import ( DatasetGroupBy, ResolvedGrouper, _validate_groupby_squeeze, ) from xarray.groupers import BinGrouper _validate_groupby_squeeze(squeeze) grouper = BinGrouper( bins=bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, ) rgrouper = ResolvedGrouper( grouper, group, self, eagerly_compute_group=eagerly_compute_group ) return DatasetGroupBy( self, (rgrouper,), restore_coord_dims=restore_coord_dims, ) def weighted(self, weights: DataArray) -> DatasetWeighted: """ Weighted Dataset operations. Parameters ---------- weights : DataArray An array of weights associated with the values in this Dataset. Each value in the data contributes to the reduction operation according to its associated weight. Notes ----- ``weights`` must be a DataArray and cannot contain missing values. Missing values can be replaced by ``weights.fillna(0)``. Returns ------- computation.weighted.DatasetWeighted See Also -------- :func:`DataArray.weighted ` :ref:`compute.weighted` User guide on weighted array reduction using :py:func:`~xarray.Dataset.weighted` :doc:`xarray-tutorial:fundamentals/03.4_weighted` Tutorial on Weighted Reduction using :py:func:`~xarray.Dataset.weighted` """ from xarray.computation.weighted import DatasetWeighted return DatasetWeighted(self, weights) def rolling( self, dim: Mapping[Any, int] | None = None, min_periods: int | None = None, center: bool | Mapping[Any, bool] = False, **window_kwargs: int, ) -> DatasetRolling: """ Rolling window object for Datasets. Parameters ---------- dim : dict, optional Mapping from the dimension name to create the rolling iterator along (e.g. `time`) to its moving window size. min_periods : int or None, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. center : bool or Mapping to int, default: False Set the labels at the center of the window. The default, False, sets the labels at the right edge of the window. **window_kwargs : optional The keyword arguments form of ``dim``. One of dim or window_kwargs must be provided. 
Returns ------- computation.rolling.DatasetRolling See Also -------- Dataset.cumulative DataArray.rolling DataArray.rolling_exp """ from xarray.computation.rolling import DatasetRolling dim = either_dict_or_kwargs(dim, window_kwargs, "rolling") return DatasetRolling(self, dim, min_periods=min_periods, center=center) def cumulative( self, dim: str | Iterable[Hashable], min_periods: int = 1, ) -> DatasetRolling: """ Accumulating object for Datasets Parameters ---------- dims : iterable of hashable The name(s) of the dimensions to create the cumulative window along min_periods : int, default: 1 Minimum number of observations in window required to have a value (otherwise result is NA). The default is 1 (note this is different from ``Rolling``, whose default is the size of the window). Returns ------- computation.rolling.DatasetRolling See Also -------- DataArray.cumulative Dataset.rolling Dataset.rolling_exp """ from xarray.computation.rolling import DatasetRolling if isinstance(dim, str): if dim not in self.dims: raise ValueError( f"Dimension {dim} not found in data dimensions: {self.dims}" ) dim = {dim: self.sizes[dim]} else: missing_dims = set(dim) - set(self.dims) if missing_dims: raise ValueError( f"Dimensions {missing_dims} not found in data dimensions: {self.dims}" ) dim = {d: self.sizes[d] for d in dim} return DatasetRolling(self, dim, min_periods=min_periods, center=False) def coarsen( self, dim: Mapping[Any, int] | None = None, boundary: CoarsenBoundaryOptions = "exact", side: SideOptions | Mapping[Any, SideOptions] = "left", coord_func: str | Callable | Mapping[Any, str | Callable] = "mean", **window_kwargs: int, ) -> DatasetCoarsen: """ Coarsen object for Datasets. Parameters ---------- dim : mapping of hashable to int, optional Mapping from the dimension name to the window size. boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. side : {"left", "right"} or mapping of str to {"left", "right"}, default: "left" coord_func : str or mapping of hashable to str, default: "mean" function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). Returns ------- computation.rolling.DatasetCoarsen See Also -------- :class:`computation.rolling.DatasetCoarsen` :func:`DataArray.coarsen ` :ref:`reshape.coarsen` User guide describing :py:func:`~xarray.Dataset.coarsen` :ref:`compute.coarsen` User guide on block arrgragation :py:func:`~xarray.Dataset.coarsen` :doc:`xarray-tutorial:fundamentals/03.3_windowed` Tutorial on windowed computation using :py:func:`~xarray.Dataset.coarsen` """ from xarray.computation.rolling import DatasetCoarsen dim = either_dict_or_kwargs(dim, window_kwargs, "coarsen") return DatasetCoarsen( self, dim, boundary=boundary, side=side, coord_func=coord_func, ) @_deprecate_positional_args("v2024.07.0") def resample( self, indexer: Mapping[Any, ResampleCompatible | Resampler] | None = None, *, skipna: bool | None = None, closed: SideOptions | None = None, label: SideOptions | None = None, offset: pd.Timedelta | datetime.timedelta | str | None = None, origin: str | DatetimeLike = "start_day", restore_coord_dims: bool | None = None, **indexer_kwargs: ResampleCompatible | Resampler, ) -> DatasetResample: """Returns a Resample object for performing resampling operations. Handles both downsampling and upsampling. The resampled dimension must be a datetime-like coordinate. 
If any intervals contain no values from the original object, they will be given the value ``NaN``. Parameters ---------- indexer : Mapping of Hashable to str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler, optional Mapping from the dimension name to resample frequency [1]_. The dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. restore_coord_dims : bool, optional If True, also restore the dimension order of multi-dimensional coordinates. **indexer_kwargs : str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler The keyword arguments form of ``indexer``. One of indexer or indexer_kwargs must be provided. Returns ------- resampled : core.resample.DataArrayResample This object resampled. See Also -------- DataArray.resample pandas.Series.resample pandas.DataFrame.resample Dataset.groupby DataArray.groupby References ---------- .. [1] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases """ from xarray.core.resample import DatasetResample return self._resample( resample_cls=DatasetResample, indexer=indexer, skipna=skipna, closed=closed, label=label, offset=offset, origin=origin, restore_coord_dims=restore_coord_dims, **indexer_kwargs, ) def drop_attrs(self, *, deep: bool = True) -> Self: """ Removes all attributes from the Dataset and its variables. Parameters ---------- deep : bool, default True Removes attributes from all variables. Returns ------- Dataset """ # Remove attributes from the dataset self = self._replace(attrs={}) if not deep: return self # Remove attributes from each variable in the dataset for var in self.variables: # variables don't have a `._replace` method, so we copy and then remove # attrs. If we added a `._replace` method, we could use that instead. if var not in self.indexes: self[var] = self[var].copy() self[var].attrs = {} new_idx_variables = {} # Not sure this is the most elegant way of doing this, but it works. # (Should we have a more general "map over all variables, including # indexes" approach?) 
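        # Coordinate variables that back an index were skipped in the loop above;
        # rebuild them through the index API below so each index and its
        # coordinate variables stay consistent after their attrs are dropped.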
for idx, idx_vars in self.xindexes.group_by_index(): # copy each coordinate variable of an index and drop their attrs temp_idx_variables = {k: v.copy() for k, v in idx_vars.items()} for v in temp_idx_variables.values(): v.attrs = {} # re-wrap the index object in new coordinate variables new_idx_variables.update(idx.create_variables(temp_idx_variables)) self = self.assign(new_idx_variables) return self xarray-2025.09.0/xarray/core/dataset_utils.py000066400000000000000000000051461505620616400210270ustar00rootroot00000000000000from __future__ import annotations import typing from collections.abc import Hashable, Mapping from typing import Any, Generic import pandas as pd from xarray.core import utils from xarray.core.common import _contains_datetime_like_objects from xarray.core.indexing import map_index_queries from xarray.core.types import T_Dataset from xarray.core.variable import IndexVariable, Variable if typing.TYPE_CHECKING: from xarray.core.dataset import Dataset class _LocIndexer(Generic[T_Dataset]): __slots__ = ("dataset",) def __init__(self, dataset: T_Dataset): self.dataset = dataset def __getitem__(self, key: Mapping[Any, Any]) -> T_Dataset: if not utils.is_dict_like(key): raise TypeError("can only lookup dictionaries from Dataset.loc") return self.dataset.sel(key) def __setitem__(self, key, value) -> None: if not utils.is_dict_like(key): raise TypeError( "can only set locations defined by dictionaries from Dataset.loc." f" Got: {key}" ) # set new values dim_indexers = map_index_queries(self.dataset, key).dim_indexers self.dataset[dim_indexers] = value def as_dataset(obj: Any) -> Dataset: """Cast the given object to a Dataset. Handles Datasets, DataArrays and dictionaries of variables. A new Dataset object is only created if the provided object is not already one. 
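    For example (illustrative only; ``existing_dataset`` is a placeholder name)::

        as_dataset({"a": ("x", [1, 2])})  # dict of variables -> wrapped in a new Dataset
        as_dataset(existing_dataset)      # already a Dataset -> returned unchanged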
""" from xarray.core.dataset import Dataset if hasattr(obj, "to_dataset"): obj = obj.to_dataset() if not isinstance(obj, Dataset): obj = Dataset(obj) return obj def _get_virtual_variable( variables, key: Hashable, dim_sizes: Mapping | None = None ) -> tuple[Hashable, Hashable, Variable]: """Get a virtual variable (e.g., 'time.year') from a dict of xarray.Variable objects (if possible) """ from xarray.core.dataarray import DataArray if dim_sizes is None: dim_sizes = {} if key in dim_sizes: data = pd.Index(range(dim_sizes[key]), name=key) variable = IndexVariable((key,), data) return key, key, variable if not isinstance(key, str): raise KeyError(key) split_key = key.split(".", 1) if len(split_key) != 2: raise KeyError(key) ref_name, var_name = split_key ref_var = variables[ref_name] if _contains_datetime_like_objects(ref_var): ref_var = DataArray(ref_var) data = getattr(ref_var.dt, var_name).data else: data = getattr(ref_var, var_name).data virtual_var = Variable(ref_var.dims, data) return ref_name, var_name, virtual_var xarray-2025.09.0/xarray/core/dataset_variables.py000066400000000000000000000040061505620616400216310ustar00rootroot00000000000000import typing from collections.abc import Hashable, Iterator, Mapping from typing import Any import numpy as np from xarray.core import formatting from xarray.core.utils import Frozen from xarray.core.variable import Variable if typing.TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset class DataVariables(Mapping[Any, "DataArray"]): __slots__ = ("_dataset",) def __init__(self, dataset: "Dataset"): self._dataset = dataset def __iter__(self) -> Iterator[Hashable]: return ( key for key in self._dataset._variables if key not in self._dataset._coord_names ) def __len__(self) -> int: length = len(self._dataset._variables) - len(self._dataset._coord_names) assert length >= 0, "something is wrong with Dataset._coord_names" return length def __contains__(self, key: Hashable) -> bool: return key in self._dataset._variables and key not in self._dataset._coord_names def __getitem__(self, key: Hashable) -> "DataArray": if key not in self._dataset._coord_names: return self._dataset[key] raise KeyError(key) def __repr__(self) -> str: return formatting.data_vars_repr(self) @property def variables(self) -> Mapping[Hashable, Variable]: all_variables = self._dataset.variables return Frozen({k: all_variables[k] for k in self}) @property def dtypes(self) -> Frozen[Hashable, np.dtype]: """Mapping from data variable names to dtypes. Cannot be modified directly, but is updated when adding new variables. 
See Also -------- Dataset.dtype """ return self._dataset.dtypes def _ipython_key_completions_(self): """Provide method for the key-autocompletions in IPython.""" return [ key for key in self._dataset._ipython_key_completions_() if key not in self._dataset._coord_names ] xarray-2025.09.0/xarray/core/datatree.py000066400000000000000000002542021505620616400177520ustar00rootroot00000000000000from __future__ import annotations import functools import io import itertools import textwrap from collections import ChainMap from collections.abc import ( Callable, Hashable, Iterable, Iterator, Mapping, ) from html import escape from os import PathLike from typing import ( TYPE_CHECKING, Any, Concatenate, Literal, NoReturn, ParamSpec, TypeVar, Union, overload, ) from xarray.core import utils from xarray.core._aggregations import DataTreeAggregations from xarray.core._typed_ops import DataTreeOpsMixin from xarray.core.common import TreeAttrAccessMixin, get_chunksizes from xarray.core.coordinates import Coordinates, DataTreeCoordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.dataset_variables import DataVariables from xarray.core.datatree_mapping import ( _handle_errors_with_path_context, map_over_datasets, ) from xarray.core.formatting import ( datatree_repr, diff_treestructure, dims_and_coords_repr, ) from xarray.core.formatting_html import ( datatree_repr as datatree_repr_html, ) from xarray.core.indexes import Index, Indexes from xarray.core.options import OPTIONS as XR_OPTS from xarray.core.options import _get_keep_attrs from xarray.core.treenode import NamedNode, NodePath, zip_subtrees from xarray.core.types import Self from xarray.core.utils import ( Default, FilteredMapping, Frozen, _default, drop_dims_from_indexers, either_dict_or_kwargs, maybe_wrap_array, parse_dims_as_set, ) from xarray.core.variable import Variable from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import align from xarray.structure.merge import dataset_update_method try: from xarray.core.variable import calculate_dimensions except ImportError: # for xarray versions 2022.03.0 and earlier from xarray.core.dataset import calculate_dimensions if TYPE_CHECKING: import numpy as np import pandas as pd from dask.delayed import Delayed from xarray.backends import ZarrStore from xarray.core.datatree_io import T_DataTreeNetcdfEngine, T_DataTreeNetcdfTypes from xarray.core.types import ( Dims, DtCompatible, ErrorOptions, ErrorOptionsWithWarn, NetcdfWriteModes, T_ChunkDimFreq, T_ChunksFreq, ZarrWriteModes, ) from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint from xarray.structure.merge import CoercibleMapping, CoercibleValue # """ # DEVELOPERS' NOTE # ---------------- # The idea of this module is to create a `DataTree` class which inherits the tree # structure from TreeNode, and also copies the entire API of `xarray.Dataset`, but with # certain methods decorated to instead map the dataset function over every node in the # tree. As this API is copied without directly subclassing `xarray.Dataset` we instead # create various Mixin classes (in ops.py) which each define part of `xarray.Dataset`'s # extensive API. # # Some of these methods must be wrapped to map over all nodes in the subtree. 
Others are # fine to inherit unaltered (normally because they (a) only call dataset properties and # (b) don't return a dataset that should be nested into a new tree) and some will get # overridden by the class definition of DataTree. # """ T_Path = Union[str, NodePath] T = TypeVar("T") P = ParamSpec("P") def _collect_data_and_coord_variables( data: Dataset, ) -> tuple[dict[Hashable, Variable], dict[Hashable, Variable]]: data_variables = {} coord_variables = {} for k, v in data.variables.items(): if k in data._coord_names: coord_variables[k] = v else: data_variables[k] = v return data_variables, coord_variables def _to_new_dataset(data: Dataset | Coordinates | None) -> Dataset: if isinstance(data, Dataset): ds = data.copy(deep=False) elif isinstance(data, Coordinates): ds = data.to_dataset() elif data is None: ds = Dataset() else: raise TypeError(f"data object is not an xarray.Dataset, dict, or None: {data}") return ds def _inherited_dataset(ds: Dataset, parent: Dataset) -> Dataset: return Dataset._construct_direct( variables=parent._variables | ds._variables, coord_names=parent._coord_names | ds._coord_names, dims=parent._dims | ds._dims, attrs=ds._attrs, indexes=parent._indexes | ds._indexes, encoding=ds._encoding, close=ds._close, ) def _without_header(text: str) -> str: return "\n".join(text.split("\n")[1:]) def _indented(text: str) -> str: return textwrap.indent(text, prefix=" ") def check_alignment( path: str, node_ds: Dataset, parent_ds: Dataset | None, children: Mapping[str, DataTree], ) -> None: if parent_ds is not None: try: align(node_ds, parent_ds, join="exact", copy=False) except ValueError as e: node_repr = _indented(_without_header(repr(node_ds))) parent_repr = _indented(dims_and_coords_repr(parent_ds)) raise ValueError( f"group {path!r} is not aligned with its parents:\n" f"Group:\n{node_repr}\nFrom parents:\n{parent_repr}" ) from e if children: if parent_ds is not None: base_ds = _inherited_dataset(node_ds, parent_ds) else: base_ds = node_ds for child_name, child in children.items(): child_path = str(NodePath(path) / child_name) child_ds = child.to_dataset(inherit=False) check_alignment(child_path, child_ds, base_ds, child.children) def _deduplicate_inherited_coordinates(child: DataTree, parent: DataTree) -> None: # This method removes repeated indexes (and corresponding coordinates) # that are repeated between a DataTree and its parents. removed_something = False for name in parent._indexes: if name in child._node_indexes: # Indexes on a Dataset always have a corresponding coordinate. # We already verified that these coordinates match in the # check_alignment() call from _pre_attach(). del child._node_indexes[name] del child._node_coord_variables[name] removed_something = True if removed_something: child._node_dims = calculate_dimensions( child._data_variables | child._node_coord_variables ) for grandchild in child._children.values(): _deduplicate_inherited_coordinates(grandchild, child) def _check_for_slashes_in_names(variables: Iterable[Hashable]) -> None: offending_variable_names = [ name for name in variables if isinstance(name, str) and "/" in name ] if len(offending_variable_names) > 0: raise ValueError( "Given variables have names containing the '/' character: " f"{offending_variable_names}. " "Variables stored in DataTree objects cannot have names containing '/' characters, as this would make path-like access to variables ambiguous." ) class DatasetView(Dataset): """ An immutable Dataset-like view onto the data in a single DataTree node. 
In-place operations modifying this object should raise an AttributeError. This requires overriding all inherited constructors. Operations returning a new result will return a new xarray.Dataset object. This includes all API on Dataset, which will be inherited. """ # TODO what happens if user alters (in-place) a DataArray they extracted from this object? __slots__ = ( "_attrs", "_cache", # used by _CachedAccessor "_close", "_coord_names", "_dims", "_encoding", "_indexes", "_variables", ) def __init__( self, data_vars: Mapping[Any, Any] | None = None, coords: Mapping[Any, Any] | None = None, attrs: Mapping[Any, Any] | None = None, ): raise AttributeError("DatasetView objects are not to be initialized directly") @classmethod def _constructor( cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int], attrs: dict | None, indexes: dict[Any, Index], encoding: dict | None, close: Callable[[], None] | None, ) -> DatasetView: """Private constructor, from Dataset attributes.""" # We override Dataset._construct_direct below, so we need a new # constructor for creating DatasetView objects. obj: DatasetView = object.__new__(cls) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._close = close obj._encoding = encoding return obj def __setitem__(self, key, val) -> None: raise AttributeError( "Mutation of the DatasetView is not allowed, please use `.__setitem__` on the wrapping DataTree node, " "or use `dt.to_dataset()` if you want a mutable dataset. If calling this from within `map_over_datasets`," "use `.copy()` first to get a mutable version of the input dataset." ) def update(self, other) -> NoReturn: raise AttributeError( "Mutation of the DatasetView is not allowed, please use `.update` on the wrapping DataTree node, " "or use `dt.to_dataset()` if you want a mutable dataset. If calling this from within `map_over_datasets`," "use `.copy()` first to get a mutable version of the input dataset." ) def set_close(self, close: Callable[[], None] | None) -> None: raise AttributeError("cannot modify a DatasetView()") def close(self) -> None: raise AttributeError( "cannot close a DatasetView(). Close the associated DataTree node instead" ) # FIXME https://github.com/python/mypy/issues/7328 @overload # type: ignore[override] def __getitem__(self, key: Mapping) -> Dataset: # type: ignore[overload-overlap] ... @overload def __getitem__(self, key: Hashable) -> DataArray: ... # See: https://github.com/pydata/xarray/issues/8855 @overload def __getitem__(self, key: Any) -> Dataset: ... def __getitem__(self, key) -> DataArray | Dataset: # TODO call the `_get_item` method of DataTree to allow path-like access to contents of other nodes # For now just call Dataset.__getitem__ return Dataset.__getitem__(self, key) @classmethod def _construct_direct( # type: ignore[override] cls, variables: dict[Any, Variable], coord_names: set[Hashable], dims: dict[Any, int] | None = None, attrs: dict | None = None, indexes: dict[Any, Index] | None = None, encoding: dict | None = None, close: Callable[[], None] | None = None, ) -> Dataset: """ Overriding this method (along with ._replace) and modifying it to return a Dataset object should hopefully ensure that the return type of any method on this object is a Dataset. 
""" if dims is None: dims = calculate_dimensions(variables) if indexes is None: indexes = {} obj = object.__new__(Dataset) obj._variables = variables obj._coord_names = coord_names obj._dims = dims obj._indexes = indexes obj._attrs = attrs obj._close = close obj._encoding = encoding return obj def _replace( # type: ignore[override] self, variables: dict[Hashable, Variable] | None = None, coord_names: set[Hashable] | None = None, dims: dict[Any, int] | None = None, attrs: dict[Hashable, Any] | Default | None = _default, indexes: dict[Hashable, Index] | None = None, encoding: dict | Default | None = _default, inplace: bool = False, ) -> Dataset: """ Overriding this method (along with ._construct_direct) and modifying it to return a Dataset object should hopefully ensure that the return type of any method on this object is a Dataset. """ if inplace: raise AttributeError("In-place mutation of the DatasetView is not allowed") return Dataset._replace( self, variables=variables, coord_names=coord_names, dims=dims, attrs=attrs, indexes=indexes, encoding=encoding, inplace=inplace, ) def map( # type: ignore[override] self, func: Callable, keep_attrs: bool | None = None, args: Iterable[Any] = (), **kwargs: Any, ) -> Dataset: """Apply a function to each data variable in this dataset Parameters ---------- func : callable Function which can be called in the form `func(x, *args, **kwargs)` to transform each DataArray `x` in this dataset into another DataArray. keep_attrs : bool | None, optional If True, both the dataset's and variables' attributes (`attrs`) will be copied from the original objects to the new ones. If False, the new dataset and variables will be returned without copying the attributes. args : iterable, optional Positional arguments passed on to `func`. **kwargs : Any Keyword arguments passed on to `func`. Returns ------- applied : Dataset Resulting dataset from applying ``func`` to each data variable. Examples -------- >>> da = xr.DataArray(np.random.randn(2, 3)) >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])}) >>> ds Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773 bar (x) int64 16B -1 2 >>> ds.map(np.fabs) Size: 64B Dimensions: (dim_0: 2, dim_1: 3, x: 2) Dimensions without coordinates: dim_0, dim_1, x Data variables: foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773 bar (x) float64 16B 1.0 2.0 """ # Copied from xarray.Dataset so as not to call type(self), which causes problems (see https://github.com/xarray-contrib/datatree/issues/188). # TODO Refactor xarray upstream to avoid needing to overwrite this. if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) variables = { k: maybe_wrap_array(v, func(v, *args, **kwargs)) for k, v in self.data_vars.items() } if keep_attrs: for k, v in variables.items(): v._copy_attrs_from(self.data_vars[k]) attrs = self.attrs if keep_attrs else None # return type(self)(variables, attrs=attrs) return Dataset(variables, attrs=attrs) class DataTree( NamedNode, DataTreeAggregations, DataTreeOpsMixin, TreeAttrAccessMixin, Mapping[str, "DataArray | DataTree"], ): """ A tree-like hierarchical collection of xarray objects. Attempts to present an API like that of xarray.Dataset, but methods are wrapped to also update all the tree's child nodes. """ # TODO Some way of sorting children by depth # TODO do we need a watch out for if methods intended only for root nodes are called on non-root nodes? 
# TODO dataset methods which should not or cannot act over the whole tree, such as .to_array # TODO .loc method # TODO a lot of properties like .variables could be defined in a DataMapping class which both Dataset and DataTree inherit from # TODO all groupby classes # TODO a lot of properties like .variables could be defined in a DataMapping class which both Dataset and DataTree inherit from # TODO all groupby classes _name: str | None _parent: DataTree | None _children: dict[str, DataTree] _cache: dict[str, Any] # used by _CachedAccessor _data_variables: dict[Hashable, Variable] _node_coord_variables: dict[Hashable, Variable] _node_dims: dict[Hashable, int] _node_indexes: dict[Hashable, Index] _attrs: dict[Hashable, Any] | None _encoding: dict[Hashable, Any] | None _close: Callable[[], None] | None __slots__ = ( "_attrs", "_cache", # used by _CachedAccessor "_children", "_close", "_data_variables", "_encoding", "_name", "_node_coord_variables", "_node_dims", "_node_indexes", "_parent", ) def __init__( self, dataset: Dataset | Coordinates | None = None, children: Mapping[str, DataTree] | None = None, name: str | None = None, ): """ Create a single node of a DataTree. The node may optionally contain data in the form of data and coordinate variables, stored in the same way as data is stored in an xarray.Dataset. Parameters ---------- dataset : Dataset, optional Data to store directly at this node. children : Mapping[str, DataTree], optional Any child nodes of this node. name : str, optional Name for this node of the tree. Returns ------- DataTree See Also -------- DataTree.from_dict """ self._set_node_data(_to_new_dataset(dataset)) # comes after setting node data as this will check for clashes between child names and existing variable names super().__init__(name=name, children=children) def _set_node_data(self, dataset: Dataset): _check_for_slashes_in_names(dataset.variables) data_vars, coord_vars = _collect_data_and_coord_variables(dataset) self._data_variables = data_vars self._node_coord_variables = coord_vars self._node_dims = dataset._dims self._node_indexes = dataset._indexes self._encoding = dataset._encoding self._attrs = dataset._attrs self._close = dataset._close def _pre_attach(self: DataTree, parent: DataTree, name: str) -> None: super()._pre_attach(parent, name) if name in parent.dataset.variables: raise KeyError( f"parent {parent.name} already contains a variable named {name}" ) path = str(NodePath(parent.path) / name) node_ds = self.to_dataset(inherit=False) parent_ds = parent._to_dataset_view(rebuild_dims=False, inherit=True) check_alignment(path, node_ds, parent_ds, self.children) _deduplicate_inherited_coordinates(self, parent) @property def _node_coord_variables_with_index(self) -> Mapping[Hashable, Variable]: return FilteredMapping( keys=self._node_indexes, mapping=self._node_coord_variables ) @property def _coord_variables(self) -> ChainMap[Hashable, Variable]: # ChainMap is incorrected typed in typeshed (only the first argument # needs to be mutable) # https://github.com/python/typeshed/issues/8430 return ChainMap( self._node_coord_variables, *(p._node_coord_variables_with_index for p in self.parents), # type: ignore[arg-type] ) @property def _dims(self) -> ChainMap[Hashable, int]: return ChainMap(self._node_dims, *(p._node_dims for p in self.parents)) @property def _indexes(self) -> ChainMap[Hashable, Index]: return ChainMap(self._node_indexes, *(p._node_indexes for p in self.parents)) def _to_dataset_view(self, rebuild_dims: bool, inherit: bool) -> DatasetView: 
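        # Build an immutable DatasetView of this node's variables. ``inherit``
        # controls whether coordinates and indexes inherited from parent nodes
        # are included; ``rebuild_dims`` controls whether dimension sizes are
        # recomputed from the selected variables or reused from the cached maps.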
coord_vars = self._coord_variables if inherit else self._node_coord_variables variables = dict(self._data_variables) variables |= coord_vars if rebuild_dims: dims = calculate_dimensions(variables) elif inherit: # Note: rebuild_dims=False with inherit=True can create # technically invalid Dataset objects because it still includes # dimensions that are only defined on parent data variables # (i.e. not present on any parent coordinate variables). # # For example: # >>> tree = DataTree.from_dict( # ... { # ... "/": xr.Dataset({"foo": ("x", [1, 2])}), # x has size 2 # ... "/b": xr.Dataset(), # ... } # ... ) # >>> ds = tree["b"]._to_dataset_view(rebuild_dims=False, inherit=True) # >>> ds # Size: 0B # Dimensions: (x: 2) # Dimensions without coordinates: x # Data variables: # *empty* # # Notice the "x" dimension is still defined, even though there are no variables # or coordinates. # # Normally this is not supposed to be possible in xarray's data model, # but here it is useful internally for use cases where we # want to inherit everything from parents nodes, e.g., for align() and repr(). # # The user should never be able to see this dimension via public API. dims = dict(self._dims) else: dims = dict(self._node_dims) return DatasetView._constructor( variables=variables, coord_names=set(self._coord_variables), dims=dims, attrs=self._attrs, indexes=dict(self._indexes if inherit else self._node_indexes), encoding=self._encoding, close=None, ) @property def dataset(self) -> DatasetView: """ An immutable Dataset-like view onto the data in this node. Includes inherited coordinates and indexes from parent nodes. For a mutable Dataset containing the same data as in this node, use `.to_dataset()` instead. See Also -------- DataTree.to_dataset """ return self._to_dataset_view(rebuild_dims=True, inherit=True) @dataset.setter def dataset(self, data: Dataset | None = None) -> None: ds = _to_new_dataset(data) self._replace_node(ds) # soft-deprecated alias, to facilitate the transition from # xarray-contrib/datatree ds = dataset def to_dataset(self, inherit: bool = True) -> Dataset: """ Return the data in this node as a new xarray.Dataset object. Parameters ---------- inherit : bool, optional If False, only include coordinates and indexes defined at the level of this DataTree node, excluding any inherited coordinates and indexes. See Also -------- DataTree.dataset """ coord_vars = self._coord_variables if inherit else self._node_coord_variables variables = dict(self._data_variables) variables |= coord_vars dims = calculate_dimensions(variables) if inherit else dict(self._node_dims) return Dataset._construct_direct( variables, set(coord_vars), dims, None if self._attrs is None else dict(self._attrs), dict(self._indexes if inherit else self._node_indexes), None if self._encoding is None else dict(self._encoding), None, ) @property def has_data(self) -> bool: """Whether or not there are any variables in this node.""" return bool(self._data_variables or self._node_coord_variables) @property def has_attrs(self) -> bool: """Whether or not there are any metadata attributes in this node.""" return len(self.attrs.keys()) > 0 @property def is_empty(self) -> bool: """False if node contains any data or attrs. 
Does not look at children.""" return not (self.has_data or self.has_attrs) @property def is_hollow(self) -> bool: """True if only leaf nodes contain data.""" return not any(node.has_data for node in self.subtree if not node.is_leaf) @property def variables(self) -> Mapping[Hashable, Variable]: """Low level interface to node contents as dict of Variable objects. This dictionary is frozen to prevent mutation that could violate Dataset invariants. It contains all variable objects constituting this DataTree node, including both data variables and coordinates. """ return Frozen(self._data_variables | self._coord_variables) @property def attrs(self) -> dict[Hashable, Any]: """Dictionary of global attributes on this node object.""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self._attrs = dict(value) @property def encoding(self) -> dict: """Dictionary of global encoding attributes on this node object.""" if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value: Mapping) -> None: self._encoding = dict(value) @property def dims(self) -> Mapping[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. Note that type of this object differs from `DataArray.dims`. See `DataTree.sizes`, `Dataset.sizes`, and `DataArray.sizes` for consistently named properties. """ return Frozen(self._dims) @property def sizes(self) -> Mapping[Hashable, int]: """Mapping from dimension names to lengths. Cannot be modified directly, but is updated when adding new variables. This is an alias for `DataTree.dims` provided for the benefit of consistency with `DataArray.sizes`. See Also -------- DataArray.sizes """ return self.dims @property def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]: """Places to look-up items for attribute-style access""" yield from self._item_sources yield self.attrs @property def _item_sources(self) -> Iterable[Mapping[Any, Any]]: """Places to look-up items for key-completion""" yield self.data_vars yield FilteredMapping(keys=self._coord_variables, mapping=self.coords) # virtual coordinates yield FilteredMapping(keys=self.dims, mapping=self) # immediate child nodes yield self.children def _ipython_key_completions_(self) -> list[str]: """Provide method for the key-autocompletions in IPython. See https://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion For the details. """ # TODO allow auto-completing relative string paths, e.g. `dt['path/to/../ node'` # Would require changes to ipython's autocompleter, see https://github.com/ipython/ipython/issues/12420 # Instead for now we only list direct paths to all node in subtree explicitly items_on_this_node = self._item_sources paths_to_all_nodes_in_subtree = { path: node for path, node in self.subtree_with_keys if path != "." # exclude the root node } all_item_sources = itertools.chain( items_on_this_node, [paths_to_all_nodes_in_subtree] ) items = { item for source in all_item_sources for item in source if isinstance(item, str) } return list(items) def __contains__(self, key: object) -> bool: """The 'in' operator will return true or false depending on whether 'key' is either an array stored in the datatree or a child node, or neither. 
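Examples
--------
A minimal illustration (the variable and child names here are arbitrary):

>>> dt = DataTree(xr.Dataset({"a": 0}), children={"b": DataTree()})
>>> "a" in dt
True
>>> "b" in dt
True
>>> "c" in dt
False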
""" return key in self.variables or key in self.children def __bool__(self) -> bool: return bool(self._data_variables) or bool(self._children) def __iter__(self) -> Iterator[str]: return itertools.chain(self._data_variables, self._children) # type: ignore[arg-type] def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: raise TypeError( "cannot directly convert a DataTree into a " "numpy array. Instead, create an xarray.DataArray " "first, either with indexing on the DataTree or by " "invoking the `to_array()` method." ) def __repr__(self) -> str: # type: ignore[override] return datatree_repr(self) def __str__(self) -> str: return datatree_repr(self) def _repr_html_(self): """Make html representation of datatree object""" if XR_OPTS["display_style"] == "text": return f"
<pre>{escape(repr(self))}</pre>
" return datatree_repr_html(self) def __enter__(self) -> Self: return self def __exit__(self, exc_type, exc_value, traceback) -> None: self.close() # DatasetView does not support close() or set_close(), so we reimplement # these methods on DataTree. def _close_node(self) -> None: if self._close is not None: self._close() self._close = None def close(self) -> None: """Close any files associated with this tree.""" for node in self.subtree: node._close_node() def set_close(self, close: Callable[[], None] | None) -> None: """Set the closer for this node.""" self._close = close def _replace_node( self: DataTree, data: Dataset | Default = _default, children: dict[str, DataTree] | Default = _default, ) -> None: ds = self.to_dataset(inherit=False) if data is _default else data if children is _default: children = self._children for child_name in children: if child_name in ds.variables: raise ValueError(f"node already contains a variable named {child_name}") parent_ds = ( self.parent._to_dataset_view(rebuild_dims=False, inherit=True) if self.parent is not None else None ) check_alignment(self.path, ds, parent_ds, children) if data is not _default: self._set_node_data(ds) if self.parent is not None: _deduplicate_inherited_coordinates(self, self.parent) self.children = children def _copy_node( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy just one node of a tree.""" new_node = super()._copy_node(inherit=inherit, deep=deep, memo=memo) data = self._to_dataset_view(rebuild_dims=False, inherit=inherit)._copy( deep=deep, memo=memo ) new_node._set_node_data(data) return new_node def get( # type: ignore[override] self: DataTree, key: str, default: DataTree | DataArray | None = None ) -> DataTree | DataArray | None: """ Access child nodes, variables, or coordinates stored in this node. Returned object will be either a DataTree or DataArray object depending on whether the key given points to a child or variable. Parameters ---------- key : str Name of variable / child within this node. Must lie in this immediate node (not elsewhere in the tree). default : DataTree | DataArray | None, optional A value to return if the specified key does not exist. Default return value is None. """ if key in self.children: return self.children[key] elif key in self.dataset: return self.dataset[key] else: return default def __getitem__(self: DataTree, key: str) -> DataTree | DataArray: """ Access child nodes, variables, or coordinates stored anywhere in this tree. Returned object will be either a DataTree or DataArray object depending on whether the key given points to a child or variable. Parameters ---------- key : str Name of variable / child within this node, or unix-like path to variable / child within another node. Returns ------- DataTree | DataArray """ # Either: if utils.is_dict_like(key): # dict-like indexing raise NotImplementedError("Should this index over whole tree?") elif isinstance(key, str): # TODO should possibly deal with hashables in general? # path-like: a name of a node/variable, or path to a node/variable path = NodePath(key) return self._get_item(path) elif utils.is_list_like(key): # iterable of variable names raise NotImplementedError( "Selecting via tags is deprecated, and selecting multiple items should be " "implemented via .subset" ) else: raise ValueError(f"Invalid format for key: {key}") def _set(self, key: str, val: DataTree | CoercibleValue) -> None: """ Set the child node or variable with the specified key to value. 
Counterpart to the public .get method, and also only works on the immediate node, not other nodes in the tree. """ if isinstance(val, DataTree): # create and assign a shallow copy here so as not to alter original name of node in grafted tree new_node = val.copy(deep=False) new_node.name = key new_node._set_parent(new_parent=self, child_name=key) else: if not isinstance(val, DataArray | Variable): # accommodate other types that can be coerced into Variables val = DataArray(val) self.update({key: val}) def __setitem__( self, key: str, value: Any, ) -> None: """ Add either a child node or an array to the tree, at any position. Data can be added anywhere, and new nodes will be created to cross the path to the new location if necessary. If there is already a node at the given location, then if value is a Node class or Dataset it will overwrite the data already present at that node, and if value is a single array, it will be merged with it. """ # TODO xarray.Dataset accepts other possibilities, how do we exactly replicate all the behaviour? if utils.is_dict_like(key): raise NotImplementedError elif isinstance(key, str): # TODO should possibly deal with hashables in general? # path-like: a name of a node/variable, or path to a node/variable path = NodePath(key) if isinstance(value, Dataset): value = DataTree(dataset=value) return self._set_item(path, value, new_nodes_along_path=True) else: raise ValueError("Invalid format for key") def __delitem__(self, key: str) -> None: """Remove a variable or child node from this datatree node.""" if key in self.children: super().__delitem__(key) elif key in self._node_coord_variables: if key in self._node_indexes: del self._node_indexes[key] del self._node_coord_variables[key] self._node_dims = calculate_dimensions(self.variables) elif key in self._data_variables: del self._data_variables[key] self._node_dims = calculate_dimensions(self.variables) else: raise KeyError(key) @overload def update(self, other: Dataset) -> None: ... @overload def update(self, other: Mapping[Hashable, DataArray | Variable]) -> None: ... @overload def update(self, other: Mapping[str, DataTree | DataArray | Variable]) -> None: ... def update( self, other: ( Dataset | Mapping[Hashable, DataArray | Variable] | Mapping[str, DataTree | DataArray | Variable] ), ) -> None: """ Update this node's children and / or variables. Just like `dict.update` this is an in-place operation. """ new_children: dict[str, DataTree] = {} new_variables: CoercibleMapping if isinstance(other, Dataset): new_variables = other else: new_variables = {} for k, v in other.items(): if isinstance(v, DataTree): # avoid named node being stored under inconsistent key new_child: DataTree = v.copy() # Datatree's name is always a string until we fix that (#8836) new_child.name = str(k) new_children[str(k)] = new_child elif isinstance(v, DataArray | Variable): # TODO this should also accommodate other types that can be coerced into Variables new_variables[k] = v else: raise TypeError(f"Type {type(v)} cannot be assigned to a DataTree") vars_merge_result = dataset_update_method( self.to_dataset(inherit=False), new_variables ) data = Dataset._construct_direct(**vars_merge_result._asdict()) # TODO are there any subtleties with preserving order of children like this? 
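# Note: plain dict unpacking ({**a, **b}) keeps the insertion order of
# ``self.children`` and appends genuinely new keys from ``new_children``;
# keys present in both mappings keep their original position but take the
# newly supplied child object.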
merged_children = {**self.children, **new_children} self._replace_node(data, children=merged_children) def assign( self, items: Mapping[Any, Any] | None = None, **items_kwargs: Any ) -> DataTree: """ Assign new data variables or child nodes to a DataTree, returning a new object with all the original items in addition to the new ones. Parameters ---------- items : mapping of hashable to Any Mapping from variable or child node names to the new values. If the new values are callable, they are computed on the Dataset and assigned to new data variables. If the values are not callable, (e.g. a DataTree, DataArray, scalar, or array), they are simply assigned. **items_kwargs The keyword arguments form of ``variables``. One of variables or variables_kwargs must be provided. Returns ------- dt : DataTree A new DataTree with the new variables or children in addition to all the existing items. Notes ----- Since ``kwargs`` is a dictionary, the order of your arguments may not be preserved, and so the order of the new variables is not well-defined. Assigning multiple items within the same ``assign`` is possible, but you cannot reference other variables created within the same ``assign`` call. See Also -------- xarray.Dataset.assign pandas.DataFrame.assign """ items = either_dict_or_kwargs(items, items_kwargs, "assign") dt = self.copy() dt.update(items) return dt def drop_nodes( self: DataTree, names: str | Iterable[str], *, errors: ErrorOptions = "raise" ) -> DataTree: """ Drop child nodes from this node. Parameters ---------- names : str or iterable of str Name(s) of nodes to drop. errors : {"raise", "ignore"}, default: "raise" If 'raise', raises a KeyError if any of the node names passed are not present as children of this node. If 'ignore', any given names that are present are dropped and no error is raised. Returns ------- dropped : DataTree A copy of the node with the specified children dropped. """ # the Iterable check is required for mypy if isinstance(names, str) or not isinstance(names, Iterable): names = {names} else: names = set(names) if errors == "raise": extra = names - set(self.children) if extra: raise KeyError(f"Cannot drop all nodes - nodes {extra} not present") result = self.copy() children_to_keep = { name: child for name, child in result.children.items() if name not in names } result._replace_node(children=children_to_keep) return result @classmethod def from_dict( cls, d: Mapping[str, Dataset | DataTree | None], /, name: str | None = None, ) -> Self: """ Create a datatree from a dictionary of data objects, organised by paths into the tree. Parameters ---------- d : dict-like A mapping from path names to xarray.Dataset or DataTree objects. Path names are to be given as unix-like path. If path names containing more than one part are given, new tree nodes will be constructed as necessary. To assign data to the root node of the tree use "", ".", "/" or "./" as the path. name : Hashable | None, optional Name for the root node of the tree. Default is None. Returns ------- DataTree Notes ----- If your dictionary is nested you will need to flatten it before using this method. 
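Examples
--------
A small sketch (the paths and variable names below are purely illustrative):

>>> dt = xr.DataTree.from_dict(
...     {
...         "/": xr.Dataset({"foo": ("x", [1, 2])}),
...         "/child": xr.Dataset({"bar": ("y", [3, 4, 5])}),
...     }
... )
>>> dt.groups
('/', '/child')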
""" # Find any values corresponding to the root d_cast = dict(d) root_data = None for key in ("", ".", "/", "./"): if key in d_cast: if root_data is not None: raise ValueError( "multiple entries found corresponding to the root node" ) root_data = d_cast.pop(key) # Create the root node if isinstance(root_data, DataTree): obj = root_data.copy() obj.name = name elif root_data is None or isinstance(root_data, Dataset): obj = cls(name=name, dataset=root_data, children=None) else: raise TypeError( f'root node data (at "", ".", "/" or "./") must be a Dataset ' f"or DataTree, got {type(root_data)}" ) def depth(item) -> int: pathstr, _ = item return len(NodePath(pathstr).parts) if d_cast: # Populate tree with children determined from data_objects mapping # Sort keys by depth so as to insert nodes from root first (see GH issue #9276) for path, data in sorted(d_cast.items(), key=depth): # Create and set new node if isinstance(data, DataTree): new_node = data.copy() elif isinstance(data, Dataset) or data is None: new_node = cls(dataset=data) else: raise TypeError(f"invalid values: {data}") obj._set_item( path, new_node, allow_overwrite=False, new_nodes_along_path=True, ) # TODO: figure out why mypy is raising an error here, likely something # to do with the return type of Dataset.copy() return obj # type: ignore[return-value] def to_dict(self, relative: bool = False) -> dict[str, Dataset]: """ Create a dictionary mapping of paths to the data contained in those nodes. Parameters ---------- relative : bool If True, return relative instead of absolute paths. Returns ------- dict[str, Dataset] See also -------- DataTree.subtree_with_keys """ return { node.relative_to(self) if relative else node.path: node.to_dataset() for node in self.subtree } @property def nbytes(self) -> int: return sum(node.to_dataset().nbytes for node in self.subtree) def __len__(self) -> int: return len(self.children) + len(self.data_vars) @property def indexes(self) -> Indexes[pd.Index]: """Mapping of pandas.Index objects used for label based indexing. Raises an error if this DataTree node has indexes that cannot be coerced to pandas.Index objects. See Also -------- DataTree.xindexes """ return self.xindexes.to_pandas_indexes() @property def xindexes(self) -> Indexes[Index]: """Mapping of xarray Index objects used for label based indexing.""" return Indexes( self._indexes, {k: self._coord_variables[k] for k in self._indexes} ) @property def coords(self) -> DataTreeCoordinates: """Dictionary of xarray.DataArray objects corresponding to coordinate variables """ return DataTreeCoordinates(self) @property def data_vars(self) -> DataVariables: """Dictionary of DataArray objects corresponding to data variables""" return DataVariables(self.to_dataset()) def isomorphic(self, other: DataTree) -> bool: """ Two DataTrees are considered isomorphic if the set of paths to their descendent nodes are the same. Nothing about the data in each node is checked. Isomorphism is a necessary condition for two trees to be used in a nodewise binary operation, such as ``tree1 + tree2``. Parameters ---------- other : DataTree The other tree object to compare to. See Also -------- DataTree.equals DataTree.identical """ return diff_treestructure(self, other) is None def equals(self, other: DataTree) -> bool: """ Two DataTrees are equal if they have isomorphic node structures, with matching node names, and if they have matching variables and coordinates, all of which are equal. Parameters ---------- other : DataTree The other tree object to compare to. 
See Also -------- Dataset.equals DataTree.isomorphic DataTree.identical """ if not self.isomorphic(other): return False # Note: by using .dataset, this intentionally does not check that # coordinates are defined at the same levels. return all( node.dataset.equals(other_node.dataset) for node, other_node in zip_subtrees(self, other) ) def _inherited_coords_set(self) -> set[str]: return set(self.parent.coords if self.parent else []) # type: ignore[arg-type] def identical(self, other: DataTree) -> bool: """ Like equals, but also checks attributes on all datasets, variables and coordinates, and requires that any inherited coordinates at the tree root are also inherited on the other tree. Parameters ---------- other : DataTree The other tree object to compare to. See Also -------- Dataset.identical DataTree.isomorphic DataTree.equals """ if not self.isomorphic(other): return False if self.name != other.name: return False if self._inherited_coords_set() != other._inherited_coords_set(): return False return all( node.dataset.identical(other_node.dataset) for node, other_node in zip_subtrees(self, other) ) def filter(self: DataTree, filterfunc: Callable[[DataTree], bool]) -> DataTree: """ Filter nodes according to a specified condition. Returns a new tree containing only the nodes in the original tree for which `fitlerfunc(node)` is True. Will also contain empty nodes at intermediate positions if required to support leaves. Parameters ---------- filterfunc: function A function which accepts only one DataTree - the node on which filterfunc will be called. Returns ------- DataTree See Also -------- match pipe map_over_datasets """ filtered_nodes = { path: node.dataset for path, node in self.subtree_with_keys if filterfunc(node) } return DataTree.from_dict(filtered_nodes, name=self.name) def filter_like(self, other: DataTree) -> DataTree: """ Filter a datatree like another datatree. Returns a new tree containing only the nodes in the original tree which are also present in the other tree. Parameters ---------- other : DataTree The tree to filter this tree by. Returns ------- DataTree See Also -------- filter isomorphic Examples -------- >>> dt = DataTree.from_dict( ... { ... "/a/A": None, ... "/a/B": None, ... "/b/A": None, ... "/b/B": None, ... } ... ) >>> other = DataTree.from_dict( ... { ... "/a/A": None, ... "/b/A": None, ... } ... ) >>> dt.filter_like(other) Group: / β”œβ”€β”€ Group: /a β”‚ └── Group: /a/A └── Group: /b └── Group: /b/A """ other_keys = {key for key, _ in other.subtree_with_keys} return self.filter(lambda node: node.relative_to(self) in other_keys) def prune(self, drop_size_zero_vars: bool = False) -> DataTree: """ Remove empty nodes from the tree. Returns a new tree containing only nodes that contain data variables with actual data. Intermediate nodes are kept if they are required to support non-empty children. Parameters ---------- drop_size_zero_vars : bool, default False If True, also considers variables with zero size as empty. If False, keeps nodes with data variables even if they have zero size. Returns ------- DataTree A new tree with empty nodes removed. See Also -------- filter Examples -------- >>> dt = xr.DataTree.from_dict( ... { ... "/a": xr.Dataset({"foo": ("x", [1, 2])}), ... "/b": xr.Dataset({"bar": ("x", [])}), ... "/c": xr.Dataset(), ... } ... 
) >>> dt.prune() # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE Group: / β”œβ”€β”€ Group: /a β”‚ Dimensions: (x: 2) β”‚ Dimensions without coordinates: x β”‚ Data variables: β”‚ foo (x) int64 16B 1 2 └── Group: /b Dimensions: (x: 0) Dimensions without coordinates: x Data variables: bar (x) float64 0B... The ``drop_size_zero_vars`` parameter controls whether variables with zero size are considered empty: >>> dt.prune(drop_size_zero_vars=True) Group: / └── Group: /a Dimensions: (x: 2) Dimensions without coordinates: x Data variables: foo (x) int64 16B 1 2 """ non_empty_cond: Callable[[DataTree], bool] if drop_size_zero_vars: non_empty_cond = lambda node: len(node.data_vars) > 0 and any( var.size > 0 for var in node.data_vars.values() ) else: non_empty_cond = lambda node: len(node.data_vars) > 0 return self.filter(non_empty_cond) def match(self, pattern: str) -> DataTree: """ Return nodes with paths matching pattern. Uses unix glob-like syntax for pattern-matching. Parameters ---------- pattern: str A pattern to match each node path against. Returns ------- DataTree See Also -------- filter pipe map_over_datasets Examples -------- >>> dt = DataTree.from_dict( ... { ... "/a/A": None, ... "/a/B": None, ... "/b/A": None, ... "/b/B": None, ... } ... ) >>> dt.match("*/B") Group: / β”œβ”€β”€ Group: /a β”‚ └── Group: /a/B └── Group: /b └── Group: /b/B """ matching_nodes = { path: node.dataset for path, node in self.subtree_with_keys if NodePath(node.path).match(pattern) } return DataTree.from_dict(matching_nodes, name=self.name) @overload def map_over_datasets( self, func: Callable[..., Dataset | None], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree: ... @overload def map_over_datasets( self, func: Callable[..., tuple[Dataset | None, Dataset | None]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, DataTree]: ... @overload def map_over_datasets( self, func: Callable[..., tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, ...]: ... def map_over_datasets( self, func: Callable[..., Dataset | None | tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree | tuple[DataTree, ...]: """ Apply a function to every dataset in this subtree, returning a new tree which stores the results. The function will be applied to any dataset stored in this node, as well as any dataset stored in any of the descendant nodes. The returned tree will have the same structure as the original subtree. func needs to return a Dataset in order to rebuild the subtree. Parameters ---------- func : callable Function to apply to datasets with signature: `func(node.dataset, *args, **kwargs) -> Dataset`. Function will not be applied to any nodes without datasets. *args : tuple, optional Positional arguments passed on to `func`. Any DataTree arguments will be converted to Dataset objects via `.dataset`. kwargs : dict, optional Optional keyword arguments passed directly to ``func``. Returns ------- subtrees : DataTree, tuple of DataTrees One or more subtrees containing results from applying ``func`` to the data at each node. See also -------- map_over_datasets """ # TODO this signature means that func has no way to know which node it is being called upon - change? return map_over_datasets(func, self, *args, kwargs=kwargs) # type: ignore[arg-type] @overload def pipe( self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, ) -> T: ... 
@overload def pipe( self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: ... def pipe( self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: """Apply ``func(self, *args, **kwargs)`` This method replicates the pandas method of the same name. Parameters ---------- func : callable function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. *args positional arguments passed into ``func``. **kwargs a dictionary of keyword arguments passed into ``func``. Returns ------- object : T the return type of ``func``. Notes ----- Use ``.pipe`` when chaining together functions that expect xarray or pandas objects, e.g., instead of writing .. code:: python f(g(h(dt), arg1=a), arg2=b, arg3=c) You can write .. code:: python (dt.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)) If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``f`` takes its data as ``arg2``: .. code:: python (dt.pipe(h).pipe(g, arg1=a).pipe((f, "arg2"), arg1=a, arg3=c)) """ if isinstance(func, tuple): # Use different var when unpacking function from tuple because the type # signature of the unpacked function differs from the expected type # signature in the case where only a function is given, rather than a tuple. # This makes type checkers happy at both call sites below. f, target = func if target in kwargs: raise ValueError( f"{target} is both the pipe target and a keyword argument" ) kwargs[target] = self return f(*args, **kwargs) return func(self, *args, **kwargs) # TODO some kind of .collapse() or .flatten() method to merge a subtree @property def groups(self): """Return all groups in the tree, given as a tuple of path-like strings.""" return tuple(node.path for node in self.subtree) def _unary_op(self, f, *args, **kwargs) -> DataTree: # TODO do we need to any additional work to avoid duplication etc.? (Similar to aggregations) return self.map_over_datasets(functools.partial(f, **kwargs), *args) def _binary_op(self, other, f, reflexive=False, join=None) -> DataTree: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): return NotImplemented ds_binop = functools.partial( Dataset._binary_op, f=f, reflexive=reflexive, join=join, ) return map_over_datasets(ds_binop, self, other) def _inplace_binary_op(self, other, f) -> Self: from xarray.core.groupby import GroupBy if isinstance(other, GroupBy): raise TypeError( "in-place operations between a DataTree and " "a grouped object are not permitted" ) # TODO see GH issue #9629 for required implementation raise NotImplementedError() # TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. Mapping # related to https://github.com/python/mypy/issues/9319? def __eq__(self, other: DtCompatible) -> Self: # type: ignore[override] return super().__eq__(other) # filepath=None writes to a memoryview @overload def to_netcdf( self, filepath: None = None, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, **kwargs, ) -> memoryview: ... 
# compute=False returns dask.Delayed @overload def to_netcdf( self, filepath: str | PathLike | io.IOBase, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, *, compute: Literal[False], **kwargs, ) -> Delayed: ... # default return None @overload def to_netcdf( self, filepath: str | PathLike | io.IOBase, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: Literal[True] = True, **kwargs, ) -> None: ... def to_netcdf( self, filepath: str | PathLike | io.IOBase | None = None, mode: NetcdfWriteModes = "w", encoding=None, unlimited_dims=None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, **kwargs, ) -> None | memoryview | Delayed: """ Write datatree contents to a netCDF file. Parameters ---------- filepath : str or PathLike or file-like object or None Path to which to save this datatree, or a file-like object to write it to (which must support read and write and be seekable) or None to return in-memory bytes as a memoryview. mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. Only applies to the root group. encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"root/set1": {"my_variable": {"dtype": "int16", "scale_factor": 0.1, "zlib": True}, ...}, ...}``. See ``xarray.Dataset.to_netcdf`` for available options. unlimited_dims : dict, optional Mapping of unlimited dimensions per group that that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding["unlimited_dims"]``. format : {"NETCDF4", }, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features. engine : {"netcdf4", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a preference for "netcdf4" if writing to a file on disk. group : str, optional Path to the netCDF4 group in the given file to open as the root group of the ``DataTree``. Currently, specifying a group is not supported. write_inherited_coords : bool, default: False If true, replicate inherited coordinates on all descendant nodes. Otherwise, only write coordinates at the level at which they are originally defined. This saves disk space, but requires opening the full tree to load inherited coordinates. compute : bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. kwargs : Additional keyword arguments to be passed to ``xarray.Dataset.to_netcdf`` Returns ------- * ``memoryview`` if path is None * ``dask.delayed.Delayed`` if compute is False * ``None`` otherwise Note ---- Due to file format specifications the on-disk root group name is always ``"/"`` overriding any given ``DataTree`` root node name. 
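Examples
--------
A sketch of the two most common call patterns (file names are placeholders):

Write the whole tree to disk, one netCDF4 group per node:

>>> dt.to_netcdf("tree.nc")  # doctest: +SKIP

Omit the path to serialize into an in-memory buffer instead:

>>> buffer = dt.to_netcdf()  # doctest: +SKIP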
""" from xarray.core.datatree_io import _datatree_to_netcdf return _datatree_to_netcdf( self, filepath, mode=mode, encoding=encoding, unlimited_dims=unlimited_dims, format=format, engine=engine, group=group, write_inherited_coords=write_inherited_coords, compute=compute, **kwargs, ) # compute=False returns dask.Delayed @overload def to_zarr( self, store, mode: ZarrWriteModes = "w-", encoding=None, consolidated: bool = True, group: str | None = None, write_inherited_coords: bool = False, *, compute: Literal[False], **kwargs, ) -> Delayed: ... # default returns ZarrStore @overload def to_zarr( self, store, mode: ZarrWriteModes = "w-", encoding=None, consolidated: bool = True, group: str | None = None, write_inherited_coords: bool = False, compute: Literal[True] = True, **kwargs, ) -> ZarrStore: ... def to_zarr( self, store, mode: ZarrWriteModes = "w-", encoding=None, consolidated: bool = True, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, **kwargs, ) -> ZarrStore | Delayed: """ Write datatree contents to a Zarr store. Parameters ---------- store : MutableMapping, str or Path, optional Store or path to directory in file system mode : {{"w", "w-", "a", "r+", None}, default: "w-" Persistence mode: β€œw” means create (overwrite if exists); β€œw-” means create (fail if exists); β€œa” means override existing variables (create if does not exist); β€œr+” means modify existing array values only (raise an error if any metadata or shapes would change). The default mode is β€œw-”. encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{"root/set1": {"my_variable": {"dtype": "int16", "scale_factor": 0.1}, ...}, ...}``. See ``xarray.Dataset.to_zarr`` for available options. consolidated : bool If True, apply zarr's `consolidate_metadata` function to the store after writing metadata for all groups. group : str, optional Group path. (a.k.a. `path` in zarr terminology.) write_inherited_coords : bool, default: False If true, replicate inherited coordinates on all descendant nodes. Otherwise, only write coordinates at the level at which they are originally defined. This saves disk space, but requires opening the full tree to load inherited coordinates. compute : bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. Metadata is always updated eagerly. kwargs : Additional keyword arguments to be passed to ``xarray.Dataset.to_zarr`` Note ---- Due to file format specifications the on-disk root group name is always ``"/"`` overriding any given ``DataTree`` root node name. 
""" from xarray.core.datatree_io import _datatree_to_zarr return _datatree_to_zarr( self, store, mode=mode, encoding=encoding, consolidated=consolidated, group=group, write_inherited_coords=write_inherited_coords, compute=compute, **kwargs, ) def _get_all_dims(self) -> set: all_dims: set[Any] = set() for node in self.subtree: all_dims.update(node._node_dims) return all_dims def reduce( self, func: Callable, dim: Dims = None, *, keep_attrs: bool | None = None, keepdims: bool = False, numeric_only: bool = False, **kwargs: Any, ) -> Self: """Reduce this tree by applying `func` along some dimension(s).""" dims = parse_dims_as_set(dim, self._get_all_dims()) result = {} for path, node in self.subtree_with_keys: reduce_dims = [d for d in node._node_dims if d in dims] node_result = node.dataset.reduce( func, reduce_dims, keep_attrs=keep_attrs, keepdims=keepdims, numeric_only=numeric_only, **kwargs, ) result[path] = node_result return type(self).from_dict(result, name=self.name) def _selective_indexing( self, func: Callable[[Dataset, Mapping[Any, Any]], Dataset], indexers: Mapping[Any, Any], missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Apply an indexing operation over the subtree, handling missing dimensions and inherited coordinates gracefully by only applying indexing at each node selectively. """ all_dims = self._get_all_dims() indexers = drop_dims_from_indexers(indexers, all_dims, missing_dims) result = {} for path, node in self.subtree_with_keys: node_indexers = {k: v for k, v in indexers.items() if k in node.dims} func_with_error_context = _handle_errors_with_path_context(path)(func) node_result = func_with_error_context(node.dataset, node_indexers) # Indexing datasets corresponding to each node results in redundant # coordinates when indexes from a parent node are inherited. # Ideally, we would avoid creating such coordinates in the first # place, but that would require implementing indexing operations at # the Variable instead of the Dataset level. if node is not self: for k in node_indexers: if k not in node._node_coord_variables and k in node_result.coords: # We remove all inherited coordinates. Coordinates # corresponding to an index would be de-duplicated by # _deduplicate_inherited_coordinates(), but indexing (e.g., # with a scalar) can also create scalar coordinates, which # need to be explicitly removed. del node_result.coords[k] result[path] = node_result return type(self).from_dict(result, name=self.name) def isel( self, indexers: Mapping[Any, Any] | None = None, drop: bool = False, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Returns a new data tree with each array indexed along the specified dimension(s). This method selects values from each array using its `__getitem__` method, except this method does not require knowing the order of each array's dimensions. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by integers, slice objects or arrays. indexer can be a integer, slice, array-like or DataArray. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. drop : bool, default: False If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : DataTree A new DataTree with the same contents as this data tree, except each array and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- DataTree.sel Dataset.isel """ def apply_indexers(dataset, node_indexers): return dataset.isel(node_indexers, drop=drop) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") return self._selective_indexing( apply_indexers, indexers, missing_dims=missing_dims ) def sel( self, indexers: Mapping[Any, Any] | None = None, method: str | None = None, tolerance: int | float | Iterable[int | float] | None = None, drop: bool = False, **indexers_kwargs: Any, ) -> Self: """Returns a new data tree with each array indexed by tick labels along the specified dimension(s). In contrast to `DataTree.isel`, indexers for this method should use labels instead of integers. Under the hood, this method is powered by using pandas's powerful Index objects. This makes label based indexing essentially just as fast as using integer indexing. It also means this method uses pandas's (well documented) logic for indexing. This means you can use string shortcuts for datetime indexes (e.g., '2000-01' to select all values in January 2000). It also means that slices are treated as inclusive of both the start and stop values, unlike normal Python indexing. Parameters ---------- indexers : dict, optional A dict with keys matching dimensions and values given by scalars, slices or arrays of tick labels. For dimensions with multi-index, the indexer may also be a dict-like object with keys matching index level names. If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. drop : bool, optional If ``drop=True``, drop coordinates variables in `indexers` instead of making them scalar. **indexers_kwargs : {dim: indexer, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. Returns ------- obj : DataTree A new DataTree with the same contents as this data tree, except each variable and dimension is indexed by the appropriate indexers. If indexer DataArrays have coordinates that do not conflict with this object, then these coordinates will be attached. 
In general, each array's data will be a view of the array's data in this dataset, unless vectorized indexing was triggered by using an array indexer, in which case the data will be a copy. See Also -------- DataTree.isel Dataset.sel """ def apply_indexers(dataset, node_indexers): # TODO: reimplement in terms of map_index_queries(), to avoid # redundant look-ups of integer positions from labels (via indexes) # on child nodes. return dataset.sel( node_indexers, method=method, tolerance=tolerance, drop=drop ) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel") return self._selective_indexing(apply_indexers, indexers) def load(self, **kwargs) -> Self: """Manually trigger loading and/or computation of this datatree's data from disk or a remote source into memory and return this datatree. Unlike compute, the original datatree is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. See Also -------- Dataset.load dask.compute """ # access .data to coerce everything to numpy or dask arrays lazy_data = { path: { k: v._data for k, v in node.variables.items() if is_chunked_array(v._data) } for path, node in self.subtree_with_keys } flat_lazy_data = { (path, var_name): array for path, node in lazy_data.items() for var_name, array in node.items() } if flat_lazy_data: chunkmanager = get_chunked_array_type(*flat_lazy_data.values()) # evaluate all the chunked arrays simultaneously evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute( *flat_lazy_data.values(), **kwargs ) for (path, var_name), data in zip( flat_lazy_data, evaluated_data, strict=False ): self[path].variables[var_name].data = data # load everything else sequentially for node in self.subtree: for k, v in node.variables.items(): if k not in lazy_data: v.load() return self def compute(self, **kwargs) -> Self: """Manually trigger loading and/or computation of this datatree's data from disk or a remote source into memory and return a new datatree. Unlike load, the original datatree is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.compute``. Returns ------- object : DataTree New object with lazy data variables and coordinates as in-memory arrays. 
See Also -------- dask.compute """ new = self.copy(deep=False) return new.load(**kwargs) def _persist_inplace(self, **kwargs) -> Self: """Persist all chunked arrays in memory""" # access .data to coerce everything to numpy or dask arrays lazy_data = { path: { k: v._data for k, v in node.variables.items() if is_chunked_array(v._data) } for path, node in self.subtree_with_keys } flat_lazy_data = { (path, var_name): array for path, node in lazy_data.items() for var_name, array in node.items() } if flat_lazy_data: chunkmanager = get_chunked_array_type(*flat_lazy_data.values()) # evaluate all the dask arrays simultaneously evaluated_data = chunkmanager.persist(*flat_lazy_data.values(), **kwargs) for (path, var_name), data in zip( flat_lazy_data, evaluated_data, strict=False ): self[path].variables[var_name].data = data return self def persist(self, **kwargs) -> Self: """Trigger computation, keeping data as chunked arrays. This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()`` or ``.load()``. However this operation keeps the data as dask arrays. This is particularly useful when using the dask.distributed scheduler and you want to load a large amount of data into distributed memory. Like compute (but unlike load), the original dataset is left unaltered. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. Returns ------- object : DataTree New object with all dask-backed coordinates and data variables as persisted dask arrays. See Also -------- dask.persist """ new = self.copy(deep=False) return new._persist_inplace(**kwargs) @property def chunksizes(self) -> Mapping[str, Mapping[Hashable, tuple[int, ...]]]: """ Mapping from group paths to a mapping of chunksizes. If there's no chunked data in a group, the corresponding mapping of chunksizes will be empty. Cannot be modified directly, but can be modified by calling .chunk(). See Also -------- DataTree.chunk Dataset.chunksizes """ return Frozen( { node.path: get_chunksizes(node.variables.values()) for node in self.subtree } ) def chunk( self, chunks: T_ChunksFreq = {}, # noqa: B006 # {} even though it's technically unsafe, is being used intentionally here (#4667) name_prefix: str = "xarray-", token: str | None = None, lock: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, **chunks_kwargs: T_ChunkDimFreq, ) -> Self: """Coerce all arrays in all groups in this tree into dask arrays with the given chunks. Non-dask arrays in this tree will be converted to dask arrays. Dask arrays will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Along datetime-like dimensions, a :py:class:`groupers.TimeResampler` object is also accepted. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or a TimeResampler, optional Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, or ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}``. name_prefix : str, default: "xarray-" Prefix for the name of any new dask arrays. token : str, optional Token uniquely identifying this datatree. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. 
inline_array: bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already as dask array. chunked_array_type: str, optional Which chunked array type to coerce this datatree's arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided Returns ------- chunked : xarray.DataTree See Also -------- Dataset.chunk Dataset.chunksizes xarray.unify_chunks dask.array.from_array """ # don't support deprecated ways of passing chunks if not isinstance(chunks, Mapping): raise TypeError( f"invalid type for chunks: {type(chunks)}. Only mappings are supported." ) combined_chunks = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") all_dims = self._get_all_dims() bad_dims = combined_chunks.keys() - all_dims if bad_dims: raise ValueError( f"chunks keys {tuple(bad_dims)} not found in data dimensions {tuple(all_dims)}" ) rechunked_groups = { path: node.dataset.chunk( { dim: size for dim, size in combined_chunks.items() if dim in node._node_dims }, name_prefix=name_prefix, token=token, lock=lock, inline_array=inline_array, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) for path, node in self.subtree_with_keys } return self.from_dict(rechunked_groups, name=self.name) xarray-2025.09.0/xarray/core/datatree_io.py000066400000000000000000000172461505620616400204460ustar00rootroot00000000000000from __future__ import annotations import io from collections.abc import Hashable, Mapping, MutableMapping from os import PathLike from typing import TYPE_CHECKING, Any, Literal, get_args from xarray.backends.api import ( _normalize_path, delayed_close_after_writes, dump_to_store, get_default_netcdf_write_engine, get_writable_netcdf_store, get_writable_zarr_store, ) from xarray.backends.common import ArrayWriter, BytesIOProxy from xarray.backends.locks import get_dask_scheduler from xarray.core.datatree import DataTree from xarray.core.types import NetcdfWriteModes, ZarrWriteModes T_DataTreeNetcdfEngine = Literal["netcdf4", "h5netcdf", "pydap"] T_DataTreeNetcdfTypes = Literal["NETCDF4"] if TYPE_CHECKING: from dask.delayed import Delayed from xarray.backends import ZarrStore from xarray.core.types import ZarrStoreLike def _datatree_to_netcdf( dt: DataTree, filepath: str | PathLike | io.IOBase | None = None, mode: NetcdfWriteModes = "w", encoding: Mapping[str, Any] | None = None, unlimited_dims: Mapping | None = None, format: T_DataTreeNetcdfTypes | None = None, engine: T_DataTreeNetcdfEngine | None = None, group: str | None = None, write_inherited_coords: bool = False, compute: bool = True, invalid_netcdf: bool = False, auto_complex: bool | None = None, ) -> None | memoryview | Delayed: """Implementation of `DataTree.to_netcdf`.""" if format not in [None, *get_args(T_DataTreeNetcdfTypes)]: raise ValueError("DataTree.to_netcdf only supports the NETCDF4 format") if engine not in [None, 
*get_args(T_DataTreeNetcdfEngine)]: raise ValueError( "DataTree.to_netcdf only supports the netcdf4 and h5netcdf engines" ) filepath = _normalize_path(filepath) if engine is None: to_fileobject_or_memoryview = not isinstance(filepath, str) engine = get_default_netcdf_write_engine( format="NETCDF4", # required for supporting groups to_fileobject_or_memoryview=to_fileobject_or_memoryview, ) # type: ignore[assignment] if group is not None: raise NotImplementedError( "specifying a root group for the tree has not been implemented" ) if encoding is None: encoding = {} # In the future, we may want to expand this check to insure all the provided encoding # options are valid. For now, this simply checks that all provided encoding keys are # groups in the datatree. if set(encoding) - set(dt.groups): raise ValueError( f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}" ) if filepath is None: if not compute: raise NotImplementedError( "to_netcdf() with compute=False is not yet implemented when " "returning a memoryview" ) target = BytesIOProxy() else: target = filepath # type: ignore[assignment] if unlimited_dims is None: unlimited_dims = {} scheduler = get_dask_scheduler() have_chunks = any( v.chunks is not None for node in dt.subtree for v in node.variables.values() ) autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"] root_store = get_writable_netcdf_store( target, engine, # type: ignore[arg-type] mode=mode, format=format, autoclose=autoclose, invalid_netcdf=invalid_netcdf, auto_complex=auto_complex, ) writer = ArrayWriter() # TODO: allow this work (setting up the file for writing array data) # to be parallelized with dask try: for node in dt.subtree: at_root = node is dt dataset = node.to_dataset(inherit=write_inherited_coords or at_root) node_store = ( root_store if at_root else root_store.get_child_store(node.path) ) dump_to_store( dataset, node_store, writer, encoding=encoding.get(node.path), unlimited_dims=unlimited_dims.get(node.path), ) if autoclose: root_store.close() writes = writer.sync(compute=compute) finally: if compute: root_store.close() else: root_store.sync() if filepath is None: assert isinstance(target, BytesIOProxy) # created in this function return target.getbuffer() if not compute: return delayed_close_after_writes(writes, root_store) return None def _datatree_to_zarr( dt: DataTree, store: ZarrStoreLike, mode: ZarrWriteModes = "w-", encoding: Mapping[str, Any] | None = None, synchronizer=None, group: str | None = None, write_inherited_coords: bool = False, *, chunk_store: MutableMapping | str | PathLike | None = None, compute: bool = True, consolidated: bool | None = None, append_dim: Hashable | None = None, region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None, safe_chunks: bool = True, align_chunks: bool = False, storage_options: dict[str, str] | None = None, zarr_version: int | None = None, zarr_format: int | None = None, write_empty_chunks: bool | None = None, chunkmanager_store_kwargs: dict[str, Any] | None = None, ) -> ZarrStore | Delayed: """Implementation of `DataTree.to_zarr`.""" if group is not None: raise NotImplementedError( "specifying a root group for the tree has not been implemented" ) if append_dim is not None: raise NotImplementedError( "specifying ``append_dim`` with ``DataTree.to_zarr`` has not been implemented" ) if encoding is None: encoding = {} # In the future, we may want to expand this check to insure all the provided encoding # options are valid. 
For now, this simply checks that all provided encoding keys are # groups in the datatree. if set(encoding) - set(dt.groups): raise ValueError( f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}" ) root_store = get_writable_zarr_store( store, chunk_store=chunk_store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated, append_dim=append_dim, region=region, safe_chunks=safe_chunks, align_chunks=align_chunks, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, write_empty_chunks=write_empty_chunks, ) writer = ArrayWriter() try: for rel_path, node in dt.subtree_with_keys: at_root = node is dt dataset = node.to_dataset(inherit=write_inherited_coords or at_root) # Use a relative path for group, because absolute paths are broken # with consolidated metadata in zarr 3.1.2 and earlier: # https://github.com/zarr-developers/zarr-python/pull/3428 node_store = root_store if at_root else root_store.get_child_store(rel_path) dataset = node_store._validate_and_autodetect_region(dataset) node_store._validate_encoding(encoding) dump_to_store( dataset, node_store, writer, encoding=encoding.get(node.path), ) writes = writer.sync( compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs ) finally: if compute: root_store.close() if not compute: return delayed_close_after_writes(writes, root_store) return root_store xarray-2025.09.0/xarray/core/datatree_mapping.py000066400000000000000000000175031505620616400214660ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Mapping from typing import TYPE_CHECKING, Any, cast, overload from xarray.core.dataset import Dataset from xarray.core.treenode import group_subtrees from xarray.core.utils import result_name if TYPE_CHECKING: from xarray.core.datatree import DataTree @overload def map_over_datasets( func: Callable[..., Dataset | None], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree: ... # add an explicit overload for the most common case of two return values # (python typing does not have a way to match tuple lengths in general) @overload def map_over_datasets( func: Callable[..., tuple[Dataset | None, Dataset | None]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, DataTree]: ... @overload def map_over_datasets( func: Callable[..., tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> tuple[DataTree, ...]: ... def map_over_datasets( func: Callable[..., Dataset | None | tuple[Dataset | None, ...]], *args: Any, kwargs: Mapping[str, Any] | None = None, ) -> DataTree | tuple[DataTree, ...]: """ Applies a function to every dataset in one or more DataTree objects with the same structure (ie.., that are isomorphic), returning new trees which store the results. The function will be applied to any dataset stored in any of the nodes in the trees. The returned trees will have the same structure as the supplied trees. ``func`` needs to return a Dataset, tuple of Dataset objects or None in order to be able to rebuild the subtrees after mapping, as each result will be assigned to its respective node of a new tree via `DataTree.from_dict`. Any returned value that is one of these types will be stacked into a separate tree before returning all of them. ``map_over_datasets`` is essentially syntactic sugar for the combination of ``group_subtrees`` and ``DataTree.from_dict``. 
For example, in the case of a two argument function that return one result, it is equivalent to:: results = {} for path, (left, right) in group_subtrees(left_tree, right_tree): results[path] = func(left.dataset, right.dataset) return DataTree.from_dict(results) Parameters ---------- func : callable Function to apply to datasets with signature: `func(*args: Dataset, **kwargs) -> Union[Dataset, tuple[Dataset, ...]]`. (i.e. func must accept at least one Dataset and return at least one Dataset.) *args : tuple, optional Positional arguments passed on to `func`. Any DataTree arguments will be converted to Dataset objects via `.dataset`. kwargs : dict, optional Optional keyword arguments passed directly to ``func``. Returns ------- Result of applying `func` to each node in the provided trees, packed back into DataTree objects via `DataTree.from_dict`. See also -------- DataTree.map_over_datasets group_subtrees DataTree.from_dict """ # TODO examples in the docstring # TODO inspect function to work out immediately if the wrong number of arguments were passed for it? from xarray.core.datatree import DataTree if kwargs is None: kwargs = {} # Walk all trees simultaneously, applying func to all nodes that lie in same position in different trees # We don't know which arguments are DataTrees so we zip all arguments together as iterables # Store tuples of results in a dict because we don't yet know how many trees we need to rebuild to return out_data_objects: dict[str, Dataset | tuple[Dataset | None, ...] | None] = {} tree_args = [arg for arg in args if isinstance(arg, DataTree)] name = result_name(tree_args) for path, node_tree_args in group_subtrees(*tree_args): node_dataset_args = [arg.dataset for arg in node_tree_args] for i, arg in enumerate(args): if not isinstance(arg, DataTree): node_dataset_args.insert(i, arg) func_with_error_context = _handle_errors_with_path_context(path)(func) results = func_with_error_context(*node_dataset_args, **kwargs) out_data_objects[path] = results num_return_values = _check_all_return_values(out_data_objects) if num_return_values is None: # one return value out_data = cast(Mapping[str, Dataset | None], out_data_objects) return DataTree.from_dict(out_data, name=name) # multiple return values out_data_tuples = cast(Mapping[str, tuple[Dataset | None, ...]], out_data_objects) output_dicts: list[dict[str, Dataset | None]] = [ {} for _ in range(num_return_values) ] for path, outputs in out_data_tuples.items(): for output_dict, output in zip(output_dicts, outputs, strict=False): output_dict[path] = output return tuple( DataTree.from_dict(output_dict, name=name) for output_dict in output_dicts ) def _handle_errors_with_path_context(path: str): """Wraps given function so that if it fails it also raises path to node on which it failed.""" def decorator(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: # Add the context information to the error message add_note( e, f"Raised whilst mapping function over node with path {path!r}" ) raise return wrapper return decorator def add_note(err: BaseException, msg: str) -> None: err.add_note(msg) def _check_single_set_return_values(path_to_node: str, obj: Any) -> int | None: """Check types returned from single evaluation of func, and return number of return values received from func.""" if isinstance(obj, Dataset | None): return None # no need to pack results if not isinstance(obj, tuple) or not all( isinstance(r, Dataset | None) for r in obj ): raise TypeError( f"the result of calling func on the 
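# --- Editor's illustrative sketch (not part of the archived module) ---------
# A small end-to-end example of the public ``xarray.map_over_datasets``
# function documented above; all names are placeholders.
def _example_map_over_datasets() -> None:
    import xarray as xr

    tree = xr.DataTree.from_dict(
        {
            "/": xr.Dataset({"a": ("x", [1, 2, 3])}),
            "/child": xr.Dataset({"b": ("x", [10, 20, 30])}),
        }
    )
    # The function receives each node's Dataset and must return a Dataset
    # (or None, or a tuple of Datasets), exactly as described in the docstring.
    doubled = xr.map_over_datasets(lambda ds: 2 * ds, tree)
    assert isinstance(doubled, xr.DataTree)
    assert int(doubled["/child"]["b"][0]) == 20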
node at position '{path_to_node}' is" f" not a Dataset or None or a tuple of such types:\n{obj!r}" ) return len(obj) def _check_all_return_values(returned_objects) -> int | None: """Walk through all values returned by mapping func over subtrees, raising on any invalid or inconsistent types.""" result_data_objects = list(returned_objects.items()) first_path, result = result_data_objects[0] return_values = _check_single_set_return_values(first_path, result) for path_to_node, obj in result_data_objects[1:]: cur_return_values = _check_single_set_return_values(path_to_node, obj) if return_values != cur_return_values: if return_values is None: raise TypeError( f"Calling func on the nodes at position {path_to_node} returns " f"a tuple of {cur_return_values} datasets, whereas calling func on the " f"nodes at position {first_path} instead returns a single dataset." ) elif cur_return_values is None: raise TypeError( f"Calling func on the nodes at position {path_to_node} returns " f"a single dataset, whereas calling func on the nodes at position " f"{first_path} instead returns a tuple of {return_values} datasets." ) else: raise TypeError( f"Calling func on the nodes at position {path_to_node} returns " f"a tuple of {cur_return_values} datasets, whereas calling func on " f"the nodes at position {first_path} instead returns a tuple of " f"{return_values} datasets." ) return return_values xarray-2025.09.0/xarray/core/datatree_render.py000066400000000000000000000224001505620616400213020ustar00rootroot00000000000000""" String Tree Rendering. Copied from anytree. Minor changes to `RenderDataTree` include accessing `children.values()`, and type hints. """ from __future__ import annotations from collections.abc import Iterable, Iterator from math import ceil from typing import TYPE_CHECKING, NamedTuple if TYPE_CHECKING: from xarray.core.datatree import DataTree class Row(NamedTuple): pre: str fill: str node: DataTree | str class AbstractStyle: def __init__(self, vertical: str, cont: str, end: str): """ Tree Render Style. Args: vertical: Sign for vertical line. cont: Chars for a continued branch. end: Chars for the last branch. """ super().__init__() self.vertical = vertical self.cont = cont self.end = end assert len(cont) == len(vertical) == len(end), ( f"'{vertical}', '{cont}' and '{end}' need to have equal length" ) @property def empty(self) -> str: """Empty string as placeholder.""" return " " * len(self.end) def __repr__(self) -> str: return f"{self.__class__.__name__}()" class ContStyle(AbstractStyle): def __init__(self): """ Continued style, without gaps. >>> from xarray.core.datatree import DataTree >>> from xarray.core.datatree_render import RenderDataTree >>> root = DataTree.from_dict( ... { ... "/": None, ... "/sub0": None, ... "/sub0/sub0B": None, ... "/sub0/sub0A": None, ... "/sub1": None, ... }, ... name="root", ... ) >>> print(RenderDataTree(root)) Group: / β”œβ”€β”€ Group: /sub0 β”‚ β”œβ”€β”€ Group: /sub0/sub0B β”‚ └── Group: /sub0/sub0A └── Group: /sub1 """ super().__init__("\u2502 ", "\u251c\u2500\u2500 ", "\u2514\u2500\u2500 ") class RenderDataTree: def __init__( self, node: DataTree, style=None, childiter: type = list, maxlevel: int | None = None, maxchildren: int | None = None, ): """ Render tree starting at `node`. Keyword Args: style (AbstractStyle): Render Style. childiter: Child iterator. Note, due to the use of node.children.values(), Iterables that change the order of children cannot be used (e.g., `reversed`). maxlevel: Limit rendering to this depth. 
maxchildren: Limit number of children at each node. :any:`RenderDataTree` is an iterator, returning a tuple with 3 items: `pre` tree prefix. `fill` filling for multiline entries. `node` :any:`NodeMixin` object. It is up to the user to assemble these parts to a whole. Examples -------- >>> from xarray import Dataset >>> from xarray.core.datatree import DataTree >>> from xarray.core.datatree_render import RenderDataTree >>> root = DataTree.from_dict( ... { ... "/": Dataset({"a": 0, "b": 1}), ... "/sub0": Dataset({"c": 2, "d": 3}), ... "/sub0/sub0B": Dataset({"e": 4}), ... "/sub0/sub0A": Dataset({"f": 5, "g": 6}), ... "/sub1": Dataset({"h": 7}), ... }, ... name="root", ... ) # Simple one line: >>> for pre, _, node in RenderDataTree(root): ... print(f"{pre}{node.name}") ... root β”œβ”€β”€ sub0 β”‚ β”œβ”€β”€ sub0B β”‚ └── sub0A └── sub1 # Multiline: >>> for pre, fill, node in RenderDataTree(root): ... print(f"{pre}{node.name}") ... for variable in node.variables: ... print(f"{fill}{variable}") ... root a b β”œβ”€β”€ sub0 β”‚ c β”‚ d β”‚ β”œβ”€β”€ sub0B β”‚ β”‚ e β”‚ └── sub0A β”‚ f β”‚ g └── sub1 h :any:`by_attr` simplifies attribute rendering and supports multiline: >>> print(RenderDataTree(root).by_attr()) root β”œβ”€β”€ sub0 β”‚ β”œβ”€β”€ sub0B β”‚ └── sub0A └── sub1 # `maxlevel` limits the depth of the tree: >>> print(RenderDataTree(root, maxlevel=2).by_attr("name")) root β”œβ”€β”€ sub0 └── sub1 # `maxchildren` limits the number of children per node >>> print(RenderDataTree(root, maxchildren=1).by_attr("name")) root β”œβ”€β”€ sub0 β”‚ β”œβ”€β”€ sub0B β”‚ ... ... """ if style is None: style = ContStyle() if not isinstance(style, AbstractStyle): style = style() self.node = node self.style = style self.childiter = childiter self.maxlevel = maxlevel self.maxchildren = maxchildren def __iter__(self) -> Iterator[Row]: return self.__next(self.node, tuple()) def __next( self, node: DataTree, continues: tuple[bool, ...], level: int = 0, ) -> Iterator[Row]: yield RenderDataTree.__item(node, continues, self.style) children = node.children.values() level += 1 if children and (self.maxlevel is None or level < self.maxlevel): nchildren = len(children) children = self.childiter(children) for i, (child, is_last) in enumerate(_is_last(children)): if ( self.maxchildren is None or i < ceil(self.maxchildren / 2) or i >= ceil(nchildren - self.maxchildren / 2) ): yield from self.__next( child, continues + (not is_last,), level=level, ) if ( self.maxchildren is not None and nchildren > self.maxchildren and i == ceil(self.maxchildren / 2) ): yield RenderDataTree.__item("...", continues, self.style) @staticmethod def __item( node: DataTree | str, continues: tuple[bool, ...], style: AbstractStyle ) -> Row: if not continues: return Row("", "", node) else: items = [style.vertical if cont else style.empty for cont in continues] indent = "".join(items[:-1]) branch = style.cont if continues[-1] else style.end pre = indent + branch fill = "".join(items) return Row(pre, fill, node) def __str__(self) -> str: return str(self.node) def __repr__(self) -> str: classname = self.__class__.__name__ args = [ repr(self.node), f"style={self.style!r}", f"childiter={self.childiter!r}", ] return f"{classname}({', '.join(args)})" def by_attr(self, attrname: str = "name") -> str: """ Return rendered tree with node attribute `attrname`. Examples -------- >>> from xarray import Dataset >>> from xarray.core.datatree import DataTree >>> from xarray.core.datatree_render import RenderDataTree >>> root = DataTree.from_dict( ... { ... 
"/sub0/sub0B": Dataset({"foo": 4, "bar": 109}), ... "/sub0/sub0A": None, ... "/sub1/sub1A": None, ... "/sub1/sub1B": Dataset({"bar": 8}), ... "/sub1/sub1C/sub1Ca": None, ... }, ... name="root", ... ) >>> print(RenderDataTree(root).by_attr("name")) root β”œβ”€β”€ sub0 β”‚ β”œβ”€β”€ sub0B β”‚ └── sub0A └── sub1 β”œβ”€β”€ sub1A β”œβ”€β”€ sub1B └── sub1C └── sub1Ca """ def get() -> Iterator[str]: for pre, fill, node in self: if isinstance(node, str): yield f"{fill}{node}" continue attr = ( attrname(node) if callable(attrname) else getattr(node, attrname, "") ) if isinstance(attr, list | tuple): lines = attr else: lines = str(attr).split("\n") yield f"{pre}{lines[0]}" for line in lines[1:]: yield f"{fill}{line}" return "\n".join(get()) def _is_last(iterable: Iterable) -> Iterator[tuple[DataTree, bool]]: iter_ = iter(iterable) try: nextitem = next(iter_) except StopIteration: pass else: item = nextitem while True: try: nextitem = next(iter_) yield item, False except StopIteration: yield nextitem, True break item = nextitem xarray-2025.09.0/xarray/core/dtypes.py000066400000000000000000000201761505620616400174720ustar00rootroot00000000000000from __future__ import annotations import functools from typing import Any import numpy as np import pandas as pd from xarray.compat import array_api_compat, npcompat from xarray.compat.npcompat import HAS_STRING_DTYPE from xarray.core import utils # Use as a sentinel value to indicate a dtype appropriate NA value. NA = utils.ReprObject("") @functools.total_ordering class AlwaysGreaterThan: def __gt__(self, other): return True def __eq__(self, other): return isinstance(other, type(self)) @functools.total_ordering class AlwaysLessThan: def __lt__(self, other): return True def __eq__(self, other): return isinstance(other, type(self)) # Equivalence to np.inf (-np.inf) for object-type INF = AlwaysGreaterThan() NINF = AlwaysLessThan() # Pairs of types that, if both found, should be promoted to object dtype # instead of following NumPy's own type-promotion rules. These type promotion # rules match pandas instead. For reference, see the NumPy type hierarchy: # https://numpy.org/doc/stable/reference/arrays.scalars.html PROMOTE_TO_OBJECT: tuple[tuple[type[np.generic], type[np.generic]], ...] = ( (np.number, np.character), # numpy promotes to character (np.bool_, np.character), # numpy promotes to character (np.bytes_, np.str_), # numpy promotes to unicode ) def maybe_promote(dtype: np.dtype) -> tuple[np.dtype, Any]: """Simpler equivalent of pandas.core.common._maybe_promote Parameters ---------- dtype : np.dtype Returns ------- dtype : Promoted dtype that can hold missing values. fill_value : Valid missing value for the promoted dtype. """ # N.B. 
these casting rules should match pandas dtype_: np.typing.DTypeLike fill_value: Any if HAS_STRING_DTYPE and np.issubdtype(dtype, np.dtypes.StringDType()): # for now, we always promote string dtypes to object for consistency with existing behavior # TODO: refactor this once we have a better way to handle numpy vlen-string dtypes dtype_ = object fill_value = np.nan elif isdtype(dtype, "real floating"): dtype_ = dtype fill_value = np.nan elif np.issubdtype(dtype, np.timedelta64): # See https://github.com/numpy/numpy/issues/10685 # np.timedelta64 is a subclass of np.integer # Check np.timedelta64 before np.integer fill_value = np.timedelta64("NaT") dtype_ = dtype elif isdtype(dtype, "integral"): dtype_ = np.float32 if dtype.itemsize <= 2 else np.float64 fill_value = np.nan elif isdtype(dtype, "complex floating"): dtype_ = dtype fill_value = np.nan + np.nan * 1j elif np.issubdtype(dtype, np.datetime64): dtype_ = dtype fill_value = np.datetime64("NaT") else: dtype_ = object fill_value = np.nan dtype_out = np.dtype(dtype_) fill_value = dtype_out.type(fill_value) return dtype_out, fill_value NAT_TYPES = {np.datetime64("NaT").dtype, np.timedelta64("NaT").dtype} def get_fill_value(dtype): """Return an appropriate fill value for this dtype. Parameters ---------- dtype : np.dtype Returns ------- fill_value : Missing value corresponding to this dtype. """ _, fill_value = maybe_promote(dtype) return fill_value def get_pos_infinity(dtype, max_for_int=False): """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype max_for_int : bool Return np.iinfo(dtype).max instead of np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if isdtype(dtype, "real floating"): return np.inf if isdtype(dtype, "integral"): if max_for_int: return np.iinfo(dtype).max else: return np.inf if isdtype(dtype, "complex floating"): return np.inf + 1j * np.inf if isdtype(dtype, "bool"): return True return np.array(INF, dtype=object) def get_neg_infinity(dtype, min_for_int=False): """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype min_for_int : bool Return np.iinfo(dtype).min instead of -np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if isdtype(dtype, "real floating"): return -np.inf if isdtype(dtype, "integral"): if min_for_int: return np.iinfo(dtype).min else: return -np.inf if isdtype(dtype, "complex floating"): return -np.inf - 1j * np.inf if isdtype(dtype, "bool"): return False return np.array(NINF, dtype=object) def is_datetime_like(dtype) -> bool: """Check if a dtype is a subclass of the numpy datetime types""" return _is_numpy_subdtype(dtype, (np.datetime64, np.timedelta64)) def is_object(dtype) -> bool: """Check if a dtype is object""" return _is_numpy_subdtype(dtype, object) def is_string(dtype) -> bool: """Check if a dtype is a string dtype""" return _is_numpy_subdtype(dtype, (np.str_, np.character)) def _is_numpy_subdtype(dtype, kind) -> bool: if not isinstance(dtype, np.dtype): return False kinds = kind if isinstance(kind, tuple) else (kind,) return any(np.issubdtype(dtype, kind) for kind in kinds) def isdtype(dtype, kind: str | tuple[str, ...], xp=None) -> bool: """Compatibility wrapper for isdtype() from the array API standard. Unlike xp.isdtype(), kind must be a string. 
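# --- Editor's illustrative sketch (not part of the archived module) ---------
# The promotion rules above surface through the public API whenever missing
# values are introduced; for example, reindexing integer data promotes it to
# float64 and fills the gap with NaN.
def _example_promotion_on_reindex() -> None:
    import numpy as np
    import xarray as xr

    arr = xr.DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
    reindexed = arr.reindex(x=[0, 1, 2, 3])
    # Integer dtypes with itemsize > 2 are promoted to float64 with NaN fill,
    # as in maybe_promote() above.
    assert reindexed.dtype == np.dtype("float64")
    assert np.isnan(reindexed.values[-1])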
""" # TODO(shoyer): remove this wrapper when Xarray requires # numpy>=2 and pandas extensions arrays are implemented in # Xarray via the array API if not isinstance(kind, str) and not ( isinstance(kind, tuple) and all(isinstance(k, str) for k in kind) # type: ignore[redundant-expr] ): raise TypeError(f"kind must be a string or a tuple of strings: {kind!r}") if isinstance(dtype, np.dtype): return npcompat.isdtype(dtype, kind) elif pd.api.types.is_extension_array_dtype(dtype): # noqa: TID251 # we never want to match pandas extension array dtypes return False else: if xp is None: xp = np return xp.isdtype(dtype, kind) def preprocess_types(t): if isinstance(t, str | bytes): return type(t) elif isinstance(dtype := getattr(t, "dtype", t), np.dtype) and ( np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.bytes_) ): # drop the length from numpy's fixed-width string dtypes, it is better to # recalculate # TODO(keewis): remove once the minimum version of `numpy.result_type` does this # for us return dtype.type else: return t def result_type( *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike, xp=None, ) -> np.dtype: """Like np.result_type, but with type promotion rules matching pandas. Examples of changed behavior: number + string -> object (not string) bytes + unicode -> object (not unicode) Parameters ---------- *arrays_and_dtypes : list of arrays and dtypes The dtype is extracted from both numpy and dask arrays. Returns ------- numpy.dtype for the result. """ # TODO (keewis): replace `array_api_compat.result_type` with `xp.result_type` once we # can require a version of the Array API that supports passing scalars to it. from xarray.core.duck_array_ops import get_array_namespace if xp is None: xp = get_array_namespace(arrays_and_dtypes) types = { array_api_compat.result_type(preprocess_types(t), xp=xp) for t in arrays_and_dtypes } if any(isinstance(t, np.dtype) for t in types): # only check if there's numpy dtypes – the array API does not # define the types we're checking for for left, right in PROMOTE_TO_OBJECT: if any(np.issubdtype(t, left) for t in types) and any( np.issubdtype(t, right) for t in types ): return np.dtype(object) return array_api_compat.result_type( *map(preprocess_types, arrays_and_dtypes), xp=xp ) xarray-2025.09.0/xarray/core/duck_array_ops.py000066400000000000000000000737041505620616400211740ustar00rootroot00000000000000"""Compatibility module defining operations on duck numpy-arrays. Currently, this means Dask or NumPy arrays. None of these functions should accept or return xarray objects. 
""" from __future__ import annotations import contextlib import datetime import inspect import warnings from collections.abc import Callable from functools import partial from importlib import import_module from typing import Any import numpy as np import pandas as pd from numpy import ( isclose, isnat, take, unravel_index, # noqa: F401 ) from xarray.compat import dask_array_compat, dask_array_ops from xarray.compat.array_api_compat import get_array_namespace from xarray.core import dtypes, nputils from xarray.core.extension_array import PandasExtensionArray from xarray.core.options import OPTIONS from xarray.core.utils import is_duck_array, is_duck_dask_array, module_available from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import array_type, is_chunked_array # remove once numpy 2.0 is the oldest supported version if module_available("numpy", minversion="2.0.0.dev0"): from numpy.lib.array_utils import ( # type: ignore[import-not-found,unused-ignore] normalize_axis_index, ) else: from numpy.core.multiarray import ( # type: ignore[attr-defined,no-redef,unused-ignore] normalize_axis_index, ) dask_available = module_available("dask") def einsum(*args, **kwargs): if OPTIONS["use_opt_einsum"] and module_available("opt_einsum"): import opt_einsum return opt_einsum.contract(*args, **kwargs) else: xp = get_array_namespace(*args) return xp.einsum(*args, **kwargs) def tensordot(*args, **kwargs): xp = get_array_namespace(*args) return xp.tensordot(*args, **kwargs) def cross(*args, **kwargs): xp = get_array_namespace(*args) return xp.cross(*args, **kwargs) def gradient(f, *varargs, axis=None, edge_order=1): xp = get_array_namespace(f) return xp.gradient(f, *varargs, axis=axis, edge_order=edge_order) def _dask_or_eager_func( name, eager_module=np, dask_module="dask.array", dask_only_kwargs=tuple(), numpy_only_kwargs=tuple(), ): """Create a function that dispatches to dask for dask array inputs.""" def f(*args, **kwargs): if dask_available and any(is_duck_dask_array(a) for a in args): mod = ( import_module(dask_module) if isinstance(dask_module, str) else dask_module ) wrapped = getattr(mod, name) for kwarg in numpy_only_kwargs: kwargs.pop(kwarg, None) else: wrapped = getattr(eager_module, name) for kwarg in dask_only_kwargs: kwargs.pop(kwarg, None) return wrapped(*args, **kwargs) return f def fail_on_dask_array_input(values, msg=None, func_name=None): if is_duck_dask_array(values): if msg is None: msg = "%r is not yet a valid method on dask arrays" if func_name is None: func_name = inspect.stack()[1][3] raise NotImplementedError(msg % func_name) # Requires special-casing because pandas won't automatically dispatch to dask.isnull via NEP-18 pandas_isnull = _dask_or_eager_func("isnull", eager_module=pd, dask_module="dask.array") # TODO replace with simply np.ma.masked_invalid once numpy/numpy#16022 is fixed # TODO: replacing breaks iris + dask tests masked_invalid = _dask_or_eager_func( "masked_invalid", eager_module=np.ma, dask_module="dask.array.ma" ) def sliding_window_view(array, window_shape, axis=None, **kwargs): # TODO: some libraries (e.g. jax) don't have this, implement an alternative? xp = get_array_namespace(array) # sliding_window_view will not dispatch arbitrary kwargs (automatic_rechunk), # so we need to hand-code this. 
func = _dask_or_eager_func( "sliding_window_view", eager_module=xp.lib.stride_tricks, dask_module=dask_array_compat, dask_only_kwargs=("automatic_rechunk",), numpy_only_kwargs=("subok", "writeable"), ) return func(array, window_shape, axis=axis, **kwargs) def round(array): xp = get_array_namespace(array) return xp.round(array) around: Callable = round def isna(data: Any) -> bool: """Checks if data is literally np.nan or pd.NA. Parameters ---------- data Any python object Returns ------- Whether or not the data is np.nan or pd.NA """ return data is pd.NA or data is np.nan # noqa: PLW0177 def isnull(data): data = asarray(data) xp = get_array_namespace(data) scalar_type = data.dtype if dtypes.is_datetime_like(scalar_type): # datetime types use NaT for null # note: must check timedelta64 before integers, because currently # timedelta64 inherits from np.integer return isnat(data) elif dtypes.isdtype(scalar_type, ("real floating", "complex floating"), xp=xp): # float types use NaN for null xp = get_array_namespace(data) return xp.isnan(data) elif dtypes.isdtype(scalar_type, ("bool", "integral"), xp=xp) or ( isinstance(scalar_type, np.dtype) and ( np.issubdtype(scalar_type, np.character) or np.issubdtype(scalar_type, np.void) ) ): # these types cannot represent missing values # bool_ is for backwards compat with numpy<2, and cupy dtype = xp.bool_ if hasattr(xp, "bool_") else xp.bool return full_like(data, dtype=dtype, fill_value=False) # at this point, array should have dtype=object elif isinstance(data, np.ndarray) or pd.api.types.is_extension_array_dtype(data): # noqa: TID251 return pandas_isnull(data) else: # Not reachable yet, but intended for use with other duck array # types. For full consistency with pandas, we should accept None as # a null value as well as NaN, but it isn't clear how to do this # with duck typing. 
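# --- Editor's illustrative sketch (not part of the archived module) ---------
# ``isnull``/``notnull`` above back the public ``.isnull()``/``.notnull()``
# methods: NaN marks missing floats, while bool/integer data is never null.
def _example_isnull() -> None:
    import numpy as np
    import xarray as xr

    arr = xr.DataArray([0.0, np.nan, 2.0], dims="x")
    assert arr.isnull().values.tolist() == [False, True, False]
    assert arr.notnull().values.tolist() == [True, False, True]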
return data != data # noqa: PLR0124 def notnull(data): return ~isnull(data) def trapz(y, x, axis): if axis < 0: axis = y.ndim + axis x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1) x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1) slice1 = (slice(None),) * axis + (slice(1, None),) slice2 = (slice(None),) * axis + (slice(None, -1),) dx = x[x_sl1] - x[x_sl2] integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)]) return sum(integrand, axis=axis, skipna=False) def cumulative_trapezoid(y, x, axis): if axis < 0: axis = y.ndim + axis x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1) x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1) slice1 = (slice(None),) * axis + (slice(1, None),) slice2 = (slice(None),) * axis + (slice(None, -1),) dx = x[x_sl1] - x[x_sl2] integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)]) # Pad so that 'axis' has same length in result as it did in y pads = [(1, 0) if i == axis else (0, 0) for i in range(y.ndim)] xp = get_array_namespace(y, x) integrand = xp.pad(integrand, pads, mode="constant", constant_values=0.0) return cumsum(integrand, axis=axis, skipna=False) def full_like(a, fill_value, **kwargs): xp = get_array_namespace(a) return xp.full_like(a, fill_value, **kwargs) def empty_like(a, **kwargs): xp = get_array_namespace(a) return xp.empty_like(a, **kwargs) def astype(data, dtype, *, xp=None, **kwargs): if not hasattr(data, "__array_namespace__") and xp is None: return data.astype(dtype, **kwargs) if xp is None: xp = get_array_namespace(data) if xp == np: # numpy currently doesn't have a astype: return data.astype(dtype, **kwargs) return xp.astype(data, dtype, **kwargs) def asarray(data, xp=np, dtype=None): converted = data if is_duck_array(data) else xp.asarray(data) if dtype is None or converted.dtype == dtype: return converted if xp is np or not hasattr(xp, "astype"): return converted.astype(dtype) else: return xp.astype(converted, dtype) def as_shared_dtype(scalars_or_arrays, xp=None): """Cast arrays to a shared dtype using xarray's type promotion rules.""" extension_array_types = [ x.dtype for x in scalars_or_arrays if pd.api.types.is_extension_array_dtype(x) # noqa: TID251 ] if len(extension_array_types) >= 1: non_nans = [x for x in scalars_or_arrays if not isna(x)] if len(extension_array_types) == len(non_nans) and all( isinstance(x, type(extension_array_types[0])) for x in extension_array_types ): return [ x if not isna(x) else PandasExtensionArray( type(non_nans[0].array)._from_sequence([x], dtype=non_nans[0].dtype) ) for x in scalars_or_arrays ] raise ValueError( f"Cannot cast values to shared type, found values: {scalars_or_arrays}" ) # Avoid calling array_type("cupy") repeatidely in the any check array_type_cupy = array_type("cupy") if any(isinstance(x, array_type_cupy) for x in scalars_or_arrays): import cupy as cp xp = cp elif xp is None: xp = get_array_namespace(scalars_or_arrays) # Pass arrays directly instead of dtypes to result_type so scalars # get handled properly. # Note that result_type() safely gets the dtype from dask arrays without # evaluating them. dtype = dtypes.result_type(*scalars_or_arrays, xp=xp) return [asarray(x, dtype=dtype, xp=xp) for x in scalars_or_arrays] def broadcast_to(array, shape): xp = get_array_namespace(array) return xp.broadcast_to(array, shape) def lazy_array_equiv(arr1, arr2): """Like array_equal, but doesn't actually compare values. Returns True when arr1, arr2 identical or their dask tokens are equal. Returns False when shapes are not equal. 
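# --- Editor's illustrative sketch (not part of the archived module) ---------
# ``trapz`` above implements the trapezoidal rule that backs the public
# ``DataArray.integrate`` method.
def _example_integrate() -> None:
    import xarray as xr

    da = xr.DataArray([0.0, 1.0, 2.0], dims="x", coords={"x": [0.0, 1.0, 2.0]})
    # Integrating f(x) = x over [0, 2] with the trapezoidal rule gives 2.0.
    assert float(da.integrate("x")) == 2.0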
Returns None when equality cannot determined: one or both of arr1, arr2 are numpy arrays; or their dask tokens are not equal """ if arr1 is arr2: return True arr1 = asarray(arr1) arr2 = asarray(arr2) if arr1.shape != arr2.shape: return False if dask_available and is_duck_dask_array(arr1) and is_duck_dask_array(arr2): from dask.base import tokenize # GH3068, GH4221 if tokenize(arr1) == tokenize(arr2): return True else: return None return None def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8): """Like np.allclose, but also allows values to be NaN in both arrays""" arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) if lazy_equiv is None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered") return bool( array_all(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True)) ) else: return lazy_equiv def array_equiv(arr1, arr2): """Like np.array_equal, but also allows values to be NaN in both arrays""" arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) if lazy_equiv is None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "In the future, 'NAT == x'") flag_array = (arr1 == arr2) | (isnull(arr1) & isnull(arr2)) return bool(array_all(flag_array)) else: return lazy_equiv def array_notnull_equiv(arr1, arr2): """Like np.array_equal, but also allows values to be NaN in either or both arrays """ arr1 = asarray(arr1) arr2 = asarray(arr2) lazy_equiv = lazy_array_equiv(arr1, arr2) if lazy_equiv is None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "In the future, 'NAT == x'") flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2) return bool(array_all(flag_array)) else: return lazy_equiv def count(data, axis=None): """Count the number of non-NA in this array along the given axis or axes""" xp = get_array_namespace(data) return xp.sum(xp.logical_not(isnull(data)), axis=axis) def sum_where(data, axis=None, dtype=None, where=None): xp = get_array_namespace(data) if where is not None: a = where_method(xp.zeros_like(data), where, data) else: a = data result = xp.sum(a, axis=axis, dtype=dtype) return result def where(condition, x, y): """Three argument where() with better dtype promotion rules.""" xp = get_array_namespace(condition, x, y) dtype = xp.bool_ if hasattr(xp, "bool_") else xp.bool if not is_duck_array(condition): condition = asarray(condition, dtype=dtype, xp=xp) else: condition = astype(condition, dtype=dtype, xp=xp) return xp.where(condition, *as_shared_dtype([x, y], xp=xp)) def where_method(data, cond, other=dtypes.NA): if other is dtypes.NA: other = dtypes.get_fill_value(data.dtype) return where(cond, data, other) def fillna(data, other): # we need to pass data first so pint has a chance of returning the # correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed return where(notnull(data), data, other) def logical_not(data): xp = get_array_namespace(data) return xp.logical_not(data) def clip(data, min=None, max=None): xp = get_array_namespace(data) return xp.clip(data, min, max) def concatenate(arrays, axis=0): """concatenate() with better dtype promotion rules.""" # TODO: `concat` is the xp compliant name, but fallback to concatenate for # older numpy and for cupy xp = get_array_namespace(*arrays) if hasattr(xp, "concat"): return xp.concat(as_shared_dtype(arrays, xp=xp), axis=axis) else: return xp.concatenate(as_shared_dtype(arrays, xp=xp), axis=axis) def stack(arrays, axis=0): """stack() with better dtype 
promotion rules.""" xp = get_array_namespace(arrays[0]) return xp.stack(as_shared_dtype(arrays, xp=xp), axis=axis) def reshape(array, shape): xp = get_array_namespace(array) return xp.reshape(array, shape) def ravel(array): return reshape(array, (-1,)) def transpose(array, axes=None): xp = get_array_namespace(array) return xp.transpose(array, axes) def moveaxis(array, source, destination): xp = get_array_namespace(array) return xp.moveaxis(array, source, destination) def pad(array, pad_width, **kwargs): xp = get_array_namespace(array) return xp.pad(array, pad_width, **kwargs) def quantile(array, q, axis=None, **kwargs): xp = get_array_namespace(array) return xp.quantile(array, q, axis=axis, **kwargs) @contextlib.contextmanager def _ignore_warnings_if(condition): if condition: with warnings.catch_warnings(): warnings.simplefilter("ignore") yield else: yield def _create_nan_agg_method(name, coerce_strings=False, invariant_0d=False): def f(values, axis=None, skipna=None, **kwargs): if kwargs.pop("out", None) is not None: raise TypeError(f"`out` is not valid for {name}") # The data is invariant in the case of 0d data, so do not # change the data (and dtype) # See https://github.com/pydata/xarray/issues/4885 if invariant_0d and axis == (): return values xp = get_array_namespace(values) values = asarray(values, xp=xp) if coerce_strings and dtypes.is_string(values.dtype): values = astype(values, object) func = None if skipna or ( skipna is None and ( dtypes.isdtype( values.dtype, ("complex floating", "real floating"), xp=xp ) or dtypes.is_object(values.dtype) ) ): from xarray.computation import nanops nanname = "nan" + name func = getattr(nanops, nanname) else: if name in ["sum", "prod"]: kwargs.pop("min_count", None) xp = get_array_namespace(values) func = getattr(xp, name) try: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN slice encountered") return func(values, axis=axis, **kwargs) except AttributeError: if not is_duck_dask_array(values): raise try: # dask/dask#3133 dask sometimes needs dtype argument # if func does not accept dtype, then raises TypeError return func(values, axis=axis, dtype=values.dtype, **kwargs) except (AttributeError, TypeError) as err: raise NotImplementedError( f"{name} is not yet implemented on dask arrays" ) from err f.__name__ = name return f # Attributes `numeric_only`, `available_min_count` is used for docs. 
# See ops.inject_reduce_methods argmax = _create_nan_agg_method("argmax", coerce_strings=True) argmin = _create_nan_agg_method("argmin", coerce_strings=True) max = _create_nan_agg_method("max", coerce_strings=True, invariant_0d=True) min = _create_nan_agg_method("min", coerce_strings=True, invariant_0d=True) sum = _create_nan_agg_method("sum", invariant_0d=True) sum.numeric_only = True sum.available_min_count = True std = _create_nan_agg_method("std") std.numeric_only = True var = _create_nan_agg_method("var") var.numeric_only = True median = _create_nan_agg_method("median", invariant_0d=True) median.numeric_only = True prod = _create_nan_agg_method("prod", invariant_0d=True) prod.numeric_only = True prod.available_min_count = True cumprod_1d = _create_nan_agg_method("cumprod", invariant_0d=True) cumprod_1d.numeric_only = True cumsum_1d = _create_nan_agg_method("cumsum", invariant_0d=True) cumsum_1d.numeric_only = True def array_all(array, axis=None, keepdims=False, **kwargs): xp = get_array_namespace(array) return xp.all(array, axis=axis, keepdims=keepdims, **kwargs) def array_any(array, axis=None, keepdims=False, **kwargs): xp = get_array_namespace(array) return xp.any(array, axis=axis, keepdims=keepdims, **kwargs) _mean = _create_nan_agg_method("mean", invariant_0d=True) def _datetime_nanmin(array): return _datetime_nanreduce(array, min) def _datetime_nanreduce(array, func): """nanreduce() function for datetime64. Caveats that this function deals with: - In numpy < 1.18, min() on datetime64 incorrectly ignores NaT - numpy nanmin() don't work on datetime64 (all versions at the moment of writing) - dask min() does not work on datetime64 (all versions at the moment of writing) """ dtype = array.dtype assert dtypes.is_datetime_like(dtype) # (NaT).astype(float) does not produce NaN... array = where(pandas_isnull(array), np.nan, array.astype(float)) array = func(array, skipna=True) if isinstance(array, float): array = np.array(array) # ...but (NaN).astype("M8") does produce NaT return array.astype(dtype) def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float): """Convert an array containing datetime-like data to numerical values. Convert the datetime array to a timedelta relative to an offset. Parameters ---------- array : array-like Input data offset : None, datetime or cftime.datetime Datetime offset. If None, this is set by default to the array's minimum value to reduce round off errors. datetime_unit : {None, Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as} If not None, convert output to a given datetime unit. Note that some conversions are not allowed due to non-linear relationships between units. dtype : dtype Output dtype. Returns ------- array Numerical representation of datetime object relative to an offset. Notes ----- Some datetime unit conversions won't work, for example from days to years, even though some calendars would allow for them (e.g. no_leap). This is because there is no `cftime.timedelta` object. """ # Set offset to minimum if not given if offset is None: if dtypes.is_datetime_like(array.dtype): offset = _datetime_nanreduce(array, min) else: offset = min(array) # Compute timedelta object. # For np.datetime64, this can silently yield garbage due to overflow. # One option is to enforce 1970-01-01 as the universal offset. # This map_blocks call is for backwards compatibility. 
# dask == 2021.04.1 does not support subtracting object arrays # which is required for cftime if is_duck_dask_array(array) and dtypes.is_object(array.dtype): array = array.map_blocks(lambda a, b: a - b, offset, meta=array._meta) else: array = array - offset # Scalar is converted to 0d-array if not hasattr(array, "dtype"): array = np.array(array) # Convert timedelta objects to float by first converting to microseconds. if dtypes.is_object(array.dtype): return py_timedelta_to_float(array, datetime_unit or "ns").astype(dtype) # Convert np.NaT to np.nan elif dtypes.is_datetime_like(array.dtype): # Convert to specified timedelta units. if datetime_unit: array = array / np.timedelta64(1, datetime_unit) return np.where(isnull(array), np.nan, array.astype(dtype)) def timedelta_to_numeric(value, datetime_unit="ns", dtype=float): """Convert a timedelta-like object to numerical values. Parameters ---------- value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str Time delta representation. datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as} The time units of the output values. Note that some conversions are not allowed due to non-linear relationships between units. dtype : type The output data type. """ if isinstance(value, datetime.timedelta): out = py_timedelta_to_float(value, datetime_unit) elif isinstance(value, np.timedelta64): out = np_timedelta64_to_float(value, datetime_unit) elif isinstance(value, pd.Timedelta): out = pd_timedelta_to_float(value, datetime_unit) elif isinstance(value, str): try: a = pd.to_timedelta(value) except ValueError as err: raise ValueError( f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta" ) from err return py_timedelta_to_float(a, datetime_unit) else: raise TypeError( f"Expected value of type str, pandas.Timedelta, datetime.timedelta " f"or numpy.timedelta64, but received {type(value).__name__}" ) return out.astype(dtype) def _to_pytimedelta(array, unit="us"): return array.astype(f"timedelta64[{unit}]").astype(datetime.timedelta) def np_timedelta64_to_float(array, datetime_unit): """Convert numpy.timedelta64 to float, possibly at a loss of resolution.""" unit, _ = np.datetime_data(array.dtype) conversion_factor = np.timedelta64(1, unit) / np.timedelta64(1, datetime_unit) return conversion_factor * array.astype(np.float64) def pd_timedelta_to_float(value, datetime_unit): """Convert pandas.Timedelta to float. Notes ----- Built on the assumption that pandas timedelta values are in nanoseconds, which is also the numpy default resolution. 
""" value = value.to_timedelta64() return np_timedelta64_to_float(value, datetime_unit) def _timedelta_to_seconds(array): if isinstance(array, datetime.timedelta): return array.total_seconds() * 1e6 else: return np.reshape([a.total_seconds() for a in array.ravel()], array.shape) * 1e6 def py_timedelta_to_float(array, datetime_unit): """Convert a timedelta object to a float, possibly at a loss of resolution.""" array = asarray(array) if is_duck_dask_array(array): array = array.map_blocks( _timedelta_to_seconds, meta=np.array([], dtype=np.float64) ) else: array = _timedelta_to_seconds(array) conversion_factor = np.timedelta64(1, "us") / np.timedelta64(1, datetime_unit) return conversion_factor * array def mean(array, axis=None, skipna=None, **kwargs): """inhouse mean that can handle np.datetime64 or cftime.datetime dtypes""" from xarray.core.common import _contains_cftime_datetimes array = asarray(array) if dtypes.is_datetime_like(array.dtype): dmin = _datetime_nanreduce(array, min).astype("datetime64[Y]").astype(int) dmax = _datetime_nanreduce(array, max).astype("datetime64[Y]").astype(int) offset = ( np.array((dmin + dmax) // 2).astype("datetime64[Y]").astype(array.dtype) ) # From version 2025.01.2 xarray uses np.datetime64[unit], where unit # is one of "s", "ms", "us", "ns". # To not have to worry about the resolution, we just convert the output # to "timedelta64" (without unit) and let the dtype of offset take precedence. # This is fully backwards compatible with datetime64[ns]. return ( _mean( datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs ).astype("timedelta64") + offset ) elif _contains_cftime_datetimes(array): offset = min(array) timedeltas = datetime_to_numeric(array, offset, datetime_unit="us") mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs) return _to_pytimedelta(mean_timedeltas, unit="us") + offset else: return _mean(array, axis=axis, skipna=skipna, **kwargs) mean.numeric_only = True # type: ignore[attr-defined] def _nd_cum_func(cum_func, array, axis, **kwargs): array = asarray(array) if axis is None: axis = tuple(range(array.ndim)) if isinstance(axis, int): axis = (axis,) out = array for ax in axis: out = cum_func(out, axis=ax, **kwargs) return out def ndim(array) -> int: # Required part of the duck array and the array-api, but we fall back in case # https://docs.xarray.dev/en/latest/internals/duck-arrays-integration.html#duck-array-requirements return array.ndim if hasattr(array, "ndim") else np.ndim(array) def cumprod(array, axis=None, **kwargs): """N-dimensional version of cumprod.""" return _nd_cum_func(cumprod_1d, array, axis, **kwargs) def cumsum(array, axis=None, **kwargs): """N-dimensional version of cumsum.""" return _nd_cum_func(cumsum_1d, array, axis, **kwargs) def first(values, axis, skipna=None): """Return the first non-NA elements in this array along the given axis""" if (skipna or skipna is None) and not ( dtypes.isdtype(values.dtype, "signed integer") or dtypes.is_string(values.dtype) ): # only bother for dtypes that can hold NaN if is_chunked_array(values): return chunked_nanfirst(values, axis) else: return nputils.nanfirst(values, axis) return take(values, 0, axis=axis) def last(values, axis, skipna=None): """Return the last non-NA elements in this array along the given axis""" if (skipna or skipna is None) and not ( dtypes.isdtype(values.dtype, "signed integer") or dtypes.is_string(values.dtype) ): # only bother for dtypes that can hold NaN if is_chunked_array(values): return chunked_nanlast(values, axis) else: 
return nputils.nanlast(values, axis) return take(values, -1, axis=axis) def isin(element, test_elements, **kwargs): xp = get_array_namespace(element, test_elements) return xp.isin(element, test_elements, **kwargs) def least_squares(lhs, rhs, rcond=None, skipna=False): """Return the coefficients and residuals of a least-squares fit.""" if is_duck_dask_array(rhs): return dask_array_ops.least_squares(lhs, rhs, rcond=rcond, skipna=skipna) else: return nputils.least_squares(lhs, rhs, rcond=rcond, skipna=skipna) def _push(array, n: int | None = None, axis: int = -1): """ Use either bottleneck or numbagg depending on options & what's available """ if not OPTIONS["use_bottleneck"] and not OPTIONS["use_numbagg"]: raise RuntimeError( "ffill & bfill requires bottleneck or numbagg to be enabled." " Call `xr.set_options(use_bottleneck=True)` or `xr.set_options(use_numbagg=True)` to enable one." ) if OPTIONS["use_numbagg"] and module_available("numbagg"): import numbagg return numbagg.ffill(array, limit=n, axis=axis) # work around for bottleneck 178 limit = n if n is not None else array.shape[axis] import bottleneck as bn return bn.push(array, limit, axis) def push(array, n, axis, method="blelloch"): if not OPTIONS["use_bottleneck"] and not OPTIONS["use_numbagg"]: raise RuntimeError( "ffill & bfill requires bottleneck or numbagg to be enabled." " Call `xr.set_options(use_bottleneck=True)` or `xr.set_options(use_numbagg=True)` to enable one." ) if is_duck_dask_array(array): return dask_array_ops.push(array, n, axis, method=method) else: return _push(array, n, axis) def _first_last_wrapper(array, *, axis, op, keepdims): return op(array, axis, keepdims=keepdims) def _chunked_first_or_last(darray, axis, op): chunkmanager = get_chunked_array_type(darray) # This will raise the same error message seen for numpy axis = normalize_axis_index(axis, darray.ndim) wrapped_op = partial(_first_last_wrapper, op=op) return chunkmanager.reduction( darray, func=wrapped_op, aggregate_func=wrapped_op, axis=axis, dtype=darray.dtype, keepdims=False, # match numpy version ) def chunked_nanfirst(darray, axis): return _chunked_first_or_last(darray, axis, op=nputils.nanfirst) def chunked_nanlast(darray, axis): return _chunked_first_or_last(darray, axis, op=nputils.nanlast) xarray-2025.09.0/xarray/core/extension_array.py000066400000000000000000000161361505620616400213750ustar00rootroot00000000000000from __future__ import annotations import copy from collections.abc import Callable, Sequence from dataclasses import dataclass from typing import Any, Generic, cast import numpy as np import pandas as pd from packaging.version import Version from xarray.core.types import DTypeLikeSave, T_ExtensionArray from xarray.core.utils import NDArrayMixin, is_allowed_extension_array HANDLED_EXTENSION_ARRAY_FUNCTIONS: dict[Callable, Callable] = {} def implements(numpy_function): """Register an __array_function__ implementation for MyArray objects.""" def decorator(func): HANDLED_EXTENSION_ARRAY_FUNCTIONS[numpy_function] = func return func return decorator @implements(np.issubdtype) def __extension_duck_array__issubdtype( extension_array_dtype: T_ExtensionArray, other_dtype: DTypeLikeSave ) -> bool: return False # never want a function to think a pandas extension dtype is a subtype of numpy @implements(np.broadcast_to) def __extension_duck_array__broadcast(arr: T_ExtensionArray, shape: tuple): if shape[0] == len(arr) and len(shape) == 1: return arr raise NotImplementedError("Cannot broadcast 1d-only pandas extension array.") @implements(np.stack) 
def __extension_duck_array__stack(arr: T_ExtensionArray, axis: int): raise NotImplementedError("Cannot stack 1d-only pandas extension array.") @implements(np.concatenate) def __extension_duck_array__concatenate( arrays: Sequence[T_ExtensionArray], axis: int = 0, out=None ) -> T_ExtensionArray: return type(arrays[0])._concat_same_type(arrays) # type: ignore[attr-defined] @implements(np.where) def __extension_duck_array__where( condition: np.ndarray, x: T_ExtensionArray, y: T_ExtensionArray ) -> T_ExtensionArray: if ( isinstance(x, pd.Categorical) and isinstance(y, pd.Categorical) and x.dtype != y.dtype ): x = x.add_categories(set(y.categories).difference(set(x.categories))) # type: ignore[assignment] y = y.add_categories(set(x.categories).difference(set(y.categories))) # type: ignore[assignment] return cast(T_ExtensionArray, pd.Series(x).where(condition, pd.Series(y)).array) @implements(np.ndim) def __extension_duck_array__ndim(x: PandasExtensionArray) -> int: return x.ndim @implements(np.reshape) def __extension_duck_array__reshape( arr: T_ExtensionArray, shape: tuple ) -> T_ExtensionArray: if (shape[0] == len(arr) and len(shape) == 1) or shape == (-1,): return arr raise NotImplementedError( f"Cannot reshape 1d-only pandas extension array to: {shape}" ) @dataclass(frozen=True) class PandasExtensionArray(NDArrayMixin, Generic[T_ExtensionArray]): """NEP-18 compliant wrapper for pandas extension arrays. Parameters ---------- array : T_ExtensionArray The array to be wrapped upon e.g,. :py:class:`xarray.Variable` creation. ``` """ array: T_ExtensionArray def __post_init__(self): if not isinstance(self.array, pd.api.extensions.ExtensionArray): raise TypeError(f"{self.array} is not an pandas ExtensionArray.") # This does not use the UNSUPPORTED_EXTENSION_ARRAY_TYPES whitelist because # we do support extension arrays from datetime, for example, that need # duck array support internally via this class. These can appear from `DatetimeIndex` # wrapped by `PandasIndex` internally, for example. if not is_allowed_extension_array(self.array): raise TypeError( f"{self.array.dtype!r} should be converted to a numpy array in `xarray` internally." ) def __array_function__(self, func, types, args, kwargs): def replace_duck_with_extension_array(args) -> list: args_as_list = list(args) for index, value in enumerate(args_as_list): if isinstance(value, PandasExtensionArray): args_as_list[index] = value.array elif isinstance( value, tuple ): # should handle more than just tuple? iterable? 
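# --- Editor's illustrative sketch (not part of the archived module) ---------
# ``PandasExtensionArray`` above is what lets xarray keep pandas extension
# dtypes (e.g. Categorical) instead of coercing them to object arrays; a
# minimal sketch of that behaviour, assuming a recent pandas:
def _example_extension_array_preserved() -> None:
    import pandas as pd
    import xarray as xr

    ds = xr.Dataset({"letters": ("x", pd.Categorical(["a", "b", "a"]))})
    assert isinstance(ds["letters"].dtype, pd.CategoricalDtype)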
args_as_list[index] = tuple( replace_duck_with_extension_array(value) ) elif isinstance(value, list): args_as_list[index] = replace_duck_with_extension_array(value) return args_as_list args = tuple(replace_duck_with_extension_array(args)) if func not in HANDLED_EXTENSION_ARRAY_FUNCTIONS: raise KeyError("Function not registered for pandas extension arrays.") res = HANDLED_EXTENSION_ARRAY_FUNCTIONS[func](*args, **kwargs) if is_allowed_extension_array(res): return PandasExtensionArray(res) return res def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): return ufunc(*inputs, **kwargs) def __getitem__(self, key) -> PandasExtensionArray[T_ExtensionArray]: item = self.array[key] if is_allowed_extension_array(item): return PandasExtensionArray(item) if np.isscalar(item) or isinstance(key, int): return PandasExtensionArray(type(self.array)._from_sequence([item])) # type: ignore[call-arg,attr-defined,unused-ignore] return PandasExtensionArray(item) def __setitem__(self, key, val): self.array[key] = val def __eq__(self, other): if isinstance(other, PandasExtensionArray): return self.array == other.array return self.array == other def __ne__(self, other): return ~(self == other) def __len__(self): return len(self.array) @property def ndim(self) -> int: return 1 def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.array, dtype=dtype, copy=copy) else: return np.asarray(self.array, dtype=dtype) def __getattr__(self, attr: str) -> Any: # with __deepcopy__ or __copy__, the object is first constructed and then the sub-objects are attached (see https://docs.python.org/3/library/copy.html) # Thus, if we didn't have `super().__getattribute__("array")` this method would call `self.array` (i.e., `getattr(self, "array")`) again while looking for `__setstate__` # (which is apparently the first thing sought in copy.copy from the under-construction copied object), # which would cause a recursion error since `array` is not present on the object when it is being constructed during `__{deep}copy__`. # Even though we have defined these two methods now below due to `test_extension_array_copy_arrow_type` (cause unknown) # we leave this here as it more robust than self.array return getattr(super().__getattribute__("array"), attr) def __copy__(self) -> PandasExtensionArray[T_ExtensionArray]: return PandasExtensionArray(copy.copy(self.array)) def __deepcopy__( self, memo: dict[int, Any] | None = None ) -> PandasExtensionArray[T_ExtensionArray]: return PandasExtensionArray(copy.deepcopy(self.array, memo=memo)) xarray-2025.09.0/xarray/core/extensions.py000066400000000000000000000076551505620616400203700ustar00rootroot00000000000000from __future__ import annotations import warnings from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree class AccessorRegistrationWarning(Warning): """Warning for conflicts in accessor registration.""" class _CachedAccessor: """Custom property-like object (descriptor) for caching accessors.""" def __init__(self, name, accessor): self._name = name self._accessor = accessor def __get__(self, obj, cls): if obj is None: # we're accessing the attribute of the class, i.e., Dataset.geo return self._accessor # Use the same dict as @pandas.util.cache_readonly. # It must be explicitly declared in obj.__slots__. 
try: cache = obj._cache except AttributeError: cache = obj._cache = {} try: return cache[self._name] except KeyError: pass try: accessor_obj = self._accessor(obj) except AttributeError as err: # __getattr__ on data object will swallow any AttributeErrors # raised when initializing the accessor, so we need to raise as # something else (GH933): raise RuntimeError(f"error initializing {self._name!r} accessor.") from err cache[self._name] = accessor_obj return accessor_obj def _register_accessor(name, cls): def decorator(accessor): if hasattr(cls, name): warnings.warn( f"registration of accessor {accessor!r} under name {name!r} for type {cls!r} is " "overriding a preexisting attribute with the same name.", AccessorRegistrationWarning, stacklevel=2, ) setattr(cls, name, _CachedAccessor(name, accessor)) return accessor return decorator def register_dataarray_accessor(name): """Register a custom accessor on xarray.DataArray objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. See Also -------- register_dataset_accessor """ return _register_accessor(name, DataArray) def register_dataset_accessor(name): """Register a custom property on xarray.Dataset objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. Examples -------- In your library code: >>> @xr.register_dataset_accessor("geo") ... class GeoAccessor: ... def __init__(self, xarray_obj): ... self._obj = xarray_obj ... ... @property ... def center(self): ... # return the geographic center point of this dataset ... lon = self._obj.latitude ... lat = self._obj.longitude ... return (float(lon.mean()), float(lat.mean())) ... ... def plot(self): ... # plot this array's data on a map, e.g., using Cartopy ... pass ... Back in an interactive IPython session: >>> ds = xr.Dataset( ... {"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)} ... ) >>> ds.geo.center (10.0, 5.0) >>> ds.geo.plot() # plots data on a map See Also -------- register_dataarray_accessor """ return _register_accessor(name, Dataset) def register_datatree_accessor(name): """Register a custom accessor on DataTree objects. Parameters ---------- name : str Name under which the accessor should be registered. A warning is issued if this name conflicts with a preexisting attribute. 
See Also -------- xarray.register_dataarray_accessor xarray.register_dataset_accessor """ return _register_accessor(name, DataTree) xarray-2025.09.0/xarray/core/formatting.py000066400000000000000000001167461505620616400203450ustar00rootroot00000000000000"""String formatting routines for __repr__.""" from __future__ import annotations import contextlib import functools import math from collections import ChainMap, defaultdict from collections.abc import Collection, Hashable, Mapping, Sequence from datetime import datetime, timedelta from itertools import chain, zip_longest from reprlib import recursive_repr from textwrap import indent from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd from pandas.errors import OutOfBoundsDatetime from xarray.core.datatree_render import RenderDataTree from xarray.core.duck_array_ops import array_all, array_any, array_equiv, astype, ravel from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( BasicIndexer, ExplicitlyIndexed, MemoryCachedArray, ) from xarray.core.options import OPTIONS, _get_boolean_with_default from xarray.core.treenode import group_subtrees from xarray.core.utils import is_duck_array from xarray.namedarray.pycompat import array_type, to_duck_array if TYPE_CHECKING: from xarray.core.coordinates import AbstractCoordinates from xarray.core.datatree import DataTree from xarray.core.variable import Variable UNITS = ("B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") def pretty_print(x, numchars: int): """Given an object `x`, call `str(x)` and format the returned string so that it is numchars long, padding with trailing spaces or truncating with ellipses as necessary """ s = maybe_truncate(x, numchars) return s + " " * max(numchars - len(s), 0) def maybe_truncate(obj, maxlen=500): s = str(obj) if len(s) > maxlen: s = s[: (maxlen - 3)] + "..." return s def wrap_indent(text, start="", length=None): if length is None: length = len(start) indent = "\n" + " " * length return start + indent.join(x for x in text.splitlines()) def _get_indexer_at_least_n_items(shape, n_desired, from_end): assert 0 < n_desired <= math.prod(shape) cum_items = np.cumprod(shape[::-1]) n_steps = np.argmax(cum_items >= n_desired) stop = math.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]) indexer = ( ((-1 if from_end else 0),) * (len(shape) - 1 - n_steps) + ((slice(-stop, None) if from_end else slice(stop)),) + (slice(None),) * n_steps ) return indexer def first_n_items(array, n_desired): """Returns the first n_desired items of an array""" # Unfortunately, we can't just do array.flat[:n_desired] here because it # might not be a numpy.ndarray. Moreover, access to elements of the array # could be very expensive (e.g. if it's only available over DAP), so go out # of our way to get them in a single call to __getitem__ using only slices. from xarray.core.variable import Variable if n_desired < 1: raise ValueError("must request at least one item") if array.size == 0: # work around for https://github.com/numpy/numpy/issues/5195 return [] if n_desired < array.size: indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False) if isinstance(array, ExplicitlyIndexed): indexer = BasicIndexer(indexer) array = array[indexer] # We pass variable objects in to handle indexing # with indexer above. 
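# --- Editor's illustrative sketch (not part of the archived module) ---------
# ``register_datatree_accessor`` above has no Examples section; a minimal
# hypothetical accessor, mirroring the Dataset accessor example earlier in
# the file:
def _example_register_datatree_accessor() -> None:
    import xarray as xr

    @xr.register_datatree_accessor("demo")
    class DemoAccessor:
        def __init__(self, tree):
            self._tree = tree

        @property
        def n_nodes(self) -> int:
            # count every node in the tree, including the root
            return len(list(self._tree.subtree))

    tree = xr.DataTree.from_dict({"/": None, "/child": None})
    assert tree.demo.n_nodes == 2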
It would not work with our # lazy indexing classes at the moment, so we cannot # pass Variable._data if isinstance(array, Variable): array = array._data return ravel(to_duck_array(array))[:n_desired] def last_n_items(array, n_desired): """Returns the last n_desired items of an array""" # Unfortunately, we can't just do array.flat[-n_desired:] here because it # might not be a numpy.ndarray. Moreover, access to elements of the array # could be very expensive (e.g. if it's only available over DAP), so go out # of our way to get them in a single call to __getitem__ using only slices. from xarray.core.variable import Variable if (n_desired == 0) or (array.size == 0): return [] if n_desired < array.size: indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True) if isinstance(array, ExplicitlyIndexed): indexer = BasicIndexer(indexer) array = array[indexer] # We pass variable objects in to handle indexing # with indexer above. It would not work with our # lazy indexing classes at the moment, so we cannot # pass Variable._data if isinstance(array, Variable): array = array._data return ravel(to_duck_array(array))[-n_desired:] def last_item(array): """Returns the last item of an array.""" indexer = (slice(-1, None),) * array.ndim return ravel(to_duck_array(array[indexer])) def calc_max_rows_first(max_rows: int) -> int: """Calculate the first rows to maintain the max number of rows.""" return max_rows // 2 + max_rows % 2 def calc_max_rows_last(max_rows: int) -> int: """Calculate the last rows to maintain the max number of rows.""" return max_rows // 2 def format_timestamp(t): """Cast given object to a Timestamp and return a nicely formatted string""" try: timestamp = pd.Timestamp(t) datetime_str = timestamp.isoformat(sep=" ") except OutOfBoundsDatetime: datetime_str = str(t) try: date_str, time_str = datetime_str.split() except ValueError: # catch NaT and others that don't split nicely return datetime_str else: if time_str == "00:00:00": return date_str else: return f"{date_str}T{time_str}" def format_timedelta(t, timedelta_format=None): """Cast given object to a Timestamp and return a nicely formatted string""" timedelta_str = str(pd.Timedelta(t)) try: days_str, time_str = timedelta_str.split(" days ") except ValueError: # catch NaT and others that don't split nicely return timedelta_str else: if timedelta_format == "date": return days_str + " days" elif timedelta_format == "time": return time_str else: return timedelta_str def format_item(x, timedelta_format=None, quote_strings=True): """Returns a succinct summary of an object as a string""" if isinstance(x, PandasExtensionArray): # We want to bypass PandasExtensionArray's repr here # because its __repr__ is PandasExtensionArray(array=[...]) # and this function is only for single elements. 
return str(x.array[0]) if isinstance(x, np.datetime64 | datetime): return format_timestamp(x) if isinstance(x, np.timedelta64 | timedelta): return format_timedelta(x, timedelta_format=timedelta_format) elif isinstance(x, str | bytes): if hasattr(x, "dtype"): x = x.item() return repr(x) if quote_strings else x elif hasattr(x, "dtype") and np.issubdtype(x.dtype, np.floating) and x.shape == (): return f"{x.item():.4}" else: return str(x) def format_items(x): """Returns a succinct summaries of all items in a sequence as strings""" x = to_duck_array(x) timedelta_format = "datetime" if not isinstance(x, PandasExtensionArray) and np.issubdtype( x.dtype, np.timedelta64 ): x = astype(x, dtype="timedelta64[ns]") day_part = x[~pd.isnull(x)].astype("timedelta64[D]").astype("timedelta64[ns]") time_needed = x[~pd.isnull(x)] != day_part day_needed = day_part != np.timedelta64(0, "ns") if array_all(np.logical_not(day_needed)): timedelta_format = "time" elif array_all(np.logical_not(time_needed)): timedelta_format = "date" formatted = [format_item(xi, timedelta_format) for xi in x] return formatted def format_array_flat(array, max_width: int): """Return a formatted string for as many items in the flattened version of array that will fit within max_width characters. """ # every item will take up at least two characters, but we always want to # print at least first and last items max_possibly_relevant = min(max(array.size, 1), max(math.ceil(max_width / 2.0), 2)) relevant_front_items = format_items( first_n_items(array, (max_possibly_relevant + 1) // 2) ) relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2)) # interleave relevant front and back items: # [a, b, c] and [y, z] -> [a, z, b, y, c] relevant_items = sum( zip_longest(relevant_front_items, reversed(relevant_back_items)), () )[:max_possibly_relevant] cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1 if (array.size > 2) and ( (max_possibly_relevant < array.size) or array_any(cum_len > max_width) ): padding = " ... " max_len = max(int(np.argmax(cum_len + len(padding) - 1 > max_width)), 2) count = min(array.size, max_len) else: count = array.size padding = "" if (count <= 1) else " " num_front = (count + 1) // 2 num_back = count - num_front # note that num_back is 0 <--> array.size is 0 or 1 # <--> relevant_back_items is [] pprint_str = "".join( [ " ".join(relevant_front_items[:num_front]), padding, " ".join(relevant_back_items[-num_back:]), ] ) # As a final check, if it's still too long even with the limit in values, # replace the end with an ellipsis # NB: this will still returns a full 3-character ellipsis when max_width < 3 if len(pprint_str) > max_width: pprint_str = pprint_str[: max(max_width - 3, 0)] + "..." return pprint_str # mapping of tuple[modulename, classname] to repr _KNOWN_TYPE_REPRS = { ("numpy", "ndarray"): "np.ndarray", ("sparse._coo.core", "COO"): "sparse.COO", } def inline_dask_repr(array): """Similar to dask.array.DataArray.__repr__, but without redundant information that's already printed by the repr function of the xarray wrapper. 
""" assert isinstance(array, array_type("dask")), array chunksize = tuple(c[0] for c in array.chunks) if hasattr(array, "_meta"): meta = array._meta identifier = (type(meta).__module__, type(meta).__name__) meta_repr = _KNOWN_TYPE_REPRS.get(identifier, ".".join(identifier)) meta_string = f", meta={meta_repr}" else: meta_string = "" return f"dask.array" def inline_sparse_repr(array): """Similar to sparse.COO.__repr__, but without the redundant shape/dtype.""" sparse_array_type = array_type("sparse") assert isinstance(array, sparse_array_type), array return f"<{type(array).__name__}: nnz={array.nnz:d}, fill_value={array.fill_value}>" def inline_variable_array_repr(var, max_width): """Build a one-line summary of a variable's data.""" if hasattr(var._data, "_repr_inline_"): return var._data._repr_inline_(max_width) if getattr(var, "_in_memory", False): return format_array_flat(var, max_width) dask_array_type = array_type("dask") if isinstance(var._data, dask_array_type): return inline_dask_repr(var.data) sparse_array_type = array_type("sparse") if isinstance(var._data, sparse_array_type): return inline_sparse_repr(var.data) if hasattr(var._data, "__array_function__"): return maybe_truncate(repr(var._data).replace("\n", " "), max_width) # internal xarray array type return "..." def summarize_variable( name: Hashable, var: Variable, col_width: int | None = None, max_width: int | None = None, is_index: bool = False, ): """Summarize a variable in one line, e.g., for the Dataset.__repr__.""" variable = getattr(var, "variable", var) if max_width is None: max_width_options = OPTIONS["display_width"] if not isinstance(max_width_options, int): raise TypeError(f"`max_width` value of `{max_width}` is not a valid int") else: max_width = max_width_options marker = "*" if is_index else " " first_col = f" {marker} {name} " if col_width is not None: first_col = pretty_print(first_col, col_width) if variable.dims: dims_str = ", ".join(map(str, variable.dims)) dims_str = f"({dims_str}) " else: dims_str = "" front_str = f"{first_col}{dims_str}{variable.dtype} {render_human_readable_nbytes(variable.nbytes)} " values_width = max_width - len(front_str) values_str = inline_variable_array_repr(variable, values_width) return f"{front_str}{values_str}" def summarize_attr(key, value, col_width=None): """Summary for __repr__ - use ``X.attrs[key]`` for full value.""" # Indent key and add ':', then right-pad if col_width is not None k_str = f" {key}:" if col_width is not None: k_str = pretty_print(k_str, col_width) # Replace tabs and newlines, so we print on one line in known width v_str = str(value).replace("\t", "\\t").replace("\n", "\\n") # Finally, truncate to the desired display width return maybe_truncate(f"{k_str} {v_str}", OPTIONS["display_width"]) EMPTY_REPR = " *empty*" def _calculate_col_width(col_items): max_name_length = max((len(str(s)) for s in col_items), default=0) col_width = max(max_name_length, 7) + 6 return col_width def _mapping_repr( mapping, title, summarizer, expand_option_name, col_width=None, max_rows=None, indexes=None, ): if col_width is None: col_width = _calculate_col_width(mapping) summarizer_kwargs = defaultdict(dict) if indexes is not None: summarizer_kwargs = {k: {"is_index": k in indexes} for k in mapping} summary = [f"{title}:"] if mapping: len_mapping = len(mapping) if not _get_boolean_with_default(expand_option_name, default=True): summary = [f"{summary[0]} ({len_mapping})"] elif max_rows is not None and len_mapping > max_rows: summary = [f"{summary[0]} ({max_rows}/{len_mapping})"] 
first_rows = calc_max_rows_first(max_rows) keys = list(mapping.keys()) summary += [ summarizer(k, mapping[k], col_width, **summarizer_kwargs[k]) for k in keys[:first_rows] ] if max_rows > 1: last_rows = calc_max_rows_last(max_rows) summary += [pretty_print(" ...", col_width) + " ..."] summary += [ summarizer(k, mapping[k], col_width, **summarizer_kwargs[k]) for k in keys[-last_rows:] ] else: summary += [ summarizer(k, v, col_width, **summarizer_kwargs[k]) for k, v in mapping.items() ] else: summary += [EMPTY_REPR] return "\n".join(summary) data_vars_repr = functools.partial( _mapping_repr, title="Data variables", summarizer=summarize_variable, expand_option_name="display_expand_data_vars", ) attrs_repr = functools.partial( _mapping_repr, title="Attributes", summarizer=summarize_attr, expand_option_name="display_expand_attrs", ) def coords_repr(coords: AbstractCoordinates, col_width=None, max_rows=None): if col_width is None: col_width = _calculate_col_width(coords) return _mapping_repr( coords, title="Coordinates", summarizer=summarize_variable, expand_option_name="display_expand_coords", col_width=col_width, indexes=coords.xindexes, max_rows=max_rows, ) def inherited_coords_repr(node: DataTree, col_width=None, max_rows=None): coords = inherited_vars(node._coord_variables) if col_width is None: col_width = _calculate_col_width(coords) return _mapping_repr( coords, title="Inherited coordinates", summarizer=summarize_variable, expand_option_name="display_expand_coords", col_width=col_width, indexes=node._indexes, max_rows=max_rows, ) def inline_index_repr(index: pd.Index, max_width: int) -> str: if hasattr(index, "_repr_inline_"): repr_ = index._repr_inline_(max_width=max_width) else: # fallback for the `pandas.Index` subclasses from # `Indexes.get_pandas_indexes` / `xr_obj.indexes` repr_ = repr(index) return repr_ def summarize_index( names: tuple[Hashable, ...], index, col_width: int, max_width: int | None = None, ) -> str: if max_width is None: max_width = OPTIONS["display_width"] def prefixes(length: int) -> list[str]: if length in (0, 1): return [" "] return ["β”Œ"] + ["β”‚"] * max(length - 2, 0) + ["β””"] preformatted = [ pretty_print(f" {prefix} {name}", col_width) for prefix, name in zip(prefixes(len(names)), names, strict=True) ] head, *tail = preformatted index_width = max_width - len(head) repr_ = inline_index_repr(index, max_width=index_width) return "\n".join([head + repr_] + [line.rstrip() for line in tail]) def filter_nondefault_indexes(indexes, filter_indexes: bool): from xarray.core.indexes import PandasIndex, PandasMultiIndex if not filter_indexes: return indexes default_indexes = (PandasIndex, PandasMultiIndex) return { key: index for key, index in indexes.items() if not isinstance(index, default_indexes) } def indexes_repr(indexes, max_rows: int | None = None, title: str = "Indexes") -> str: col_width = _calculate_col_width(chain.from_iterable(indexes)) return _mapping_repr( indexes, title, summarize_index, "display_expand_indexes", col_width=col_width, max_rows=max_rows, ) def dim_summary(obj): elements = [f"{k}: {v}" for k, v in obj.sizes.items()] return ", ".join(elements) def _element_formatter( elements: Collection[Hashable], col_width: int, max_rows: int | None = None, delimiter: str = ", ", ) -> str: """ Formats elements for better readability. Once it becomes wider than the display width it will create a newline and continue indented to col_width. Once there are more rows than the maximum displayed rows it will start removing rows. 
Parameters ---------- elements : Collection of hashable Elements to join together. col_width : int The width to indent to if a newline has been made. max_rows : int, optional The maximum number of allowed rows. The default is None. delimiter : str, optional Delimiter to use between each element. The default is ", ". """ elements_len = len(elements) out = [""] length_row = 0 for i, v in enumerate(elements): delim = delimiter if i < elements_len - 1 else "" v_delim = f"{v}{delim}" length_element = len(v_delim) length_row += length_element # Create a new row if the next elements makes the print wider than # the maximum display width: if col_width + length_row > OPTIONS["display_width"]: out[-1] = out[-1].rstrip() # Remove trailing whitespace. out.append("\n" + pretty_print("", col_width) + v_delim) length_row = length_element else: out[-1] += v_delim # If there are too many rows of dimensions trim some away: if max_rows and (len(out) > max_rows): first_rows = calc_max_rows_first(max_rows) last_rows = calc_max_rows_last(max_rows) out = ( out[:first_rows] + ["\n" + pretty_print("", col_width) + "..."] + (out[-last_rows:] if max_rows > 1 else []) ) return "".join(out) def dim_summary_limited( sizes: Mapping[Any, int], col_width: int, max_rows: int | None = None ) -> str: elements = [f"{k}: {v}" for k, v in sizes.items()] return _element_formatter(elements, col_width, max_rows) def unindexed_dims_repr(dims, coords, max_rows: int | None = None): unindexed_dims = [d for d in dims if d not in coords] if unindexed_dims: dims_start = "Dimensions without coordinates: " dims_str = _element_formatter( unindexed_dims, col_width=len(dims_start), max_rows=max_rows ) return dims_start + dims_str else: return None @contextlib.contextmanager def set_numpy_options(*args, **kwargs): original = np.get_printoptions() np.set_printoptions(*args, **kwargs) try: yield finally: np.set_printoptions(**original) def limit_lines(string: str, *, limit: int): """ If the string is more lines than the limit, this returns the middle lines replaced by an ellipsis """ lines = string.splitlines() if len(lines) > limit: string = "\n".join(chain(lines[: limit // 2], ["..."], lines[-limit // 2 :])) return string def short_array_repr(array): from xarray.core.common import AbstractArray if isinstance(array, AbstractArray): array = array.data if isinstance(array, pd.api.extensions.ExtensionArray): return repr(array) array = to_duck_array(array) # default to lower precision so a full (abbreviated) line can fit on # one line with the default display_width options = { "precision": 6, "linewidth": OPTIONS["display_width"], "threshold": OPTIONS["display_values_threshold"], } if array.ndim < 3: edgeitems = 3 elif array.ndim == 3: edgeitems = 2 else: edgeitems = 1 options["edgeitems"] = edgeitems with set_numpy_options(**options): return repr(array) def short_data_repr(array): """Format "data" for DataArray and Variable.""" internal_data = getattr(array, "variable", array)._data if isinstance(array, np.ndarray): return short_array_repr(array) elif is_duck_array(internal_data): return limit_lines(repr(array.data), limit=40) elif getattr(array, "_in_memory", None): return short_array_repr(array) else: # internal xarray array type return f"[{array.size} values with dtype={array.dtype}]" def _get_indexes_dict(indexes): return { tuple(index_vars.keys()): idx for idx, index_vars in indexes.group_by_index() } @recursive_repr("") def array_repr(arr): from xarray.core.variable import Variable max_rows = OPTIONS["display_max_rows"] # used for DataArray, 
Variable and IndexVariable if hasattr(arr, "name") and arr.name is not None: name_str = f"{arr.name!r} " else: name_str = "" if ( isinstance(arr, Variable) or _get_boolean_with_default("display_expand_data", default=True) or isinstance(arr.variable._data, MemoryCachedArray) ): data_repr = short_data_repr(arr) else: data_repr = inline_variable_array_repr(arr.variable, OPTIONS["display_width"]) start = f" Size: {nbytes_str}", data_repr, ] if hasattr(arr, "coords"): if arr.coords: col_width = _calculate_col_width(arr.coords) summary.append( coords_repr(arr.coords, col_width=col_width, max_rows=max_rows) ) unindexed_dims_str = unindexed_dims_repr( arr.dims, arr.coords, max_rows=max_rows ) if unindexed_dims_str: summary.append(unindexed_dims_str) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(arr.xindexes), not display_default_indexes ) if xindexes: summary.append(indexes_repr(xindexes, max_rows=max_rows)) if arr.attrs: summary.append(attrs_repr(arr.attrs, max_rows=max_rows)) return "\n".join(summary) @recursive_repr("") def dataset_repr(ds): nbytes_str = render_human_readable_nbytes(ds.nbytes) summary = [f" Size: {nbytes_str}"] col_width = _calculate_col_width(ds.variables) max_rows = OPTIONS["display_max_rows"] dims_start = pretty_print("Dimensions:", col_width) dims_values = dim_summary_limited( ds.sizes, col_width=col_width + 1, max_rows=max_rows ) summary.append(f"{dims_start}({dims_values})") if ds.coords: summary.append(coords_repr(ds.coords, col_width=col_width, max_rows=max_rows)) unindexed_dims_str = unindexed_dims_repr(ds.dims, ds.coords, max_rows=max_rows) if unindexed_dims_str: summary.append(unindexed_dims_str) summary.append(data_vars_repr(ds.data_vars, col_width=col_width, max_rows=max_rows)) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(ds.xindexes), not display_default_indexes ) if xindexes: summary.append(indexes_repr(xindexes, max_rows=max_rows)) if ds.attrs: summary.append(attrs_repr(ds.attrs, max_rows=max_rows)) return "\n".join(summary) def dims_and_coords_repr(ds) -> str: """Partial Dataset repr for use inside DataTree inheritance errors.""" summary = [] col_width = _calculate_col_width(ds.coords) max_rows = OPTIONS["display_max_rows"] dims_start = pretty_print("Dimensions:", col_width) dims_values = dim_summary_limited( ds.sizes, col_width=col_width + 1, max_rows=max_rows ) summary.append(f"{dims_start}({dims_values})") if ds.coords: summary.append(coords_repr(ds.coords, col_width=col_width, max_rows=max_rows)) unindexed_dims_str = unindexed_dims_repr(ds.dims, ds.coords, max_rows=max_rows) if unindexed_dims_str: summary.append(unindexed_dims_str) return "\n".join(summary) def diff_name_summary(a, b) -> str: if a.name != b.name: return f"Differing names:\n {a.name!r} != {b.name!r}" else: return "" def diff_dim_summary(a, b) -> str: if a.sizes != b.sizes: return f"Differing dimensions:\n ({dim_summary(a)}) != ({dim_summary(b)})" else: return "" def _diff_mapping_repr( a_mapping, b_mapping, compat, title, summarizer, col_width=None, a_indexes=None, b_indexes=None, ): def compare_attr(a, b): if is_duck_array(a) or is_duck_array(b): return array_equiv(a, b) else: return a == b def extra_items_repr(extra_keys, mapping, ab_side, kwargs): extra_repr = [ summarizer(k, mapping[k], col_width, **kwargs[k]) for k in extra_keys ] if extra_repr: header = f"{title} only on the {ab_side} 
object:" return [header] + extra_repr else: return [] a_keys = set(a_mapping) b_keys = set(b_mapping) summary = [] diff_items = [] a_summarizer_kwargs = defaultdict(dict) if a_indexes is not None: a_summarizer_kwargs = {k: {"is_index": k in a_indexes} for k in a_mapping} b_summarizer_kwargs = defaultdict(dict) if b_indexes is not None: b_summarizer_kwargs = {k: {"is_index": k in b_indexes} for k in b_mapping} for k in a_keys & b_keys: try: # compare xarray variable if not callable(compat): compatible = getattr(a_mapping[k].variable, compat)( b_mapping[k].variable ) else: compatible = compat(a_mapping[k].variable, b_mapping[k].variable) is_variable = True except AttributeError: # compare attribute value compatible = compare_attr(a_mapping[k], b_mapping[k]) is_variable = False if not compatible: temp = [ summarizer(k, a_mapping[k], col_width, **a_summarizer_kwargs[k]), summarizer(k, b_mapping[k], col_width, **b_summarizer_kwargs[k]), ] if compat == "identical" and is_variable: attrs_summary = [] a_attrs = a_mapping[k].attrs b_attrs = b_mapping[k].attrs attrs_to_print = set(a_attrs) ^ set(b_attrs) attrs_to_print.update( { k for k in set(a_attrs) & set(b_attrs) if not compare_attr(a_attrs[k], b_attrs[k]) } ) for m in (a_mapping, b_mapping): attr_s = "\n".join( " " + summarize_attr(ak, av) for ak, av in m[k].attrs.items() if ak in attrs_to_print ) if attr_s: attr_s = " Differing variable attributes:\n" + attr_s attrs_summary.append(attr_s) temp = [ f"{var_s}\n{attr_s}" if attr_s else var_s for var_s, attr_s in zip(temp, attrs_summary, strict=True) ] # TODO: It should be possible recursively use _diff_mapping_repr # instead of explicitly handling variable attrs specially. # That would require some refactoring. # newdiff = _diff_mapping_repr( # {k: v for k,v in a_attrs.items() if k in attrs_to_print}, # {k: v for k,v in b_attrs.items() if k in attrs_to_print}, # compat=compat, # summarizer=summarize_attr, # title="Variable Attributes" # ) # temp += [newdiff] diff_items += [ ab_side + s[1:] for ab_side, s in zip(("L", "R"), temp, strict=True) ] if diff_items: summary += [f"Differing {title.lower()}:"] + diff_items summary += extra_items_repr(a_keys - b_keys, a_mapping, "left", a_summarizer_kwargs) summary += extra_items_repr( b_keys - a_keys, b_mapping, "right", b_summarizer_kwargs ) return "\n".join(summary) def diff_coords_repr(a, b, compat, col_width=None): return _diff_mapping_repr( a, b, compat, "Coordinates", summarize_variable, col_width=col_width, a_indexes=a.xindexes, b_indexes=b.xindexes, ) diff_data_vars_repr = functools.partial( _diff_mapping_repr, title="Data variables", summarizer=summarize_variable ) diff_attrs_repr = functools.partial( _diff_mapping_repr, title="Attributes", summarizer=summarize_attr ) def _compat_to_str(compat): if callable(compat): compat = compat.__name__ if compat == "equals": return "equal" elif compat == "allclose": return "close" else: return compat def diff_array_repr(a, b, compat): # used for DataArray, Variable and IndexVariable summary = [ f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}" ] if dims_diff := diff_dim_summary(a, b): summary.append(dims_diff) if callable(compat): equiv = compat else: equiv = array_equiv if not equiv(a.data, b.data): temp = [wrap_indent(short_array_repr(obj), start=" ") for obj in (a, b)] diff_data_repr = [ ab_side + "\n" + ab_data_repr for ab_side, ab_data_repr in zip(("L", "R"), temp, strict=True) ] summary += ["Differing values:"] + diff_data_repr if hasattr(a, "coords"): col_width = 
_calculate_col_width(set(a.coords) | set(b.coords)) if coords_diff := diff_coords_repr( a.coords, b.coords, compat, col_width=col_width ): summary.append(coords_diff) if compat == "identical" and ( attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat) ): summary.append(attrs_diff) return "\n".join(summary) def diff_treestructure(a: DataTree, b: DataTree) -> str | None: """ Return a summary of why two trees are not isomorphic. If they are isomorphic return None. """ # .group_subtrees walks nodes in breadth-first-order, in order to produce as # shallow of a diff as possible for path, (node_a, node_b) in group_subtrees(a, b): if node_a.children.keys() != node_b.children.keys(): path_str = "root node" if path == "." else f"node {path!r}" child_summary = f"{list(node_a.children)} vs {list(node_b.children)}" diff = f"Children at {path_str} do not match: {child_summary}" return diff return None def diff_dataset_repr(a, b, compat): summary = [ f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}" ] col_width = _calculate_col_width(set(list(a.variables) + list(b.variables))) if dims_diff := diff_dim_summary(a, b): summary.append(dims_diff) if coords_diff := diff_coords_repr(a.coords, b.coords, compat, col_width=col_width): summary.append(coords_diff) if data_diff := diff_data_vars_repr( a.data_vars, b.data_vars, compat, col_width=col_width ): summary.append(data_diff) if compat == "identical" and ( attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat) ): summary.append(attrs_diff) return "\n".join(summary) def diff_nodewise_summary(a: DataTree, b: DataTree, compat): """Iterates over all corresponding nodes, recording differences between data at each location.""" summary = [] for path, (node_a, node_b) in group_subtrees(a, b): a_ds, b_ds = node_a.dataset, node_b.dataset if not a_ds._all_compat(b_ds, compat): path_str = "root node" if path == "." else f"node {path!r}" dataset_diff = diff_dataset_repr(a_ds, b_ds, compat) data_diff = indent( "\n".join(dataset_diff.split("\n", 1)[1:]), prefix=" " ) nodediff = f"Data at {path_str} does not match:\n{data_diff}" summary.append(nodediff) return "\n\n".join(summary) def diff_datatree_repr(a: DataTree, b: DataTree, compat): summary = [ f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}" ] if compat == "identical" and (diff_name := diff_name_summary(a, b)): summary.append(diff_name) treestructure_diff = diff_treestructure(a, b) # If the trees structures are different there is no point comparing each node, # and doing so would raise an error. # TODO we could show any differences in nodes up to the first place that structure differs? if treestructure_diff is not None: summary.append(treestructure_diff) elif compat != "isomorphic": nodewise_diff = diff_nodewise_summary(a, b, compat) summary.append(nodewise_diff) return "\n\n".join(summary) def inherited_vars(mapping: ChainMap) -> dict: return {k: v for k, v in mapping.parents.items() if k not in mapping.maps[0]} def _datatree_node_repr(node: DataTree, show_inherited: bool) -> str: summary = [f"Group: {node.path}"] col_width = _calculate_col_width(node.variables) max_rows = OPTIONS["display_max_rows"] inherited_coords = inherited_vars(node._coord_variables) # Only show dimensions if also showing a variable or coordinates section. show_dims = ( node._node_coord_variables or (show_inherited and inherited_coords) or node._data_variables ) dim_sizes = node.sizes if show_inherited else node._node_dims if show_dims: # Includes inherited dimensions. 
dims_start = pretty_print("Dimensions:", col_width) dims_values = dim_summary_limited( dim_sizes, col_width=col_width + 1, max_rows=max_rows ) summary.append(f"{dims_start}({dims_values})") if node._node_coord_variables: node_coords = node.to_dataset(inherit=False).coords summary.append(coords_repr(node_coords, col_width=col_width, max_rows=max_rows)) if show_inherited and inherited_coords: summary.append( inherited_coords_repr(node, col_width=col_width, max_rows=max_rows) ) if show_dims: unindexed_dims_str = unindexed_dims_repr( dim_sizes, node.coords, max_rows=max_rows ) if unindexed_dims_str: summary.append(unindexed_dims_str) if node._data_variables: summary.append( data_vars_repr(node._data_variables, col_width=col_width, max_rows=max_rows) ) # TODO: only show indexes defined at this node, with a separate section for # inherited indexes (if show_inherited=True) display_default_indexes = _get_boolean_with_default( "display_default_indexes", False ) xindexes = filter_nondefault_indexes( _get_indexes_dict(node.xindexes), not display_default_indexes ) if xindexes: summary.append(indexes_repr(xindexes, max_rows=max_rows)) if node.attrs: summary.append(attrs_repr(node.attrs, max_rows=max_rows)) return "\n".join(summary) def datatree_repr(dt: DataTree) -> str: """A printable representation of the structure of this entire tree.""" max_children = OPTIONS["display_max_children"] renderer = RenderDataTree(dt, maxchildren=max_children) name_info = "" if dt.name is None else f" {dt.name!r}" header = f"" lines = [header] show_inherited = True for pre, fill, node in renderer: if isinstance(node, str): lines.append(f"{fill}{node}") continue node_repr = _datatree_node_repr(node, show_inherited=show_inherited) show_inherited = False # only show inherited coords on the root raw_repr_lines = node_repr.splitlines() node_line = f"{pre}{raw_repr_lines[0]}" lines.append(node_line) for line in raw_repr_lines[1:]: if len(node.children) > 0: lines.append(f"{fill}{renderer.style.vertical}{line}") else: lines.append(f"{fill}{' ' * len(renderer.style.vertical)}{line}") return "\n".join(lines) def shorten_list_repr(items: Sequence, max_items: int) -> str: if len(items) <= max_items: return repr(items) else: first_half = repr(items[: max_items // 2])[ 1:-1 ] # Convert to string and remove brackets second_half = repr(items[-max_items // 2 :])[ 1:-1 ] # Convert to string and remove brackets return f"[{first_half}, ..., {second_half}]" def render_human_readable_nbytes( nbytes: int, /, *, attempt_constant_width: bool = False, ) -> str: """Renders simple human-readable byte count representation This is only a quick representation that should not be relied upon for precise needs. To get the exact byte count, please use the ``nbytes`` attribute directly. Parameters ---------- nbytes Byte count attempt_constant_width For reasonable nbytes sizes, tries to render a fixed-width representation. 
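For illustration, the formatting rules above imply results like the following
(indicative values, not verified output)::

    render_human_readable_nbytes(42)    # "42B"
    render_human_readable_nbytes(1234)  # "1kB"
    render_human_readable_nbytes(1234, attempt_constant_width=True)  # "  1kB"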
Returns ------- Human-readable representation of the byte count """ dividend = float(nbytes) divisor = 1000.0 last_unit_available = UNITS[-1] for unit in UNITS: if dividend < divisor or unit == last_unit_available: break dividend /= divisor dividend_str = f"{dividend:.0f}" unit_str = f"{unit}" if attempt_constant_width: dividend_str = dividend_str.rjust(3) unit_str = unit_str.ljust(2) string = f"{dividend_str}{unit_str}" return string xarray-2025.09.0/xarray/core/formatting_html.py000066400000000000000000000353561505620616400213660ustar00rootroot00000000000000from __future__ import annotations import uuid from collections import OrderedDict from collections.abc import Mapping from functools import lru_cache, partial from html import escape from importlib.resources import files from math import ceil from typing import TYPE_CHECKING, Literal from xarray.core.formatting import ( inherited_vars, inline_index_repr, inline_variable_array_repr, short_data_repr, ) from xarray.core.options import OPTIONS, _get_boolean_with_default STATIC_FILES = ( ("xarray.static.html", "icons-svg-inline.html"), ("xarray.static.css", "style.css"), ) if TYPE_CHECKING: from xarray.core.datatree import DataTree @lru_cache(None) def _load_static_files(): """Lazily load the resource files into memory the first time they are needed""" return [ files(package).joinpath(resource).read_text(encoding="utf-8") for package, resource in STATIC_FILES ] def short_data_repr_html(array) -> str: """Format "data" for DataArray and Variable.""" internal_data = getattr(array, "variable", array)._data if hasattr(internal_data, "_repr_html_"): return internal_data._repr_html_() text = escape(short_data_repr(array)) return f"
<pre>{text}</pre>
" def format_dims(dim_sizes, dims_with_index) -> str: if not dim_sizes: return "" dim_css_map = { dim: " class='xr-has-index'" if dim in dims_with_index else "" for dim in dim_sizes } dims_li = "".join( f"
<li><span{dim_css_map[dim]}>{escape(str(dim))}</span>: {size}</li>
" for dim, size in dim_sizes.items() ) return f"
<ul class='xr-dim-list'>{dims_li}</ul>
    " def summarize_attrs(attrs) -> str: attrs_dl = "".join( f"
<dt><span>{escape(str(k))} :</span></dt>
<dd>{escape(str(v))}</dd>
    " for k, v in attrs.items() ) return f"
<dl class='xr-attrs'>{attrs_dl}</dl>
    " def _icon(icon_name) -> str: # icon_name should be defined in xarray/static/html/icon-svg-inline.html return ( f"" ) def summarize_variable(name, var, is_index=False, dtype=None) -> str: variable = var.variable if hasattr(var, "variable") else var cssclass_idx = " class='xr-has-index'" if is_index else "" dims_str = f"({', '.join(escape(dim) for dim in var.dims)})" name = escape(str(name)) dtype = dtype or escape(str(var.dtype)) # "unique" ids required to expand/collapse subsections attrs_id = "attrs-" + str(uuid.uuid4()) data_id = "data-" + str(uuid.uuid4()) disabled = "" if len(var.attrs) else "disabled" preview = escape(inline_variable_array_repr(variable, 35)) attrs_ul = summarize_attrs(var.attrs) data_repr = short_data_repr_html(variable) attrs_icon = _icon("icon-file-text2") data_icon = _icon("icon-database") return ( f"
    {name}
    " f"
    {dims_str}
    " f"
    {dtype}
    " f"
    {preview}
    " f"" f"" f"" f"" f"
    {attrs_ul}
    " f"
    {data_repr}
    " ) def summarize_coords(variables) -> str: li_items = [] for k, v in variables.items(): li_content = summarize_variable(k, v, is_index=k in variables.xindexes) li_items.append(f"
<li class='xr-var-item'>{li_content}</li>
") vars_li = "".join(li_items) return f"
<ul class='xr-var-list'>{vars_li}</ul>
    " def summarize_vars(variables) -> str: vars_li = "".join( f"
<li class='xr-var-item'>{summarize_variable(k, v)}</li>
" for k, v in variables.items() ) return f"
<ul class='xr-var-list'>{vars_li}</ul>
    " def short_index_repr_html(index) -> str: if hasattr(index, "_repr_html_"): return index._repr_html_() return f"
<pre>{escape(repr(index))}</pre>
    " def summarize_index(coord_names, index) -> str: name = "
    ".join([escape(str(n)) for n in coord_names]) index_id = f"index-{uuid.uuid4()}" preview = escape(inline_index_repr(index, max_width=70)) details = short_index_repr_html(index) data_icon = _icon("icon-database") return ( f"
    {name}
    " f"
    {preview}
    " # need empty input + label here to conform to the fixed CSS grid layout f"" f"" f"" f"" f"
    {details}
    " ) def summarize_indexes(indexes) -> str: indexes_li = "".join( f"
<li class='xr-var-item'>{summarize_index(v, i)}</li>
" for v, i in indexes.items() ) return f"
<ul class='xr-var-list'>{indexes_li}</ul>
    " def collapsible_section( name, inline_details="", details="", n_items=None, enabled=True, collapsed=False ) -> str: # "unique" id to expand/collapse the section data_id = "section-" + str(uuid.uuid4()) has_items = n_items is not None and n_items n_items_span = "" if n_items is None else f" ({n_items})" enabled = "" if enabled and has_items else "disabled" collapsed = "" if collapsed or not has_items else "checked" tip = " title='Expand/collapse section'" if enabled else "" return ( f"" f"" f"
    {inline_details}
    " f"
    {details}
    " ) def _mapping_section( mapping, name, details_func, max_items_collapse, expand_option_name, enabled=True, max_option_name: Literal["display_max_children"] | None = None, ) -> str: n_items = len(mapping) expanded = _get_boolean_with_default( expand_option_name, n_items < max_items_collapse ) collapsed = not expanded inline_details = "" if max_option_name and max_option_name in OPTIONS: max_items = int(OPTIONS[max_option_name]) if n_items > max_items: inline_details = f"({max_items}/{n_items})" return collapsible_section( name, inline_details=inline_details, details=details_func(mapping), n_items=n_items, enabled=enabled, collapsed=collapsed, ) def dim_section(obj) -> str: dim_list = format_dims(obj.sizes, obj.xindexes.dims) return collapsible_section( "Dimensions", inline_details=dim_list, enabled=False, collapsed=True ) def array_section(obj) -> str: # "unique" id to expand/collapse the section data_id = "section-" + str(uuid.uuid4()) collapsed = ( "checked" if _get_boolean_with_default("display_expand_data", default=True) else "" ) variable = getattr(obj, "variable", obj) preview = escape(inline_variable_array_repr(variable, max_width=70)) data_repr = short_data_repr_html(obj) data_icon = _icon("icon-database") return ( "
    " f"" f"" f"
    {preview}
    " f"
    {data_repr}
    " "
    " ) coord_section = partial( _mapping_section, name="Coordinates", details_func=summarize_coords, max_items_collapse=25, expand_option_name="display_expand_coords", ) datavar_section = partial( _mapping_section, name="Data variables", details_func=summarize_vars, max_items_collapse=15, expand_option_name="display_expand_data_vars", ) index_section = partial( _mapping_section, name="Indexes", details_func=summarize_indexes, max_items_collapse=0, expand_option_name="display_expand_indexes", ) attr_section = partial( _mapping_section, name="Attributes", details_func=summarize_attrs, max_items_collapse=10, expand_option_name="display_expand_attrs", ) def _get_indexes_dict(indexes): return { tuple(index_vars.keys()): idx for idx, index_vars in indexes.group_by_index() } def _obj_repr(obj, header_components, sections): """Return HTML repr of an xarray object. If CSS is not injected (untrusted notebook), fallback to the plain text repr. """ header = f"
<div class='xr-header'>{''.join(h for h in header_components)}</div>
    " sections = "".join(f"
<li class='xr-section-item'>{s}</li>
" for s in sections) icons_svg, css_style = _load_static_files() return ( "
    " f"{icons_svg}" f"
    {escape(repr(obj))}
    " "" "
    " ) def array_repr(arr) -> str: dims = OrderedDict((k, v) for k, v in zip(arr.dims, arr.shape, strict=True)) if hasattr(arr, "xindexes"): indexed_dims = arr.xindexes.dims else: indexed_dims = {} obj_type = f"xarray.{type(arr).__name__}" arr_name = f"'{arr.name}'" if getattr(arr, "name", None) else "" header_components = [ f"
<div class='xr-obj-type'>{obj_type}</div>
", f"
<div class='xr-array-name'>{arr_name}</div>
    ", format_dims(dims, indexed_dims), ] sections = [array_section(arr)] if hasattr(arr, "coords"): sections.append(coord_section(arr.coords)) if hasattr(arr, "xindexes"): indexes = _get_indexes_dict(arr.xindexes) sections.append(index_section(indexes)) sections.append(attr_section(arr.attrs)) return _obj_repr(arr, header_components, sections) def dataset_repr(ds) -> str: obj_type = f"xarray.{type(ds).__name__}" header_components = [f"
<div class='xr-obj-type'>{escape(obj_type)}</div>
    "] sections = [ dim_section(ds), coord_section(ds.coords), datavar_section(ds.data_vars), index_section(_get_indexes_dict(ds.xindexes)), attr_section(ds.attrs), ] return _obj_repr(ds, header_components, sections) def summarize_datatree_children(children: Mapping[str, DataTree]) -> str: MAX_CHILDREN = OPTIONS["display_max_children"] n_children = len(children) children_html = [] for i, (n, c) in enumerate(children.items()): if i < ceil(MAX_CHILDREN / 2) or i >= ceil(n_children - MAX_CHILDREN / 2): is_last = i == (n_children - 1) children_html.append( _wrap_datatree_repr(datatree_node_repr(n, c), end=is_last) ) elif n_children > MAX_CHILDREN and i == ceil(MAX_CHILDREN / 2): children_html.append("
    ...
    ") return "".join( [ "
    ", "".join(children_html), "
    ", ] ) children_section = partial( _mapping_section, name="Groups", details_func=summarize_datatree_children, max_items_collapse=1, max_option_name="display_max_children", expand_option_name="display_expand_groups", ) inherited_coord_section = partial( _mapping_section, name="Inherited coordinates", details_func=summarize_coords, max_items_collapse=25, expand_option_name="display_expand_coords", ) def datatree_node_repr(group_title: str, node: DataTree, show_inherited=False) -> str: from xarray.core.coordinates import Coordinates header_components = [f"
<div class='xr-obj-type'>{escape(group_title)}</div>
    "] ds = node._to_dataset_view(rebuild_dims=False, inherit=True) node_coords = node.to_dataset(inherit=False).coords # use this class to get access to .xindexes property inherited_coords = Coordinates( coords=inherited_vars(node._coord_variables), indexes=inherited_vars(node._indexes), ) sections = [ children_section(node.children), dim_section(ds), coord_section(node_coords), ] # only show inherited coordinates on the root if show_inherited: sections.append(inherited_coord_section(inherited_coords)) sections += [ datavar_section(ds.data_vars), attr_section(ds.attrs), ] return _obj_repr(ds, header_components, sections) def _wrap_datatree_repr(r: str, end: bool = False) -> str: """ Wrap HTML representation with a tee to the left of it. Enclosing HTML tag is a
    with :code:`display: inline-grid` style. Turns: [ title ] | details | |_____________| into (A): |─ [ title ] | | details | | |_____________| or (B): └─ [ title ] | details | |_____________| Parameters ---------- r: str HTML representation to wrap. end: bool Specify if the line on the left should continue or end. Default is True. Returns ------- str Wrapped HTML representation. Tee color is set to the variable :code:`--xr-border-color`. """ # height of line end = bool(end) height = "100%" if end is False else "1.2em" return "".join( [ "
    ", "
    ", "
    ", "
    ", "
    ", "
    ", r, "
    ", "
    ", ] ) def datatree_repr(dt: DataTree) -> str: obj_type = f"xarray.{type(dt).__name__}" return datatree_node_repr(obj_type, dt, show_inherited=True) xarray-2025.09.0/xarray/core/groupby.py000066400000000000000000002057631505620616400176600ustar00rootroot00000000000000from __future__ import annotations import copy import functools import itertools import warnings from collections.abc import Callable, Hashable, Iterator, Mapping, Sequence from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Generic, Literal, Union, cast import numpy as np import pandas as pd from packaging.version import Version from xarray.computation import ops from xarray.computation.arithmetic import ( DataArrayGroupbyArithmetic, DatasetGroupbyArithmetic, ) from xarray.core import dtypes, duck_array_ops, nputils from xarray.core._aggregations import ( DataArrayGroupByAggregations, DatasetGroupByAggregations, ) from xarray.core.common import ImplementsArrayReduce, ImplementsDatasetReduce from xarray.core.coordinates import Coordinates, coordinates_from_variable from xarray.core.duck_array_ops import where from xarray.core.formatting import format_array_flat from xarray.core.indexes import ( PandasMultiIndex, filter_indexes_from_coords, ) from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.types import ( Dims, QuantileMethods, T_DataArray, T_DataWithCoords, T_Xarray, ) from xarray.core.utils import ( FrozenMappingWarningOnValuesAccess, contains_only_chunked_or_numpy, either_dict_or_kwargs, emit_user_level_warning, hashable, is_scalar, maybe_wrap_array, module_available, peek_at, ) from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import align, broadcast from xarray.structure.concat import concat from xarray.structure.merge import merge_coords if TYPE_CHECKING: from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( GroupIndex, GroupIndices, GroupInput, GroupKey, T_Chunks, ) from xarray.core.utils import Frozen from xarray.groupers import EncodedGroups, Grouper def check_reduce_dims(reduce_dims, dimensions): if reduce_dims is not ...: if is_scalar(reduce_dims): reduce_dims = [reduce_dims] if any(dim not in dimensions for dim in reduce_dims): raise ValueError( f"cannot reduce over dimensions {reduce_dims!r}. expected either '...' " f"to reduce over all dimensions or one or more of {dimensions!r}. " f"Alternatively, install the `flox` package. 
" ) def _codes_to_group_indices(codes: np.ndarray, N: int) -> GroupIndices: """Converts integer codes for groups to group indices.""" assert codes.ndim == 1 groups: GroupIndices = tuple([] for _ in range(N)) for n, g in enumerate(codes): if g >= 0: groups[g].append(n) return groups def _dummy_copy(xarray_obj): from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if isinstance(xarray_obj, Dataset): res = Dataset( { k: dtypes.get_fill_value(v.dtype) for k, v in xarray_obj.data_vars.items() }, { k: dtypes.get_fill_value(v.dtype) for k, v in xarray_obj.coords.items() if k not in xarray_obj.dims }, xarray_obj.attrs, ) elif isinstance(xarray_obj, DataArray): res = DataArray( dtypes.get_fill_value(xarray_obj.dtype), { k: dtypes.get_fill_value(v.dtype) for k, v in xarray_obj.coords.items() if k not in xarray_obj.dims }, dims=[], name=xarray_obj.name, attrs=xarray_obj.attrs, ) else: # pragma: no cover raise AssertionError return res def _is_one_or_none(obj) -> bool: return obj == 1 or obj is None def _consolidate_slices(slices: list[slice]) -> list[slice]: """Consolidate adjacent slices in a list of slices.""" result: list[slice] = [] last_slice = slice(None) for slice_ in slices: if not isinstance(slice_, slice): raise ValueError(f"list element is not a slice: {slice_!r}") if ( result and last_slice.stop == slice_.start and _is_one_or_none(last_slice.step) and _is_one_or_none(slice_.step) ): last_slice = slice(last_slice.start, slice_.stop, slice_.step) result[-1] = last_slice else: result.append(slice_) last_slice = slice_ return result def _inverse_permutation_indices(positions, N: int | None = None) -> np.ndarray | None: """Like inverse_permutation, but also handles slices. Parameters ---------- positions : list of ndarray or slice If slice objects, all are assumed to be slices. Returns ------- np.ndarray of indices or None, if no permutation is necessary. """ if not positions: return None if isinstance(positions[0], slice): positions = _consolidate_slices(positions) if positions == slice(None): return None positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions] newpositions = nputils.inverse_permutation(np.concatenate(positions), N) return newpositions[newpositions != -1] class _DummyGroup(Generic[T_Xarray]): """Class for keeping track of grouped dimensions without coordinates. Should not be user visible. """ __slots__ = ("coords", "dataarray", "name", "size") def __init__(self, obj: T_Xarray, name: Hashable, coords) -> None: self.name = name self.coords = coords self.size = obj.sizes[name] @property def dims(self) -> tuple[Hashable]: return (self.name,) @property def ndim(self) -> Literal[1]: return 1 @property def values(self) -> range: return range(self.size) @property def data(self) -> np.ndarray: return np.arange(self.size, dtype=int) def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: if copy is False: raise NotImplementedError(f"An array copy is necessary, got {copy = }.") return np.arange(self.size) @property def shape(self) -> tuple[int, ...]: return (self.size,) @property def attrs(self) -> dict: return {} def __getitem__(self, key): if isinstance(key, tuple): (key,) = key return self.values[key] def to_index(self) -> pd.Index: # could be pd.RangeIndex? 
return pd.Index(np.arange(self.size)) def copy(self, deep: bool = True, data: Any = None): raise NotImplementedError def to_dataarray(self) -> DataArray: from xarray.core.dataarray import DataArray return DataArray( data=self.data, dims=(self.name,), coords=self.coords, name=self.name ) def to_array(self) -> DataArray: """Deprecated version of to_dataarray.""" return self.to_dataarray() T_Group = Union["T_DataArray", _DummyGroup] def _ensure_1d( group: T_Group, obj: T_DataWithCoords ) -> tuple[ T_Group, T_DataWithCoords, Hashable | None, list[Hashable], ]: # 1D cases: do nothing if isinstance(group, _DummyGroup) or group.ndim == 1: return group, obj, None, [] from xarray.core.dataarray import DataArray if isinstance(group, DataArray): for dim in set(group.dims) - set(obj.dims): obj = obj.expand_dims(dim) # try to stack the dims of the group into a single dim orig_dims = group.dims stacked_dim = "stacked_" + "_".join(map(str, orig_dims)) # these dimensions get created by the stack operation inserted_dims = [dim for dim in group.dims if dim not in group.coords] # `newgroup` construction is optimized so we don't create an index unnecessarily, # or stack any non-dim coords unnecessarily newgroup = DataArray(group.variable.stack({stacked_dim: orig_dims})) newobj = obj.stack({stacked_dim: orig_dims}) return newgroup, newobj, stacked_dim, inserted_dims raise TypeError(f"group must be DataArray or _DummyGroup, got {type(group)!r}.") @dataclass class ResolvedGrouper(Generic[T_DataWithCoords]): """ Wrapper around a Grouper object. The Grouper object represents an abstract instruction to group an object. The ResolvedGrouper object is a concrete version that contains all the common logic necessary for a GroupBy problem including the intermediates necessary for executing a GroupBy calculation. Specialization to the grouping problem at hand, is accomplished by calling the `factorize` method on the encapsulated Grouper object. This class is private API, while Groupers are public. """ grouper: Grouper group: T_Group obj: T_DataWithCoords eagerly_compute_group: Literal[False] | None = field(repr=False, default=None) # returned by factorize: encoded: EncodedGroups = field(init=False, repr=False) @property def full_index(self) -> pd.Index: return self.encoded.full_index @property def codes(self) -> DataArray: return self.encoded.codes @property def unique_coord(self) -> Variable | _DummyGroup: return self.encoded.unique_coord def __post_init__(self) -> None: # This copy allows the BinGrouper.factorize() method # to update BinGrouper.bins when provided as int, using the output # of pd.cut # We do not want to modify the original object, since the same grouper # might be used multiple times. from xarray.groupers import BinGrouper, UniqueGrouper self.grouper = copy.deepcopy(self.grouper) self.group = _resolve_group(self.obj, self.group) if self.eagerly_compute_group: raise ValueError( f""""Eagerly computing the DataArray you're grouping by ({self.group.name!r}) " has been removed. Please load this array's data manually using `.compute` or `.load`. To intentionally avoid eager loading, either (1) specify `.groupby({self.group.name}=UniqueGrouper(labels=...))` or (2) pass explicit bin edges using ``bins`` or `.groupby({self.group.name}=BinGrouper(bins=...))`; as appropriate.""" ) if self.eagerly_compute_group is not None: emit_user_level_warning( "Passing `eagerly_compute_group` is now deprecated. 
It has no effect.", DeprecationWarning, ) if not isinstance(self.group, _DummyGroup) and is_chunked_array( self.group.variable._data ): # This requires a pass to discover the groups present if isinstance(self.grouper, UniqueGrouper) and self.grouper.labels is None: raise ValueError( "Please pass `labels` to UniqueGrouper when grouping by a chunked array." ) # this requires a pass to compute the bin edges if isinstance(self.grouper, BinGrouper) and isinstance( self.grouper.bins, int ): raise ValueError( "Please pass explicit bin edges to BinGrouper using the ``bins`` kwarg" "when grouping by a chunked array." ) self.encoded = self.grouper.factorize(self.group) @property def name(self) -> Hashable: """Name for the grouped coordinate after reduction.""" # the name has to come from unique_coord because we need `_bins` suffix for BinGrouper (name,) = self.encoded.unique_coord.dims return name @property def size(self) -> int: """Number of groups.""" return len(self) def __len__(self) -> int: """Number of groups.""" return len(self.encoded.full_index) def _parse_group_and_groupers( obj: T_Xarray, group: GroupInput, groupers: dict[str, Grouper], *, eagerly_compute_group: Literal[False] | None, ) -> tuple[ResolvedGrouper, ...]: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable from xarray.groupers import Grouper, UniqueGrouper if group is not None and groupers: raise ValueError( "Providing a combination of `group` and **groupers is not supported." ) if group is None and not groupers: raise ValueError("Either `group` or `**groupers` must be provided.") if isinstance(group, np.ndarray | pd.Index): raise TypeError( f"`group` must be a DataArray. Received {type(group).__name__!r} instead" ) if isinstance(group, Grouper): raise TypeError( "Cannot group by a Grouper object. " f"Instead use `.groupby(var_name={type(group).__name__}(...))`. " "You may need to assign the variable you're grouping by as a coordinate using `assign_coords`." ) if isinstance(group, Mapping): grouper_mapping = either_dict_or_kwargs(group, groupers, "groupby") group = None rgroupers: tuple[ResolvedGrouper, ...] if isinstance(group, DataArray | Variable): rgroupers = ( ResolvedGrouper( UniqueGrouper(), group, obj, eagerly_compute_group=eagerly_compute_group ), ) else: if group is not None: if TYPE_CHECKING: assert isinstance(group, str | Sequence) group_iter: Sequence[Hashable] = ( (group,) if isinstance(group, str) else group ) grouper_mapping = {g: UniqueGrouper() for g in group_iter} elif groupers: grouper_mapping = cast("Mapping[Hashable, Grouper]", groupers) rgroupers = tuple( ResolvedGrouper( grouper, group, obj, eagerly_compute_group=eagerly_compute_group ) for group, grouper in grouper_mapping.items() ) return rgroupers def _validate_groupby_squeeze(squeeze: Literal[False]) -> None: # While we don't generally check the type of every arg, passing # multiple dimensions as multiple arguments is common enough, and the # consequences hidden enough (strings evaluate as true) to warrant # checking here. # A future version could make squeeze kwarg only, but would face # backward-compat issues. 
if squeeze is not False: raise TypeError(f"`squeeze` must be False, but {squeeze!r} was supplied.") def _resolve_group( obj: T_DataWithCoords, group: T_Group | Hashable | IndexVariable ) -> T_Group: from xarray.core.dataarray import DataArray error_msg = ( "the group variable's length does not " "match the length of this variable along its " "dimensions" ) newgroup: T_Group if isinstance(group, DataArray): try: align(obj, group, join="exact", copy=False) except ValueError as err: raise ValueError(error_msg) from err newgroup = group.copy(deep=False) newgroup.name = group.name or "group" elif isinstance(group, IndexVariable): # This assumption is built in to _ensure_1d. if group.ndim != 1: raise ValueError( "Grouping by multi-dimensional IndexVariables is not allowed." "Convert to and pass a DataArray instead." ) (group_dim,) = group.dims if len(group) != obj.sizes[group_dim]: raise ValueError(error_msg) newgroup = DataArray(group) else: if not hashable(group): raise TypeError( "`group` must be an xarray.DataArray or the " "name of an xarray variable or dimension. " f"Received {group!r} instead." ) group_da: DataArray = obj[group] if group_da.name not in obj._indexes and group_da.name in obj.dims: # DummyGroups should not appear on groupby results newgroup = _DummyGroup(obj, group_da.name, group_da.coords) else: newgroup = group_da if newgroup.size == 0: raise ValueError(f"{newgroup.name} must not be empty") return newgroup @dataclass class ComposedGrouper: """ Helper class for multi-variable GroupBy. This satisfies the Grouper interface, but is awkward to wrap in ResolvedGrouper. For one, it simply re-infers a new EncodedGroups using known information in existing ResolvedGroupers. So passing in a `group` (hard to define), and `obj` (pointless) is not useful. """ groupers: tuple[ResolvedGrouper, ...] def factorize(self) -> EncodedGroups: from xarray.groupers import EncodedGroups groupers = self.groupers # At this point all arrays have been factorized. codes = tuple(grouper.codes for grouper in groupers) shape = tuple(grouper.size for grouper in groupers) masks = tuple((code == -1) for code in codes) # We broadcast the codes against each other broadcasted_codes = broadcast(*codes) # This fully broadcasted DataArray is used as a template later first_codes = broadcasted_codes[0] # Now we convert to a single variable GroupBy problem _flatcodes = np.ravel_multi_index( tuple(codes.data for codes in broadcasted_codes), shape, mode="wrap" ) # NaNs; as well as values outside the bins are coded by -1 # Restore these after the raveling broadcasted_masks = broadcast(*masks) mask = functools.reduce(np.logical_or, broadcasted_masks) # type: ignore[arg-type] _flatcodes = where(mask.data, -1, _flatcodes) full_index = pd.MultiIndex.from_product( [list(grouper.full_index.values) for grouper in groupers], names=tuple(grouper.name for grouper in groupers), ) if not full_index.is_unique: raise ValueError( "The output index for the GroupBy is non-unique. " "This is a bug in the Grouper provided." ) # This will be unused when grouping by dask arrays, so skip.. if not is_chunked_array(_flatcodes): # Constructing an index from the product is wrong when there are missing groups # (e.g. binning, resampling). Account for that now. 
midx = full_index[np.sort(pd.unique(_flatcodes[~mask]))] group_indices = _codes_to_group_indices(_flatcodes.ravel(), len(full_index)) else: midx = full_index group_indices = None dim_name = "stacked_" + "_".join(str(grouper.name) for grouper in groupers) coords = Coordinates.from_pandas_multiindex(midx, dim=dim_name) for grouper in groupers: coords.variables[grouper.name].attrs = grouper.group.attrs return EncodedGroups( codes=first_codes.copy(data=_flatcodes), full_index=full_index, group_indices=group_indices, unique_coord=Variable(dims=(dim_name,), data=midx.values), coords=coords, ) class GroupBy(Generic[T_Xarray]): """A object that implements the split-apply-combine pattern. Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over (unique_value, grouped_array) pairs, but the main way to interact with a groupby object are with the `apply` or `reduce` methods. You can also directly call numpy methods like `mean` or `std`. You should create a GroupBy object by using the `DataArray.groupby` or `Dataset.groupby` methods. See Also -------- Dataset.groupby DataArray.groupby """ __slots__ = ( "_by_chunked", "_codes", "_dims", "_group_dim", # cached properties "_groups", "_inserted_dims", "_len", "_obj", # Save unstacked object for flox "_original_obj", "_restore_coord_dims", "_sizes", "_stacked_dim", "encoded", # stack nD vars "group1d", "groupers", ) _obj: T_Xarray groupers: tuple[ResolvedGrouper, ...] _restore_coord_dims: bool _original_obj: T_Xarray _group_indices: GroupIndices _codes: tuple[DataArray, ...] _group_dim: Hashable _by_chunked: bool _groups: dict[GroupKey, GroupIndex] | None _dims: tuple[Hashable, ...] | Frozen[Hashable, int] | None _sizes: Mapping[Hashable, int] | None _len: int # _ensure_1d: group1d: T_Group _stacked_dim: Hashable | None _inserted_dims: list[Hashable] encoded: EncodedGroups def __init__( self, obj: T_Xarray, groupers: tuple[ResolvedGrouper, ...], restore_coord_dims: bool = True, ) -> None: """Create a GroupBy object Parameters ---------- obj : Dataset or DataArray Object to group. grouper : Grouper Grouper object restore_coord_dims : bool, default: True If True, also restore the dimension order of multi-dimensional coordinates. """ self._original_obj = obj self._restore_coord_dims = restore_coord_dims self.groupers = groupers if len(groupers) == 1: (grouper,) = groupers self.encoded = grouper.encoded else: if any( isinstance(obj._indexes.get(grouper.name, None), PandasMultiIndex) for grouper in groupers ): raise NotImplementedError( "Grouping by multiple variables, one of which " "wraps a Pandas MultiIndex, is not supported yet." ) self.encoded = ComposedGrouper(groupers).factorize() # specification for the groupby operation # TODO: handle obj having variables that are not present on any of the groupers # simple broadcasting fails for ExtensionArrays. codes = self.encoded.codes self._by_chunked = is_chunked_array(codes._variable._data) if not self._by_chunked: (self.group1d, self._obj, self._stacked_dim, self._inserted_dims) = ( _ensure_1d(group=codes, obj=obj) ) (self._group_dim,) = self.group1d.dims else: self.group1d = None # This transpose preserves dim order behaviour self._obj = obj.transpose(..., *codes.dims) self._stacked_dim = None self._inserted_dims = [] self._group_dim = None # cached attributes self._groups = None self._dims = None self._sizes = None self._len = len(self.encoded.full_index) @property def sizes(self) -> Mapping[Hashable, int]: """Ordered mapping from dimension names to lengths. Immutable. 
See Also -------- DataArray.sizes Dataset.sizes """ if self._sizes is None: index = self.encoded.group_indices[0] self._sizes = self._obj.isel({self._group_dim: index}).sizes return self._sizes def shuffle_to_chunks(self, chunks: T_Chunks = None) -> T_Xarray: """ Sort or "shuffle" the underlying object. "Shuffle" means the object is sorted so that all group members occur sequentially, in the same chunk. Multiple groups may occur in the same chunk. This method is particularly useful for chunked arrays (e.g. dask, cubed). particularly when you need to map a function that requires all members of a group to be present in a single chunk. For chunked array types, the order of appearance is not guaranteed, but will depend on the input chunking. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or tuple of int, optional How to adjust chunks along dimensions not present in the array being grouped by. Returns ------- DataArrayGroupBy or DatasetGroupBy Examples -------- >>> import dask.array >>> da = xr.DataArray( ... dims="x", ... data=dask.array.arange(10, chunks=3), ... coords={"x": [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]}, ... name="a", ... ) >>> shuffled = da.groupby("x").shuffle_to_chunks() >>> shuffled Size: 80B dask.array Coordinates: * x (x) int64 80B 0 1 1 1 2 2 2 3 3 3 >>> shuffled.groupby("x").quantile(q=0.5).compute() Size: 32B array([9., 3., 4., 5.]) Coordinates: quantile float64 8B 0.5 * x (x) int64 32B 0 1 2 3 See Also -------- dask.dataframe.DataFrame.shuffle dask.array.shuffle """ self._raise_if_by_is_chunked() return self._shuffle_obj(chunks) def _shuffle_obj(self, chunks: T_Chunks) -> T_Xarray: from xarray.core.dataarray import DataArray was_array = isinstance(self._obj, DataArray) as_dataset = self._obj._to_temp_dataset() if was_array else self._obj for grouper in self.groupers: if grouper.name not in as_dataset._variables: as_dataset.coords[grouper.name] = grouper.group shuffled = as_dataset._shuffle( dim=self._group_dim, indices=self.encoded.group_indices, chunks=chunks ) unstacked: Dataset = self._maybe_unstack(shuffled) if was_array: return self._obj._from_temp_dataset(unstacked) else: return unstacked # type: ignore[return-value] def map( self, func: Callable, args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> T_Xarray: raise NotImplementedError() def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> T_Xarray: raise NotImplementedError() def _raise_if_by_is_chunked(self): if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Either load the array in to memory prior to grouping using .load or .compute, " " or explore another way of applying your function, " "potentially using the `flox` package." ) def _raise_if_not_single_group(self): if len(self.groupers) != 1: raise NotImplementedError( "This method is not supported for grouping by multiple variables yet." ) @property def groups(self) -> dict[GroupKey, GroupIndex]: """ Mapping from group labels to indices. The indices can be used to index the underlying object. 
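# --- Illustrative sketch (not part of the upstream xarray source) ---
# ``GroupBy.groups`` maps each group label to the positional indices of its
# members, so the indices can be fed straight back into ``isel`` (or used via
# ``gb[label]``). Example data is made up.
import xarray as xr

da = xr.DataArray([10, 20, 30, 40], coords={"x": ["a", "b", "a", "b"]}, dims="x")
gb = da.groupby("x")
print(gb.groups)       # -> mapping like {'a': [0, 2], 'b': [1, 3]}
print(gb["a"].values)  # members of group 'a' -> [10, 30]
# --- end of illustrative sketch ---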
""" # provided to mimic pandas.groupby if self._groups is None: self._groups = dict( zip( self.encoded.unique_coord.data, self.encoded.group_indices, strict=True, ) ) return self._groups def __getitem__(self, key: GroupKey) -> T_Xarray: """ Get DataArray or Dataset corresponding to a particular group label. """ self._raise_if_by_is_chunked() return self._obj.isel({self._group_dim: self.groups[key]}) def __len__(self) -> int: return self._len def __iter__(self) -> Iterator[tuple[GroupKey, T_Xarray]]: return zip(self.encoded.unique_coord.data, self._iter_grouped(), strict=True) def __repr__(self) -> str: text = ( f"<{self.__class__.__name__}, " f"grouped over {len(self.groupers)} grouper(s)," f" {self._len} groups in total:" ) for grouper in self.groupers: coord = grouper.unique_coord labels = ", ".join(format_array_flat(coord, 30).split()) text += ( f"\n {grouper.name!r}: {type(grouper.grouper).__name__}({grouper.group.name!r}), " f"{coord.size}/{grouper.full_index.size} groups with labels {labels}" ) return text + ">" def _iter_grouped(self) -> Iterator[T_Xarray]: """Iterate over each element in this group""" self._raise_if_by_is_chunked() for indices in self.encoded.group_indices: if indices: yield self._obj.isel({self._group_dim: indices}) def _infer_concat_args(self, applied_example): if self._group_dim in applied_example.dims: coord = self.group1d positions = self.encoded.group_indices else: coord = self.encoded.unique_coord positions = None (dim,) = coord.dims return dim, positions def _binary_op(self, other, f, reflexive=False): from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset g = f if not reflexive else lambda x, y: f(y, x) self._raise_if_not_single_group() (grouper,) = self.groupers obj = self._original_obj name = grouper.name group = grouper.group codes = self.encoded.codes dims = group.dims if isinstance(group, _DummyGroup): group = coord = group.to_dataarray() else: coord = grouper.unique_coord if isinstance(coord, Variable): assert coord.ndim == 1 (coord_dim,) = coord.dims # TODO: explicitly create Index here coord = DataArray(coord, coords={coord_dim: coord.data}) if not isinstance(other, Dataset | DataArray): raise TypeError( "GroupBy objects only support binary ops " "when the other argument is a Dataset or " "DataArray" ) if name not in other.dims: raise ValueError( "incompatible dimensions for a grouped " f"binary operation: the group variable {name!r} " "is not a dimension on the other argument " f"with dimensions {other.dims!r}" ) # Broadcast out scalars for backwards compatibility # TODO: get rid of this when fixing GH2145 for var in other.coords: if other[var].ndim == 0: other[var] = ( other[var].drop_vars(var).expand_dims({name: other.sizes[name]}) ) # need to handle NaNs in group or elements that don't belong to any bins mask = codes == -1 if mask.any(): obj = obj.where(~mask, drop=True) group = group.where(~mask, drop=True) codes = codes.where(~mask, drop=True).astype(int) # if other is dask-backed, that's a hint that the # "expanded" dataset is too big to hold in memory. # this can be the case when `other` was read from disk # and contains our lazy indexing classes # We need to check for dask-backed Datasets # so utils.is_duck_dask_array does not work for this check if obj.chunks and not other.chunks: # TODO: What about datasets with some dask vars, and others not? 
# This handles dims other than `name`` chunks = {k: v for k, v in obj.chunksizes.items() if k in other.dims} # a chunk size of 1 seems reasonable since we expect individual elements of # other to be repeated multiple times across the reduced dimension(s) chunks[name] = 1 other = other.chunk(chunks) # codes are defined for coord, so we align `other` with `coord` # before indexing other, _ = align(other, coord, join="right", copy=False) expanded = other.isel({name: codes}) result = g(obj, expanded) if group.ndim > 1: # backcompat: # TODO: get rid of this when fixing GH2145 for var in set(obj.coords) - set(obj.xindexes): if set(obj[var].dims) < set(group.dims): result[var] = obj[var].reset_coords(drop=True).broadcast_like(group) if isinstance(result, Dataset) and isinstance(obj, Dataset): for var in set(result): for d in dims: if d not in obj[var].dims: result[var] = result[var].transpose(d, ...) return result def _restore_dim_order(self, stacked): raise NotImplementedError def _maybe_reindex(self, combined): """Reindexing is needed in two cases: 1. Our index contained empty groups (e.g., from a resampling or binning). If we reduced on that dimension, we want to restore the full index. 2. We use a MultiIndex for multi-variable GroupBy. The MultiIndex stores each level's labels in sorted order which are then assigned on unstacking. So we need to restore the correct order here. """ has_missing_groups = ( self.encoded.unique_coord.size != self.encoded.full_index.size ) indexers = {} for grouper in self.groupers: index = combined._indexes.get(grouper.name, None) if (has_missing_groups and index is not None) or ( len(self.groupers) > 1 and not isinstance(grouper.full_index, pd.RangeIndex) and not index.index.equals(grouper.full_index) ): indexers[grouper.name] = grouper.full_index if indexers: combined = combined.reindex(**indexers) return combined def _maybe_unstack(self, obj): """This gets called if we are applying on an array with a multidimensional group.""" from xarray.groupers import UniqueGrouper stacked_dim = self._stacked_dim if stacked_dim is not None and stacked_dim in obj.dims: inserted_dims = self._inserted_dims obj = obj.unstack(stacked_dim) for dim in inserted_dims: if dim in obj.coords: del obj.coords[dim] obj._indexes = filter_indexes_from_coords(obj._indexes, set(obj.coords)) elif len(self.groupers) > 1: # TODO: we could clean this up by setting the appropriate `stacked_dim` # and `inserted_dims` # if multiple groupers all share the same single dimension, then # we don't stack/unstack. Do that manually now. 
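# --- Illustrative sketch (not part of the upstream xarray source) ---
# Grouping by several variables (supported in recent xarray releases via a
# list of names) produces one result dimension per grouper; the internal
# flat "stacked" dimension is unstacked as in the code above. Made-up data.
import xarray as xr

da = xr.DataArray(
    [1.0, 2.0, 3.0, 4.0],
    coords={"a": ("t", ["x", "x", "y", "y"]), "b": ("t", [0, 1, 0, 1])},
    dims="t",
)
result = da.groupby(["a", "b"]).mean()
print(result.dims)  # -> ('a', 'b'): one dimension per grouper
# --- end of illustrative sketch ---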
dims_to_unstack = self.encoded.unique_coord.dims if all(dim in obj.dims for dim in dims_to_unstack): obj = obj.unstack(*dims_to_unstack) to_drop = [ grouper.name for grouper in self.groupers if isinstance(grouper.group, _DummyGroup) and isinstance(grouper.grouper, UniqueGrouper) ] obj = obj.drop_vars(to_drop) return obj def _flox_reduce( self, dim: Dims, keep_attrs: bool | None = None, **kwargs: Any, ) -> T_Xarray: """Adaptor function that translates our groupby API to that of flox.""" import flox from flox.xarray import xarray_reduce from xarray.core.dataset import Dataset obj = self._original_obj variables = ( {k: v.variable for k, v in obj.data_vars.items()} if isinstance(obj, Dataset) # type: ignore[redundant-expr] # seems to be a mypy bug else obj._coords ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if Version(flox.__version__) < Version("0.9") and not self._by_chunked: # preserve current strategy (approximately) for dask groupby # on older flox versions to prevent surprises. # flox >=0.9 will choose this on its own. kwargs.setdefault("method", "cohorts") midx_grouping_vars: tuple[Hashable, ...] = () for grouper in self.groupers: name = grouper.name maybe_midx = obj._indexes.get(name, None) if isinstance(maybe_midx, PandasMultiIndex): midx_grouping_vars += tuple(maybe_midx.index.names) + (name,) # For datasets, running a numeric-only reduction on non-numeric # variable will just drop it. non_numeric: dict[Hashable, Variable] if kwargs.pop("numeric_only", None): non_numeric = { name: var for name, var in variables.items() if ( not (np.issubdtype(var.dtype, np.number) or (var.dtype == np.bool_)) # this avoids dropping any levels of a MultiIndex, which raises # a warning and name not in midx_grouping_vars and name not in obj.dims ) } else: non_numeric = {} if "min_count" in kwargs: if kwargs["func"] not in ["sum", "prod"]: raise TypeError("Received an unexpected keyword argument 'min_count'") elif kwargs["min_count"] is None: # set explicitly to avoid unnecessarily accumulating count kwargs["min_count"] = 0 parsed_dim: tuple[Hashable, ...] if isinstance(dim, str): parsed_dim = (dim,) elif dim is None: parsed_dim_list = list() # preserve order for dim_ in itertools.chain( *(grouper.codes.dims for grouper in self.groupers) ): if dim_ not in parsed_dim_list: parsed_dim_list.append(dim_) parsed_dim = tuple(parsed_dim_list) elif dim is ...: parsed_dim = tuple(obj.dims) else: parsed_dim = tuple(dim) # Do this so we raise the same error message whether flox is present or not. # Better to control it here than in flox. for grouper in self.groupers: if any( d not in grouper.codes.dims and d not in obj.dims for d in parsed_dim ): raise ValueError(f"cannot reduce over dimensions {dim}.") has_missing_groups = ( self.encoded.unique_coord.size != self.encoded.full_index.size ) if self._by_chunked or has_missing_groups or kwargs.get("min_count", 0) > 0: # Xarray *always* returns np.nan when there are no observations in a group, # We can fake that here by forcing min_count=1 when it is not set. # This handles boolean reductions, and count # See GH8090, GH9398 # Note that `has_missing_groups=False` when `self._by_chunked is True`. # We *choose* to always do the masking, so that behaviour is predictable # in some way. The real solution is to expose fill_value as a kwarg, # and set appropriate defaults :/. 
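# --- Illustrative sketch (not part of the upstream xarray source) ---
# Why the fill_value/min_count defaults chosen below matter at the user
# level: groups (here, bins) with no observations come back as NaN rather
# than 0, even for ``sum``. Example data is made up.
import xarray as xr

da = xr.DataArray([1.0, 2.0, 3.0], coords={"x": [0.5, 0.6, 2.5]}, dims="x")
binned = da.groupby_bins("x", bins=[0, 1, 2, 3]).sum()
print(binned.values)  # -> [3.0, nan, 3.0]; the empty (1, 2] bin is NaN, not 0
# --- end of illustrative sketch ---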
kwargs.setdefault("fill_value", np.nan) kwargs.setdefault("min_count", 1) # pass RangeIndex as a hint to flox that `by` is already factorized expected_groups = tuple( pd.RangeIndex(len(grouper)) for grouper in self.groupers ) codes = tuple(g.codes for g in self.groupers) result = xarray_reduce( obj.drop_vars(non_numeric.keys()), *codes, dim=parsed_dim, expected_groups=expected_groups, isbin=False, keep_attrs=keep_attrs, **kwargs, ) # we did end up reducing over dimension(s) that are # in the grouped variable group_dims = set(grouper.group.dims) new_coords = [] to_drop = [] if group_dims & set(parsed_dim): for grouper in self.groupers: output_index = grouper.full_index if isinstance(output_index, pd.RangeIndex): # flox always assigns an index so we must drop it here if we don't need it. to_drop.append(grouper.name) continue # TODO: We can't simply use `self.encoded.coords` here because it corresponds to `unique_coord`, # NOT `full_index`. We would need to construct a new Coordinates object, that corresponds to `full_index`. new_coords.append( # Using IndexVariable here ensures we reconstruct PandasMultiIndex with # all associated levels properly. coordinates_from_variable( IndexVariable( dims=grouper.name, data=output_index, attrs=grouper.codes.attrs, ) ) ) result = result.assign_coords( Coordinates._construct_direct(*merge_coords(new_coords)) ).drop_vars(to_drop) # broadcast any non-dim coord variables that don't # share all dimensions with the grouper result_variables = ( result._variables if isinstance(result, Dataset) else result._coords ) to_broadcast: dict[Hashable, Variable] = {} for name, var in variables.items(): dims_set = set(var.dims) if ( dims_set <= set(parsed_dim) and (dims_set & set(result.dims)) and name not in result_variables ): to_broadcast[name] = var for name, var in to_broadcast.items(): if new_dims := tuple(d for d in parsed_dim if d not in var.dims): new_sizes = tuple( result.sizes.get(dim, obj.sizes.get(dim)) for dim in new_dims ) result[name] = var.set_dims( new_dims + var.dims, new_sizes + var.shape ).transpose(..., *result.dims) if not isinstance(result, Dataset): # only restore dimension order for arrays result = self._restore_dim_order(result) return result def fillna(self, value: Any) -> T_Xarray: """Fill missing values in this object by group. This operation follows the normal broadcasting and alignment rules that xarray uses for binary arithmetic, except the result is aligned to this object (``join='left'``) instead of aligned to the intersection of index coordinates (``join='inner'``). Parameters ---------- value Used to fill all matching missing values by group. Needs to be of a valid type for the wrapped object's fillna method. Returns ------- same type as the grouped object See Also -------- Dataset.fillna DataArray.fillna """ return ops.fillna(self, value) def quantile( self, q: ArrayLike, dim: Dims = None, *, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, ) -> T_Xarray: """Compute the qth quantile over each array in the groups and concatenate them together into a new array. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or Iterable of Hashable, optional Dimension(s) over which to apply quantile. Defaults to the grouped dimension. method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. 
The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool or None, default: None If True, the dataarray's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile. In either case a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile DataArray.quantile Examples -------- >>> da = xr.DataArray( ... [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]], ... coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]}, ... dims=("x", "y"), ... ) >>> ds = xr.Dataset({"a": da}) >>> da.groupby("x").quantile(0) Size: 64B array([[0.7, 4.2, 0.7, 1.5], [6.5, 7.3, 2.6, 1.9]]) Coordinates: * y (y) int64 32B 1 1 2 2 quantile float64 8B 0.0 * x (x) int64 16B 0 1 >>> ds.groupby("y").quantile(0, dim=...) Size: 40B Dimensions: (y: 2) Coordinates: quantile float64 8B 0.0 * y (y) int64 16B 1 2 Data variables: a (y) float64 16B 0.7 0.7 >>> da.groupby("x").quantile([0, 0.5, 1]) Size: 192B array([[[0.7 , 1. , 1.3 ], [4.2 , 6.3 , 8.4 ], [0.7 , 5.05, 9.4 ], [1.5 , 4.2 , 6.9 ]], [[6.5 , 6.5 , 6.5 ], [7.3 , 7.3 , 7.3 ], [2.6 , 2.6 , 2.6 ], [1.9 , 1.9 , 1.9 ]]]) Coordinates: * y (y) int64 32B 1 1 2 2 * quantile (quantile) float64 24B 0.0 0.5 1.0 * x (x) int64 16B 0 1 >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...) Size: 88B Dimensions: (y: 2, quantile: 3) Coordinates: * quantile (quantile) float64 24B 0.0 0.5 1.0 * y (y) int64 16B 1 2 Data variables: a (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4 References ---------- .. [1] R. J. Hyndman and Y. Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ # Dataset.quantile does this, do it for flox to ensure same output. q = np.asarray(q, dtype=np.float64) if ( method == "linear" and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) and module_available("flox", minversion="0.9.4") ): result = self._flox_reduce( func="quantile", q=q, dim=dim, keep_attrs=keep_attrs, skipna=skipna ) return result else: if dim is None: dim = (self._group_dim,) return self.map( self._obj.__class__.quantile, shortcut=False, q=q, dim=dim or self._group_dim, method=method, keep_attrs=keep_attrs, skipna=skipna, interpolation=interpolation, ) def where(self, cond, other=dtypes.NA) -> T_Xarray: """Return elements from `self` or `other` depending on `cond`. 
Parameters ---------- cond : DataArray or Dataset Locations at which to preserve this objects values. dtypes have to be `bool` other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, inserts missing values. Returns ------- same type as the grouped object See Also -------- Dataset.where """ return ops.where_method(self, cond, other) def _first_or_last( self, op: Literal["first" | "last"], skipna: bool | None, keep_attrs: bool | None, ): if all( isinstance(maybe_slice, slice) and (maybe_slice.stop == maybe_slice.start + 1) for maybe_slice in self.encoded.group_indices ): # NB. this is currently only used for reductions along an existing # dimension return self._obj if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if ( module_available("flox", minversion="0.10.0") and OPTIONS["use_flox"] and contains_only_chunked_or_numpy(self._obj) ): import flox.xrdtypes result = self._flox_reduce( dim=None, func=op, skipna=skipna, keep_attrs=keep_attrs, fill_value=flox.xrdtypes.NA, ) else: result = self.reduce( getattr(duck_array_ops, op), dim=[self._group_dim], skipna=skipna, keep_attrs=keep_attrs, ) return result def first( self, skipna: bool | None = None, keep_attrs: bool | None = None ) -> T_Xarray: """ Return the first element of each group along the group dimension Parameters ---------- skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. """ return self._first_or_last("first", skipna, keep_attrs) def last( self, skipna: bool | None = None, keep_attrs: bool | None = None ) -> T_Xarray: """ Return the last element of each group along the group dimension Parameters ---------- skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. """ return self._first_or_last("last", skipna, keep_attrs) def assign_coords(self, coords=None, **coords_kwargs): """Assign coordinates by group. See Also -------- Dataset.assign_coords Dataset.swap_dims """ coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, "assign_coords") return self.map(lambda ds: ds.assign_coords(**coords_kwargs)) def _maybe_reorder(xarray_obj, dim, positions, N: int | None): order = _inverse_permutation_indices(positions, N) if order is None or len(order) != xarray_obj.sizes[dim]: return xarray_obj else: return xarray_obj[{dim: order}] class DataArrayGroupByBase(GroupBy["DataArray"], DataArrayGroupbyArithmetic): """GroupBy object specialized to grouping DataArray objects""" __slots__ = () _dims: tuple[Hashable, ...] 
| None @property def dims(self) -> tuple[Hashable, ...]: self._raise_if_by_is_chunked() if self._dims is None: index = self.encoded.group_indices[0] self._dims = self._obj.isel({self._group_dim: index}).dims return self._dims def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without metadata """ self._raise_if_by_is_chunked() var = self._obj.variable for _idx, indices in enumerate(self.encoded.group_indices): if indices: yield var[{self._group_dim: indices}] def _concat_shortcut(self, applied, dim, positions=None): # nb. don't worry too much about maintaining this method -- it does # speed things up, but it's not very interpretable and there are much # faster alternatives (e.g., doing the grouped aggregation in a # compiled language) # TODO: benbovy - explicit indexes: this fast implementation doesn't # create an explicit index for the stacked dim coordinate stacked = Variable.concat(applied, dim, shortcut=True) reordered = _maybe_reorder(stacked, dim, positions, N=self.group1d.size) return self._obj._replace_maybe_drop_dims(reordered) def _restore_dim_order(self, stacked: DataArray) -> DataArray: def lookup_order(dimension): for grouper in self.groupers: if dimension == grouper.name and grouper.group.ndim == 1: (dimension,) = grouper.group.dims if dimension in self._obj.dims: axis = self._obj.get_axis_num(dimension) else: axis = 1e6 # some arbitrarily high value return axis new_order = sorted(stacked.dims, key=lookup_order) stacked = stacked.transpose( *new_order, transpose_coords=self._restore_coord_dims ) return stacked def map( self, func: Callable[..., DataArray], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> DataArray: """Apply a function to each array in the group and concatenate them together into a new array. `func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped array after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: (1) The action of `func` does not depend on any of the array metadata (attributes or coordinates) but only on the data and dimensions. (2) The action of `func` creates arrays with homogeneous metadata, that is, with the same dimensions and attributes. If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). *args : tuple, optional Positional arguments passed to `func`. **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns ------- applied : DataArray The result of splitting, applying and combining this array. """ grouped = self._iter_grouped_shortcut() if shortcut else self._iter_grouped() applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped) return self._combine(applied, shortcut=shortcut) def apply(self, func, shortcut=False, args=(), **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataArrayGroupBy.map """ warnings.warn( "GroupBy.apply may be deprecated in the future. 
Using GroupBy.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, shortcut=shortcut, args=args, **kwargs) def _combine(self, applied, shortcut=False): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) dim, positions = self._infer_concat_args(applied_example) if shortcut: combined = self._concat_shortcut(applied, dim, positions) else: combined = concat( applied, dim, data_vars="all", coords="different", compat="equals", join="outer", ) combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size) if isinstance(combined, type(self._obj)): # only restore dimension order for arrays combined = self._restore_dim_order(combined) # assign coord and index when the applied function does not return that coord if dim not in applied_example.dims: combined = combined.assign_coords(self.encoded.coords) combined = self._maybe_unstack(combined) combined = self._maybe_reindex(combined) return combined def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. If None, apply over the groupby dimension, if "..." apply over all dimensions. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' and 'axis' arguments can be supplied. If neither are supplied, then `func` is calculated over all dimension for each group item. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Try installing the `flox` package if you are using one of the standard " "reductions (e.g. `mean`). " ) if dim is None: dim = [self._group_dim] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) def reduce_array(ar: DataArray) -> DataArray: return ar.reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, **kwargs, ) check_reduce_dims(dim, self.dims) return self.map(reduce_array, shortcut=shortcut) class DataArrayGroupBy( DataArrayGroupByBase, DataArrayGroupByAggregations, ImplementsArrayReduce, ): __slots__ = () class DatasetGroupByBase(GroupBy["Dataset"], DatasetGroupbyArithmetic): __slots__ = () _dims: Frozen[Hashable, int] | None @property def dims(self) -> Frozen[Hashable, int]: self._raise_if_by_is_chunked() if self._dims is None: index = self.encoded.group_indices[0] self._dims = self._obj.isel({self._group_dim: index}).dims return FrozenMappingWarningOnValuesAccess(self._dims) def map( self, func: Callable[..., Dataset], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> Dataset: """Apply a function to each Dataset in the group and concatenate them together into a new Dataset. 
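# --- Illustrative sketch (not part of the upstream xarray source) ---
# A small example of ``DatasetGroupBy.map``: the function is applied to each
# per-group Dataset and the results are recombined following the rules
# described below. Data and the normalization function are made up.
import xarray as xr

ds = xr.Dataset({"v": ("x", [1.0, 2.0, 10.0, 20.0])}, coords={"x": [0, 0, 1, 1]})
out = ds.groupby("x").map(lambda g: g / g.max())  # scale each group by its own max
print(out["v"].values)  # -> [0.5, 1.0, 0.5, 1.0]
# --- end of illustrative sketch ---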
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the datasets. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped item after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments to pass to `func`. **kwargs Used to call `func(ds, **kwargs)` for each sub-dataset `ar`. Returns ------- applied : Dataset The result of splitting, applying and combining this dataset. """ # ignore shortcut if set (for now) applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped()) return self._combine(applied) def apply(self, func, args=(), shortcut=None, **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DatasetGroupBy.map """ warnings.warn( "GroupBy.apply may be deprecated in the future. Using GroupBy.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, shortcut=shortcut, args=args, **kwargs) def _combine(self, applied): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) dim, positions = self._infer_concat_args(applied_example) combined = concat( applied, dim, data_vars="all", coords="different", compat="equals", join="outer", ) combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size) # assign coord when the applied function does not return that coord if dim not in applied_example.dims: combined = combined.assign_coords(self.encoded.coords) combined = self._maybe_unstack(combined) combined = self._maybe_reindex(combined) return combined def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> Dataset: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : ..., str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default apply over the groupby dimension, with "..." apply over all dimensions. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' and 'axis' arguments can be supplied. If neither are supplied, then `func` is calculated over all dimension for each group item. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Dataset Array with summarized data and the indicated dimension(s) removed. """ if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Try installing the `flox` package if you are using one of the standard " "reductions (e.g. `mean`). 
" ) if dim is None: dim = [self._group_dim] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) def reduce_dataset(ds: Dataset) -> Dataset: return ds.reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, **kwargs, ) check_reduce_dims(dim, self.dims) return self.map(reduce_dataset) def assign(self, **kwargs: Any) -> Dataset: """Assign data variables by group. See Also -------- Dataset.assign """ return self.map(lambda ds: ds.assign(**kwargs)) class DatasetGroupBy( DatasetGroupByBase, DatasetGroupByAggregations, ImplementsDatasetReduce, ): __slots__ = () xarray-2025.09.0/xarray/core/indexes.py000066400000000000000000002337201505620616400176220ustar00rootroot00000000000000from __future__ import annotations import collections.abc import copy import inspect from collections import defaultdict from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload import numpy as np import pandas as pd from xarray.core import formatting, nputils, utils from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( CoordinateTransformIndexingAdapter, IndexSelResult, PandasIndexingAdapter, PandasMultiIndexingAdapter, ) from xarray.core.utils import ( Frozen, emit_user_level_warning, get_valid_numpy_dtype, is_allowed_extension_array_dtype, is_dict_like, is_scalar, ) if TYPE_CHECKING: from xarray.core.types import ErrorOptions, JoinOptions, Self from xarray.core.variable import Variable IndexVars = dict[Any, "Variable"] class Index: """ Base class inherited by all xarray-compatible indexes. Do not use this class directly for creating index objects. Xarray indexes are created exclusively from subclasses of ``Index``, mostly via Xarray's public API like ``Dataset.set_xindex``. Every subclass must at least implement :py:meth:`Index.from_variables`. The (re)implementation of the other methods of this base class is optional but mostly required in order to support operations relying on indexes such as label-based selection or alignment. The ``Index`` API closely follows the :py:meth:`Dataset` and :py:meth:`DataArray` API, e.g., for an index to support ``.sel()`` it needs to implement :py:meth:`Index.sel`, to support ``.stack()`` and ``.unstack()`` it needs to implement :py:meth:`Index.stack` and :py:meth:`Index.unstack`, etc. When a method is not (re)implemented, depending on the case the corresponding operation on a :py:meth:`Dataset` or :py:meth:`DataArray` either will raise a ``NotImplementedError`` or will simply drop/pass/copy the index from/to the result. Do not use this class directly for creating index objects. """ @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> Self: """Create a new index object from one or more coordinate variables. This factory method must be implemented in all subclasses of Index. The coordinate variables may be passed here in an arbitrary number and order and each with arbitrary dimensions. It is the responsibility of the index to check the consistency and validity of these coordinates. Parameters ---------- variables : dict-like Mapping of :py:class:`Variable` objects holding the coordinate labels to index. Returns ------- index : Index A new Index object. 
""" raise NotImplementedError() @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: """Create a new index by concatenating one or more indexes of the same type. Implementation is optional but required in order to support ``concat``. Otherwise it will raise an error if the index needs to be updated during the operation. Parameters ---------- indexes : sequence of Index objects Indexes objects to concatenate together. All objects must be of the same type. dim : Hashable Name of the dimension to concatenate along. positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. Returns ------- index : Index A new Index object. """ raise NotImplementedError() @classmethod def stack(cls, variables: Mapping[Any, Variable], dim: Hashable) -> Self: """Create a new index by stacking coordinate variables into a single new dimension. Implementation is optional but required in order to support ``stack``. Otherwise it will raise an error when trying to pass the Index subclass as argument to :py:meth:`Dataset.stack`. Parameters ---------- variables : dict-like Mapping of :py:class:`Variable` objects to stack together. dim : Hashable Name of the new, stacked dimension. Returns ------- index A new Index object. """ raise NotImplementedError( f"{cls!r} cannot be used for creating an index of stacked coordinates" ) def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]: """Unstack a (multi-)index into multiple (single) indexes. Implementation is optional but required in order to support unstacking the coordinates from which this index has been built. Returns ------- indexes : tuple A 2-length tuple where the 1st item is a dictionary of unstacked Index objects and the 2nd item is a :py:class:`pandas.MultiIndex` object used to unstack unindexed coordinate variables or data variables. """ raise NotImplementedError() def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: """Maybe create new coordinate variables from this index. This method is useful if the index data can be reused as coordinate variable data. It is often the case when the underlying index structure has an array-like interface, like :py:class:`pandas.Index` objects. The variables given as argument (if any) are either returned as-is (default behavior) or can be used to copy their metadata (attributes and encoding) into the new returned coordinate variables. Note: the input variables may or may not have been filtered for this index. Parameters ---------- variables : dict-like, optional Mapping of :py:class:`Variable` objects. Returns ------- index_variables : dict-like Dictionary of :py:class:`Variable` or :py:class:`IndexVariable` objects. """ if variables is not None: # pass through return dict(**variables) else: return {} def should_add_coord_to_array( self, name: Hashable, var: Variable, dims: set[Hashable], ) -> bool: """Define whether or not an index coordinate variable should be added to a new DataArray. This method is called repeatedly for each Variable associated with this index when creating a new DataArray (via its constructor or from a Dataset) or updating an existing one. The variables associated with this index are the ones passed to :py:meth:`Index.from_variables` and/or returned by :py:meth:`Index.create_variables`. 
By default returns ``True`` if the dimensions of the coordinate variable are a subset of the array dimensions and ``False`` otherwise (DataArray model). This default behavior may be overridden in Index subclasses to bypass strict conformance with the DataArray model. This is useful for example to include the (n+1)-dimensional cell boundary coordinate associated with an interval index. Returning ``False`` will either: - raise a :py:class:`CoordinateValidationError` when passing the coordinate directly to a new or an existing DataArray, e.g., via ``DataArray.__init__()`` or ``DataArray.assign_coords()`` - drop the coordinate (and therefore drop the index) when a new DataArray is constructed by indexing a Dataset Parameters ---------- name : Hashable Name of a coordinate variable associated to this index. var : Variable Coordinate variable object. dims: tuple Dimensions of the new DataArray object being created. """ return all(d in dims for d in var.dims) def to_pandas_index(self) -> pd.Index: """Cast this xarray index to a pandas.Index object or raise a ``TypeError`` if this is not supported. This method is used by all xarray operations that still rely on pandas.Index objects. By default it raises a ``TypeError``, unless it is re-implemented in subclasses of Index. """ raise TypeError(f"{self!r} cannot be cast to a pandas.Index object") def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> Index | None: """Maybe returns a new index from the current index itself indexed by positional indexers. This method should be re-implemented in subclasses of Index if the wrapped index structure supports indexing operations. For example, indexing a ``pandas.Index`` is pretty straightforward as it behaves very much like an array. By contrast, it may be harder doing so for a structure like a kd-tree that differs much from a simple array. If not re-implemented in subclasses of Index, this method returns ``None``, i.e., calling :py:meth:`Dataset.isel` will either drop the index in the resulting dataset or pass it unchanged if its corresponding coordinate(s) are not indexed. Parameters ---------- indexers : dict A dictionary of positional indexers as passed from :py:meth:`Dataset.isel` and where the entries have been filtered for the current index. Returns ------- maybe_index : Index A new Index object or ``None``. """ return None def sel(self, labels: dict[Any, Any]) -> IndexSelResult: """Query the index with arbitrary coordinate label indexers. Implementation is optional but required in order to support label-based selection. Otherwise it will raise an error when trying to call :py:meth:`Dataset.sel` with labels for this index coordinates. Coordinate label indexers can be of many kinds, e.g., scalar, list, tuple, array-like, slice, :py:class:`Variable`, :py:class:`DataArray`, etc. It is the responsibility of the index to handle those indexers properly. Parameters ---------- labels : dict A dictionary of coordinate label indexers passed from :py:meth:`Dataset.sel` and where the entries have been filtered for the current index. Returns ------- sel_results : :py:class:`IndexSelResult` An index query result object that contains dimension positional indexers. It may also contain new indexes, coordinate variables, etc. """ raise NotImplementedError(f"{self!r} doesn't support label-based selection") def join(self, other: Self, how: JoinOptions = "inner") -> Self: """Return a new index from the combination of this index with another index of the same type. 
Implementation is optional but required in order to support alignment. Parameters ---------- other : Index The other Index object to combine with this index. join : str, optional Method for joining the two indexes (see :py:func:`~xarray.align`). Returns ------- joined : Index A new Index object. """ raise NotImplementedError( f"{self!r} doesn't support alignment with inner/outer join method" ) def reindex_like(self, other: Self) -> dict[Hashable, Any]: """Query the index with another index of the same type. Implementation is optional but required in order to support alignment. Parameters ---------- other : Index The other Index object used to query this index. Returns ------- dim_positional_indexers : dict A dictionary where keys are dimension names and values are positional indexers. """ raise NotImplementedError(f"{self!r} doesn't support re-indexing labels") @overload def equals(self, other: Index) -> bool: ... @overload def equals( self, other: Index, *, exclude: frozenset[Hashable] | None = None ) -> bool: ... def equals(self, other: Index, **kwargs) -> bool: """Compare this index with another index of the same type. Implementation is optional but required in order to support alignment. Parameters ---------- other : Index The other Index object to compare with this object. exclude : frozenset of hashable, optional Dimensions excluded from checking. It is None by default, (i.e., when this method is not called in the context of alignment). For a n-dimensional index this option allows an Index to optionally ignore any dimension in ``exclude`` when comparing ``self`` with ``other``. For a 1-dimensional index this kwarg can be safely ignored, as this method is not called when all of the index's dimensions are also excluded from alignment (note: the index's dimensions correspond to the union of the dimensions of all coordinate variables associated with this index). Returns ------- is_equal : bool ``True`` if the indexes are equal, ``False`` otherwise. """ raise NotImplementedError() def roll(self, shifts: Mapping[Any, int]) -> Self | None: """Roll this index by an offset along one or more dimensions. This method can be re-implemented in subclasses of Index, e.g., when the index can be itself indexed. If not re-implemented, this method returns ``None``, i.e., calling :py:meth:`Dataset.roll` will either drop the index in the resulting dataset or pass it unchanged if its corresponding coordinate(s) are not rolled. Parameters ---------- shifts : mapping of hashable to int, optional A dict with keys matching dimensions and values given by integers to rotate each of the given dimensions, as passed :py:meth:`Dataset.roll`. Returns ------- rolled : Index A new index with rolled data. """ return None def rename( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], ) -> Self: """Maybe update the index with new coordinate and dimension names. This method should be re-implemented in subclasses of Index if it has attributes that depend on coordinate or dimension names. By default (if not re-implemented), it returns the index itself. Warning: the input names are not filtered for this method, they may correspond to any variable or dimension of a Dataset or a DataArray. Parameters ---------- name_dict : dict-like Mapping of current variable or coordinate names to the desired names, as passed from :py:meth:`Dataset.rename_vars`. dims_dict : dict-like Mapping of current dimension names to the desired names, as passed from :py:meth:`Dataset.rename_dims`. 
Returns ------- renamed : Index Index with renamed attributes. """ return self def copy(self, deep: bool = True) -> Self: """Return a (deep) copy of this index. Implementation in subclasses of Index is optional. The base class implements the default (deep) copy semantics. Parameters ---------- deep : bool, optional If true (default), a copy of the internal structures (e.g., wrapped index) is returned with the new object. Returns ------- index : Index A new Index object. """ return self._copy(deep=deep) def __copy__(self) -> Self: return self.copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Index: return self._copy(deep=True, memo=memo) def _copy(self, deep: bool = True, memo: dict[int, Any] | None = None) -> Self: cls = self.__class__ copied = cls.__new__(cls) if deep: for k, v in self.__dict__.items(): setattr(copied, k, copy.deepcopy(v, memo)) else: copied.__dict__.update(self.__dict__) return copied def __getitem__(self, indexer: Any) -> Self: raise NotImplementedError() def _repr_inline_(self, max_width: int) -> str: return self.__class__.__name__ def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index: from xarray.coding.cftimeindex import CFTimeIndex if len(index) > 0 and index.dtype == "O" and not isinstance(index, CFTimeIndex): try: return CFTimeIndex(index) except (ImportError, TypeError): return index else: return index def safe_cast_to_index(array: Any) -> pd.Index: """Given an array, safely cast it to a pandas.Index. If it is already a pandas.Index, return it unchanged. Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64, this function will not attempt to do automatic type conversion but will always return an index with dtype=object. """ from xarray.core.dataarray import DataArray from xarray.core.variable import Variable from xarray.namedarray.pycompat import to_numpy if isinstance(array, PandasExtensionArray): array = pd.Index(array.array) if isinstance(array, pd.Index): index = array elif isinstance(array, DataArray | Variable): # returns the original multi-index for pandas.MultiIndex level coordinates index = array._to_index() elif isinstance(array, Index): index = array.to_pandas_index() elif isinstance(array, PandasIndexingAdapter): index = array.array else: kwargs: dict[str, Any] = {} if hasattr(array, "dtype"): if array.dtype.kind == "O": kwargs["dtype"] = "object" elif array.dtype == "float16": emit_user_level_warning( ( "`pandas.Index` does not support the `float16` dtype." " Casting to `float64` for you, but in the future please" " manually cast to either `float32` and `float64`." 
), category=DeprecationWarning, ) kwargs["dtype"] = "float64" index = pd.Index(to_numpy(array), **kwargs) return _maybe_cast_to_cftimeindex(index) def _sanitize_slice_element(x): from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if not isinstance(x, tuple) and len(np.shape(x)) != 0: raise ValueError( f"cannot use non-scalar arrays in a slice for xarray indexing: {x}" ) if isinstance(x, Variable | DataArray): x = x.values if isinstance(x, np.ndarray): x = x[()] return x def _query_slice(index, label, coord_name="", method=None, tolerance=None): if method is not None or tolerance is not None: raise NotImplementedError( "cannot use ``method`` argument if any indexers are slice objects" ) indexer = index.slice_indexer( _sanitize_slice_element(label.start), _sanitize_slice_element(label.stop), _sanitize_slice_element(label.step), ) if not isinstance(indexer, slice): # unlike pandas, in xarray we never want to silently convert a # slice indexer into an array indexer raise KeyError( "cannot represent labeled-based slice indexer for coordinate " f"{coord_name!r} with a slice over integer positions; the index is " "unsorted or non-unique" ) return indexer def _asarray_tuplesafe(values): """ Convert values into a numpy array of at most 1-dimension, while preserving tuples. Adapted from pandas.core.common._asarray_tuplesafe """ if isinstance(values, tuple): result = utils.to_0d_object_array(values) else: result = np.asarray(values) if result.ndim == 2: result = np.empty(len(values), dtype=object) result[:] = values return result def _is_nested_tuple(possible_tuple): return isinstance(possible_tuple, tuple) and any( isinstance(value, tuple | list | slice) for value in possible_tuple ) def normalize_label(value, dtype=None) -> np.ndarray: if getattr(value, "ndim", 1) <= 1: value = _asarray_tuplesafe(value) if dtype is not None and dtype.kind == "f" and value.dtype.kind != "b": # pd.Index built from coordinate with float precision != 64 # see https://github.com/pydata/xarray/pull/3153 for details # bypass coercing dtype for boolean indexers (ignore index) # see https://github.com/pydata/xarray/issues/5727 value = np.asarray(value, dtype=dtype) return value def as_scalar(value: np.ndarray): # see https://github.com/pydata/xarray/pull/4292 for details return value[()] if value.dtype.kind in "mM" else value.item() def get_indexer_nd(index: pd.Index, labels, method=None, tolerance=None) -> np.ndarray: """Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional labels """ flat_labels = np.ravel(labels) if flat_labels.dtype == "float16": flat_labels = flat_labels.astype("float64") flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance) indexer = flat_indexer.reshape(labels.shape) return indexer T_PandasIndex = TypeVar("T_PandasIndex", bound="PandasIndex") class PandasIndex(Index): """Wrap a pandas.Index as an xarray compatible index.""" index: pd.Index dim: Hashable coord_dtype: Any __slots__ = ("coord_dtype", "dim", "index") def __init__( self, array: Any, dim: Hashable, coord_dtype: Any = None, *, fastpath: bool = False, ): if fastpath: index = array else: index = safe_cast_to_index(array) if index.name is None: # make a shallow copy: cheap and because the index name may be updated # here or in other constructors (cannot use pd.Index.rename as this # constructor is also called from PandasMultiIndex) index = index.copy() index.name = dim self.index = index self.dim = dim if coord_dtype is None: if 
is_allowed_extension_array_dtype(index.dtype): cast(pd.api.extensions.ExtensionDtype, index.dtype) coord_dtype = index.dtype else: coord_dtype = get_valid_numpy_dtype(index) self.coord_dtype = coord_dtype def _replace(self, index, dim=None, coord_dtype=None): if dim is None: dim = self.dim if coord_dtype is None: coord_dtype = self.coord_dtype return type(self)(index, dim, coord_dtype, fastpath=True) @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> PandasIndex: if len(variables) != 1: raise ValueError( f"PandasIndex only accepts one variable, found {len(variables)} variables" ) name, var = next(iter(variables.items())) if var.ndim == 0: raise ValueError( f"cannot set a PandasIndex from the scalar variable {name!r}, " "only 1-dimensional variables are supported. " f"Note: you might want to use `obj.expand_dims({name!r})` to create a " f"new dimension and turn {name!r} as an indexed dimension coordinate." ) elif var.ndim != 1: raise ValueError( "PandasIndex only accepts a 1-dimensional variable, " f"variable {name!r} has {var.ndim} dimensions" ) dim = var.dims[0] # TODO: (benbovy - explicit indexes): add __index__ to ExplicitlyIndexesNDArrayMixin? # this could be eventually used by Variable.to_index() and would remove the need to perform # the checks below. # preserve wrapped pd.Index (if any) # accessing `.data` can load data from disk, so we only access if needed data = var._data if isinstance(var._data, PandasIndexingAdapter) else var.data # type: ignore[redundant-expr] # multi-index level variable: get level index if isinstance(var._data, PandasMultiIndexingAdapter): level = var._data.level if level is not None: data = var._data.array.get_level_values(level) obj = cls(data, dim, coord_dtype=var.dtype) assert not isinstance(obj.index, pd.MultiIndex) # Rename safely # make a shallow copy: cheap and because the index name may be updated # here or in other constructors (cannot use pd.Index.rename as this # constructor is also called from PandasMultiIndex) obj.index = obj.index.copy() obj.index.name = name return obj @staticmethod def _concat_indexes(indexes, dim, positions=None) -> pd.Index: new_pd_index: pd.Index if not indexes: new_pd_index = pd.Index([]) else: if not all(idx.dim == dim for idx in indexes): dims = ",".join({f"{idx.dim!r}" for idx in indexes}) raise ValueError( f"Cannot concatenate along dimension {dim!r} indexes with " f"dimensions: {dims}" ) pd_indexes = [idx.index for idx in indexes] new_pd_index = pd_indexes[0].append(pd_indexes[1:]) if positions is not None: indices = nputils.inverse_permutation(np.concatenate(positions)) new_pd_index = new_pd_index.take(indices) return new_pd_index @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: new_pd_index = cls._concat_indexes(indexes, dim, positions) if not indexes: coord_dtype = None else: indexes_coord_dtypes = {idx.coord_dtype for idx in indexes} if len(indexes_coord_dtypes) == 1: coord_dtype = next(iter(indexes_coord_dtypes)) else: coord_dtype = np.result_type(*indexes_coord_dtypes) return cls(new_pd_index, dim=dim, coord_dtype=coord_dtype) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: from xarray.core.variable import IndexVariable name = self.index.name attrs: Mapping[Hashable, Any] | None encoding: Mapping[Hashable, Any] | None if variables is not None and name in variables: var = variables[name] attrs = var.attrs encoding = var.encoding 
else: attrs = None encoding = None data = PandasIndexingAdapter(self.index, dtype=self.coord_dtype) var = IndexVariable(self.dim, data, attrs=attrs, encoding=encoding) return {name: var} def to_pandas_index(self) -> pd.Index: return self.index def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> PandasIndex | None: from xarray.core.variable import Variable indxr = indexers[self.dim] if isinstance(indxr, Variable): if indxr.dims != (self.dim,): # can't preserve a index if result has new dimensions return None else: indxr = indxr.data if not isinstance(indxr, slice) and is_scalar(indxr): # scalar indexer: drop index return None return self._replace(self.index[indxr]) # type: ignore[index,unused-ignore] def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if method is not None and not isinstance(method, str): raise TypeError("``method`` must be a string") assert len(labels) == 1 coord_name, label = next(iter(labels.items())) if isinstance(label, slice): indexer = _query_slice(self.index, label, coord_name, method, tolerance) elif is_dict_like(label): raise ValueError( "cannot use a dict-like object for selection on " "a dimension that does not have a MultiIndex" ) else: label_array = normalize_label(label, dtype=self.coord_dtype) if label_array.ndim == 0: label_value = as_scalar(label_array) if isinstance(self.index, pd.CategoricalIndex): if method is not None: raise ValueError( "'method' is not supported when indexing using a CategoricalIndex." ) if tolerance is not None: raise ValueError( "'tolerance' is not supported when indexing using a CategoricalIndex." ) indexer = self.index.get_loc(label_value) elif method is not None: indexer = get_indexer_nd(self.index, label_array, method, tolerance) if np.any(indexer < 0): raise KeyError(f"not all values found in index {coord_name!r}") else: try: indexer = self.index.get_loc(label_value) except KeyError as e: raise KeyError( f"not all values found in index {coord_name!r}. " "Try setting the `method` keyword argument (example: method='nearest')." 
) from e elif label_array.dtype.kind == "b": indexer = label_array else: indexer = get_indexer_nd(self.index, label_array, method, tolerance) if np.any(indexer < 0): raise KeyError(f"not all values found in index {coord_name!r}") # attach dimension names and/or coordinates to positional indexer if isinstance(label, Variable): indexer = Variable(label.dims, indexer) elif isinstance(label, DataArray): indexer = DataArray(indexer, coords=label._coords, dims=label.dims) return IndexSelResult({self.dim: indexer}) def equals(self, other: Index, *, exclude: frozenset[Hashable] | None = None): if not isinstance(other, PandasIndex): return False return self.index.equals(other.index) and self.dim == other.dim def join( self, other: Self, how: str = "inner", ) -> Self: if how == "outer": index = self.index.union(other.index) else: # how = "inner" index = self.index.intersection(other.index) coord_dtype = np.result_type(self.coord_dtype, other.coord_dtype) return type(self)(index, self.dim, coord_dtype=coord_dtype) def reindex_like( self, other: Self, method=None, tolerance=None ) -> dict[Hashable, Any]: if not self.index.is_unique: raise ValueError( f"cannot reindex or align along dimension {self.dim!r} because the " "(pandas) index has duplicate values" ) return {self.dim: get_indexer_nd(self.index, other.index, method, tolerance)} def roll(self, shifts: Mapping[Any, int]) -> PandasIndex: shift = shifts[self.dim] % self.index.shape[0] if shift != 0: new_pd_idx = self.index[-shift:].append(self.index[:-shift]) else: new_pd_idx = self.index[:] return self._replace(new_pd_idx) def rename(self, name_dict, dims_dict): if self.index.name not in name_dict and self.dim not in dims_dict: return self new_name = name_dict.get(self.index.name, self.index.name) index = self.index.rename(new_name) new_dim = dims_dict.get(self.dim, self.dim) return self._replace(index, dim=new_dim) def _copy( self: T_PandasIndex, deep: bool = True, memo: dict[int, Any] | None = None ) -> T_PandasIndex: if deep: # pandas is not using the memo index = self.index.copy(deep=True) else: # index will be copied in constructor index = self.index return self._replace(index) def __getitem__(self, indexer: Any): return self._replace(self.index[indexer]) def __repr__(self): return f"PandasIndex({self.index!r})" def _check_dim_compat(variables: Mapping[Any, Variable], all_dims: str = "equal"): """Check that all multi-index variable candidates are 1-dimensional and either share the same (single) dimension or each have a different dimension. """ if any(var.ndim != 1 for var in variables.values()): raise ValueError("PandasMultiIndex only accepts 1-dimensional variables") dims = {var.dims for var in variables.values()} if all_dims == "equal" and len(dims) > 1: raise ValueError( "unmatched dimensions for multi-index variables " + ", ".join([f"{k!r} {v.dims}" for k, v in variables.items()]) ) if all_dims == "different" and len(dims) < len(variables): raise ValueError( "conflicting dimensions for multi-index product variables " + ", ".join([f"{k!r} {v.dims}" for k, v in variables.items()]) ) T_PDIndex = TypeVar("T_PDIndex", bound=pd.Index) def remove_unused_levels_categories(index: T_PDIndex) -> T_PDIndex: """ Remove unused levels from MultiIndex and unused categories from CategoricalIndex """ if isinstance(index, pd.MultiIndex): new_index = cast(pd.MultiIndex, index.remove_unused_levels()) # if it contains CategoricalIndex, we need to remove unused categories # manually. 
See https://github.com/pandas-dev/pandas/issues/30846 if any(isinstance(lev, pd.CategoricalIndex) for lev in new_index.levels): levels = [] for i, level in enumerate(new_index.levels): if isinstance(level, pd.CategoricalIndex): level = level[new_index.codes[i]].remove_unused_categories() else: level = level[new_index.codes[i]] levels.append(level) # TODO: calling from_array() reorders MultiIndex levels. It would # be best to avoid this, if possible, e.g., by using # MultiIndex.remove_unused_levels() (which does not reorder) on the # part of the MultiIndex that is not categorical, or by fixing this # upstream in pandas. new_index = pd.MultiIndex.from_arrays(levels, names=new_index.names) return cast(T_PDIndex, new_index) if isinstance(index, pd.CategoricalIndex): return index.remove_unused_categories() # type: ignore[attr-defined] return index class PandasMultiIndex(PandasIndex): """Wrap a pandas.MultiIndex as an xarray compatible index.""" index: pd.MultiIndex dim: Hashable coord_dtype: Any level_coords_dtype: dict[Hashable | None, Any] __slots__ = ("coord_dtype", "dim", "index", "level_coords_dtype") def __init__(self, array: Any, dim: Hashable, level_coords_dtype: Any = None): super().__init__(array, dim) # default index level names names = [] for i, idx in enumerate(self.index.levels): name = idx.name or f"{dim}_level_{i}" if name == dim: raise ValueError( f"conflicting multi-index level name {name!r} with dimension {dim!r}" ) names.append(name) self.index.names = names if level_coords_dtype is None: level_coords_dtype = { idx.name: get_valid_numpy_dtype(idx) for idx in self.index.levels } self.level_coords_dtype = level_coords_dtype def _replace(self, index, dim=None, level_coords_dtype=None) -> PandasMultiIndex: if dim is None: dim = self.dim index.name = dim if level_coords_dtype is None: level_coords_dtype = self.level_coords_dtype return type(self)(index, dim, level_coords_dtype) @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> PandasMultiIndex: _check_dim_compat(variables) dim = next(iter(variables.values())).dims[0] index = pd.MultiIndex.from_arrays( [var.values for var in variables.values()], names=list(variables.keys()) ) index.name = dim level_coords_dtype = {name: var.dtype for name, var in variables.items()} obj = cls(index, dim, level_coords_dtype=level_coords_dtype) return obj @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: new_pd_index = cls._concat_indexes(indexes, dim, positions) if not indexes: level_coords_dtype = None else: level_coords_dtype = {} for name in indexes[0].level_coords_dtype: level_coords_dtype[name] = np.result_type( *[idx.level_coords_dtype[name] for idx in indexes] ) return cls(new_pd_index, dim=dim, level_coords_dtype=level_coords_dtype) @classmethod def stack( cls, variables: Mapping[Any, Variable], dim: Hashable ) -> PandasMultiIndex: """Create a new Pandas MultiIndex from the product of 1-d variables (levels) along a new dimension. Level variables must have a dimension distinct from each other. Keeps levels the same (doesn't refactorize them) so that it gives back the original labels after a stack/unstack roundtrip. 
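        Note (summarizing the implementation below): when every level is
        monotonically increasing the index is built with
        ``pandas.MultiIndex.from_product``; otherwise each level is factorized
        and the codes are combined via a meshgrid, so the original label order
        is preserved.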
""" _check_dim_compat(variables, all_dims="different") level_indexes = [safe_cast_to_index(var) for var in variables.values()] for name, idx in zip(variables, level_indexes, strict=True): if isinstance(idx, pd.MultiIndex): raise ValueError( f"cannot create a multi-index along stacked dimension {dim!r} " f"from variable {name!r} that wraps a multi-index" ) # from_product sorts by default, so we can't use that always # https://github.com/pydata/xarray/issues/980 # https://github.com/pandas-dev/pandas/issues/14672 if all(index.is_monotonic_increasing for index in level_indexes): index = pd.MultiIndex.from_product( level_indexes, sortorder=0, names=list(variables.keys()) ) else: split_labels, levels = zip( *[lev.factorize() for lev in level_indexes], strict=True ) labels_mesh = np.meshgrid(*split_labels, indexing="ij") labels = [x.ravel().tolist() for x in labels_mesh] index = pd.MultiIndex( levels=levels, codes=labels, sortorder=0, names=list(variables.keys()) ) level_coords_dtype = {k: var.dtype for k, var in variables.items()} return cls(index, dim, level_coords_dtype=level_coords_dtype) def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]: clean_index = remove_unused_levels_categories(self.index) if not clean_index.is_unique: raise ValueError( "Cannot unstack MultiIndex containing duplicates. Make sure entries " f"are unique, e.g., by calling ``.drop_duplicates('{self.dim}')``, " "before unstacking." ) new_indexes: dict[Hashable, Index] = {} for name, lev in zip(clean_index.names, clean_index.levels, strict=True): idx = PandasIndex( lev.copy(), name, coord_dtype=self.level_coords_dtype[name] ) new_indexes[name] = idx return new_indexes, clean_index @classmethod def from_variables_maybe_expand( cls, dim: Hashable, current_variables: Mapping[Any, Variable], variables: Mapping[Any, Variable], ) -> tuple[PandasMultiIndex, IndexVars]: """Create a new multi-index maybe by expanding an existing one with new variables as index levels. The index and its corresponding coordinates may be created along a new dimension. 
""" names: list[Hashable] = [] codes: list[Iterable[int]] = [] levels: list[Iterable[Any]] = [] level_variables: dict[Any, Variable] = {} _check_dim_compat({**current_variables, **variables}) if len(current_variables) > 1: # expand from an existing multi-index data = cast( PandasMultiIndexingAdapter, next(iter(current_variables.values()))._data ) current_index = data.array names.extend(current_index.names) codes.extend(current_index.codes) levels.extend(current_index.levels) for name in current_index.names: level_variables[name] = current_variables[name] elif len(current_variables) == 1: # expand from one 1D variable (no multi-index): convert it to an index level var = next(iter(current_variables.values())) new_var_name = f"{dim}_level_0" names.append(new_var_name) cat = pd.Categorical(var.values, ordered=True) codes.append(cat.codes) levels.append(cat.categories) level_variables[new_var_name] = var for name, var in variables.items(): names.append(name) cat = pd.Categorical(var.values, ordered=True) codes.append(cat.codes) levels.append(cat.categories) level_variables[name] = var codes_as_lists = [list(x) for x in codes] levels_as_lists = [list(level) for level in levels] index = pd.MultiIndex(levels=levels_as_lists, codes=codes_as_lists, names=names) level_coords_dtype = {k: var.dtype for k, var in level_variables.items()} obj = cls(index, dim, level_coords_dtype=level_coords_dtype) index_vars = obj.create_variables(level_variables) return obj, index_vars def keep_levels( self, level_variables: Mapping[Any, Variable] ) -> PandasMultiIndex | PandasIndex: """Keep only the provided levels and return a new multi-index with its corresponding coordinates. """ index = self.index.droplevel( [k for k in self.index.names if k not in level_variables] ) if isinstance(index, pd.MultiIndex): level_coords_dtype = {k: self.level_coords_dtype[k] for k in index.names} return self._replace(index, level_coords_dtype=level_coords_dtype) else: # backward compatibility: rename the level coordinate to the dimension name return PandasIndex( index.rename(self.dim), self.dim, coord_dtype=self.level_coords_dtype[index.name], ) def reorder_levels( self, level_variables: Mapping[Any, Variable] ) -> PandasMultiIndex: """Re-arrange index levels using input order and return a new multi-index with its corresponding coordinates. """ index = cast(pd.MultiIndex, self.index.reorder_levels(level_variables.keys())) level_coords_dtype = {k: self.level_coords_dtype[k] for k in index.names} return self._replace(index, level_coords_dtype=level_coords_dtype) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: from xarray.core.variable import IndexVariable if variables is None: variables = {} index_vars: IndexVars = {} for name in (self.dim,) + tuple(self.index.names): if name == self.dim: level = None dtype = None else: level = name dtype = self.level_coords_dtype[name] var = variables.get(name) if var is not None: attrs = var.attrs encoding = var.encoding else: attrs = {} encoding = {} data = PandasMultiIndexingAdapter(self.index, dtype=dtype, level=level) # type: ignore[arg-type] # TODO: are Hashables ok? 
index_vars[name] = IndexVariable( self.dim, data, attrs=attrs, encoding=encoding, fastpath=True, ) return index_vars def sel(self, labels, method=None, tolerance=None) -> IndexSelResult: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if method is not None or tolerance is not None: raise ValueError( "multi-index does not support ``method`` and ``tolerance``" ) new_index = None scalar_coord_values = {} indexer: int | slice | np.ndarray | Variable | DataArray # label(s) given for multi-index level(s) if all(lbl in self.index.names for lbl in labels): label_values = {} for k, v in labels.items(): label_array = normalize_label(v, dtype=self.level_coords_dtype[k]) try: label_values[k] = as_scalar(label_array) except ValueError as err: # label should be an item not an array-like raise ValueError( "Vectorized selection is not " f"available along coordinate {k!r} (multi-index level)" ) from err has_slice = any(isinstance(v, slice) for v in label_values.values()) if len(label_values) == self.index.nlevels and not has_slice: indexer = self.index.get_loc( tuple(label_values[k] for k in self.index.names) ) else: indexer, new_index = self.index.get_loc_level( tuple(label_values.values()), level=tuple(label_values.keys()) ) scalar_coord_values.update(label_values) # GH2619. Raise a KeyError if nothing is chosen if indexer.dtype.kind == "b" and indexer.sum() == 0: # type: ignore[union-attr] raise KeyError(f"{labels} not found") # assume one label value given for the multi-index "array" (dimension) else: if len(labels) > 1: coord_name = next(iter(set(labels) - set(self.index.names))) raise ValueError( f"cannot provide labels for both coordinate {coord_name!r} (multi-index array) " f"and one or more coordinates among {self.index.names!r} (multi-index levels)" ) coord_name, label = next(iter(labels.items())) if is_dict_like(label): invalid_levels = tuple( name for name in label if name not in self.index.names ) if invalid_levels: raise ValueError( f"multi-index level names {invalid_levels} not found in indexes {tuple(self.index.names)}" ) return self.sel(label) elif isinstance(label, slice): indexer = _query_slice(self.index, label, coord_name) elif isinstance(label, tuple): if _is_nested_tuple(label): indexer = self.index.get_locs(label) elif len(label) == self.index.nlevels: indexer = self.index.get_loc(label) else: levels = [self.index.names[i] for i in range(len(label))] indexer, new_index = self.index.get_loc_level(label, level=levels) scalar_coord_values.update(dict(zip(levels, label, strict=True))) else: label_array = normalize_label(label) if label_array.ndim == 0: label_value = as_scalar(label_array) indexer, new_index = self.index.get_loc_level(label_value, level=0) scalar_coord_values[self.index.names[0]] = label_value elif label_array.dtype.kind == "b": indexer = label_array else: if label_array.ndim > 1: raise ValueError( "Vectorized selection is not available along " f"coordinate {coord_name!r} with a multi-index" ) indexer = get_indexer_nd(self.index, label_array) if np.any(indexer < 0): raise KeyError(f"not all values found in index {coord_name!r}") # attach dimension names and/or coordinates to positional indexer if isinstance(label, Variable): indexer = Variable(label.dims, indexer) elif isinstance(label, DataArray): # do not include label-indexer DataArray coordinates that conflict # with the level names of this index coords = { k: v for k, v in label._coords.items() if k not in self.index.names } indexer = DataArray(indexer, coords=coords, 
dims=label.dims) if new_index is not None: if isinstance(new_index, pd.MultiIndex): level_coords_dtype = { k: self.level_coords_dtype[k] for k in new_index.names } new_index = self._replace( new_index, level_coords_dtype=level_coords_dtype ) dims_dict = {} drop_coords = [] else: new_index = PandasIndex( new_index, new_index.name, coord_dtype=self.level_coords_dtype[new_index.name], ) dims_dict = {self.dim: new_index.index.name} drop_coords = [self.dim] # variable(s) attrs and encoding metadata are propagated # when replacing the indexes in the resulting xarray object new_vars = new_index.create_variables() indexes = cast(dict[Any, Index], dict.fromkeys(new_vars, new_index)) # add scalar variable for each dropped level variables = new_vars for name, val in scalar_coord_values.items(): variables[name] = Variable([], val) return IndexSelResult( {self.dim: indexer}, indexes=indexes, variables=variables, drop_indexes=list(scalar_coord_values), drop_coords=drop_coords, rename_dims=dims_dict, ) else: return IndexSelResult({self.dim: indexer}) def join(self, other, how: str = "inner"): if how == "outer": # bug in pandas? need to reset index.name other_index = other.index.copy() other_index.name = None index = self.index.union(other_index) index.name = self.dim else: # how = "inner" index = self.index.intersection(other.index) level_coords_dtype = { k: np.result_type(lvl_dtype, other.level_coords_dtype[k]) for k, lvl_dtype in self.level_coords_dtype.items() } return type(self)(index, self.dim, level_coords_dtype=level_coords_dtype) def rename(self, name_dict, dims_dict): if not set(self.index.names) & set(name_dict) and self.dim not in dims_dict: return self # pandas 1.3.0: could simply do `self.index.rename(names_dict)` new_names = [name_dict.get(k, k) for k in self.index.names] index = self.index.rename(new_names) new_dim = dims_dict.get(self.dim, self.dim) new_level_coords_dtype = dict( zip(new_names, self.level_coords_dtype.values(), strict=True) ) return self._replace( index, dim=new_dim, level_coords_dtype=new_level_coords_dtype ) class CoordinateTransformIndex(Index): """Helper class for creating Xarray indexes based on coordinate transforms. - wraps a :py:class:`CoordinateTransform` instance - takes care of creating the index (lazy) coordinates - supports point-wise label-based selection - supports exact alignment only, by comparing indexes based on their transform (not on their explicit coordinate labels) .. caution:: This API is experimental and subject to change. Please report any bugs or surprising behaviour you encounter. 
""" transform: CoordinateTransform def __init__( self, transform: CoordinateTransform, ): self.transform = transform def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> IndexVars: from xarray.core.variable import Variable new_variables = {} for name in self.transform.coord_names: # copy attributes, if any attrs: Mapping[Hashable, Any] | None if variables is not None and name in variables: var = variables[name] attrs = var.attrs else: attrs = None data = CoordinateTransformIndexingAdapter(self.transform, name) new_variables[name] = Variable(self.transform.dims, data, attrs=attrs) return new_variables def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> Index | None: # TODO: support returning a new index (e.g., possible to re-calculate the # the transform or calculate another transform on a reduced dimension space) return None def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: from xarray.core.dataarray import DataArray from xarray.core.variable import Variable if method != "nearest": raise ValueError( "CoordinateTransformIndex only supports selection with method='nearest'" ) labels_set = set(labels) coord_names_set = set(self.transform.coord_names) missing_labels = coord_names_set - labels_set if missing_labels: missing_labels_str = ",".join([f"{name}" for name in missing_labels]) raise ValueError(f"missing labels for coordinate(s): {missing_labels_str}.") label0_obj = next(iter(labels.values())) dim_size0 = getattr(label0_obj, "sizes", {}) is_xr_obj = [ isinstance(label, DataArray | Variable) for label in labels.values() ] if not all(is_xr_obj): raise TypeError( "CoordinateTransformIndex only supports advanced (point-wise) indexing " "with either xarray.DataArray or xarray.Variable objects." ) dim_size = [getattr(label, "sizes", {}) for label in labels.values()] if any(ds != dim_size0 for ds in dim_size): raise ValueError( "CoordinateTransformIndex only supports advanced (point-wise) indexing " "with xarray.DataArray or xarray.Variable objects of matching dimensions." ) coord_labels = { name: labels[name].values for name in self.transform.coord_names } dim_positions = self.transform.reverse(coord_labels) results: dict[str, Variable | DataArray] = {} dims0 = tuple(dim_size0) for dim, pos in dim_positions.items(): # TODO: rounding the decimal positions is not always the behavior we expect # (there are different ways to represent implicit intervals) # we should probably make this customizable. 
pos = np.round(pos).astype("int") if isinstance(label0_obj, Variable): results[dim] = Variable(dims0, pos) else: # dataarray results[dim] = DataArray(pos, dims=dims0) return IndexSelResult(results) def equals( self, other: Index, *, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, CoordinateTransformIndex): return False return self.transform.equals(other.transform, exclude=exclude) def rename( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], ) -> Self: coord_names = self.transform.coord_names dims = self.transform.dims dim_size = self.transform.dim_size if not set(coord_names) & set(name_dict) and not set(dims) & set(dims_dict): return self new_transform = copy.deepcopy(self.transform) new_transform.coord_names = tuple(name_dict.get(n, n) for n in coord_names) new_transform.dims = tuple(str(dims_dict.get(d, d)) for d in dims) new_transform.dim_size = { str(dims_dict.get(d, d)): v for d, v in dim_size.items() } return type(self)(new_transform) def create_default_index_implicit( dim_variable: Variable, all_variables: Mapping | Iterable[Hashable] | None = None, ) -> tuple[PandasIndex, IndexVars]: """Create a default index from a dimension variable. Create a PandasMultiIndex if the given variable wraps a pandas.MultiIndex, otherwise create a PandasIndex (note that this will become obsolete once we depreciate implicitly passing a pandas.MultiIndex as a coordinate). """ if all_variables is None: all_variables = {} if not isinstance(all_variables, Mapping): all_variables = dict.fromkeys(all_variables) name = dim_variable.dims[0] array = getattr(dim_variable._data, "array", None) index: PandasIndex if isinstance(array, pd.MultiIndex): index = PandasMultiIndex(array, name) index_vars = index.create_variables() # check for conflict between level names and variable names duplicate_names = [k for k in index_vars if k in all_variables and k != name] if duplicate_names: # dirty workaround for an edge case where both the dimension # coordinate and the level coordinates are given for the same # multi-index object => do not raise an error # TODO: remove this check when removing the multi-index dimension coordinate if len(duplicate_names) < len(index.index.names): conflict = True else: duplicate_vars = [all_variables[k] for k in duplicate_names] conflict = any( v is None or not dim_variable.equals(v) for v in duplicate_vars ) if conflict: conflict_str = "\n".join(duplicate_names) raise ValueError( f"conflicting MultiIndex level / variable name(s):\n{conflict_str}" ) else: dim_var = {name: dim_variable} index = PandasIndex.from_variables(dim_var, options={}) index_vars = index.create_variables(dim_var) return index, index_vars # generic type that represents either a pandas or an xarray index T_PandasOrXarrayIndex = TypeVar("T_PandasOrXarrayIndex", Index, pd.Index) class Indexes(collections.abc.Mapping, Generic[T_PandasOrXarrayIndex]): """Immutable proxy for Dataset or DataArray indexes. It is a mapping where keys are coordinate names and values are either pandas or xarray indexes. It also contains the indexed coordinate variables and provides some utility methods. 
""" _index_type: type[Index | pd.Index] _indexes: dict[Any, T_PandasOrXarrayIndex] _variables: dict[Any, Variable] __slots__ = ( "__coord_name_id", "__id_coord_names", "__id_index", "_dims", "_index_type", "_indexes", "_variables", ) def __init__( self, indexes: Mapping[Any, T_PandasOrXarrayIndex] | None = None, variables: Mapping[Any, Variable] | None = None, index_type: type[Index | pd.Index] = Index, ): """Constructor not for public consumption. Parameters ---------- indexes : dict Indexes held by this object. variables : dict Indexed coordinate variables in this object. Entries must match those of `indexes`. index_type : type The type of all indexes, i.e., either :py:class:`xarray.indexes.Index` or :py:class:`pandas.Index`. """ if indexes is None: indexes = {} if variables is None: variables = {} unmatched_keys = set(indexes) ^ set(variables) if unmatched_keys: raise ValueError( f"unmatched keys found in indexes and variables: {unmatched_keys}" ) if any(not isinstance(idx, index_type) for idx in indexes.values()): index_type_str = f"{index_type.__module__}.{index_type.__name__}" raise TypeError( f"values of indexes must all be instances of {index_type_str}" ) self._index_type = index_type self._indexes = dict(**indexes) self._variables = dict(**variables) self._dims: Mapping[Hashable, int] | None = None self.__coord_name_id: dict[Any, int] | None = None self.__id_index: dict[int, T_PandasOrXarrayIndex] | None = None self.__id_coord_names: dict[int, tuple[Hashable, ...]] | None = None @property def _coord_name_id(self) -> dict[Any, int]: if self.__coord_name_id is None: self.__coord_name_id = {k: id(idx) for k, idx in self._indexes.items()} return self.__coord_name_id @property def _id_index(self) -> dict[int, T_PandasOrXarrayIndex]: if self.__id_index is None: self.__id_index = {id(idx): idx for idx in self.get_unique()} return self.__id_index @property def _id_coord_names(self) -> dict[int, tuple[Hashable, ...]]: if self.__id_coord_names is None: id_coord_names: Mapping[int, list[Hashable]] = defaultdict(list) for k, v in self._coord_name_id.items(): id_coord_names[v].append(k) self.__id_coord_names = {k: tuple(v) for k, v in id_coord_names.items()} return self.__id_coord_names @property def variables(self) -> Mapping[Hashable, Variable]: return Frozen(self._variables) @property def dims(self) -> Mapping[Hashable, int]: from xarray.core.variable import calculate_dimensions if self._dims is None: self._dims = calculate_dimensions(self._variables) return Frozen(self._dims) def copy(self) -> Indexes: return type(self)(dict(self._indexes), dict(self._variables)) def get_unique(self) -> list[T_PandasOrXarrayIndex]: """Return a list of unique indexes, preserving order.""" unique_indexes: list[T_PandasOrXarrayIndex] = [] seen: set[int] = set() for index in self._indexes.values(): index_id = id(index) if index_id not in seen: unique_indexes.append(index) seen.add(index_id) return unique_indexes def is_multi(self, key: Hashable) -> bool: """Return True if ``key`` maps to a multi-coordinate index, False otherwise. """ return len(self._id_coord_names[self._coord_name_id[key]]) > 1 def get_all_coords( self, key: Hashable, errors: ErrorOptions = "raise" ) -> dict[Hashable, Variable]: """Return all coordinates having the same index. Parameters ---------- key : hashable Index key. errors : {"raise", "ignore"}, default: "raise" If "raise", raises a ValueError if `key` is not in indexes. If "ignore", an empty tuple is returned instead. 
Returns ------- coords : dict A dictionary of all coordinate variables having the same index. """ if errors not in ["raise", "ignore"]: raise ValueError('errors must be either "raise" or "ignore"') if key not in self._indexes: if errors == "raise": raise ValueError(f"no index found for {key!r} coordinate") else: return {} all_coord_names = self._id_coord_names[self._coord_name_id[key]] return {k: self._variables[k] for k in all_coord_names} def get_all_dims( self, key: Hashable, errors: ErrorOptions = "raise" ) -> Mapping[Hashable, int]: """Return all dimensions shared by an index. Parameters ---------- key : hashable Index key. errors : {"raise", "ignore"}, default: "raise" If "raise", raises a ValueError if `key` is not in indexes. If "ignore", an empty tuple is returned instead. Returns ------- dims : dict A dictionary of all dimensions shared by an index. """ from xarray.core.variable import calculate_dimensions return calculate_dimensions(self.get_all_coords(key, errors=errors)) def group_by_index( self, ) -> list[tuple[T_PandasOrXarrayIndex, dict[Hashable, Variable]]]: """Returns a list of unique indexes and their corresponding coordinates.""" index_coords = [] for i, index in self._id_index.items(): coords = {k: self._variables[k] for k in self._id_coord_names[i]} index_coords.append((index, coords)) return index_coords def to_pandas_indexes(self) -> Indexes[pd.Index]: """Returns an immutable proxy for Dataset or DataArray pandas indexes. Raises an error if this proxy contains indexes that cannot be coerced to pandas.Index objects. """ indexes: dict[Hashable, pd.Index] = {} for k, idx in self._indexes.items(): if isinstance(idx, pd.Index): indexes[k] = idx elif isinstance(idx, Index): indexes[k] = idx.to_pandas_index() return Indexes(indexes, self._variables, index_type=pd.Index) def copy_indexes( self, deep: bool = True, memo: dict[int, T_PandasOrXarrayIndex] | None = None ) -> tuple[dict[Hashable, T_PandasOrXarrayIndex], dict[Hashable, Variable]]: """Return a new dictionary with copies of indexes, preserving unique indexes. Parameters ---------- deep : bool, default: True Whether the indexes are deep or shallow copied onto the new object. memo : dict if object id to copied objects or None, optional To prevent infinite recursion deepcopy stores all copied elements in this dict. 
""" new_indexes: dict[Hashable, T_PandasOrXarrayIndex] = {} new_index_vars: dict[Hashable, Variable] = {} xr_idx: Index new_idx: T_PandasOrXarrayIndex for idx, coords in self.group_by_index(): if isinstance(idx, pd.Index): convert_new_idx = True dim = next(iter(coords.values())).dims[0] if isinstance(idx, pd.MultiIndex): xr_idx = PandasMultiIndex(idx, dim) else: xr_idx = PandasIndex(idx, dim) else: convert_new_idx = False xr_idx = idx new_idx = xr_idx._copy(deep=deep, memo=memo) # type: ignore[assignment] idx_vars = xr_idx.create_variables(coords) if convert_new_idx: new_idx = new_idx.index # type: ignore[attr-defined] new_indexes.update(dict.fromkeys(coords, new_idx)) new_index_vars.update(idx_vars) return new_indexes, new_index_vars def __iter__(self) -> Iterator[T_PandasOrXarrayIndex]: return iter(self._indexes) def __len__(self) -> int: return len(self._indexes) def __contains__(self, key) -> bool: return key in self._indexes def __getitem__(self, key) -> T_PandasOrXarrayIndex: return self._indexes[key] def __repr__(self): indexes = formatting._get_indexes_dict(self) return formatting.indexes_repr(indexes) def default_indexes( coords: Mapping[Any, Variable], dims: Iterable ) -> dict[Hashable, Index]: """Default indexes for a Dataset/DataArray. Parameters ---------- coords : Mapping[Any, xarray.Variable] Coordinate variables from which to draw default indexes. dims : iterable Iterable of dimension names. Returns ------- Mapping from indexing keys (levels/dimension names) to indexes used for indexing along that dimension. """ indexes: dict[Hashable, Index] = {} coord_names = set(coords) for name, var in coords.items(): if name in dims and var.ndim == 1: index, index_vars = create_default_index_implicit(var, coords) if set(index_vars) <= coord_names: indexes.update(dict.fromkeys(index_vars, index)) return indexes def _wrap_index_equals( index: Index, ) -> Callable[[Index, frozenset[Hashable]], bool]: # TODO: remove this Index.equals() wrapper (backward compatibility) sig = inspect.signature(index.equals) if len(sig.parameters) == 1: index_cls_name = type(index).__module__ + "." + type(index).__qualname__ emit_user_level_warning( f"the signature ``{index_cls_name}.equals(self, other)`` is deprecated. " f"Please update it to " f"``{index_cls_name}.equals(self, other, *, exclude=None)`` " f"or kindly ask the maintainers of ``{index_cls_name}`` to do it. " "See documentation of xarray.Index.equals() for more info.", FutureWarning, ) exclude_kwarg = False else: exclude_kwarg = True def equals_wrapper(other: Index, exclude: frozenset[Hashable]) -> bool: if exclude_kwarg: return index.equals(other, exclude=exclude) else: return index.equals(other) return equals_wrapper def indexes_equal( index: Index, other_index: Index, variable: Variable, other_variable: Variable, cache: dict[tuple[int, int], bool | None] | None = None, ) -> bool: """Check if two indexes are equal, possibly with cached results. If the two indexes are not of the same type or they do not implement equality, fallback to coordinate labels equality check. 
""" if cache is None: # dummy cache cache = {} key = (id(index), id(other_index)) equal: bool | None = None if key not in cache: if type(index) is type(other_index): try: equal = index.equals(other_index) except NotImplementedError: equal = None else: cache[key] = equal else: equal = None else: equal = cache[key] if equal is None: equal = variable.equals(other_variable) return cast(bool, equal) def indexes_all_equal( elements: Sequence[tuple[Index, dict[Hashable, Variable]]], exclude_dims: frozenset[Hashable], ) -> bool: """Check if indexes are all equal. If they are not of the same type or they do not implement this check, check if their coordinate variables are all equal instead. """ def check_variables(): variables = [e[1] for e in elements] return any( not variables[0][k].equals(other_vars[k]) for other_vars in variables[1:] for k in variables[0] ) indexes = [e[0] for e in elements] same_objects = all(indexes[0] is other_idx for other_idx in indexes[1:]) if same_objects: return True same_type = all(type(indexes[0]) is type(other_idx) for other_idx in indexes[1:]) if same_type: index_equals_func = _wrap_index_equals(indexes[0]) try: not_equal = any( not index_equals_func(other_idx, exclude_dims) for other_idx in indexes[1:] ) except NotImplementedError: not_equal = check_variables() else: not_equal = check_variables() return not not_equal def _apply_indexes_fast(indexes: Indexes[Index], args: Mapping[Any, Any], func: str): # This function avoids the call to indexes.group_by_index # which is really slow when repeatedly iterating through # an array. However, it fails to return the correct ID for # multi-index arrays indexes_fast, coords = indexes._indexes, indexes._variables new_indexes: dict[Hashable, Index] = dict(indexes_fast.items()) new_index_variables: dict[Hashable, Variable] = {} for name, index in indexes_fast.items(): coord = coords[name] if hasattr(coord, "_indexes"): index_vars = {n: coords[n] for n in coord._indexes} else: index_vars = {name: coord} index_dims = {d for var in index_vars.values() for d in var.dims} index_args = {k: v for k, v in args.items() if k in index_dims} if index_args: new_index = getattr(index, func)(index_args) if new_index is not None: new_indexes.update(dict.fromkeys(index_vars, new_index)) new_index_vars = new_index.create_variables(index_vars) new_index_variables.update(new_index_vars) else: for k in index_vars: new_indexes.pop(k, None) return new_indexes, new_index_variables def _apply_indexes( indexes: Indexes[Index], args: Mapping[Any, Any], func: str, ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: new_indexes: dict[Hashable, Index] = dict(indexes.items()) new_index_variables: dict[Hashable, Variable] = {} for index, index_vars in indexes.group_by_index(): index_dims = {d for var in index_vars.values() for d in var.dims} index_args = {k: v for k, v in args.items() if k in index_dims} if index_args: new_index = getattr(index, func)(index_args) if new_index is not None: new_indexes.update(dict.fromkeys(index_vars, new_index)) new_index_vars = new_index.create_variables(index_vars) new_index_variables.update(new_index_vars) else: for k in index_vars: new_indexes.pop(k, None) return new_indexes, new_index_variables def isel_indexes( indexes: Indexes[Index], indexers: Mapping[Any, Any], ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: # Fast path function _apply_indexes_fast does not work with multi-coordinate # Xarray indexes (see https://github.com/pydata/xarray/issues/10063). 
# -> call it only in the most common case where all indexes are default # PandasIndex each associated to a single 1-dimensional coordinate. if any(type(idx) is not PandasIndex for idx in indexes._indexes.values()): return _apply_indexes(indexes, indexers, "isel") else: return _apply_indexes_fast(indexes, indexers, "isel") def roll_indexes( indexes: Indexes[Index], shifts: Mapping[Any, int], ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: return _apply_indexes(indexes, shifts, "roll") def filter_indexes_from_coords( indexes: Mapping[Any, Index], filtered_coord_names: set, ) -> dict[Hashable, Index]: """Filter index items given a (sub)set of coordinate names. Drop all multi-coordinate related index items for any key missing in the set of coordinate names. """ filtered_indexes: dict[Any, Index] = dict(indexes) index_coord_names: dict[Hashable, set[Hashable]] = defaultdict(set) for name, idx in indexes.items(): index_coord_names[id(idx)].add(name) for idx_coord_names in index_coord_names.values(): if not idx_coord_names <= filtered_coord_names: for k in idx_coord_names: del filtered_indexes[k] return filtered_indexes def assert_no_index_corrupted( indexes: Indexes[Index], coord_names: set[Hashable], action: str = "remove coordinate(s)", ) -> None: """Assert removing coordinates or indexes will not corrupt indexes.""" # An index may be corrupted when the set of its corresponding coordinate name(s) # partially overlaps the set of coordinate names to remove for index, index_coords in indexes.group_by_index(): common_names = set(index_coords) & coord_names if common_names and len(common_names) != len(index_coords): common_names_str = ", ".join(f"{k!r}" for k in common_names) index_names_str = ", ".join(f"{k!r}" for k in index_coords) raise ValueError( f"cannot {action} {common_names_str}, which would corrupt " f"the following index built from coordinates {index_names_str}:\n" f"{index}" ) xarray-2025.09.0/xarray/core/indexing.py000066400000000000000000002324431505620616400177710ustar00rootroot00000000000000from __future__ import annotations import enum import functools import math import operator from collections import Counter, defaultdict from collections.abc import Callable, Hashable, Iterable, Mapping from contextlib import suppress from dataclasses import dataclass, field from datetime import timedelta from typing import TYPE_CHECKING, Any, cast, overload import numpy as np import pandas as pd from numpy.typing import DTypeLike from packaging.version import Version from xarray.core import duck_array_ops from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.nputils import NumpyVIndexAdapter from xarray.core.types import T_Xarray from xarray.core.utils import ( NDArrayMixin, either_dict_or_kwargs, get_valid_numpy_dtype, is_allowed_extension_array, is_allowed_extension_array_dtype, is_duck_array, is_duck_dask_array, is_full_slice, is_scalar, is_valid_numpy_dtype, to_0d_array, ) from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import array_type, integer_types, is_chunked_array if TYPE_CHECKING: from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexes import Index from xarray.core.types import Self from xarray.core.variable import Variable from xarray.namedarray._typing import _Shape, duckarray from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint BasicIndexerType = int | np.integer | slice OuterIndexerType = BasicIndexerType | np.ndarray[Any, np.dtype[np.integer]] 
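# Illustrative sketch, not used by xarray itself: the aliases above describe the
# per-axis values accepted by basic vs. outer indexing (see the BasicIndexer and
# OuterIndexer classes defined below). In this hypothetical key the integer drops
# its axis, the slice keeps it, and the 1-d integer array selects positions
# orthogonally.
_example_outer_key: tuple[OuterIndexerType, ...] = (
    3,  # int: a single position along the first axis
    slice(2, 10),  # slice: basic indexing along the second axis
    np.array([1, 3, 5]),  # 1-d integer array: outer (orthogonal) indexing
)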
@dataclass class IndexSelResult: """Index query results. Attributes ---------- dim_indexers: dict A dictionary where keys are array dimensions and values are location-based indexers. indexes: dict, optional New indexes to replace in the resulting DataArray or Dataset. variables : dict, optional New variables to replace in the resulting DataArray or Dataset. drop_coords : list, optional Coordinate(s) to drop in the resulting DataArray or Dataset. drop_indexes : list, optional Index(es) to drop in the resulting DataArray or Dataset. rename_dims : dict, optional A dictionary in the form ``{old_dim: new_dim}`` for dimension(s) to rename in the resulting DataArray or Dataset. """ dim_indexers: dict[Any, Any] indexes: dict[Any, Index] = field(default_factory=dict) variables: dict[Any, Variable] = field(default_factory=dict) drop_coords: list[Hashable] = field(default_factory=list) drop_indexes: list[Hashable] = field(default_factory=list) rename_dims: dict[Any, Hashable] = field(default_factory=dict) def as_tuple(self): """Unlike ``dataclasses.astuple``, return a shallow copy. See https://stackoverflow.com/a/51802661 """ return ( self.dim_indexers, self.indexes, self.variables, self.drop_coords, self.drop_indexes, self.rename_dims, ) def merge_sel_results(results: list[IndexSelResult]) -> IndexSelResult: all_dims_count = Counter([dim for res in results for dim in res.dim_indexers]) duplicate_dims = {k: v for k, v in all_dims_count.items() if v > 1} if duplicate_dims: # TODO: this message is not right when combining indexe(s) queries with # location-based indexing on a dimension with no dimension-coordinate (failback) fmt_dims = [ f"{dim!r}: {count} indexes involved" for dim, count in duplicate_dims.items() ] raise ValueError( "Xarray does not support label-based selection with more than one index " "over the following dimension(s):\n" + "\n".join(fmt_dims) + "\nSuggestion: use a multi-index for each of those dimension(s)." ) dim_indexers = {} indexes = {} variables = {} drop_coords = [] drop_indexes = [] rename_dims = {} for res in results: dim_indexers.update(res.dim_indexers) indexes.update(res.indexes) variables.update(res.variables) drop_coords += res.drop_coords drop_indexes += res.drop_indexes rename_dims.update(res.rename_dims) return IndexSelResult( dim_indexers, indexes, variables, drop_coords, drop_indexes, rename_dims ) def group_indexers_by_index( obj: T_Xarray, indexers: Mapping[Any, Any], options: Mapping[str, Any], ) -> list[tuple[Index, dict[Any, Any]]]: """Returns a list of unique indexes and their corresponding indexers.""" unique_indexes = {} grouped_indexers: Mapping[int | None, dict] = defaultdict(dict) for key, label in indexers.items(): index: Index = obj.xindexes.get(key, None) if index is not None: index_id = id(index) unique_indexes[index_id] = index grouped_indexers[index_id][key] = label elif key in obj.coords: raise KeyError(f"no index found for coordinate {key!r}") elif key not in obj.dims: raise KeyError( f"{key!r} is not a valid dimension or coordinate for " f"{obj.__class__.__name__} with dimensions {obj.dims!r}" ) elif len(options): raise ValueError( f"cannot supply selection options {options!r} for dimension {key!r}" "that has no associated coordinate or index" ) else: # key is a dimension without a "dimension-coordinate" # failback to location-based selection # TODO: depreciate this implicit behavior and suggest using isel instead? 
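            # all such dimensions share the single ``None`` group entry assigned below;
            # map_index_queries() then forwards their labels unchanged as
            # location-based (positional) indexers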
unique_indexes[None] = None grouped_indexers[None][key] = label return [(unique_indexes[k], grouped_indexers[k]) for k in unique_indexes] def map_index_queries( obj: T_Xarray, indexers: Mapping[Any, Any], method=None, tolerance: int | float | Iterable[int | float] | None = None, **indexers_kwargs: Any, ) -> IndexSelResult: """Execute index queries from a DataArray / Dataset and label-based indexers and return the (merged) query results. """ from xarray.core.dataarray import DataArray # TODO benbovy - flexible indexes: remove when custom index options are available if method is None and tolerance is None: options = {} else: options = {"method": method, "tolerance": tolerance} indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "map_index_queries") grouped_indexers = group_indexers_by_index(obj, indexers, options) results = [] for index, labels in grouped_indexers: if index is None: # forward dimension indexers with no index/coordinate results.append(IndexSelResult(labels)) else: results.append(index.sel(labels, **options)) merged = merge_sel_results(results) # drop dimension coordinates found in dimension indexers # (also drop multi-index if any) # (.sel() already ensures alignment) for k, v in merged.dim_indexers.items(): if isinstance(v, DataArray): if k in v._indexes: v = v.reset_index(k) drop_coords = [name for name in v._coords if name in merged.dim_indexers] merged.dim_indexers[k] = v.drop_vars(drop_coords) return merged def expanded_indexer(key, ndim): """Given a key for indexing an ndarray, return an equivalent key which is a tuple with length equal to the number of dimensions. The expansion is done by replacing all `Ellipsis` items with the right number of full slices and then padding the key with full slices so that it reaches the appropriate dimensionality. """ if not isinstance(key, tuple): # numpy treats non-tuple keys equivalent to tuples of length 1 key = (key,) new_key = [] # handling Ellipsis right is a little tricky, see: # https://numpy.org/doc/stable/reference/arrays.indexing.html#advanced-indexing found_ellipsis = False for k in key: if k is Ellipsis: if not found_ellipsis: new_key.extend((ndim + 1 - len(key)) * [slice(None)]) found_ellipsis = True else: new_key.append(slice(None)) else: new_key.append(k) if len(new_key) > ndim: raise IndexError("too many indices") new_key.extend((ndim - len(new_key)) * [slice(None)]) return tuple(new_key) def normalize_slice(sl: slice, size: int) -> slice: """ Ensure that given slice only contains positive start and stop values (stop can be -1 for full-size slices with negative steps, e.g. [-10::-1]) Examples -------- >>> normalize_slice(slice(0, 9), 10) slice(0, 9, 1) >>> normalize_slice(slice(0, -1), 10) slice(0, 9, 1) """ return slice(*sl.indices(size)) def _expand_slice(slice_: slice, size: int) -> np.ndarray[Any, np.dtype[np.integer]]: """ Expand slice to an array containing only positive integers. 
Examples -------- >>> _expand_slice(slice(0, 9), 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8]) >>> _expand_slice(slice(0, -1), 10) array([0, 1, 2, 3, 4, 5, 6, 7, 8]) """ sl = normalize_slice(slice_, size) return np.arange(sl.start, sl.stop, sl.step) def slice_slice(old_slice: slice, applied_slice: slice, size: int) -> slice: """Given a slice and the size of the dimension to which it will be applied, index it with another slice to return a new slice equivalent to applying the slices sequentially """ old_slice = normalize_slice(old_slice, size) size_after_old_slice = len(range(old_slice.start, old_slice.stop, old_slice.step)) if size_after_old_slice == 0: # nothing left after applying first slice return slice(0) applied_slice = normalize_slice(applied_slice, size_after_old_slice) start = old_slice.start + applied_slice.start * old_slice.step if start < 0: # nothing left after applying second slice # (can only happen for old_slice.step < 0, e.g. [10::-1], [20:]) return slice(0) stop = old_slice.start + applied_slice.stop * old_slice.step if stop < 0: stop = None step = old_slice.step * applied_slice.step return slice(start, stop, step) def normalize_array( array: np.ndarray[Any, np.dtype[np.integer]], size: int ) -> np.ndarray[Any, np.dtype[np.integer]]: """ Ensure that the given array only contains positive values. Examples -------- >>> normalize_array(np.array([-1, -2, -3, -4]), 10) array([9, 8, 7, 6]) >>> normalize_array(np.array([-5, 3, 5, -1, 8]), 12) array([ 7, 3, 5, 11, 8]) """ if np.issubdtype(array.dtype, np.unsignedinteger): return array return np.where(array >= 0, array, array + size) def slice_slice_by_array( old_slice: slice, array: np.ndarray[Any, np.dtype[np.integer]], size: int, ) -> np.ndarray[Any, np.dtype[np.integer]]: """Given a slice and the size of the dimension to which it will be applied, index it with an array to return a new array equivalent to applying the slices sequentially Examples -------- >>> slice_slice_by_array(slice(2, 10), np.array([1, 3, 5]), 12) array([3, 5, 7]) >>> slice_slice_by_array(slice(1, None, 2), np.array([1, 3, 7, 8]), 20) array([ 3, 7, 15, 17]) >>> slice_slice_by_array(slice(None, None, -1), np.array([2, 4, 7]), 20) array([17, 15, 12]) """ # to get a concrete slice, limited to the size of the array normalized_slice = normalize_slice(old_slice, size) size_after_slice = len(range(*normalized_slice.indices(size))) normalized_array = normalize_array(array, size_after_slice) new_indexer = normalized_array * normalized_slice.step + normalized_slice.start if np.any(new_indexer >= size): raise IndexError("indices out of bounds") # TODO: more helpful error message return new_indexer def _index_indexer_1d( old_indexer: OuterIndexerType, applied_indexer: OuterIndexerType, size: int, ) -> OuterIndexerType: if is_full_slice(applied_indexer): # shortcut for the usual case return old_indexer if is_full_slice(old_indexer): # shortcut for full slices return applied_indexer indexer: OuterIndexerType if isinstance(old_indexer, slice): if isinstance(applied_indexer, slice): indexer = slice_slice(old_indexer, applied_indexer, size) elif isinstance(applied_indexer, integer_types): indexer = range(*old_indexer.indices(size))[applied_indexer] else: indexer = slice_slice_by_array(old_indexer, applied_indexer, size) elif isinstance(old_indexer, np.ndarray): indexer = old_indexer[applied_indexer] else: # should be unreachable raise ValueError("cannot index integers. Please open an issuec-") return indexer class ExplicitIndexer: """Base class for explicit indexer objects. 
ExplicitIndexer objects wrap a tuple of values given by their ``tuple`` property. These tuples should always have length equal to the number of dimensions on the indexed array. Do not instantiate BaseIndexer objects directly: instead, use one of the sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer. """ __slots__ = ("_key",) def __init__(self, key: tuple[Any, ...]): if type(self) is ExplicitIndexer: raise TypeError("cannot instantiate base ExplicitIndexer objects") self._key = tuple(key) @property def tuple(self) -> tuple[Any, ...]: return self._key def __repr__(self) -> str: return f"{type(self).__name__}({self.tuple})" @overload def as_integer_or_none(value: int) -> int: ... @overload def as_integer_or_none(value: None) -> None: ... def as_integer_or_none(value: int | None) -> int | None: return None if value is None else operator.index(value) def as_integer_slice(value: slice) -> slice: start = as_integer_or_none(value.start) stop = as_integer_or_none(value.stop) step = as_integer_or_none(value.step) return slice(start, stop, step) class IndexCallable: """Provide getitem and setitem syntax for callable objects.""" __slots__ = ("getter", "setter") def __init__( self, getter: Callable[..., Any], setter: Callable[..., Any] | None = None ): self.getter = getter self.setter = setter def __getitem__(self, key: Any) -> Any: return self.getter(key) def __setitem__(self, key: Any, value: Any) -> None: if self.setter is None: raise NotImplementedError( "Setting values is not supported for this indexer." ) self.setter(key, value) class BasicIndexer(ExplicitIndexer): """Tuple for basic indexing. All elements should be int or slice objects. Indexing follows NumPy's rules for basic indexing: each axis is independently sliced and axes indexed with an integer are dropped from the result. """ __slots__ = () def __init__(self, key: tuple[BasicIndexerType, ...]): if not isinstance(key, tuple): raise TypeError(f"key must be a tuple: {key!r}") new_key = [] for k in key: if isinstance(k, integer_types): k = int(k) elif isinstance(k, slice): k = as_integer_slice(k) else: raise TypeError( f"unexpected indexer type for {type(self).__name__}: {k!r}" ) new_key.append(k) super().__init__(tuple(new_key)) class OuterIndexer(ExplicitIndexer): """Tuple for outer/orthogonal indexing. All elements should be int, slice or 1-dimensional np.ndarray objects with an integer dtype. Indexing is applied independently along each axis, and axes indexed with an integer are dropped from the result. This type of indexing works like MATLAB/Fortran. """ __slots__ = () def __init__( self, key: tuple[BasicIndexerType | np.ndarray[Any, np.dtype[np.generic]], ...], ): if not isinstance(key, tuple): raise TypeError(f"key must be a tuple: {key!r}") new_key = [] for k in key: if isinstance(k, integer_types) and not isinstance(k, bool): k = int(k) elif isinstance(k, slice): k = as_integer_slice(k) elif is_duck_array(k): if not np.issubdtype(k.dtype, np.integer): raise TypeError( f"invalid indexer array, does not have integer dtype: {k!r}" ) if k.ndim > 1: # type: ignore[union-attr] raise TypeError( f"invalid indexer array for {type(self).__name__}; must be scalar " f"or have 1 dimension: {k!r}" ) k = duck_array_ops.astype(k, np.int64, copy=False) else: raise TypeError( f"unexpected indexer type for {type(self).__name__}: {k!r}, {type(k)}" ) new_key.append(k) super().__init__(tuple(new_key)) class VectorizedIndexer(ExplicitIndexer): """Tuple for vectorized indexing. 
All elements should be slice or N-dimensional np.ndarray objects with an integer dtype and the same number of dimensions. Indexing follows proposed rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules (including broadcasting) except sliced axes are always moved to the end: https://github.com/numpy/numpy/pull/6256 """ __slots__ = () def __init__(self, key: tuple[slice | np.ndarray[Any, np.dtype[np.generic]], ...]): if not isinstance(key, tuple): raise TypeError(f"key must be a tuple: {key!r}") new_key = [] ndim = None for k in key: if isinstance(k, slice): k = as_integer_slice(k) elif is_duck_array(k): if not np.issubdtype(k.dtype, np.integer): raise TypeError( f"invalid indexer array, does not have integer dtype: {k!r}" ) if ndim is None: ndim = k.ndim # type: ignore[union-attr] elif ndim != k.ndim: # type: ignore[union-attr] ndims = [k.ndim for k in key if isinstance(k, np.ndarray)] raise ValueError( "invalid indexer key: ndarray arguments " f"have different numbers of dimensions: {ndims}" ) k = duck_array_ops.astype(k, np.int64, copy=False) else: raise TypeError( f"unexpected indexer type for {type(self).__name__}: {k!r}" ) new_key.append(k) super().__init__(tuple(new_key)) class ExplicitlyIndexed: """Mixin to mark support for Indexer subclasses in indexing.""" __slots__ = () def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: # Leave casting to an array up to the underlying array type. if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy) else: return np.asarray(self.get_duck_array(), dtype=dtype) def get_duck_array(self): return self.array class ExplicitlyIndexedNDArrayMixin(NDArrayMixin, ExplicitlyIndexed): __slots__ = () def get_duck_array(self): raise NotImplementedError async def async_get_duck_array(self): raise NotImplementedError def _oindex_get(self, indexer: OuterIndexer): raise NotImplementedError( f"{self.__class__.__name__}._oindex_get method should be overridden" ) def _vindex_get(self, indexer: VectorizedIndexer): raise NotImplementedError( f"{self.__class__.__name__}._vindex_get method should be overridden" ) def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: raise NotImplementedError( f"{self.__class__.__name__}._oindex_set method should be overridden" ) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: raise NotImplementedError( f"{self.__class__.__name__}._vindex_set method should be overridden" ) def _check_and_raise_if_non_basic_indexer(self, indexer: ExplicitIndexer) -> None: if isinstance(indexer, VectorizedIndexer | OuterIndexer): raise TypeError( "Vectorized indexing with vectorized or outer indexers is not supported. " "Please use .vindex and .oindex properties to index the array." ) @property def oindex(self) -> IndexCallable: return IndexCallable(self._oindex_get, self._oindex_set) @property def vindex(self) -> IndexCallable: return IndexCallable(self._vindex_get, self._vindex_set) class IndexingAdapter(ExplicitlyIndexedNDArrayMixin): """Marker class for indexing adapters. These classes translate between Xarray's indexing semantics and the underlying array's indexing semantics. 
""" def get_duck_array(self): key = BasicIndexer((slice(None),) * self.ndim) return self[key] async def async_get_duck_array(self): """These classes are applied to in-memory arrays, so specific async support isn't needed.""" return self.get_duck_array() class ImplicitToExplicitIndexingAdapter(NDArrayMixin): """Wrap an array, converting tuples into the indicated explicit indexer.""" __slots__ = ("array", "indexer_cls") def __init__(self, array, indexer_cls: type[ExplicitIndexer] = BasicIndexer): self.array = as_indexable(array) self.indexer_cls = indexer_cls def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy) else: return np.asarray(self.get_duck_array(), dtype=dtype) def get_duck_array(self): return self.array.get_duck_array() def __getitem__(self, key: Any): key = expanded_indexer(key, self.ndim) indexer = self.indexer_cls(key) result = apply_indexer(self.array, indexer) if isinstance(result, ExplicitlyIndexed): return type(self)(result, self.indexer_cls) else: # Sometimes explicitly indexed arrays return NumPy arrays or # scalars. return result class LazilyIndexedArray(ExplicitlyIndexedNDArrayMixin): """Wrap an array to make basic and outer indexing lazy.""" __slots__ = ("_shape", "array", "key") def __init__(self, array: Any, key: ExplicitIndexer | None = None): """ Parameters ---------- array : array_like Array like object to index. key : ExplicitIndexer, optional Array indexer. If provided, it is assumed to already be in canonical expanded form. """ if isinstance(array, type(self)) and key is None: # unwrap key = array.key # type: ignore[has-type] array = array.array # type: ignore[has-type] if key is None: key = BasicIndexer((slice(None),) * array.ndim) self.array = as_indexable(array) self.key = key shape: _Shape = () for size, k in zip(self.array.shape, self.key.tuple, strict=True): if isinstance(k, slice): shape += (len(range(*k.indices(size))),) elif isinstance(k, np.ndarray): shape += (k.size,) self._shape = shape def _updated_key(self, new_key: ExplicitIndexer) -> BasicIndexer | OuterIndexer: iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim)) full_key: list[OuterIndexerType] = [] for size, k in zip(self.array.shape, self.key.tuple, strict=True): if isinstance(k, integer_types): full_key.append(k) else: full_key.append(_index_indexer_1d(k, next(iter_new_key), size)) full_key_tuple = tuple(full_key) if all(isinstance(k, integer_types + (slice,)) for k in full_key_tuple): return BasicIndexer(cast(tuple[BasicIndexerType, ...], full_key_tuple)) return OuterIndexer(full_key_tuple) @property def shape(self) -> _Shape: return self._shape def get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = self.array[self.key] else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = array.get_duck_array() return _wrap_numpy_scalars(array) async def async_get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = await self.array.async_getitem(self.key) else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = await array.async_get_duck_array() return _wrap_numpy_scalars(array) def transpose(self, order): return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order) def _oindex_get(self, indexer: 
OuterIndexer): return type(self)(self.array, self._updated_key(indexer)) def _vindex_get(self, indexer: VectorizedIndexer): array = LazilyVectorizedIndexedArray(self.array, self.key) return array.vindex[indexer] def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return type(self)(self.array, self._updated_key(indexer)) def _vindex_set(self, key: VectorizedIndexer, value: Any) -> None: raise NotImplementedError( "Lazy item assignment with the vectorized indexer is not yet " "implemented. Load your data first by .load() or compute()." ) def _oindex_set(self, key: OuterIndexer, value: Any) -> None: full_key = self._updated_key(key) self.array.oindex[full_key] = value def __setitem__(self, key: BasicIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(key) full_key = self._updated_key(key) self.array[full_key] = value def __repr__(self) -> str: return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})" # keep an alias to the old name for external backends pydata/xarray#5111 LazilyOuterIndexedArray = LazilyIndexedArray class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin): """Wrap an array to make vectorized indexing lazy.""" __slots__ = ("array", "key") def __init__(self, array: duckarray[Any, Any], key: ExplicitIndexer): """ Parameters ---------- array : array_like Array like object to index. key : VectorizedIndexer """ if isinstance(key, BasicIndexer | OuterIndexer): self.key = _outer_to_vectorized_indexer(key, array.shape) elif isinstance(key, VectorizedIndexer): self.key = _arrayize_vectorized_indexer(key, array.shape) self.array = as_indexable(array) @property def shape(self) -> _Shape: return np.broadcast(*self.key.tuple).shape def get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = self.array[self.key] else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = array.get_duck_array() return _wrap_numpy_scalars(array) async def async_get_duck_array(self): from xarray.backends.common import BackendArray if isinstance(self.array, BackendArray): array = await self.array.async_getitem(self.key) else: array = apply_indexer(self.array, self.key) if isinstance(array, ExplicitlyIndexed): array = await array.async_get_duck_array() return _wrap_numpy_scalars(array) def _updated_key(self, new_key: ExplicitIndexer): return _combine_indexers(self.key, self.shape, new_key) def _oindex_get(self, indexer: OuterIndexer): return type(self)(self.array, self._updated_key(indexer)) def _vindex_get(self, indexer: VectorizedIndexer): return type(self)(self.array, self._updated_key(indexer)) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) # If the indexed array becomes a scalar, return LazilyIndexedArray if all(isinstance(ind, integer_types) for ind in indexer.tuple): key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple)) return LazilyIndexedArray(self.array, key) return type(self)(self.array, self._updated_key(indexer)) def transpose(self, order): key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple)) return type(self)(self.array, key) def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: raise NotImplementedError( "Lazy item assignment with the vectorized indexer is not yet " "implemented. Load your data first by .load() or compute()." 
) def __repr__(self) -> str: return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})" def _wrap_numpy_scalars(array): """Wrap NumPy scalars in 0d arrays.""" ndim = duck_array_ops.ndim(array) if ndim == 0 and ( isinstance(array, np.generic) or not (is_duck_array(array) or isinstance(array, NDArrayMixin)) ): return np.array(array) elif hasattr(array, "dtype"): return array elif ndim == 0: return np.array(array) else: return array class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin): __slots__ = ("_copied", "array") def __init__(self, array: duckarray[Any, Any]): self.array = as_indexable(array) self._copied = False def _ensure_copied(self): if not self._copied: self.array = as_indexable(np.array(self.array)) self._copied = True def get_duck_array(self): return self.array.get_duck_array() async def async_get_duck_array(self): return await self.array.async_get_duck_array() def _oindex_get(self, indexer: OuterIndexer): return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer])) def _vindex_get(self, indexer: VectorizedIndexer): return type(self)(_wrap_numpy_scalars(self.array.vindex[indexer])) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return type(self)(_wrap_numpy_scalars(self.array[indexer])) def transpose(self, order): return self.array.transpose(order) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: self._ensure_copied() self.array.vindex[indexer] = value def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: self._ensure_copied() self.array.oindex[indexer] = value def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self._ensure_copied() self.array[indexer] = value def __deepcopy__(self, memo): # CopyOnWriteArray is used to wrap backend array objects, which might # point to files on disk, so we can't rely on the default deepcopy # implementation. return type(self)(self.array) class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin): __slots__ = ("array",) def __init__(self, array): self.array = _wrap_numpy_scalars(as_indexable(array)) def get_duck_array(self): duck_array = self.array.get_duck_array() # ensure the array object is cached in-memory self.array = as_indexable(duck_array) return duck_array async def async_get_duck_array(self): duck_array = await self.array.async_get_duck_array() # ensure the array object is cached in-memory self.array = as_indexable(duck_array) return duck_array def _oindex_get(self, indexer: OuterIndexer): return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer])) def _vindex_get(self, indexer: VectorizedIndexer): return type(self)(_wrap_numpy_scalars(self.array.vindex[indexer])) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return type(self)(_wrap_numpy_scalars(self.array[indexer])) def transpose(self, order): return self.array.transpose(order) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: self.array.vindex[indexer] = value def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: self.array.oindex[indexer] = value def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self.array[indexer] = value def as_indexable(array): """ This function always returns a ExplicitlyIndexed subclass, so that the vectorized indexing is always possible with the returned object. 
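    A short, illustrative sketch of the dispatch (see the function body for the
    full set of wrapped types):

    >>> type(as_indexable(np.arange(3))).__name__
    'NumpyIndexingAdapter'
    >>> type(as_indexable(pd.Index([1, 2, 3]))).__name__
    'PandasIndexingAdapter'

    Objects that are already ``ExplicitlyIndexed`` (for example the lazy
    wrappers above, such as ``LazilyIndexedArray`` or ``CopyOnWriteArray``)
    are returned unchanged.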
""" if isinstance(array, ExplicitlyIndexed): return array if isinstance(array, np.ndarray): return NumpyIndexingAdapter(array) if isinstance(array, pd.Index): return PandasIndexingAdapter(array) if is_duck_dask_array(array): return DaskIndexingAdapter(array) if hasattr(array, "__array_namespace__"): return ArrayApiIndexingAdapter(array) if hasattr(array, "__array_function__"): return NdArrayLikeIndexingAdapter(array) raise TypeError(f"Invalid array type: {type(array)}") def _outer_to_vectorized_indexer( indexer: BasicIndexer | OuterIndexer, shape: _Shape ) -> VectorizedIndexer: """Convert an OuterIndexer into an vectorized indexer. Parameters ---------- indexer : Outer/Basic Indexer An indexer to convert. shape : tuple Shape of the array subject to the indexing. Returns ------- VectorizedIndexer Tuple suitable for use to index a NumPy array with vectorized indexing. Each element is an array: broadcasting them together gives the shape of the result. """ key = indexer.tuple n_dim = len([k for k in key if not isinstance(k, integer_types)]) i_dim = 0 new_key = [] for k, size in zip(key, shape, strict=True): if isinstance(k, integer_types): new_key.append(np.array(k).reshape((1,) * n_dim)) else: # np.ndarray or slice if isinstance(k, slice): k = np.arange(*k.indices(size)) assert k.dtype.kind in {"i", "u"} new_shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)] new_key.append(k.reshape(*new_shape)) i_dim += 1 return VectorizedIndexer(tuple(new_key)) def _outer_to_numpy_indexer(indexer: BasicIndexer | OuterIndexer, shape: _Shape): """Convert an OuterIndexer into an indexer for NumPy. Parameters ---------- indexer : Basic/OuterIndexer An indexer to convert. shape : tuple Shape of the array subject to the indexing. Returns ------- tuple Tuple suitable for use to index a NumPy array. """ if len([k for k in indexer.tuple if not isinstance(k, slice)]) <= 1: # If there is only one vector and all others are slice, # it can be safely used in mixed basic/advanced indexing. # Boolean index should already be converted to integer array. return indexer.tuple else: return _outer_to_vectorized_indexer(indexer, shape).tuple def _combine_indexers(old_key, shape: _Shape, new_key) -> VectorizedIndexer: """Combine two indexers. Parameters ---------- old_key : ExplicitIndexer The first indexer for the original array shape : tuple of ints Shape of the original array to be indexed by old_key new_key The second indexer for indexing original[old_key] """ if not isinstance(old_key, VectorizedIndexer): old_key = _outer_to_vectorized_indexer(old_key, shape) if len(old_key.tuple) == 0: return new_key new_shape = np.broadcast(*old_key.tuple).shape if isinstance(new_key, VectorizedIndexer): new_key = _arrayize_vectorized_indexer(new_key, new_shape) else: new_key = _outer_to_vectorized_indexer(new_key, new_shape) return VectorizedIndexer( tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple)) ) @enum.unique class IndexingSupport(enum.Enum): # for backends that support only basic indexer BASIC = 0 # for backends that support basic / outer indexer OUTER = 1 # for backends that support outer indexer including at most 1 vector. OUTER_1VECTOR = 2 # for backends that support full vectorized indexer. VECTORIZED = 3 def explicit_indexing_adapter( key: ExplicitIndexer, shape: _Shape, indexing_support: IndexingSupport, raw_indexing_method: Callable[..., Any], ) -> Any: """Support explicit indexing by delegating to a raw indexing method. 
Outer and/or vectorized indexers are supported by indexing a second time with a NumPy array. Parameters ---------- key : ExplicitIndexer Explicit indexing object. shape : Tuple[int, ...] Shape of the indexed array. indexing_support : IndexingSupport enum Form of indexing supported by raw_indexing_method. raw_indexing_method : callable Function (like ndarray.__getitem__) that when called with indexing key in the form of a tuple returns an indexed array. Returns ------- Indexing result, in the form of a duck numpy-array. """ raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support) result = raw_indexing_method(raw_key.tuple) if numpy_indices.tuple: # index the loaded duck array indexable = as_indexable(result) result = apply_indexer(indexable, numpy_indices) return result async def async_explicit_indexing_adapter( key: ExplicitIndexer, shape: _Shape, indexing_support: IndexingSupport, raw_indexing_method: Callable[..., Any], ) -> Any: raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support) result = await raw_indexing_method(raw_key.tuple) if numpy_indices.tuple: # index the loaded duck array indexable = as_indexable(result) result = apply_indexer(indexable, numpy_indices) return result def apply_indexer(indexable, indexer: ExplicitIndexer): """Apply an indexer to an indexable object.""" if isinstance(indexer, VectorizedIndexer): return indexable.vindex[indexer] elif isinstance(indexer, OuterIndexer): return indexable.oindex[indexer] else: return indexable[indexer] def set_with_indexer(indexable, indexer: ExplicitIndexer, value: Any) -> None: """Set values in an indexable object using an indexer.""" if isinstance(indexer, VectorizedIndexer): indexable.vindex[indexer] = value elif isinstance(indexer, OuterIndexer): indexable.oindex[indexer] = value else: indexable[indexer] = value def decompose_indexer( indexer: ExplicitIndexer, shape: _Shape, indexing_support: IndexingSupport ) -> tuple[ExplicitIndexer, ExplicitIndexer]: if isinstance(indexer, VectorizedIndexer): return _decompose_vectorized_indexer(indexer, shape, indexing_support) if isinstance(indexer, BasicIndexer | OuterIndexer): return _decompose_outer_indexer(indexer, shape, indexing_support) raise TypeError(f"unexpected key type: {indexer}") def _decompose_slice(key: slice, size: int) -> tuple[slice, slice]: """convert a slice to successive two slices. The first slice always has a positive step. >>> _decompose_slice(slice(2, 98, 2), 99) (slice(2, 98, 2), slice(None, None, None)) >>> _decompose_slice(slice(98, 2, -2), 99) (slice(4, 99, 2), slice(None, None, -1)) >>> _decompose_slice(slice(98, 2, -2), 98) (slice(3, 98, 2), slice(None, None, -1)) >>> _decompose_slice(slice(360, None, -10), 361) (slice(0, 361, 10), slice(None, None, -1)) """ start, stop, step = key.indices(size) if step > 0: # If key already has a positive step, use it as is in the backend return key, slice(None) else: # determine stop precisely for step > 1 case # Use the range object to do the calculation # e.g. [98:2:-2] -> [98:3:-2] exact_stop = range(start, stop, step)[-1] return slice(exact_stop, start + 1, -step), slice(None, None, -1) def _decompose_vectorized_indexer( indexer: VectorizedIndexer, shape: _Shape, indexing_support: IndexingSupport, ) -> tuple[ExplicitIndexer, ExplicitIndexer]: """ Decompose vectorized indexer to the successive two indexers, where the first indexer will be used to index backend arrays, while the second one is used to index loaded on-memory np.ndarray. 
Parameters ---------- indexer : VectorizedIndexer indexing_support : one of IndexerSupport entries Returns ------- backend_indexer: OuterIndexer or BasicIndexer np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer) Notes ----- This function is used to realize the vectorized indexing for the backend arrays that only support basic or outer indexing. As an example, let us consider to index a few elements from a backend array with a vectorized indexer ([0, 3, 1], [2, 3, 2]). Even if the backend array only supports outer indexing, it is more efficient to load a subslice of the array than loading the entire array, >>> array = np.arange(36).reshape(6, 6) >>> backend_indexer = OuterIndexer((np.array([0, 1, 3]), np.array([2, 3]))) >>> # load subslice of the array ... array = NumpyIndexingAdapter(array).oindex[backend_indexer] >>> np_indexer = VectorizedIndexer((np.array([0, 2, 1]), np.array([0, 1, 0]))) >>> # vectorized indexing for on-memory np.ndarray. ... NumpyIndexingAdapter(array).vindex[np_indexer] array([ 2, 21, 8]) """ assert isinstance(indexer, VectorizedIndexer) if indexing_support is IndexingSupport.VECTORIZED: return indexer, BasicIndexer(()) backend_indexer_elems = [] np_indexer_elems = [] # convert negative indices indexer_elems = [ np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k for k, s in zip(indexer.tuple, shape, strict=True) ] for k, s in zip(indexer_elems, shape, strict=True): if isinstance(k, slice): # If it is a slice, then we will slice it as-is # (but make its step positive) in the backend, # and then use all of it (slice(None)) for the in-memory portion. bk_slice, np_slice = _decompose_slice(k, s) backend_indexer_elems.append(bk_slice) np_indexer_elems.append(np_slice) else: # If it is a (multidimensional) np.ndarray, just pickup the used # keys without duplication and store them as a 1d-np.ndarray. oind, vind = np.unique(k, return_inverse=True) backend_indexer_elems.append(oind) np_indexer_elems.append(vind.reshape(*k.shape)) backend_indexer = OuterIndexer(tuple(backend_indexer_elems)) np_indexer = VectorizedIndexer(tuple(np_indexer_elems)) if indexing_support is IndexingSupport.OUTER: return backend_indexer, np_indexer # If the backend does not support outer indexing, # backend_indexer (OuterIndexer) is also decomposed. backend_indexer1, np_indexer1 = _decompose_outer_indexer( backend_indexer, shape, indexing_support ) np_indexer = _combine_indexers(np_indexer1, shape, np_indexer) return backend_indexer1, np_indexer def _decompose_outer_indexer( indexer: BasicIndexer | OuterIndexer, shape: _Shape, indexing_support: IndexingSupport, ) -> tuple[ExplicitIndexer, ExplicitIndexer]: """ Decompose outer indexer to the successive two indexers, where the first indexer will be used to index backend arrays, while the second one is used to index the loaded on-memory np.ndarray. Parameters ---------- indexer : OuterIndexer or BasicIndexer indexing_support : One of the entries of IndexingSupport Returns ------- backend_indexer: OuterIndexer or BasicIndexer np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer) Notes ----- This function is used to realize the vectorized indexing for the backend arrays that only support basic or outer indexing. As an example, let us consider to index a few elements from a backend array with a orthogonal indexer ([0, 3, 1], [2, 3, 2]). 
Even if the backend array only supports basic indexing, it is more efficient to load a subslice of the array than loading the entire array, >>> array = np.arange(36).reshape(6, 6) >>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4))) >>> # load subslice of the array ... array = NumpyIndexingAdapter(array)[backend_indexer] >>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0]))) >>> # outer indexing for on-memory np.ndarray. ... NumpyIndexingAdapter(array).oindex[np_indexer] array([[ 2, 3, 2], [14, 15, 14], [ 8, 9, 8]]) """ backend_indexer: list[Any] = [] np_indexer: list[Any] = [] assert isinstance(indexer, OuterIndexer | BasicIndexer) if indexing_support == IndexingSupport.VECTORIZED: for k, s in zip(indexer.tuple, shape, strict=False): if isinstance(k, slice): # If it is a slice, then we will slice it as-is # (but make its step positive) in the backend, bk_slice, np_slice = _decompose_slice(k, s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) else: backend_indexer.append(k) if not is_scalar(k): np_indexer.append(slice(None)) return type(indexer)(tuple(backend_indexer)), BasicIndexer(tuple(np_indexer)) # make indexer positive pos_indexer: list[np.ndarray | int | np.number] = [] for k, s in zip(indexer.tuple, shape, strict=False): if isinstance(k, np.ndarray): pos_indexer.append(np.where(k < 0, k + s, k)) elif isinstance(k, integer_types) and k < 0: pos_indexer.append(k + s) else: pos_indexer.append(k) indexer_elems = pos_indexer if indexing_support is IndexingSupport.OUTER_1VECTOR: # some backends such as h5py supports only 1 vector in indexers # We choose the most efficient axis gains = [ ( (np.max(k) - np.min(k) + 1.0) / len(np.unique(k)) if isinstance(k, np.ndarray) else 0 ) for k in indexer_elems ] array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None for i, (k, s) in enumerate(zip(indexer_elems, shape, strict=False)): if isinstance(k, np.ndarray) and i != array_index: # np.ndarray key is converted to slice that covers the entire # entries of this key. 
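                # For example (illustrative values): a key array [4, 1, 3] on an axis
                # other than the chosen vector axis becomes the backend indexer
                # slice(1, 5) plus the in-memory key [3, 0, 2] (positions shifted by min(k)).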
backend_indexer.append(slice(np.min(k), np.max(k) + 1)) np_indexer.append(k - np.min(k)) elif isinstance(k, np.ndarray): # Remove duplicates and sort them in the increasing order pkey, ekey = np.unique(k, return_inverse=True) backend_indexer.append(pkey) np_indexer.append(ekey) elif isinstance(k, integer_types): backend_indexer.append(k) else: # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(cast(slice, k), s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) if indexing_support == IndexingSupport.OUTER: for k, s in zip(indexer_elems, shape, strict=False): if isinstance(k, slice): # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(k, s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) elif isinstance(k, integer_types): backend_indexer.append(k) elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all(): backend_indexer.append(k) np_indexer.append(slice(None)) else: # Remove duplicates and sort them in the increasing order oind, vind = np.unique(k, return_inverse=True) backend_indexer.append(oind) np_indexer.append(vind.reshape(*k.shape)) return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) # basic indexer assert indexing_support == IndexingSupport.BASIC for k, s in zip(indexer_elems, shape, strict=False): if isinstance(k, np.ndarray): # np.ndarray key is converted to slice that covers the entire # entries of this key. backend_indexer.append(slice(np.min(k), np.max(k) + 1)) np_indexer.append(k - np.min(k)) elif isinstance(k, integer_types): backend_indexer.append(k) else: # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(cast(slice, k), s) backend_indexer.append(bk_slice) np_indexer.append(np_slice) return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) def _posify_indices(indices: Any, size: int) -> np.ndarray: """Convert negative indices by their equivalent positive indices. Note: the resulting indices may still be out of bounds (< 0 or >= size). """ return np.where(indices < 0, size + indices, indices) def _check_bounds(indices: Any, size: int): """Check if the given indices are all within the array boundaries.""" if np.any((indices < 0) | (indices >= size)): raise IndexError("out of bounds index") def _arrayize_outer_indexer(indexer: OuterIndexer, shape) -> OuterIndexer: """Return a similar oindex with after replacing slices by arrays and negative indices by their corresponding positive indices. Also check if array indices are within bounds. 
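    A small sketch with made-up sizes:

    >>> key = _arrayize_outer_indexer(OuterIndexer((slice(0, 3), np.array([-1, 0]))), (4, 5))
    >>> # key.tuple is now (array([0, 1, 2]), array([4, 0])): the slice was
    >>> # expanded into explicit positions and the negative index replaced by
    >>> # its positive equivalent for the size-5 axis.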
""" new_key = [] for axis, value in enumerate(indexer.tuple): size = shape[axis] if isinstance(value, slice): value = _expand_slice(value, size) else: value = _posify_indices(value, size) _check_bounds(value, size) new_key.append(value) return OuterIndexer(tuple(new_key)) def _arrayize_vectorized_indexer( indexer: VectorizedIndexer, shape: _Shape ) -> VectorizedIndexer: """Return an identical vindex but slices are replaced by arrays""" slices = [v for v in indexer.tuple if isinstance(v, slice)] if len(slices) == 0: return indexer arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)] n_dim = arrays[0].ndim if len(arrays) > 0 else 0 i_dim = 0 new_key = [] for v, size in zip(indexer.tuple, shape, strict=True): if isinstance(v, np.ndarray): new_key.append(np.reshape(v, v.shape + (1,) * len(slices))) else: # slice shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1) new_key.append(np.arange(*v.indices(size)).reshape(shape)) i_dim += 1 return VectorizedIndexer(tuple(new_key)) def _chunked_array_with_chunks_hint( array, chunks, chunkmanager: ChunkManagerEntrypoint[Any] ): """Create a chunked array using the chunks hint for dimensions of size > 1.""" if len(chunks) < array.ndim: raise ValueError("not enough chunks in hint") new_chunks = [] for chunk, size in zip(chunks, array.shape, strict=False): new_chunks.append(chunk if size > 1 else (1,)) return chunkmanager.from_array(array, new_chunks) # type: ignore[arg-type] def _logical_any(args): return functools.reduce(operator.or_, args) def _masked_result_drop_slice(key, data: duckarray[Any, Any] | None = None): key = (k for k in key if not isinstance(k, slice)) chunks_hint = getattr(data, "chunks", None) new_keys = [] for k in key: if isinstance(k, np.ndarray): if is_chunked_array(data): # type: ignore[arg-type] chunkmanager = get_chunked_array_type(data) new_keys.append( _chunked_array_with_chunks_hint(k, chunks_hint, chunkmanager) ) elif isinstance(data, array_type("sparse")): import sparse new_keys.append(sparse.COO.from_numpy(k)) else: new_keys.append(k) else: new_keys.append(k) mask = _logical_any(k == -1 for k in new_keys) return mask def create_mask( indexer: ExplicitIndexer, shape: _Shape, data: duckarray[Any, Any] | None = None ): """Create a mask for indexing with a fill-value. Parameters ---------- indexer : ExplicitIndexer Indexer with -1 in integer or ndarray value to indicate locations in the result that should be masked. shape : tuple Shape of the array being indexed. data : optional Data for which mask is being created. If data is a dask arrays, its chunks are used as a hint for chunks on the resulting mask. If data is a sparse array, the returned mask is also a sparse array. Returns ------- mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool Same type as data. Has the same shape as the indexing result. 
""" if isinstance(indexer, OuterIndexer): key = _outer_to_vectorized_indexer(indexer, shape).tuple assert not any(isinstance(k, slice) for k in key) mask = _masked_result_drop_slice(key, data) elif isinstance(indexer, VectorizedIndexer): key = indexer.tuple base_mask = _masked_result_drop_slice(key, data) slice_shape = tuple( np.arange(*k.indices(size)).size for k, size in zip(key, shape, strict=False) if isinstance(k, slice) ) expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)] mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape) elif isinstance(indexer, BasicIndexer): mask = any(k == -1 for k in indexer.tuple) else: raise TypeError(f"unexpected key type: {type(indexer)}") return mask def _posify_mask_subindexer( index: np.ndarray[Any, np.dtype[np.generic]], ) -> np.ndarray[Any, np.dtype[np.generic]]: """Convert masked indices in a flat array to the nearest unmasked index. Parameters ---------- index : np.ndarray One dimensional ndarray with dtype=int. Returns ------- np.ndarray One dimensional ndarray with all values equal to -1 replaced by an adjacent non-masked element. """ masked = index == -1 unmasked_locs = np.flatnonzero(~masked) if not unmasked_locs.size: # indexing unmasked_locs is invalid return np.zeros_like(index) masked_locs = np.flatnonzero(masked) prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1) new_index = index.copy() new_index[masked_locs] = index[unmasked_locs[prev_value]] return new_index def posify_mask_indexer(indexer: ExplicitIndexer) -> ExplicitIndexer: """Convert masked values (-1) in an indexer to nearest unmasked values. This routine is useful for dask, where it can be much faster to index adjacent points than arbitrary points from the end of an array. Parameters ---------- indexer : ExplicitIndexer Input indexer. Returns ------- ExplicitIndexer Same type of input, with all values in ndarray keys equal to -1 replaced by an adjacent non-masked element. """ key = tuple( ( _posify_mask_subindexer(k.ravel()).reshape(k.shape) if isinstance(k, np.ndarray) else k ) for k in indexer.tuple ) return type(indexer)(key) def is_fancy_indexer(indexer: Any) -> bool: """Return False if indexer is a int, slice, a 1-dimensional list, or a 0 or 1-dimensional ndarray; in all other cases return True """ if isinstance(indexer, int | slice) and not isinstance(indexer, bool): return False if isinstance(indexer, np.ndarray): return indexer.ndim > 1 if isinstance(indexer, list): return bool(indexer) and not isinstance(indexer[0], int) return True class NumpyIndexingAdapter(IndexingAdapter): """Wrap a NumPy array to use explicit indexing.""" __slots__ = ("array",) def __init__(self, array): # In NumpyIndexingAdapter we only allow to store bare np.ndarray if not isinstance(array, np.ndarray): raise TypeError( "NumpyIndexingAdapter only wraps np.ndarray. " f"Trying to wrap {type(array)}" ) self.array = array def transpose(self, order): return self.array.transpose(order) def _oindex_get(self, indexer: OuterIndexer): key = _outer_to_numpy_indexer(indexer, self.array.shape) return self.array[key] def _vindex_get(self, indexer: VectorizedIndexer): _assert_not_chunked_indexer(indexer.tuple) array = NumpyVIndexAdapter(self.array) return array[indexer.tuple] def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) array = self.array # We want 0d slices rather than scalars. 
This is achieved by # appending an ellipsis (see # https://numpy.org/doc/stable/reference/arrays.indexing.html#detailed-notes). key = indexer.tuple + (Ellipsis,) return array[key] def _safe_setitem(self, array, key: tuple[Any, ...], value: Any) -> None: try: array[key] = value except ValueError as exc: # More informative exception if read-only view if not array.flags.writeable and not array.flags.owndata: raise ValueError( "Assignment destination is a view. " "Do you want to .copy() array first?" ) from exc else: raise exc def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: key = _outer_to_numpy_indexer(indexer, self.array.shape) self._safe_setitem(self.array, key, value) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: array = NumpyVIndexAdapter(self.array) self._safe_setitem(array, indexer.tuple, value) def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) array = self.array # We want 0d slices rather than scalars. This is achieved by # appending an ellipsis (see # https://numpy.org/doc/stable/reference/arrays.indexing.html#detailed-notes). key = indexer.tuple + (Ellipsis,) self._safe_setitem(array, key, value) class NdArrayLikeIndexingAdapter(NumpyIndexingAdapter): __slots__ = ("array",) def __init__(self, array): if not hasattr(array, "__array_function__"): raise TypeError( "NdArrayLikeIndexingAdapter must wrap an object that " "implements the __array_function__ protocol" ) self.array = array class ArrayApiIndexingAdapter(IndexingAdapter): """Wrap an array API array to use explicit indexing.""" __slots__ = ("array",) def __init__(self, array): if not hasattr(array, "__array_namespace__"): raise TypeError( "ArrayApiIndexingAdapter must wrap an object that " "implements the __array_namespace__ protocol" ) self.array = array def _oindex_get(self, indexer: OuterIndexer): # manual orthogonal indexing (implemented like DaskIndexingAdapter) key = indexer.tuple value = self.array for axis, subkey in reversed(list(enumerate(key))): value = value[(slice(None),) * axis + (subkey, Ellipsis)] return value def _vindex_get(self, indexer: VectorizedIndexer): raise TypeError("Vectorized indexing is not supported") def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return self.array[indexer.tuple] def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: self.array[indexer.tuple] = value def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: raise TypeError("Vectorized indexing is not supported") def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self.array[indexer.tuple] = value def transpose(self, order): xp = self.array.__array_namespace__() return xp.permute_dims(self.array, order) def _apply_vectorized_indexer_dask_wrapper(indices, coord): from xarray.core.indexing import VectorizedIndexer, apply_indexer, as_indexable return apply_indexer( as_indexable(coord), VectorizedIndexer((indices.squeeze(axis=-1),)) ) def _assert_not_chunked_indexer(idxr: tuple[Any, ...]) -> None: if any(is_chunked_array(i) for i in idxr): raise ValueError( "Cannot index with a chunked array indexer. " "Please chunk the array you are indexing first, " "and drop any indexed dimension coordinate variables. " "Alternatively, call `.compute()` on any chunked arrays in the indexer." 
) class DaskIndexingAdapter(IndexingAdapter): """Wrap a dask array to support explicit indexing.""" __slots__ = ("array",) def __init__(self, array): """This adapter is created in Variable.__getitem__ in Variable._broadcast_indexes. """ self.array = array def _oindex_get(self, indexer: OuterIndexer): key = indexer.tuple try: return self.array[key] except NotImplementedError: # manual orthogonal indexing value = self.array for axis, subkey in reversed(list(enumerate(key))): value = value[(slice(None),) * axis + (subkey,)] return value def _vindex_get(self, indexer: VectorizedIndexer): try: return self.array.vindex[indexer.tuple] except IndexError as e: # TODO: upstream to dask has_dask = any(is_duck_dask_array(i) for i in indexer.tuple) # this only works for "small" 1d coordinate arrays with one chunk # it is intended for idxmin, idxmax, and allows indexing with # the nD array output of argmin, argmax if ( not has_dask or len(indexer.tuple) > 1 or math.prod(self.array.numblocks) > 1 or self.array.ndim > 1 ): raise e (idxr,) = indexer.tuple if idxr.ndim == 0: return self.array[idxr.data] else: import dask.array return dask.array.map_blocks( _apply_vectorized_indexer_dask_wrapper, idxr[..., np.newaxis], self.array, chunks=idxr.chunks, drop_axis=-1, dtype=self.array.dtype, ) def __getitem__(self, indexer: ExplicitIndexer): self._check_and_raise_if_non_basic_indexer(indexer) return self.array[indexer.tuple] def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: num_non_slices = sum(0 if isinstance(k, slice) else 1 for k in indexer.tuple) if num_non_slices > 1: raise NotImplementedError( "xarray can't set arrays with multiple array indices to dask yet." ) self.array[indexer.tuple] = value def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: self.array.vindex[indexer.tuple] = value def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: self._check_and_raise_if_non_basic_indexer(indexer) self.array[indexer.tuple] = value def transpose(self, order): return self.array.transpose(order) class PandasIndexingAdapter(IndexingAdapter): """Wrap a pandas.Index to preserve dtypes and handle explicit indexing.""" __slots__ = ("_dtype", "array") array: pd.Index _dtype: np.dtype | pd.api.extensions.ExtensionDtype def __init__( self, array: pd.Index, dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None, ): from xarray.core.indexes import safe_cast_to_index self.array = safe_cast_to_index(array) if dtype is None: if is_allowed_extension_array(array): cast(pd.api.extensions.ExtensionDtype, array.dtype) self._dtype = array.dtype else: self._dtype = get_valid_numpy_dtype(array) elif is_allowed_extension_array_dtype(dtype): self._dtype = cast(pd.api.extensions.ExtensionDtype, dtype) else: self._dtype = np.dtype(cast(DTypeLike, dtype)) @property def _in_memory(self) -> bool: # prevent costly conversion of a memory-saving pd.RangeIndex into a # large numpy array. 
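        # For example (illustrative): an adapter wrapping pd.RangeIndex(10) reports
        # _in_memory=False, whereas one wrapping pd.Index([1, 2, 3]) reports True.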
return not isinstance(self.array, pd.RangeIndex) @property def dtype(self) -> np.dtype | pd.api.extensions.ExtensionDtype: # type: ignore[override] return self._dtype def _get_numpy_dtype(self, dtype: np.typing.DTypeLike | None = None) -> np.dtype: if dtype is None: if is_valid_numpy_dtype(self.dtype): return cast(np.dtype, self.dtype) else: return get_valid_numpy_dtype(self.array) else: return np.dtype(dtype) def __array__( self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None, ) -> np.ndarray: dtype = self._get_numpy_dtype(dtype) array = self.array if isinstance(array, pd.PeriodIndex): with suppress(AttributeError): # this might not be public API array = array.astype("object") if Version(np.__version__) >= Version("2.0.0"): return np.asarray(array.values, dtype=dtype, copy=copy) else: return np.asarray(array.values, dtype=dtype) def get_duck_array(self) -> np.ndarray | PandasExtensionArray: # We return an PandasExtensionArray wrapper type that satisfies # duck array protocols. # `NumpyExtensionArray` is excluded if is_allowed_extension_array(self.array): from xarray.core.extension_array import PandasExtensionArray return PandasExtensionArray(self.array.array) return np.asarray(self) @property def shape(self) -> _Shape: return (len(self.array),) def _convert_scalar(self, item) -> np.ndarray: if item is pd.NaT: # work around the impossibility of casting NaT with asarray # note: it probably would be better in general to return # pd.Timestamp rather np.than datetime64 but this is easier # (for now) item = np.datetime64("NaT", "ns") elif isinstance(item, pd.Timedelta): item = item.to_numpy() elif isinstance(item, timedelta): item = np.timedelta64(item) elif isinstance(item, pd.Timestamp): # Work around for GH: pydata/xarray#1932 and numpy/numpy#10668 # numpy fails to convert pd.Timestamp to np.datetime64[ns] item = np.asarray(item.to_datetime64()) elif self.dtype != object: dtype = self._get_numpy_dtype() item = np.asarray(item, dtype=dtype) # as for numpy.ndarray indexing, we always want the result to be # a NumPy array. return to_0d_array(item) def _index_get( self, indexer: ExplicitIndexer, func_name: str ) -> PandasIndexingAdapter | np.ndarray: key = indexer.tuple if len(key) == 1: # unpack key so it can index a pandas.Index object (pandas.Index # objects don't like tuples) (key,) = key # if multidimensional key, convert the index to numpy array and index the latter if getattr(key, "ndim", 0) > 1: indexable = NumpyIndexingAdapter(np.asarray(self)) return getattr(indexable, func_name)(indexer) # otherwise index the pandas index then re-wrap or convert the result result = self.array[key] if isinstance(result, pd.Index): return type(self)(result, dtype=self.dtype) else: return self._convert_scalar(result) def _oindex_get(self, indexer: OuterIndexer) -> PandasIndexingAdapter | np.ndarray: return self._index_get(indexer, "_oindex_get") def _vindex_get( self, indexer: VectorizedIndexer ) -> PandasIndexingAdapter | np.ndarray: _assert_not_chunked_indexer(indexer.tuple) return self._index_get(indexer, "_vindex_get") def __getitem__( self, indexer: ExplicitIndexer ) -> PandasIndexingAdapter | np.ndarray: return self._index_get(indexer, "__getitem__") def transpose(self, order) -> pd.Index: return self.array # self.array should be always one-dimensional def _repr_inline_(self, max_width: int) -> str: # we want to display values in the inline repr for lazy coordinates too # (pd.RangeIndex and pd.MultiIndex). `format_array_flat` prevents loading # the whole array in memory. 
from xarray.core.formatting import format_array_flat return format_array_flat(self, max_width) def __repr__(self) -> str: return f"{type(self).__name__}(array={self.array!r}, dtype={self.dtype!r})" def copy(self, deep: bool = True) -> Self: # Not the same as just writing `self.array.copy(deep=deep)`, as # shallow copies of the underlying numpy.ndarrays become deep ones # upon pickling # >>> len(pickle.dumps((self.array, self.array))) # 4000281 # >>> len(pickle.dumps((self.array, self.array.copy(deep=False)))) # 8000341 array = self.array.copy(deep=True) if deep else self.array return type(self)(array, self._dtype) @property def nbytes(self) -> int: if is_allowed_extension_array(self.array): return self.array.nbytes dtype = self._get_numpy_dtype() return dtype.itemsize * len(self.array) class PandasMultiIndexingAdapter(PandasIndexingAdapter): """Handles explicit indexing for a pandas.MultiIndex. This allows creating one instance for each multi-index level while preserving indexing efficiency (memoized + might reuse another instance with the same multi-index). """ __slots__ = ("_dtype", "adapter", "array", "level") array: pd.MultiIndex _dtype: np.dtype | pd.api.extensions.ExtensionDtype level: str | None def __init__( self, array: pd.MultiIndex, dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None, level: str | None = None, ): super().__init__(array, dtype) self.level = level def __array__( self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None, ) -> np.ndarray: dtype = self._get_numpy_dtype(dtype) if self.level is not None: return np.asarray( self.array.get_level_values(self.level).values, dtype=dtype ) else: return super().__array__(dtype, copy=copy) @property def _in_memory(self) -> bool: # The pd.MultiIndex's data is fully in memory, but it has a different # layout than the level and dimension coordinate arrays. Marking this # adapter class as a "lazy" array will prevent costly conversion when, # e.g., formatting the Xarray reprs. return False def _convert_scalar(self, item: Any): if isinstance(item, tuple) and self.level is not None: idx = tuple(self.array.names).index(self.level) item = item[idx] return super()._convert_scalar(item) def _index_get( self, indexer: ExplicitIndexer, func_name: str ) -> PandasIndexingAdapter | np.ndarray: result = super()._index_get(indexer, func_name) if isinstance(result, type(self)): result.level = self.level return result def __repr__(self) -> str: if self.level is None: return super().__repr__() else: props = ( f"(array={self.array!r}, level={self.level!r}, dtype={self.dtype!r})" ) return f"{type(self).__name__}{props}" def _repr_inline_(self, max_width: int) -> str: if self.level is None: return "MultiIndex" else: return super()._repr_inline_(max_width=max_width) def copy(self, deep: bool = True) -> Self: # see PandasIndexingAdapter.copy array = self.array.copy(deep=True) if deep else self.array return type(self)(array, self._dtype, self.level) class CoordinateTransformIndexingAdapter(IndexingAdapter): """Wrap a CoordinateTransform as a lazy coordinate array. Supports explicit indexing (both outer and vectorized). """ _transform: CoordinateTransform _coord_name: Hashable _dims: tuple[str, ...] def __init__( self, transform: CoordinateTransform, coord_name: Hashable, dims: tuple[str, ...] 
| None = None, ): self._transform = transform self._coord_name = coord_name self._dims = dims or transform.dims @property def dtype(self) -> np.dtype: return self._transform.dtype @property def shape(self) -> tuple[int, ...]: return tuple(self._transform.dim_size.values()) @property def _in_memory(self) -> bool: return False def get_duck_array(self) -> np.ndarray: all_coords = self._transform.generate_coords(dims=self._dims) return np.asarray(all_coords[self._coord_name]) def _oindex_get(self, indexer: OuterIndexer): expanded_indexer_ = OuterIndexer(expanded_indexer(indexer.tuple, self.ndim)) array_indexer = _arrayize_outer_indexer(expanded_indexer_, self.shape) positions = np.meshgrid(*array_indexer.tuple, indexing="ij") dim_positions = dict(zip(self._dims, positions, strict=False)) result = self._transform.forward(dim_positions) return np.asarray(result[self._coord_name]).squeeze() def _oindex_set(self, indexer: OuterIndexer, value: Any) -> None: raise TypeError( "setting values is not supported on coordinate transform arrays." ) def _vindex_get(self, indexer: VectorizedIndexer): expanded_indexer_ = VectorizedIndexer( expanded_indexer(indexer.tuple, self.ndim) ) array_indexer = _arrayize_vectorized_indexer(expanded_indexer_, self.shape) dim_positions = {} for i, (dim, pos) in enumerate( zip(self._dims, array_indexer.tuple, strict=False) ): pos = _posify_indices(pos, self.shape[i]) _check_bounds(pos, self.shape[i]) dim_positions[dim] = pos result = self._transform.forward(dim_positions) return np.asarray(result[self._coord_name]) def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None: raise TypeError( "setting values is not supported on coordinate transform arrays." ) def __getitem__(self, indexer: ExplicitIndexer): # TODO: make it lazy (i.e., re-calculate and re-wrap the transform) when possible? self._check_and_raise_if_non_basic_indexer(indexer) # also works with basic indexing return self._oindex_get(OuterIndexer(indexer.tuple)) def __setitem__(self, indexer: ExplicitIndexer, value: Any) -> None: raise TypeError( "setting values is not supported on coordinate transform arrays." ) def transpose(self, order: Iterable[int]) -> Self: new_dims = tuple(self._dims[i] for i in order) return type(self)(self._transform, self._coord_name, new_dims) def __repr__(self: Any) -> str: return f"{type(self).__name__}(transform={self._transform!r})" def _repr_inline_(self, max_width: int) -> str: # we want to display values in the inline repr for this lazy coordinate # `format_array_flat` prevents loading the whole array in memory. 
from xarray.core.formatting import format_array_flat return format_array_flat(self, max_width) xarray-2025.09.0/xarray/core/missing.py000066400000000000000000000673611505620616400176420ustar00rootroot00000000000000from __future__ import annotations import datetime as dt import itertools import warnings from collections import ChainMap from collections.abc import Callable, Generator, Hashable, Sequence from functools import partial from numbers import Number from typing import TYPE_CHECKING, Any, TypeVar, get_args import numpy as np import pandas as pd from xarray.computation.apply_ufunc import apply_ufunc from xarray.core import utils from xarray.core.common import _contains_datetime_like_objects, ones_like from xarray.core.duck_array_ops import ( datetime_to_numeric, push, ravel, reshape, stack, timedelta_to_numeric, transpose, ) from xarray.core.options import _get_keep_attrs from xarray.core.types import Interp1dOptions, InterpnOptions, InterpOptions from xarray.core.utils import OrderedSet, is_scalar from xarray.core.variable import ( Variable, broadcast_variables, ) from xarray.namedarray.pycompat import is_chunked_array if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset InterpCallable = Callable[..., np.ndarray] # interpn Interpolator = Callable[..., Callable[..., np.ndarray]] # *Interpolator # interpolator objects return callables that can be evaluated SourceDest = dict[Hashable, tuple[Variable, Variable]] T = TypeVar("T") def _get_nan_block_lengths( obj: Dataset | DataArray | Variable, dim: Hashable, index: Variable ): """ Return an object where each NaN element in 'obj' is replaced by the length of the gap the element is in. """ # make variable so that we get broadcasting for free index = Variable([dim], index) # algorithm from https://github.com/pydata/xarray/pull/3302#discussion_r324707072 arange = ones_like(obj) * index valid = obj.notnull() valid_arange = arange.where(valid) cumulative_nans = valid_arange.ffill(dim=dim).fillna(index[0]) nan_block_lengths = ( cumulative_nans.diff(dim=dim, label="upper") .reindex({dim: obj[dim]}) .where(valid) .bfill(dim=dim) .where(~valid, 0) .fillna(index[-1] - valid_arange.max(dim=[dim])) ) return nan_block_lengths class BaseInterpolator: """Generic interpolator class for normalizing interpolation methods""" cons_kwargs: dict[str, Any] call_kwargs: dict[str, Any] f: Callable method: str def __call__(self, x): return self.f(x, **self.call_kwargs) def __repr__(self): return f"{self.__class__.__name__}: method={self.method}" class NumpyInterpolator(BaseInterpolator): """One-dimensional linear interpolation. 
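    A minimal illustration (values chosen arbitrarily):

    >>> interp = NumpyInterpolator(np.array([0.0, 1.0, 2.0]), np.array([0.0, 10.0, 20.0]))
    >>> float(interp(0.5))
    5.0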
See Also -------- numpy.interp """ def __init__(self, xi, yi, method="linear", fill_value=None, period=None): if method != "linear": raise ValueError("only method `linear` is valid for the NumpyInterpolator") self.method = method self.f = np.interp self.cons_kwargs = {} self.call_kwargs = {"period": period} self._xi = xi self._yi = yi nan = np.nan if yi.dtype.kind != "c" else np.nan + np.nan * 1j if fill_value is None: self._left = nan self._right = nan elif isinstance(fill_value, Sequence) and len(fill_value) == 2: self._left = fill_value[0] self._right = fill_value[1] elif is_scalar(fill_value): self._left = fill_value self._right = fill_value else: raise ValueError(f"{fill_value} is not a valid fill_value") def __call__(self, x): return self.f( x, self._xi, self._yi, left=self._left, right=self._right, **self.call_kwargs, ) class ScipyInterpolator(BaseInterpolator): """Interpolate a 1-D function using Scipy interp1d See Also -------- scipy.interpolate.interp1d """ def __init__( self, xi, yi, method=None, fill_value=None, assume_sorted=True, copy=False, bounds_error=False, order=None, axis=-1, **kwargs, ): from scipy.interpolate import interp1d if method is None: raise ValueError( "method is a required argument, please supply a " "valid scipy.inter1d method (kind)" ) if method == "polynomial": if order is None: raise ValueError("order is required when method=polynomial") method = order if method == "quintic": method = 5 self.method = method self.cons_kwargs = kwargs self.call_kwargs = {} nan = np.nan if yi.dtype.kind != "c" else np.nan + np.nan * 1j if fill_value is None and method == "linear": fill_value = nan, nan elif fill_value is None: fill_value = nan self.f = interp1d( xi, yi, kind=self.method, fill_value=fill_value, bounds_error=bounds_error, assume_sorted=assume_sorted, copy=copy, axis=axis, **self.cons_kwargs, ) class SplineInterpolator(BaseInterpolator): """One-dimensional smoothing spline fit to a given set of data points. See Also -------- scipy.interpolate.UnivariateSpline """ def __init__( self, xi, yi, method="spline", fill_value=None, order=3, nu=0, ext=None, **kwargs, ): from scipy.interpolate import UnivariateSpline if method != "spline": raise ValueError("only method `spline` is valid for the SplineInterpolator") self.method = method self.cons_kwargs = kwargs self.call_kwargs = {"nu": nu, "ext": ext} if fill_value is not None: raise ValueError("SplineInterpolator does not support fill_value") self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs) def _apply_over_vars_with_dim(func, self, dim=None, **kwargs): """Wrapper for datasets""" ds = type(self)(coords=self.coords, attrs=self.attrs) for name, var in self.data_vars.items(): if dim in var.dims: ds[name] = func(var, dim=dim, **kwargs) else: ds[name] = var return ds def get_clean_interp_index( arr, dim: Hashable, use_coordinate: Hashable | bool = True, strict: bool = True ): """Return index to use for x values in interpolation or curve fitting. Parameters ---------- arr : DataArray Array to interpolate or fit to a curve. dim : str Name of dimension along which to fit. use_coordinate : str or bool If use_coordinate is True, the coordinate that shares the name of the dimension along which interpolation is being performed will be used as the x values. If False, the x values are set as an equally spaced sequence. strict : bool Whether to raise errors if the index is either non-unique or non-monotonic (default). Returns ------- Variable Numerical values for the x-coordinates. 
Notes ----- If indexing is along the time dimension, datetime coordinates are converted to time deltas with respect to 1970-01-01. """ # Question: If use_coordinate is a string, what role does `dim` play? from xarray.coding.cftimeindex import CFTimeIndex if use_coordinate is False: axis = arr.get_axis_num(dim) return np.arange(arr.shape[axis], dtype=np.float64) if use_coordinate is True: index = arr.get_index(dim) else: # string index = arr.coords[use_coordinate] if index.ndim != 1: raise ValueError( f"Coordinates used for interpolation must be 1D, " f"{use_coordinate} is {index.ndim}D." ) index = index.to_index() # TODO: index.name is None for multiindexes # set name for nice error messages below if isinstance(index, pd.MultiIndex): index.name = dim if strict: if not index.is_monotonic_increasing: raise ValueError(f"Index {index.name!r} must be monotonically increasing") if not index.is_unique: raise ValueError(f"Index {index.name!r} has duplicate values") # Special case for non-standard calendar indexes # Numerical datetime values are defined with respect to 1970-01-01T00:00:00 in units of nanoseconds if isinstance(index, CFTimeIndex | pd.DatetimeIndex): offset = type(index[0])(1970, 1, 1) if isinstance(index, CFTimeIndex): index = index.values index = Variable( data=datetime_to_numeric(index, offset=offset, datetime_unit="ns"), dims=(dim,), ) # raise if index cannot be cast to a float (e.g. MultiIndex) try: index = index.values.astype(np.float64) except (TypeError, ValueError) as err: # pandas raises a TypeError # xarray/numpy raise a ValueError raise TypeError( f"Index {index.name!r} must be castable to float64 to support " f"interpolation or curve fitting, got {type(index).__name__}." ) from err return index def interp_na( self, dim: Hashable | None = None, use_coordinate: bool | str = True, method: InterpOptions = "linear", limit: int | None = None, max_gap: ( int | float | str | pd.Timedelta | np.timedelta64 | dt.timedelta | None ) = None, keep_attrs: bool | None = None, **kwargs, ): """Interpolate values according to different methods.""" from xarray.coding.cftimeindex import CFTimeIndex if dim is None: raise NotImplementedError("dim is a required argument") if limit is not None: valids = _get_valid_fill_mask(self, dim, limit) if max_gap is not None: max_type = type(max_gap).__name__ if not is_scalar(max_gap): raise ValueError("max_gap must be a scalar.") if ( dim in self._indexes and isinstance( self._indexes[dim].to_pandas_index(), pd.DatetimeIndex | CFTimeIndex ) and use_coordinate ): # Convert to float max_gap = timedelta_to_numeric(max_gap) if not use_coordinate and not isinstance(max_gap, Number | np.number): raise TypeError( f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}." 
) # method index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate) interp_class, kwargs = _get_interpolator(method, **kwargs) interpolator = partial(func_interpolate_na, interp_class, **kwargs) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "overflow", RuntimeWarning) warnings.filterwarnings("ignore", "invalid value", RuntimeWarning) arr = apply_ufunc( interpolator, self, index, input_core_dims=[[dim], [dim]], output_core_dims=[[dim]], output_dtypes=[self.dtype], dask="parallelized", vectorize=True, keep_attrs=keep_attrs, ).transpose(*self.dims) if limit is not None: arr = arr.where(valids) if max_gap is not None: if dim not in self.coords: raise NotImplementedError( "max_gap not implemented for unlabeled coordinates yet." ) nan_block_lengths = _get_nan_block_lengths(self, dim, index) arr = arr.where(nan_block_lengths <= max_gap) return arr def func_interpolate_na(interpolator, y, x, **kwargs): """helper function to apply interpolation along 1 dimension""" # reversed arguments are so that attrs are preserved from da, not index # it would be nice if this wasn't necessary, works around: # "ValueError: assignment destination is read-only" in assignment below out = y.copy() nans = pd.isnull(y) nonans = ~nans # fast track for no-nans, all nan but one, and all-nans cases n_nans = nans.sum() if n_nans == 0 or n_nans >= len(y) - 1: return y f = interpolator(x[nonans], y[nonans], **kwargs) out[nans] = f(x[nans]) return out def _bfill(arr, n=None, axis=-1): """inverse of ffill""" arr = np.flip(arr, axis=axis) # fill arr = push(arr, axis=axis, n=n) # reverse back to original return np.flip(arr, axis=axis) def ffill(arr, dim=None, limit=None): """forward fill missing values""" axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc( push, arr, dask="allowed", keep_attrs=True, output_dtypes=[arr.dtype], kwargs=dict(n=_limit, axis=axis), ).transpose(*arr.dims) def bfill(arr, dim=None, limit=None): """backfill missing values""" axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc( _bfill, arr, dask="allowed", keep_attrs=True, output_dtypes=[arr.dtype], kwargs=dict(n=_limit, axis=axis), ).transpose(*arr.dims) def _import_interpolant(interpolant, method): """Import interpolant from scipy.interpolate.""" try: from scipy import interpolate return getattr(interpolate, interpolant) except ImportError as e: raise ImportError(f"Interpolation with method {method} requires scipy.") from e def _get_interpolator( method: InterpOptions, vectorizeable_only: bool = False, **kwargs ): """helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class """ interp_class: Interpolator interp1d_methods = get_args(Interp1dOptions) valid_methods = tuple(vv for v in get_args(InterpOptions) for vv in get_args(v)) # prefer numpy.interp for 1d linear interpolation. This function cannot # take higher dimensional data but scipy.interp1d can. 
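    # For example (illustrative): _get_interpolator("linear") resolves to
    # (NumpyInterpolator, {"method": "linear"}), while _get_interpolator("cubic")
    # resolves to (ScipyInterpolator, {"method": "cubic"}).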
if ( method == "linear" and kwargs.get("fill_value") != "extrapolate" and not vectorizeable_only ): kwargs.update(method=method) interp_class = NumpyInterpolator elif method in valid_methods: if method in interp1d_methods: kwargs.update(method=method) interp_class = ScipyInterpolator elif method == "barycentric": kwargs.update(axis=-1) interp_class = _import_interpolant("BarycentricInterpolator", method) elif method in ["krogh", "krog"]: kwargs.update(axis=-1) interp_class = _import_interpolant("KroghInterpolator", method) elif method == "pchip": kwargs.update(axis=-1) # pchip default behavior is to extrapolate kwargs.setdefault("extrapolate", False) interp_class = _import_interpolant("PchipInterpolator", method) elif method == "spline": utils.emit_user_level_warning( "The 1d SplineInterpolator class is performing an incorrect calculation and " "is being deprecated. Please use `method=polynomial` for 1D Spline Interpolation.", PendingDeprecationWarning, ) if vectorizeable_only: raise ValueError(f"{method} is not a vectorizeable interpolator. ") kwargs.update(method=method) interp_class = SplineInterpolator elif method == "akima": kwargs.update(axis=-1) interp_class = _import_interpolant("Akima1DInterpolator", method) elif method == "makima": kwargs.update(method="makima", axis=-1) interp_class = _import_interpolant("Akima1DInterpolator", method) else: raise ValueError(f"{method} is not a valid scipy interpolator") else: raise ValueError(f"{method} is not a valid interpolator") return interp_class, kwargs def _get_interpolator_nd(method, **kwargs): """helper function to select the appropriate interpolator class returns interpolator class and keyword arguments for the class """ valid_methods = tuple(get_args(InterpnOptions)) if method in valid_methods: kwargs.update(method=method) kwargs.setdefault("bounds_error", False) interp_class = _import_interpolant("interpn", method) else: raise ValueError( f"{method} is not a valid interpolator for interpolating " "over multiple dimensions." ) return interp_class, kwargs def _get_valid_fill_mask(arr, dim, limit): """helper function to determine values that can be filled when limit is not None""" kw = {dim: limit + 1} # we explicitly use construct method to avoid copy. new_dim = utils.get_temp_dimname(arr.dims, "_window") return ( arr.isnull() .rolling(min_periods=1, **kw) .construct(new_dim, fill_value=False) .sum(new_dim, skipna=False) ) <= limit def _localize(obj: T, indexes_coords: SourceDest) -> tuple[T, SourceDest]: """Speed up for linear and nearest neighbor method. Only consider a subspace that is needed for the interpolation """ indexes = {} for dim, [x, new_x] in indexes_coords.items(): if is_chunked_array(new_x._data): continue new_x_loaded = new_x.data minval = np.nanmin(new_x_loaded) maxval = np.nanmax(new_x_loaded) index = x.to_index() imin, imax = index.get_indexer([minval, maxval], method="nearest") indexes[dim] = slice(max(imin - 2, 0), imax + 2) indexes_coords[dim] = (x[indexes[dim]], new_x) return obj.isel(indexes), indexes_coords # type: ignore[attr-defined] def _floatize_x( x: list[Variable], new_x: list[Variable] ) -> tuple[list[Variable], list[Variable]]: """Make x and new_x float. This is particularly useful for datetime dtype. """ for i in range(len(x)): if _contains_datetime_like_objects(x[i]): # Scipy casts coordinates to np.float64, which is not accurate # enough for datetime64 (uses 64bit integer). 
# We assume that the most of the bits are used to represent the # offset (min(x)) and the variation (x - min(x)) can be # represented by float. xmin = x[i].values.min() x[i] = x[i]._to_numeric(offset=xmin, dtype=np.float64) new_x[i] = new_x[i]._to_numeric(offset=xmin, dtype=np.float64) return x, new_x def interp( var: Variable, indexes_coords: SourceDest, method: InterpOptions, **kwargs, ) -> Variable: """Make an interpolation of Variable Parameters ---------- var : Variable indexes_coords Mapping from dimension name to a pair of original and new coordinates. Original coordinates should be sorted in strictly ascending order. Note that all the coordinates should be Variable objects. method : string One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}. For multidimensional interpolation, only {'linear', 'nearest'} can be used. **kwargs keyword arguments to be passed to scipy.interpolate Returns ------- Interpolated Variable See Also -------- DataArray.interp Dataset.interp """ if not indexes_coords: return var.copy() result = var if method in ["linear", "nearest", "slinear"]: # decompose the interpolation into a succession of independent interpolation. iter_indexes_coords = decompose_interp(indexes_coords) else: iter_indexes_coords = (_ for _ in [indexes_coords]) for indep_indexes_coords in iter_indexes_coords: var = result # target dimensions dims = list(indep_indexes_coords) # transpose to make the interpolated axis to the last position broadcast_dims = [d for d in var.dims if d not in dims] original_dims = broadcast_dims + dims result = interpolate_variable( var.transpose(*original_dims), {k: indep_indexes_coords[k] for k in dims}, method=method, kwargs=kwargs, ) # dimension of the output array out_dims: OrderedSet = OrderedSet() for d in var.dims: if d in dims: out_dims.update(indep_indexes_coords[d][1].dims) else: out_dims.add(d) if len(out_dims) > 1: result = result.transpose(*out_dims) return result def interpolate_variable( var: Variable, indexes_coords: SourceDest, *, method: InterpOptions, kwargs: dict[str, Any], ) -> Variable: """core routine that returns the interpolated variable.""" if not indexes_coords: return var.copy() if len(indexes_coords) == 1: func, kwargs = _get_interpolator(method, vectorizeable_only=True, **kwargs) else: func, kwargs = _get_interpolator_nd(method, **kwargs) in_coords, result_coords = zip(*(v for v in indexes_coords.values()), strict=True) # input coordinates along which we are interpolation are core dimensions # the corresponding output coordinates may or may not have the same name, # so `all_in_core_dims` is also `exclude_dims` all_in_core_dims = set(indexes_coords) result_dims = OrderedSet(itertools.chain(*(_.dims for _ in result_coords))) result_sizes = ChainMap(*(_.sizes for _ in result_coords)) # any dimensions on the output that are present on the input, but are not being # interpolated along are dimensions along which we automatically vectorize. # Consider the problem in https://github.com/pydata/xarray/issues/6799#issuecomment-2474126217 # In the following, dimension names are listed out in []. # # da[time, q, lat, lon].interp(q=bar[lat,lon]). Here `lat`, `lon` # are input dimensions, present on the output, but are not the coordinates # we are explicitly interpolating. These are the dimensions along which we vectorize. # `q` is the only input core dimensions, and changes size (disappears) # so it is in exclude_dims. 
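# A minimal sketch of that scenario through the public API (the sizes,
# values and coordinate labels below are arbitrary assumptions):
#
#     da = xr.DataArray(
#         np.zeros((4, 3, 2, 2)),
#         dims=("time", "q", "lat", "lon"),
#         coords={"q": [0.0, 0.5, 1.0]},
#     )
#     bar = xr.DataArray(np.full((2, 2), 0.25), dims=("lat", "lon"))
#     out = da.interp(q=bar)
#     # "q" is the input core dimension and disappears from the result;
#     # "lat"/"lon" are the dimensions we vectorize over.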
vectorize_dims = (result_dims - all_in_core_dims) & set(var.dims) # remove any output broadcast dimensions from the list of core dimensions output_core_dims = tuple(d for d in result_dims if d not in vectorize_dims) input_core_dims = ( # all coordinates on the input that we interpolate along [tuple(indexes_coords)] # the input coordinates are always 1D at the moment, so we just need to list out their names + [tuple(_.dims) for _ in in_coords] # The last set of inputs are the coordinates we are interpolating to. + [ tuple(d for d in coord.dims if d not in vectorize_dims) for coord in result_coords ] ) output_sizes = {k: result_sizes[k] for k in output_core_dims} # scipy.interpolate.interp1d always forces to float. dtype = float if not issubclass(var.dtype.type, np.inexact) else var.dtype result = apply_ufunc( _interpnd, var, *in_coords, *result_coords, input_core_dims=input_core_dims, output_core_dims=[output_core_dims], exclude_dims=all_in_core_dims, dask="parallelized", kwargs=dict( interp_func=func, interp_kwargs=kwargs, # we leave broadcasting up to dask if possible # but we need broadcasted values in _interpnd, so propagate that # context (dimension names), and broadcast there # This would be unnecessary if we could tell apply_ufunc # to insert size-1 broadcast dimensions result_coord_core_dims=input_core_dims[-len(result_coords) :], ), # TODO: deprecate and have the user rechunk themselves dask_gufunc_kwargs=dict(output_sizes=output_sizes, allow_rechunk=True), output_dtypes=[dtype], vectorize=bool(vectorize_dims), keep_attrs=True, ) return result def _interp1d( var: Variable, x_: list[Variable], new_x_: list[Variable], func: Interpolator, kwargs, ) -> np.ndarray: """Core 1D array interpolation routine.""" # x, new_x are tuples of size 1. x, new_x = x_[0], new_x_[0] rslt = func(x.data, var, **kwargs)(ravel(new_x.data)) if new_x.ndim > 1: return reshape(rslt.data, (var.shape[:-1] + new_x.shape)) if new_x.ndim == 0: return rslt[..., -1] return rslt def _interpnd( data: np.ndarray, *coords: np.ndarray, interp_func: Interpolator | InterpCallable, interp_kwargs, result_coord_core_dims: list[tuple[Hashable, ...]], ) -> np.ndarray: """ Core nD array interpolation routine. The first half arrays in `coords` are original coordinates, the other half are destination coordinates. 
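A minimal sketch of the 1D calling convention (the dimension name
``points`` and the sample values are assumptions, not tested output):

>>> x = np.array([0.0, 1.0, 2.0])
>>> new_x = np.array([0.5, 1.5])
>>> data = np.array([10.0, 20.0, 30.0])
>>> func, kw = _get_interpolator("linear")
>>> out = _interpnd(
...     data, x, new_x, interp_func=func, interp_kwargs=kw,
...     result_coord_core_dims=[("points",)],
... )  # doctest: +SKIP
>>> # out is approximately array([15., 25.])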
""" n_x = len(coords) // 2 ndim = data.ndim nconst = ndim - n_x # Convert everything to Variables, since that makes applying # `_localize` and `_floatize_x` much easier x = [ Variable([f"dim_{nconst + dim}"], _x, fastpath=True) for dim, _x in enumerate(coords[:n_x]) ] new_x = list( broadcast_variables( *( Variable(dims, _x, fastpath=True) for dims, _x in zip(result_coord_core_dims, coords[n_x:], strict=True) ) ) ) var = Variable([f"dim_{dim}" for dim in range(ndim)], data, fastpath=True) if interp_kwargs.get("method") in ["linear", "nearest"]: indexes_coords = { _x.dims[0]: (_x, _new_x) for _x, _new_x in zip(x, new_x, strict=True) } # simple speed up for the local interpolation var, indexes_coords = _localize(var, indexes_coords) x, new_x = tuple( list(_) for _ in zip(*(indexes_coords[d] for d in indexes_coords), strict=True) ) x_list, new_x_list = _floatize_x(x, new_x) if len(x) == 1: # TODO: narrow interp_func to interpolator here return _interp1d(var, x_list, new_x_list, interp_func, interp_kwargs) # type: ignore[arg-type] # move the interpolation axes to the start position data = transpose(var._data, range(-len(x), var.ndim - len(x))) # stack new_x to 1 vector, with reshape xi = stack([ravel(x1.data) for x1 in new_x_list], axis=-1) rslt: np.ndarray = interp_func(x_list, data, xi, **interp_kwargs) # type: ignore[assignment] # move back the interpolation axes to the last position rslt = transpose(rslt, range(-rslt.ndim + 1, 1)) return reshape(rslt, rslt.shape[:-1] + new_x[0].shape) def decompose_interp(indexes_coords: SourceDest) -> Generator[SourceDest, None]: """Decompose the interpolation into a succession of independent interpolation keeping the order""" dest_dims = [ dest[1].dims if dest[1].ndim > 0 else (dim,) for dim, dest in indexes_coords.items() ] partial_dest_dims: list[tuple[Hashable, ...]] = [] partial_indexes_coords: SourceDest = {} for i, index_coords in enumerate(indexes_coords.items()): partial_indexes_coords.update([index_coords]) if i == len(dest_dims) - 1: break partial_dest_dims += [dest_dims[i]] other_dims = dest_dims[i + 1 :] s_partial_dest_dims = {dim for dims in partial_dest_dims for dim in dims} s_other_dims = {dim for dims in other_dims for dim in dims} if not s_partial_dest_dims.intersection(s_other_dims): # this interpolation is orthogonal to the rest yield partial_indexes_coords partial_dest_dims = [] partial_indexes_coords = {} yield partial_indexes_coords xarray-2025.09.0/xarray/core/nputils.py000066400000000000000000000255411505620616400176610ustar00rootroot00000000000000from __future__ import annotations import warnings from collections.abc import Callable import numpy as np import pandas as pd from packaging.version import Version from xarray.compat.array_api_compat import get_array_namespace from xarray.core.utils import is_duck_array, module_available from xarray.namedarray import pycompat # remove once numpy 2.0 is the oldest supported version if module_available("numpy", minversion="2.0.0.dev0"): from numpy.lib.array_utils import ( # type: ignore[import-not-found,unused-ignore] normalize_axis_index, ) else: from numpy.core.multiarray import ( # type: ignore[attr-defined,no-redef,unused-ignore] normalize_axis_index, ) # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning # type: ignore[attr-defined,unused-ignore] except ImportError: from numpy import RankWarning # type: ignore[attr-defined,no-redef,unused-ignore] from xarray.core.options import OPTIONS try: import bottleneck as bn _BOTTLENECK_AVAILABLE 
= True except ImportError: # use numpy methods instead bn = np _BOTTLENECK_AVAILABLE = False def _select_along_axis(values, idx, axis): other_ind = np.ix_(*[np.arange(s) for s in idx.shape]) sl = other_ind[:axis] + (idx,) + other_ind[axis:] return values[sl] def nanfirst(values, axis, keepdims=False): if isinstance(axis, tuple): (axis,) = axis axis = normalize_axis_index(axis, values.ndim) idx_first = np.argmax(~pd.isnull(values), axis=axis) result = _select_along_axis(values, idx_first, axis) if keepdims: return np.expand_dims(result, axis=axis) else: return result def nanlast(values, axis, keepdims=False): if isinstance(axis, tuple): (axis,) = axis axis = normalize_axis_index(axis, values.ndim) rev = (slice(None),) * axis + (slice(None, None, -1),) idx_last = -1 - np.argmax(~pd.isnull(values)[rev], axis=axis) result = _select_along_axis(values, idx_last, axis) if keepdims: return np.expand_dims(result, axis=axis) else: return result def inverse_permutation(indices: np.ndarray, N: int | None = None) -> np.ndarray: """Return indices for an inverse permutation. Parameters ---------- indices : 1D np.ndarray with dtype=int Integer positions to assign elements to. N : int, optional Size of the array Returns ------- inverse_permutation : 1D np.ndarray with dtype=int Integer indices to take from the original array to create the permutation. """ if N is None: N = len(indices) # use intp instead of int64 because of windows :( inverse_permutation = np.full(N, -1, dtype=np.intp) inverse_permutation[indices] = np.arange(len(indices), dtype=np.intp) return inverse_permutation def _ensure_bool_is_ndarray(result, *args): # numpy will sometimes return a scalar value from binary comparisons if it # can't handle the comparison instead of broadcasting, e.g., # In [10]: 1 == np.array(['a', 'b']) # Out[10]: False # This function ensures that the result is the appropriate shape in these # cases if isinstance(result, bool): shape = np.broadcast(*args).shape constructor = np.ones if result else np.zeros result = constructor(shape, dtype=bool) return result def array_eq(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"elementwise comparison failed") return _ensure_bool_is_ndarray(self == other, self, other) def array_ne(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", r"elementwise comparison failed") return _ensure_bool_is_ndarray(self != other, self, other) def _is_contiguous(positions): """Given a non-empty list, does it consist of contiguous integers?""" previous = positions[0] for current in positions[1:]: if current != previous + 1: return False previous = current return True def _advanced_indexer_subspaces(key): """Indices of the advanced indexes subspaces for mixed indexing and vindex.""" if not isinstance(key, tuple): key = (key,) advanced_index_positions = [ i for i, k in enumerate(key) if not isinstance(k, slice) ] if not advanced_index_positions or not _is_contiguous(advanced_index_positions): # Nothing to reorder: dimensions on the indexing result are already # ordered like vindex. 
See NumPy's rule for "Combining advanced and # basic indexing": # https://numpy.org/doc/stable/reference/arrays.indexing.html#combining-advanced-and-basic-indexing return (), () non_slices = [k for k in key if not isinstance(k, slice)] broadcasted_shape = np.broadcast_shapes( *[item.shape if is_duck_array(item) else (0,) for item in non_slices] ) ndim = len(broadcasted_shape) mixed_positions = advanced_index_positions[0] + np.arange(ndim) vindex_positions = np.arange(ndim) return mixed_positions, vindex_positions class NumpyVIndexAdapter: """Object that implements indexing like vindex on a np.ndarray. This is a pure Python implementation of (some of) the logic in this NumPy proposal: https://github.com/numpy/numpy/pull/6256 """ def __init__(self, array): self._array = array def __getitem__(self, key): mixed_positions, vindex_positions = _advanced_indexer_subspaces(key) return np.moveaxis(self._array[key], mixed_positions, vindex_positions) def __setitem__(self, key, value): """Value must have dimensionality matching the key.""" mixed_positions, vindex_positions = _advanced_indexer_subspaces(key) self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions) def _create_method(name, npmodule=np) -> Callable: def f(values, axis=None, **kwargs): dtype = kwargs.get("dtype") bn_func = getattr(bn, name, None) xp = get_array_namespace(values) if xp is not np: func = getattr(xp, name, None) if func is not None: return func(values, axis=axis, **kwargs) if ( module_available("numbagg") and OPTIONS["use_numbagg"] and isinstance(values, np.ndarray) # numbagg<0.7.0 uses ddof=1 only, but numpy uses ddof=0 by default and ( pycompat.mod_version("numbagg") >= Version("0.7.0") or ("var" not in name and "std" not in name) or kwargs.get("ddof", 0) == 1 ) # TODO: bool? and values.dtype.kind in "uif" # and values.dtype.isnative and (dtype is None or np.dtype(dtype) == values.dtype) # numbagg.nanquantile only available after 0.8.0 and with linear method and ( name != "nanquantile" or ( pycompat.mod_version("numbagg") >= Version("0.8.0") and kwargs.get("method", "linear") == "linear" ) ) ): import numbagg nba_func = getattr(numbagg, name, None) if nba_func is not None: # numbagg does not use dtype kwargs.pop("dtype", None) # prior to 0.7.0, numbagg did not support ddof; we ensure it's limited # to ddof=1 above. 
if pycompat.mod_version("numbagg") < Version("0.7.0"): kwargs.pop("ddof", None) if name == "nanquantile": kwargs["quantiles"] = kwargs.pop("q") kwargs.pop("method", None) return nba_func(values, axis=axis, **kwargs) if ( _BOTTLENECK_AVAILABLE and OPTIONS["use_bottleneck"] and isinstance(values, np.ndarray) and bn_func is not None and not isinstance(axis, tuple) and values.dtype.kind in "uifc" and values.dtype.isnative and (dtype is None or np.dtype(dtype) == values.dtype) ): # bottleneck does not take care dtype, min_count kwargs.pop("dtype", None) result = bn_func(values, axis=axis, **kwargs) # bottleneck returns python scalars for reduction over all axes if isinstance(result, float): result = np.float64(result) else: result = getattr(npmodule, name)(values, axis=axis, **kwargs) return result f.__name__ = name return f def _nanpolyfit_1d(arr, x, rcond=None): out = np.full((x.shape[1] + 1,), np.nan) mask = np.isnan(arr) if not np.all(mask): out[:-1], resid, rank, _ = np.linalg.lstsq(x[~mask, :], arr[~mask], rcond=rcond) out[-1] = resid[0] if resid.size > 0 else np.nan warn_on_deficient_rank(rank, x.shape[1]) return out def warn_on_deficient_rank(rank, order): if rank != order: warnings.warn("Polyfit may be poorly conditioned", RankWarning, stacklevel=2) def least_squares(lhs, rhs, rcond=None, skipna=False): if rhs.ndim > 2: out_shape = rhs.shape rhs = rhs.reshape(rhs.shape[0], -1) else: out_shape = None if skipna: added_dim = rhs.ndim == 1 if added_dim: rhs = rhs.reshape(rhs.shape[0], 1) nan_cols = np.any(np.isnan(rhs), axis=0) out = np.empty((lhs.shape[1] + 1, rhs.shape[1])) if np.any(nan_cols): out[:, nan_cols] = np.apply_along_axis( _nanpolyfit_1d, 0, rhs[:, nan_cols], lhs ) if np.any(~nan_cols): out[:-1, ~nan_cols], resids, rank, _ = np.linalg.lstsq( lhs, rhs[:, ~nan_cols], rcond=rcond ) out[-1, ~nan_cols] = resids if resids.size > 0 else np.nan warn_on_deficient_rank(rank, lhs.shape[1]) coeffs = out[:-1, :] residuals = out[-1, :] if added_dim: coeffs = coeffs.reshape(coeffs.shape[0]) residuals = residuals.reshape(residuals.shape[0]) else: coeffs, residuals, rank, _ = np.linalg.lstsq(lhs, rhs, rcond=rcond) if residuals.size == 0: residuals = coeffs[0] * np.nan warn_on_deficient_rank(rank, lhs.shape[1]) if out_shape is not None: coeffs = coeffs.reshape(-1, *out_shape[1:]) residuals = residuals.reshape(*out_shape[1:]) return coeffs, residuals nanmin = _create_method("nanmin") nanmax = _create_method("nanmax") nanmean = _create_method("nanmean") nanmedian = _create_method("nanmedian") nanvar = _create_method("nanvar") nanstd = _create_method("nanstd") nanprod = _create_method("nanprod") nancumsum = _create_method("nancumsum") nancumprod = _create_method("nancumprod") nanargmin = _create_method("nanargmin") nanargmax = _create_method("nanargmax") nanquantile = _create_method("nanquantile") xarray-2025.09.0/xarray/core/options.py000066400000000000000000000311321505620616400176470ustar00rootroot00000000000000from __future__ import annotations import warnings from typing import TYPE_CHECKING, Any, Literal, TypedDict from xarray.core.utils import FrozenDict if TYPE_CHECKING: from matplotlib.colors import Colormap Options = Literal[ "arithmetic_join", "chunk_manager", "cmap_divergent", "cmap_sequential", "display_max_children", "display_max_rows", "display_values_threshold", "display_style", "display_width", "display_expand_attrs", "display_expand_coords", "display_expand_data_vars", "display_expand_data", "display_expand_groups", "display_expand_indexes", "display_default_indexes", 
"enable_cftimeindex", "file_cache_maxsize", "keep_attrs", "warn_for_unclosed_files", "use_bottleneck", "use_new_combine_kwarg_defaults", "use_numbagg", "use_opt_einsum", "use_flox", ] class T_Options(TypedDict): arithmetic_broadcast: bool arithmetic_join: Literal["inner", "outer", "left", "right", "exact"] chunk_manager: str cmap_divergent: str | Colormap cmap_sequential: str | Colormap display_max_children: int display_max_rows: int display_values_threshold: int display_style: Literal["text", "html"] display_width: int display_expand_attrs: Literal["default"] | bool display_expand_coords: Literal["default"] | bool display_expand_data_vars: Literal["default"] | bool display_expand_data: Literal["default"] | bool display_expand_groups: Literal["default"] | bool display_expand_indexes: Literal["default"] | bool display_default_indexes: Literal["default"] | bool enable_cftimeindex: bool file_cache_maxsize: int keep_attrs: Literal["default"] | bool warn_for_unclosed_files: bool use_bottleneck: bool use_flox: bool use_new_combine_kwarg_defaults: bool use_numbagg: bool use_opt_einsum: bool OPTIONS: T_Options = { "arithmetic_broadcast": True, "arithmetic_join": "inner", "chunk_manager": "dask", "cmap_divergent": "RdBu_r", "cmap_sequential": "viridis", "display_max_children": 6, "display_max_rows": 12, "display_values_threshold": 200, "display_style": "html", "display_width": 80, "display_expand_attrs": "default", "display_expand_coords": "default", "display_expand_data_vars": "default", "display_expand_data": "default", "display_expand_groups": "default", "display_expand_indexes": "default", "display_default_indexes": False, "enable_cftimeindex": True, "file_cache_maxsize": 128, "keep_attrs": "default", "warn_for_unclosed_files": False, "use_bottleneck": True, "use_flox": True, "use_new_combine_kwarg_defaults": False, "use_numbagg": True, "use_opt_einsum": True, } _JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"]) _DISPLAY_OPTIONS = frozenset(["text", "html"]) def _positive_integer(value: Any) -> bool: return isinstance(value, int) and value > 0 _VALIDATORS = { "arithmetic_broadcast": lambda value: isinstance(value, bool), "arithmetic_join": _JOIN_OPTIONS.__contains__, "display_max_children": _positive_integer, "display_max_rows": _positive_integer, "display_values_threshold": _positive_integer, "display_style": _DISPLAY_OPTIONS.__contains__, "display_width": _positive_integer, "display_expand_attrs": lambda choice: choice in [True, False, "default"], "display_expand_coords": lambda choice: choice in [True, False, "default"], "display_expand_data_vars": lambda choice: choice in [True, False, "default"], "display_expand_data": lambda choice: choice in [True, False, "default"], "display_expand_indexes": lambda choice: choice in [True, False, "default"], "display_default_indexes": lambda choice: choice in [True, False, "default"], "enable_cftimeindex": lambda value: isinstance(value, bool), "file_cache_maxsize": _positive_integer, "keep_attrs": lambda choice: choice in [True, False, "default"], "use_bottleneck": lambda value: isinstance(value, bool), "use_new_combine_kwarg_defaults": lambda value: isinstance(value, bool), "use_numbagg": lambda value: isinstance(value, bool), "use_opt_einsum": lambda value: isinstance(value, bool), "use_flox": lambda value: isinstance(value, bool), "warn_for_unclosed_files": lambda value: isinstance(value, bool), } def _set_file_cache_maxsize(value) -> None: from xarray.backends.file_manager import FILE_CACHE FILE_CACHE.maxsize = value def 
_warn_on_setting_enable_cftimeindex(enable_cftimeindex): warnings.warn( "The enable_cftimeindex option is now a no-op " "and will be removed in a future version of xarray.", FutureWarning, stacklevel=2, ) _SETTERS = { "enable_cftimeindex": _warn_on_setting_enable_cftimeindex, "file_cache_maxsize": _set_file_cache_maxsize, } def _get_boolean_with_default(option: Options, default: bool) -> bool: global_choice = OPTIONS[option] if global_choice == "default": return default elif isinstance(global_choice, bool): return global_choice else: raise ValueError( f"The global option {option} must be one of True, False or 'default'." ) def _get_keep_attrs(default: bool) -> bool: return _get_boolean_with_default("keep_attrs", default) class set_options: """ Set options for xarray in a controlled context. Parameters ---------- arithmetic_join : {"inner", "outer", "left", "right", "exact"}, default: "inner" DataArray/Dataset alignment in binary operations: - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. chunk_manager : str, default: "dask" Chunk manager to use for chunked array computations when multiple options are installed. cmap_divergent : str or matplotlib.colors.Colormap, default: "RdBu_r" Colormap to use for divergent data plots. If string, must be matplotlib built-in colormap. Can also be a Colormap object (e.g. mpl.colormaps["magma"]) cmap_sequential : str or matplotlib.colors.Colormap, default: "viridis" Colormap to use for nondivergent data plots. If string, must be matplotlib built-in colormap. Can also be a Colormap object (e.g. mpl.colormaps["magma"]) display_expand_attrs : {"default", True, False} Whether to expand the attributes section for display of ``DataArray`` or ``Dataset`` objects. Can be * ``True`` : to always expand attrs * ``False`` : to always collapse attrs * ``default`` : to expand unless over a pre-defined limit display_expand_coords : {"default", True, False} Whether to expand the coordinates section for display of ``DataArray`` or ``Dataset`` objects. Can be * ``True`` : to always expand coordinates * ``False`` : to always collapse coordinates * ``default`` : to expand unless over a pre-defined limit display_expand_data : {"default", True, False} Whether to expand the data section for display of ``DataArray`` objects. Can be * ``True`` : to always expand data * ``False`` : to always collapse data * ``default`` : to expand unless over a pre-defined limit display_expand_data_vars : {"default", True, False} Whether to expand the data variables section for display of ``Dataset`` objects. Can be * ``True`` : to always expand data variables * ``False`` : to always collapse data variables * ``default`` : to expand unless over a pre-defined limit display_expand_indexes : {"default", True, False} Whether to expand the indexes section for display of ``DataArray`` or ``Dataset``. 
Can be * ``True`` : to always expand indexes * ``False`` : to always collapse indexes * ``default`` : to expand unless over a pre-defined limit (always collapse for html style) display_max_children : int, default: 6 Maximum number of children to display for each node in a DataTree. display_max_rows : int, default: 12 Maximum display rows. display_values_threshold : int, default: 200 Total number of array elements which trigger summarization rather than full repr for variable data views (numpy arrays). display_style : {"text", "html"}, default: "html" Display style to use in jupyter for xarray objects. display_width : int, default: 80 Maximum display width for ``repr`` on xarray objects. file_cache_maxsize : int, default: 128 Maximum number of open files to hold in xarray's global least-recently-usage cached. This should be smaller than your system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux. keep_attrs : {"default", True, False} Whether to keep attributes on xarray Datasets/dataarrays after operations. Can be * ``True`` : to always keep attrs * ``False`` : to always discard attrs * ``default`` : to use original logic that attrs should only be kept in unambiguous circumstances use_bottleneck : bool, default: True Whether to use ``bottleneck`` to accelerate 1D reductions and 1D rolling reduction operations. use_flox : bool, default: True Whether to use ``numpy_groupies`` and `flox`` to accelerate groupby and resampling reductions. use_new_combine_kwarg_defaults : bool, default False Whether to use new kwarg default values for combine functions: :py:func:`~xarray.concat`, :py:func:`~xarray.merge`, :py:func:`~xarray.open_mfdataset`. New values are: * ``data_vars``: None * ``coords``: "minimal" * ``compat``: "override" * ``join``: "exact" use_numbagg : bool, default: True Whether to use ``numbagg`` to accelerate reductions. Takes precedence over ``use_bottleneck`` when both are True. use_opt_einsum : bool, default: True Whether to use ``opt_einsum`` to accelerate dot products. warn_for_unclosed_files : bool, default: False Whether or not to issue a warning when unclosed files are deallocated. This is mostly useful for debugging. Examples -------- It is possible to use ``set_options`` either as a context manager: >>> ds = xr.Dataset({"x": np.arange(1000)}) >>> with xr.set_options(display_width=40): ... print(ds) ... Size: 8kB Dimensions: (x: 1000) Coordinates: * x (x) int64 8kB 0 1 ... 999 Data variables: *empty* Or to set global options: >>> xr.set_options(display_width=80) # doctest: +ELLIPSIS """ def __init__(self, **kwargs): self.old = {} for k, v in kwargs.items(): if k not in OPTIONS: raise ValueError( f"argument name {k!r} is not in the set of valid options {set(OPTIONS)!r}" ) if k in _VALIDATORS and not _VALIDATORS[k](v): if k == "arithmetic_join": expected = f"Expected one of {_JOIN_OPTIONS!r}" elif k == "display_style": expected = f"Expected one of {_DISPLAY_OPTIONS!r}" else: expected = "" raise ValueError( f"option {k!r} given an invalid value: {v!r}. " + expected ) self.old[k] = OPTIONS[k] self._apply_update(kwargs) def _apply_update(self, options_dict): for k, v in options_dict.items(): if k in _SETTERS: _SETTERS[k](v) OPTIONS.update(options_dict) def __enter__(self): return def __exit__(self, type, value, traceback): self._apply_update(self.old) def get_options(): """ Get options for xarray. 
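Returns
-------
options : FrozenDict
    A read-only mapping of the current global options, e.g.
    ``xr.get_options()["display_width"]``.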
See Also ---------- set_options """ return FrozenDict(OPTIONS) xarray-2025.09.0/xarray/core/parallel.py000066400000000000000000000607461505620616400177650ustar00rootroot00000000000000from __future__ import annotations import collections import itertools import operator from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from typing import TYPE_CHECKING, Any, Literal, TypedDict import numpy as np from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.indexes import Index from xarray.core.utils import is_dask_collection from xarray.core.variable import Variable from xarray.structure.alignment import align from xarray.structure.merge import merge if TYPE_CHECKING: from xarray.core.types import T_Xarray class ExpectedDict(TypedDict): shapes: dict[Hashable, int] coords: set[Hashable] data_vars: set[Hashable] def unzip(iterable): return zip(*iterable, strict=True) def assert_chunks_compatible(a: Dataset, b: Dataset): a = a.unify_chunks() b = b.unify_chunks() for dim in set(a.chunks).intersection(set(b.chunks)): if a.chunks[dim] != b.chunks[dim]: raise ValueError(f"Chunk sizes along dimension {dim!r} are not equal.") def check_result_variables( result: DataArray | Dataset, expected: ExpectedDict, kind: Literal["coords", "data_vars"], ): if kind == "coords": nice_str = "coordinate" elif kind == "data_vars": nice_str = "data" # check that coords and data variables are as expected missing = expected[kind] - set(getattr(result, kind)) if missing: raise ValueError( "Result from applying user function does not contain " f"{nice_str} variables {missing}." ) extra = set(getattr(result, kind)) - expected[kind] if extra: raise ValueError( "Result from applying user function has unexpected " f"{nice_str} variables {extra}." ) def dataset_to_dataarray(obj: Dataset) -> DataArray: if not isinstance(obj, Dataset): raise TypeError(f"Expected Dataset, got {type(obj)}") if len(obj.data_vars) > 1: raise TypeError( "Trying to convert Dataset with more than one data variable to DataArray" ) return next(iter(obj.data_vars.values())) def dataarray_to_dataset(obj: DataArray) -> Dataset: # only using _to_temp_dataset would break # func = lambda x: x.to_dataset() # since that relies on preserving name. if obj.name is None: dataset = obj._to_temp_dataset() else: dataset = obj.to_dataset() return dataset def make_meta(obj): """If obj is a DataArray or Dataset, return a new object of the same type and with the same variables and dtypes, but where all variables have size 0 and numpy backend. If obj is neither a DataArray nor Dataset, return it unaltered. 
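Examples
--------
A minimal sketch (requires dask for meta creation; not a tested doctest):

>>> ds = xr.Dataset({"a": ("x", np.arange(4.0))})
>>> meta = make_meta(ds)
>>> dict(meta.sizes)  # doctest: +SKIP
{'x': 0}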
""" if isinstance(obj, DataArray): obj_array = obj obj = dataarray_to_dataset(obj) elif isinstance(obj, Dataset): obj_array = None else: return obj from dask.array.utils import meta_from_array meta = Dataset() for name, variable in obj.variables.items(): meta_obj = meta_from_array(variable.data, ndim=variable.ndim) meta[name] = (variable.dims, meta_obj, variable.attrs) meta.attrs = obj.attrs meta = meta.set_coords(obj.coords) if obj_array is not None: return dataset_to_dataarray(meta) return meta def infer_template( func: Callable[..., T_Xarray], obj: DataArray | Dataset, *args, **kwargs ) -> T_Xarray: """Infer return object by running the function on meta objects.""" meta_args = [make_meta(arg) for arg in (obj,) + args] try: template = func(*meta_args, **kwargs) except Exception as e: raise Exception( "Cannot infer object returned from running user provided function. " "Please supply the 'template' kwarg to map_blocks." ) from e if not isinstance(template, Dataset | DataArray): raise TypeError( "Function must return an xarray DataArray or Dataset. Instead it returned " f"{type(template)}" ) return template def make_dict(x: DataArray | Dataset) -> dict[Hashable, Any]: """Map variable name to numpy(-like) data (Dataset.to_dict() is too complicated). """ if isinstance(x, DataArray): x = x._to_temp_dataset() return {k: v.data for k, v in x.variables.items()} def _get_chunk_slicer(dim: Hashable, chunk_index: Mapping, chunk_bounds: Mapping): if dim in chunk_index: which_chunk = chunk_index[dim] return slice(chunk_bounds[dim][which_chunk], chunk_bounds[dim][which_chunk + 1]) return slice(None) def subset_dataset_to_block( graph: dict, gname: str, dataset: Dataset, input_chunk_bounds, chunk_index ): """ Creates a task that subsets an xarray dataset to a block determined by chunk_index. Block extents are determined by input_chunk_bounds. Also subtasks that subset the constituent variables of a dataset. """ import dask # this will become [[name1, variable1], # [name2, variable2], # ...] # which is passed to dict and then to Dataset data_vars = [] coords = [] chunk_tuple = tuple(chunk_index.values()) chunk_dims_set = set(chunk_index) variable: Variable for name, variable in dataset.variables.items(): # make a task that creates tuple of (dims, chunk) if dask.is_dask_collection(variable.data): # get task name for chunk chunk = ( variable.data.name, *tuple(chunk_index[dim] for dim in variable.dims), ) chunk_variable_task = (f"{name}-{gname}-{chunk[0]!r}",) + chunk_tuple graph[chunk_variable_task] = ( tuple, [variable.dims, chunk, variable.attrs], ) else: assert name in dataset.dims or variable.ndim == 0 # non-dask array possibly with dimensions chunked on other variables # index into variable appropriately subsetter = { dim: _get_chunk_slicer(dim, chunk_index, input_chunk_bounds) for dim in variable.dims } if set(variable.dims) < chunk_dims_set: this_var_chunk_tuple = tuple(chunk_index[dim] for dim in variable.dims) else: this_var_chunk_tuple = chunk_tuple chunk_variable_task = ( f"{name}-{gname}-{dask.base.tokenize(subsetter)}", ) + this_var_chunk_tuple # We are including a dimension coordinate, # minimize duplication by not copying it in the graph for every chunk. 
if variable.ndim == 0 or chunk_variable_task not in graph: subset = variable.isel(subsetter) graph[chunk_variable_task] = ( tuple, [subset.dims, subset._data, subset.attrs], ) # this task creates dict mapping variable name to above tuple if name in dataset._coord_names: coords.append([name, chunk_variable_task]) else: data_vars.append([name, chunk_variable_task]) return (Dataset, (dict, data_vars), (dict, coords), dataset.attrs) def map_blocks( func: Callable[..., T_Xarray], obj: DataArray | Dataset, args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, template: DataArray | Dataset | None = None, ) -> T_Xarray: """Apply a function to each block of a DataArray or Dataset. .. warning:: This function is experimental and its signature may change. Parameters ---------- func : callable User-provided function that accepts a DataArray or Dataset as its first parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below), corresponding to one chunk along each chunked dimension. ``func`` will be executed as ``func(subset_obj, *subset_args, **kwargs)``. This function must return either a single DataArray or a single Dataset. This function cannot add a new chunked dimension. obj : DataArray, Dataset Passed to the function as its first argument, one block at a time. args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like ``obj`` but has sizes 0, to determine properties of the returned object such as dtype, variable names, attributes, new dimensions and new indexes (if any). ``template`` must be provided if the function changes the size of existing dimensions. When provided, ``attrs`` on variables in `template` are copied over to the result. Any ``attrs`` set by ``func`` will be ignored. Returns ------- obj : same as obj A single DataArray or Dataset with dask backend, reassembled from the outputs of the function. Notes ----- This function is designed for when ``func`` needs to manipulate a whole xarray object subset to each block. Each block is loaded into memory. In the more common case where ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``. If none of the variables in ``obj`` is backed by dask arrays, calling this function is equivalent to calling ``func(obj, *args, **kwargs)``. See Also -------- dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks xarray.DataArray.map_blocks Examples -------- Calculate an anomaly from climatology using ``.groupby()``. Using ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``, its indices, and its methods like ``.groupby()``. >>> def calculate_anomaly(da, groupby_type="time.month"): ... gb = da.groupby(groupby_type) ... clim = gb.mean(dim="time") ... return gb - clim ... >>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True) >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"]) >>> np.random.seed(123) >>> array = xr.DataArray( ... np.random.rand(len(time)), ... dims=["time"], ... coords={"time": time, "month": month}, ... 
).chunk() >>> array.map_blocks(calculate_anomaly, template=array).compute() Size: 192B array([ 0.12894847, 0.11323072, -0.0855964 , -0.09334032, 0.26848862, 0.12382735, 0.22460641, 0.07650108, -0.07673453, -0.22865714, -0.19063865, 0.0590131 , -0.12894847, -0.11323072, 0.0855964 , 0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108, 0.07673453, 0.22865714, 0.19063865, -0.0590131 ]) Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12 Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments to the function being applied in ``xr.map_blocks()``: >>> array.map_blocks( ... calculate_anomaly, ... kwargs={"groupby_type": "time.year"}, ... template=array, ... ) # doctest: +ELLIPSIS Size: 192B dask.array<-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray> Coordinates: * time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00 month (time) int64 192B dask.array """ def _wrapper( func: Callable, args: list, kwargs: dict, arg_is_array: Iterable[bool], expected: ExpectedDict, expected_indexes: dict[Hashable, Index], ): """ Wrapper function that receives datasets in args; converts to dataarrays when necessary; passes these to the user function `func` and checks returned objects for expected shapes/sizes/etc. """ converted_args = [ dataset_to_dataarray(arg) if is_array else arg for is_array, arg in zip(arg_is_array, args, strict=True) ] result = func(*converted_args, **kwargs) merged_coordinates = merge( [arg.coords for arg in args if isinstance(arg, Dataset | DataArray)], join="exact", compat="override", ).coords # check all dims are present missing_dimensions = set(expected["shapes"]) - set(result.sizes) if missing_dimensions: raise ValueError( f"Dimensions {missing_dimensions} missing on returned object." ) # check that index lengths and values are as expected for name, index in result._indexes.items(): if ( name in expected["shapes"] and result.sizes[name] != expected["shapes"][name] ): raise ValueError( f"Received dimension {name!r} of length {result.sizes[name]}. " f"Expected length {expected['shapes'][name]}." ) # ChainMap wants MutableMapping, but xindexes is Mapping merged_indexes = collections.ChainMap( expected_indexes, merged_coordinates.xindexes, # type: ignore[arg-type] ) expected_index = merged_indexes.get(name, None) if expected_index is not None and not index.equals(expected_index): raise ValueError( f"Expected index {name!r} to be {expected_index!r}. Received {index!r} instead." ) # check that all expected variables were returned check_result_variables(result, expected, "coords") if isinstance(result, Dataset): check_result_variables(result, expected, "data_vars") return make_dict(result) if template is not None and not isinstance(template, DataArray | Dataset): raise TypeError( f"template must be a DataArray or Dataset. Received {type(template).__name__} instead." ) if not isinstance(args, Sequence): raise TypeError("args must be a sequence (for example, a list or tuple).") if kwargs is None: kwargs = {} elif not isinstance(kwargs, Mapping): raise TypeError("kwargs must be a mapping (for example, a dict)") for value in kwargs.values(): if is_dask_collection(value): raise TypeError( "Cannot pass dask collections in kwargs yet. Please compute or " "load values before passing to map_blocks." 
) if not is_dask_collection(obj): return func(obj, *args, **kwargs) try: import dask import dask.array from dask.base import tokenize from dask.highlevelgraph import HighLevelGraph except ImportError: pass all_args = [obj] + list(args) is_xarray = [isinstance(arg, Dataset | DataArray) for arg in all_args] is_array = [isinstance(arg, DataArray) for arg in all_args] # there should be a better way to group this. partition? xarray_indices, xarray_objs = unzip( (index, arg) for index, arg in enumerate(all_args) if is_xarray[index] ) others = [ (index, arg) for index, arg in enumerate(all_args) if not is_xarray[index] ] # all xarray objects must be aligned. This is consistent with apply_ufunc. aligned = align(*xarray_objs, join="exact") xarray_objs = tuple( dataarray_to_dataset(arg) if isinstance(arg, DataArray) else arg for arg in aligned ) # rechunk any numpy variables appropriately xarray_objs = tuple(arg.chunk(arg.chunksizes) for arg in xarray_objs) merged_coordinates = merge( [arg.coords for arg in aligned], join="exact", compat="override", ).coords _, npargs = unzip( sorted( list(zip(xarray_indices, xarray_objs, strict=True)) + others, key=lambda x: x[0], ) ) # check that chunk sizes are compatible input_chunks = dict(npargs[0].chunks) for arg in xarray_objs[1:]: assert_chunks_compatible(npargs[0], arg) input_chunks.update(arg.chunks) coordinates: Coordinates if template is None: # infer template by providing zero-shaped arrays template = infer_template(func, aligned[0], *args, **kwargs) template_coords = set(template.coords) preserved_coord_vars = template_coords & set(merged_coordinates) new_coord_vars = template_coords - set(merged_coordinates) preserved_coords = merged_coordinates.to_dataset()[preserved_coord_vars] # preserved_coords contains all coordinates variables that share a dimension # with any index variable in preserved_indexes # Drop any unneeded vars in a second pass, this is required for e.g. # if the mapped function were to drop a non-dimension coordinate variable. preserved_coords = preserved_coords.drop_vars( tuple(k for k in preserved_coords.variables if k not in template_coords) ) coordinates = merge( (preserved_coords, template.coords.to_dataset()[new_coord_vars]), # FIXME: this should be join="exact", but breaks a test join="outer", compat="override", ).coords output_chunks: Mapping[Hashable, tuple[int, ...]] = { dim: input_chunks[dim] for dim in template.dims if dim in input_chunks } else: # template xarray object has been provided with proper sizes and chunk shapes coordinates = template.coords output_chunks = template.chunksizes if not output_chunks: raise ValueError( "Provided template has no dask arrays. " " Please construct a template with appropriately chunked dask arrays." ) new_indexes = set(template.xindexes) - set(merged_coordinates) modified_indexes = set( name for name, xindex in coordinates.xindexes.items() if not xindex.equals(merged_coordinates.xindexes.get(name, None)) ) for dim in output_chunks: if dim in input_chunks and len(input_chunks[dim]) != len(output_chunks[dim]): raise ValueError( "map_blocks requires that one block of the input maps to one block of output. " f"Expected number of output chunks along dimension {dim!r} to be {len(input_chunks[dim])}. " f"Received {len(output_chunks[dim])} instead. Please provide template if not provided, or " "fix the provided template." 
) if isinstance(template, DataArray): result_is_array = True template_name = template.name template = template._to_temp_dataset() elif isinstance(template, Dataset): result_is_array = False else: raise TypeError( f"func output must be DataArray or Dataset; got {type(template)}" ) # We're building a new HighLevelGraph hlg. We'll have one new layer # for each variable in the dataset, which is the result of the # func applied to the values. graph: dict[Any, Any] = {} new_layers: collections.defaultdict[str, dict[Any, Any]] = collections.defaultdict( dict ) gname = f"{dask.utils.funcname(func)}-{dask.base.tokenize(npargs[0], args, kwargs)}" # map dims to list of chunk indexes ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()} # mapping from chunk index to slice bounds input_chunk_bounds = { dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items() } output_chunk_bounds = { dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in output_chunks.items() } computed_variables = set(template.variables) - set(coordinates.indexes) # iterate over all possible chunk combinations for chunk_tuple in itertools.product(*ichunk.values()): # mapping from dimension name to chunk index chunk_index = dict(zip(ichunk.keys(), chunk_tuple, strict=True)) blocked_args = [ ( subset_dataset_to_block( graph, gname, arg, input_chunk_bounds, chunk_index ) if isxr else arg ) for isxr, arg in zip(is_xarray, npargs, strict=True) ] # only include new or modified indexes to minimize duplication of data indexes = { dim: coordinates.xindexes[dim][ _get_chunk_slicer(dim, chunk_index, output_chunk_bounds) ] for dim in (new_indexes | modified_indexes) } tokenized_indexes: dict[Hashable, str] = {} for k, v in indexes.items(): tokenized_v = tokenize(v) graph[f"{k}-coordinate-{tokenized_v}"] = v tokenized_indexes[k] = f"{k}-coordinate-{tokenized_v}" # raise nice error messages in _wrapper expected: ExpectedDict = { # input chunk 0 along a dimension maps to output chunk 0 along the same dimension # even if length of dimension is changed by the applied function "shapes": { k: output_chunks[k][v] for k, v in chunk_index.items() if k in output_chunks }, "data_vars": set(template.data_vars.keys()), "coords": set(template.coords.keys()), } from_wrapper = (gname,) + chunk_tuple graph[from_wrapper] = ( _wrapper, func, blocked_args, kwargs, is_array, expected, (dict, [[k, v] for k, v in tokenized_indexes.items()]), ) # mapping from variable name to dask graph key var_key_map: dict[Hashable, str] = {} for name in computed_variables: variable = template.variables[name] gname_l = f"{name}-{gname}" var_key_map[name] = gname_l # unchunked dimensions in the input have one chunk in the result # output can have new dimensions with exactly one chunk key: tuple[Any, ...] = (gname_l,) + tuple( chunk_index.get(dim, 0) for dim in variable.dims ) # We're adding multiple new layers to the graph: # The first new layer is the result of the computation on # the array. # Then we add one layer per variable, which extracts the # result for that variable, and depends on just the first new # layer. new_layers[gname_l][key] = (operator.getitem, from_wrapper, name) hlg = HighLevelGraph.from_collections( gname, graph, dependencies=[arg for arg in npargs if dask.is_dask_collection(arg)], ) # This adds in the getitems for each variable in the dataset. 
hlg = HighLevelGraph( {**hlg.layers, **new_layers}, dependencies={ **hlg.dependencies, **{name: {gname} for name in new_layers.keys()}, }, ) result = Dataset(coords=coordinates, attrs=template.attrs) for index in result._indexes: result[index].attrs = template[index].attrs result[index].encoding = template[index].encoding for name, gname_l in var_key_map.items(): dims = template[name].dims var_chunks = [] for dim in dims: if dim in output_chunks: var_chunks.append(output_chunks[dim]) elif dim in result._indexes: var_chunks.append((result.sizes[dim],)) elif dim in template.dims: # new unindexed dimension var_chunks.append((template.sizes[dim],)) data = dask.array.Array( hlg, name=gname_l, chunks=var_chunks, dtype=template[name].dtype ) result[name] = (dims, data, template[name].attrs) result[name].encoding = template[name].encoding result = result.set_coords(template._coord_names) if result_is_array: da = dataset_to_dataarray(result) da.name = template_name return da # type: ignore[return-value] return result # type: ignore[return-value] xarray-2025.09.0/xarray/core/resample.py000066400000000000000000000434051505620616400177720ustar00rootroot00000000000000from __future__ import annotations import warnings from collections.abc import Callable, Hashable, Iterable, Sequence from typing import TYPE_CHECKING, Any, Literal from xarray.core._aggregations import ( DataArrayResampleAggregations, DatasetResampleAggregations, ) from xarray.core.groupby import DataArrayGroupByBase, DatasetGroupByBase, GroupBy from xarray.core.types import Dims, InterpOptions, T_Xarray if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import T_Chunks from xarray.groupers import RESAMPLE_DIM class Resample(GroupBy[T_Xarray]): """An object that extends the `GroupBy` object with additional logic for handling specialized re-sampling operations. You should create a `Resample` object by using the `DataArray.resample` or `Dataset.resample` methods. The dimension along re-sampling See Also -------- DataArray.resample Dataset.resample """ def __init__( self, *args, dim: Hashable | None = None, resample_dim: Hashable | None = None, **kwargs, ) -> None: if dim == resample_dim: raise ValueError( f"Proxy resampling dimension ('{resample_dim}') " f"cannot have the same name as actual dimension ('{dim}')!" ) self._dim = dim super().__init__(*args, **kwargs) def _flox_reduce( self, dim: Dims, keep_attrs: bool | None = None, **kwargs, ) -> T_Xarray: result: T_Xarray = ( super() ._flox_reduce(dim=dim, keep_attrs=keep_attrs, **kwargs) .rename({RESAMPLE_DIM: self._group_dim}) # type: ignore[assignment] ) return result def shuffle_to_chunks(self, chunks: T_Chunks = None): """ Sort or "shuffle" the underlying object. "Shuffle" means the object is sorted so that all group members occur sequentially, in the same chunk. Multiple groups may occur in the same chunk. This method is particularly useful for chunked arrays (e.g. dask, cubed). particularly when you need to map a function that requires all members of a group to be present in a single chunk. For chunked array types, the order of appearance is not guaranteed, but will depend on the input chunking. Parameters ---------- chunks : int, tuple of int, "auto" or mapping of hashable to int or tuple of int, optional How to adjust chunks along dimensions not present in the array being grouped by. Returns ------- DataArrayGroupBy or DatasetGroupBy Examples -------- >>> import dask.array >>> da = xr.DataArray( ... 
dims="time", ... data=dask.array.arange(10, chunks=1), ... coords={"time": xr.date_range("2001-01-01", freq="12h", periods=10)}, ... name="a", ... ) >>> shuffled = da.resample(time="2D").shuffle_to_chunks() >>> shuffled Size: 80B dask.array Coordinates: * time (time) datetime64[ns] 80B 2001-01-01 ... 2001-01-05T12:00:00 See Also -------- dask.dataframe.DataFrame.shuffle dask.array.shuffle """ (grouper,) = self.groupers return self._shuffle_obj(chunks).drop_vars(RESAMPLE_DIM) def _first_or_last( self, op: Literal["first", "last"], skipna: bool | None, keep_attrs: bool | None ) -> T_Xarray: from xarray.core.dataset import Dataset result = super()._first_or_last(op=op, skipna=skipna, keep_attrs=keep_attrs) if isinstance(result, Dataset): # Can't do this in the base class because group_dim is RESAMPLE_DIM # which is not present in the original object for var in result.data_vars: result._variables[var] = result._variables[var].transpose( *self._obj._variables[var].dims ) return result def _drop_coords(self) -> T_Xarray: """Drop non-dimension coordinates along the resampled dimension.""" obj = self._obj for k, v in obj.coords.items(): if k != self._dim and self._dim in v.dims: obj = obj.drop_vars([k]) return obj def pad(self, tolerance: float | Iterable[float] | str | None = None) -> T_Xarray: """Forward fill new values at up-sampled frequency. Parameters ---------- tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels to limit the up-sampling method. Up-sampled data with indices that satisfy the equation ``abs(index[indexer] - target) <= tolerance`` are filled by new values. Data with indices that are outside the given tolerance are filled with ``NaN`` s. Returns ------- padded : DataArray or Dataset """ obj = self._drop_coords() (grouper,) = self.groupers return obj.reindex( {self._dim: grouper.full_index}, method="pad", tolerance=tolerance ) ffill = pad def backfill( self, tolerance: float | Iterable[float] | str | None = None ) -> T_Xarray: """Backward fill new values at up-sampled frequency. Parameters ---------- tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels to limit the up-sampling method. Up-sampled data with indices that satisfy the equation ``abs(index[indexer] - target) <= tolerance`` are filled by new values. Data with indices that are outside the given tolerance are filled with ``NaN`` s. Returns ------- backfilled : DataArray or Dataset """ obj = self._drop_coords() (grouper,) = self.groupers return obj.reindex( {self._dim: grouper.full_index}, method="backfill", tolerance=tolerance ) bfill = backfill def nearest( self, tolerance: float | Iterable[float] | str | None = None ) -> T_Xarray: """Take new values from nearest original coordinate to up-sampled frequency coordinates. Parameters ---------- tolerance : float | Iterable[float] | str | None, default: None Maximum distance between original and new labels to limit the up-sampling method. Up-sampled data with indices that satisfy the equation ``abs(index[indexer] - target) <= tolerance`` are filled by new values. Data with indices that are outside the given tolerance are filled with ``NaN`` s. 
Returns ------- upsampled : DataArray or Dataset """ obj = self._drop_coords() (grouper,) = self.groupers return obj.reindex( {self._dim: grouper.full_index}, method="nearest", tolerance=tolerance ) def interpolate(self, kind: InterpOptions = "linear", **kwargs) -> T_Xarray: """Interpolate up-sampled data using the original data as knots. Parameters ---------- kind : {"linear", "nearest", "zero", "slinear", \ "quadratic", "cubic", "polynomial"}, default: "linear" The method used to interpolate. The method should be supported by the scipy interpolator: - ``interp1d``: {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial"} - ``interpn``: {"linear", "nearest"} If ``"polynomial"`` is passed, the ``order`` keyword argument must also be provided. Returns ------- interpolated : DataArray or Dataset See Also -------- DataArray.interp Dataset.interp scipy.interpolate.interp1d """ return self._interpolate(kind=kind, **kwargs) def _interpolate(self, kind="linear", **kwargs) -> T_Xarray: """Apply scipy.interpolate.interp1d along resampling dimension.""" obj = self._drop_coords() (grouper,) = self.groupers kwargs.setdefault("bounds_error", False) return obj.interp( coords={self._dim: grouper.full_index}, assume_sorted=True, method=kind, kwargs=kwargs, ) class DataArrayResample( Resample["DataArray"], DataArrayGroupByBase, DataArrayResampleAggregations ): """DataArrayGroupBy object specialized to time resampling operations over a specified dimension """ def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : DataArray Array with summarized data and the indicated dimension(s) removed. """ return super().reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, shortcut=shortcut, **kwargs, ) def map( self, func: Callable[..., Any], args: tuple[Any, ...] = (), shortcut: bool | None = False, **kwargs: Any, ) -> DataArray: """Apply a function to each array in the group and concatenate them together into a new array. `func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped array after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each array. 
shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: (1) The action of `func` does not depend on any of the array metadata (attributes or coordinates) but only on the data and dimensions. (2) The action of `func` creates arrays with homogeneous metadata, that is, with the same dimensions and attributes. If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). args : tuple, optional Positional arguments passed on to `func`. **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns ------- applied : DataArray The result of splitting, applying and combining this array. """ # TODO: the argument order for Resample doesn't match that for its parent, # GroupBy combined = super().map(func, shortcut=shortcut, args=args, **kwargs) # If the aggregation function didn't drop the original resampling # dimension, then we need to do so before we can rename the proxy # dimension we used. if self._dim in combined.coords: combined = combined.drop_vars([self._dim]) if RESAMPLE_DIM in combined.dims: combined = combined.rename({RESAMPLE_DIM: self._dim}) return combined def apply(self, func, args=(), shortcut=None, **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataArrayResample.map """ warnings.warn( "Resample.apply may be deprecated in the future. Using Resample.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func=func, shortcut=shortcut, args=args, **kwargs) def asfreq(self) -> DataArray: """Return values of original object at the new up-sampling frequency; essentially a re-index with new times set to NaN. Returns ------- resampled : DataArray """ self._obj = self._drop_coords() return self.mean(None if self._dim is None else [self._dim]) class DatasetResample( Resample["Dataset"], DatasetGroupByBase, DatasetResampleAggregations ): """DatasetGroupBy object specialized to resampling a specified dimension""" def map( self, func: Callable[..., Any], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> Dataset: """Apply a function over each Dataset in the groups generated for resampling and concatenate them together into a new Dataset. `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the datasets. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped item after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments passed on to `func`. **kwargs Used to call `func(ds, **kwargs)` for each sub-dataset `ar`. Returns ------- applied : Dataset The result of splitting, applying and combining this dataset. """ # ignore shortcut if set (for now) applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped()) combined = self._combine(applied) # If the aggregation function didn't drop the original resampling # dimension, then we need to do so before we can rename the proxy # dimension we used. 
if self._dim in combined.coords: combined = combined.drop_vars(self._dim) if RESAMPLE_DIM in combined.dims: combined = combined.rename({RESAMPLE_DIM: self._dim}) return combined def apply(self, func, args=(), shortcut=None, **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataSetResample.map """ warnings.warn( "Resample.apply may be deprecated in the future. Using Resample.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func=func, shortcut=shortcut, args=args, **kwargs) def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> Dataset: """Reduce the items in this group by applying `func` along the pre-defined resampling dimension. Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Dataset Array with summarized data and the indicated dimension(s) removed. """ return super().reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, shortcut=shortcut, **kwargs, ) def asfreq(self) -> Dataset: """Return values of original object at the new up-sampling frequency; essentially a re-index with new times set to NaN. Returns ------- resampled : Dataset """ self._obj = self._drop_coords() return self.mean(None if self._dim is None else [self._dim]) xarray-2025.09.0/xarray/core/resample_cftime.py000066400000000000000000000460041505620616400213170ustar00rootroot00000000000000"""Resampling for CFTimeIndex. Does not support non-integer freq.""" # The mechanisms for resampling CFTimeIndex was copied and adapted from # the source code defined in pandas.core.resample # # For reference, here is a copy of the pandas copyright notice: # # BSD 3-Clause License # # Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc. # and PyData Development Team # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import datetime import typing import numpy as np import pandas as pd from xarray.coding.cftime_offsets import ( CFTIME_TICKS, BaseCFTimeOffset, MonthEnd, QuarterEnd, Tick, YearEnd, date_range, normalize_date, to_offset, ) from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.types import SideOptions from xarray.core.utils import emit_user_level_warning if typing.TYPE_CHECKING: from xarray.core.types import CFTimeDatetime, ResampleCompatible class CFTimeGrouper: """This is a simple container for the grouping parameters that implements a single method, the only one required for resampling in xarray. It cannot be used in a call to groupby like a pandas.Grouper object can.""" freq: BaseCFTimeOffset closed: SideOptions label: SideOptions loffset: str | datetime.timedelta | BaseCFTimeOffset | None origin: str | CFTimeDatetime offset: datetime.timedelta | None def __init__( self, freq: ResampleCompatible | BaseCFTimeOffset, closed: SideOptions | None = None, label: SideOptions | None = None, origin: str | CFTimeDatetime = "start_day", offset: str | datetime.timedelta | BaseCFTimeOffset | None = None, ): self.freq = to_offset(freq) self.origin = origin if not isinstance(self.freq, CFTIME_TICKS): if offset is not None: message = ( "The 'offset' keyword does not take effect when " "resampling with a 'freq' that is not Tick-like (h, m, s, " "ms, us)" ) emit_user_level_warning(message, category=RuntimeWarning) if origin != "start_day": message = ( "The 'origin' keyword does not take effect when " "resampling with a 'freq' that is not Tick-like (h, m, s, " "ms, us)" ) emit_user_level_warning(message, category=RuntimeWarning) if isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd) or self.origin in [ "end", "end_day", ]: # The backward resample sets ``closed`` to ``'right'`` by default # since the last value should be considered as the edge point for # the last bin. When origin in "end" or "end_day", the value for a # specific ``cftime.datetime`` index stands for the resample result # from the current ``cftime.datetime`` minus ``freq`` to the current # ``cftime.datetime`` with a right close. if closed is None: self.closed = "right" else: self.closed = closed if label is None: self.label = "right" else: self.label = label else: if closed is None: self.closed = "left" else: self.closed = closed if label is None: self.label = "left" else: self.label = label if offset is not None: try: self.offset = _convert_offset_to_timedelta(offset) except (ValueError, TypeError) as error: raise ValueError( f"offset must be a datetime.timedelta object or an offset string " f"that can be converted to a timedelta. Got {type(offset)} instead." ) from error else: self.offset = None def first_items(self, index: CFTimeIndex): """Meant to reproduce the results of the following grouper = pandas.Grouper(...) first_items = pd.Series(np.arange(len(index)), index).groupby(grouper).first() with index being a CFTimeIndex instead of a DatetimeIndex. 
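        In addition to that reindexed Series of first items (with duplicated
        bin edges masked as NaN, keeping the last occurrence), this also
        returns the integer codes assigning each element of ``index`` to its
        bin.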
""" datetime_bins, labels = _get_time_bins( index, self.freq, self.closed, self.label, self.origin, self.offset ) # check binner fits data if index[0] < datetime_bins[0]: raise ValueError("Value falls before first bin") if index[-1] > datetime_bins[-1]: raise ValueError("Value falls after last bin") integer_bins = np.searchsorted(index, datetime_bins, side=self.closed) counts = np.diff(integer_bins) codes = np.repeat(np.arange(len(labels)), counts) first_items = pd.Series(integer_bins[:-1], labels, copy=False) # Mask duplicate values with NaNs, preserving the last values non_duplicate = ~first_items.duplicated("last") return first_items.where(non_duplicate), codes def _get_time_bins( index: CFTimeIndex, freq: BaseCFTimeOffset, closed: SideOptions, label: SideOptions, origin: str | CFTimeDatetime, offset: datetime.timedelta | None, ): """Obtain the bins and their respective labels for resampling operations. Parameters ---------- index : CFTimeIndex Index object to be resampled (e.g., CFTimeIndex named 'time'). freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. resampling frequency (e.g., 'MS', '2D', 'H', or '3T' with coding.cftime_offsets.to_offset() applied to it). closed : 'left' or 'right' Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M' and 'A', which have a default of 'right'. label : 'left' or 'right' Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M' and 'A', which have a default of 'right'. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : datetime.timedelta, default is None An offset timedelta added to the origin. Returns ------- datetime_bins : CFTimeIndex Defines the edge of resampling bins by which original index values will be grouped into. labels : CFTimeIndex Define what the user actually sees the bins labeled as. """ if not isinstance(index, CFTimeIndex): raise TypeError( "index must be a CFTimeIndex, but got " f"an instance of {type(index).__name__!r}" ) if len(index) == 0: datetime_bins = labels = CFTimeIndex(data=[], name=index.name) return datetime_bins, labels first, last = _get_range_edges( index.min(), index.max(), freq, closed=closed, origin=origin, offset=offset ) datetime_bins = labels = date_range( freq=freq, start=first, end=last, name=index.name, use_cftime=True ) datetime_bins, labels = _adjust_bin_edges( datetime_bins, freq, closed, index, labels ) labels = labels[1:] if label == "right" else labels[:-1] # TODO: when CFTimeIndex supports missing values, if the reference index # contains missing values, insert the appropriate NaN value at the # beginning of the datetime_bins and labels indexes. 
return datetime_bins, labels def _adjust_bin_edges( datetime_bins: CFTimeIndex, freq: BaseCFTimeOffset, closed: SideOptions, index: CFTimeIndex, labels: CFTimeIndex, ) -> tuple[CFTimeIndex, CFTimeIndex]: """This is required for determining the bin edges resampling with month end, quarter end, and year end frequencies. Consider the following example. Let's say you want to downsample the time series with the following coordinates to month end frequency: CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00, 2000-02-01 12:00:00], dtype='object') Without this adjustment, _get_time_bins with month-end frequency will return the following index for the bin edges (default closed='right' and label='right' in this case): CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') If 2000-01-31 is used as a bound for a bin, the value on 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the month of January. To account for this, pandas adds a day minus one worth of microseconds to the bin edges generated by cftime range, so that we do bin the value at noon on January 31st in the January bin. This results in an index with bin edges like the following: CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59, 2000-02-29 23:59:59], dtype='object') The labels are still: CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object') """ if isinstance(freq, MonthEnd | QuarterEnd | YearEnd): if closed == "right": datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1) if datetime_bins[-2] > index.max(): datetime_bins = datetime_bins[:-1] labels = labels[:-1] return datetime_bins, labels def _get_range_edges( first: CFTimeDatetime, last: CFTimeDatetime, freq: BaseCFTimeOffset, closed: SideOptions = "left", origin: str | CFTimeDatetime = "start_day", offset: datetime.timedelta | None = None, ): """Get the correct starting and ending datetimes for the resampled CFTimeIndex range. Parameters ---------- first : cftime.datetime Uncorrected starting datetime object for resampled CFTimeIndex range. Usually the min of the original CFTimeIndex. last : cftime.datetime Uncorrected ending datetime object for resampled CFTimeIndex range. Usually the max of the original CFTimeIndex. freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. resampling frequency. Contains information on offset type (e.g. Day or 'D') and offset magnitude (e.g., n = 3). closed : 'left' or 'right' Which side of bin interval is closed. Defaults to 'left'. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : datetime.timedelta, default is None An offset timedelta added to the origin. Returns ------- first : cftime.datetime Corrected starting datetime object for resampled CFTimeIndex range. last : cftime.datetime Corrected ending datetime object for resampled CFTimeIndex range. 
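    Notes
    -----
    For Tick-like frequencies the edges are anchored relative to ``origin``
    (and shifted by ``offset`` if given); for all other frequencies both edges
    are first normalized to midnight, then the start is rolled back (or
    shifted back by one ``freq`` when ``closed='right'``) and the end is
    extended by one ``freq``.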
""" if isinstance(freq, Tick): first, last = _adjust_dates_anchored( first, last, freq, closed=closed, origin=origin, offset=offset ) return first, last else: first = normalize_date(first) last = normalize_date(last) first = freq.rollback(first) if closed == "left" else first - freq last = last + freq return first, last def _adjust_dates_anchored( first: CFTimeDatetime, last: CFTimeDatetime, freq: Tick, closed: SideOptions = "right", origin: str | CFTimeDatetime = "start_day", offset: datetime.timedelta | None = None, ): """First and last offsets should be calculated from the start day to fix an error cause by resampling across multiple days when a one day period is not a multiple of the frequency. See https://github.com/pandas-dev/pandas/issues/8683 Parameters ---------- first : cftime.datetime A datetime object representing the start of a CFTimeIndex range. last : cftime.datetime A datetime object representing the end of a CFTimeIndex range. freq : xarray.coding.cftime_offsets.BaseCFTimeOffset The offset object representing target conversion a.k.a. resampling frequency. Contains information on offset type (e.g. Day or 'D') and offset magnitude (e.g., n = 3). closed : 'left' or 'right' Which side of bin interval is closed. Defaults to 'right'. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'} or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : datetime.timedelta, default is None An offset timedelta added to the origin. Returns ------- fresult : cftime.datetime A datetime object representing the start of a date range that has been adjusted to fix resampling errors. lresult : cftime.datetime A datetime object representing the end of a date range that has been adjusted to fix resampling errors. """ import cftime if origin == "start_day": origin_date = normalize_date(first) elif origin == "start": origin_date = first elif origin == "epoch": origin_date = type(first)(1970, 1, 1) elif origin in ["end", "end_day"]: origin_last = last if origin == "end" else _ceil_via_cftimeindex(last, "D") sub_freq_times = (origin_last - first) // freq.as_timedelta() if closed == "left": sub_freq_times += 1 first = origin_last - sub_freq_times * freq origin_date = first elif isinstance(origin, cftime.datetime): origin_date = origin else: raise ValueError( f"origin must be one of {{'epoch', 'start_day', 'start', 'end', 'end_day'}} " f"or a cftime.datetime object. Got {origin}." 
) if offset is not None: origin_date = origin_date + offset foffset = (first - origin_date) % freq.as_timedelta() loffset = (last - origin_date) % freq.as_timedelta() if closed == "right": if foffset.total_seconds() > 0: fresult = first - foffset else: fresult = first - freq.as_timedelta() if loffset.total_seconds() > 0: lresult = last + (freq.as_timedelta() - loffset) else: lresult = last else: if foffset.total_seconds() > 0: fresult = first - foffset else: fresult = first if loffset.total_seconds() > 0: lresult = last + (freq.as_timedelta() - loffset) else: lresult = last + freq return fresult, lresult def exact_cftime_datetime_difference(a: CFTimeDatetime, b: CFTimeDatetime): """Exact computation of b - a Assumes: a = a_0 + a_m b = b_0 + b_m Here a_0, and b_0 represent the input dates rounded down to the nearest second, and a_m, and b_m represent the remaining microseconds associated with date a and date b. We can then express the value of b - a as: b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m By construction, we know that b_0 - a_0 must be a round number of seconds. Therefore we can take the result of b_0 - a_0 using ordinary cftime.datetime arithmetic and round to the nearest second. b_m - a_m is the remainder, in microseconds, and we can simply add this to the rounded timedelta. Parameters ---------- a : cftime.datetime Input datetime b : cftime.datetime Input datetime Returns ------- datetime.timedelta """ seconds = b.replace(microsecond=0) - a.replace(microsecond=0) seconds = round(seconds.total_seconds()) microseconds = b.microsecond - a.microsecond return datetime.timedelta(seconds=seconds, microseconds=microseconds) def _convert_offset_to_timedelta( offset: datetime.timedelta | str | BaseCFTimeOffset, ) -> datetime.timedelta: if isinstance(offset, datetime.timedelta): return offset if isinstance(offset, str | Tick): timedelta_cftime_offset = to_offset(offset) if isinstance(timedelta_cftime_offset, Tick): return timedelta_cftime_offset.as_timedelta() raise TypeError(f"Expected timedelta, str or Tick, got {type(offset)}") def _ceil_via_cftimeindex(date: CFTimeDatetime, freq: str | BaseCFTimeOffset): index = CFTimeIndex([date]) return index.ceil(freq).item() xarray-2025.09.0/xarray/core/treenode.py000066400000000000000000000711331505620616400177660ustar00rootroot00000000000000from __future__ import annotations import collections import sys from collections.abc import Iterator, Mapping from pathlib import PurePosixPath from typing import ( TYPE_CHECKING, Any, TypeVar, ) from xarray.core.types import Self from xarray.core.utils import Frozen, is_dict_like if TYPE_CHECKING: from xarray.core.dataarray import DataArray class InvalidTreeError(Exception): """Raised when user attempts to create an invalid tree in some way.""" class NotFoundInTreeError(ValueError): """Raised when operation can't be completed because one node is not part of the expected tree.""" class NodePath(PurePosixPath): """Represents a path from one node to another within a tree.""" def __init__(self, *pathsegments): if sys.version_info >= (3, 12): super().__init__(*pathsegments) else: super().__new__(PurePosixPath, *pathsegments) if self.drive: raise ValueError("NodePaths cannot have drives") if self.root not in ["/", ""]: raise ValueError( 'Root of NodePath can only be either "/" or "", with "" meaning the path is relative.' ) # TODO should we also forbid suffixes to avoid node names with dots in them? 
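# A minimal, illustrative sketch of how ``NodePath`` composes (shown as a
# comment only; it simply reuses ``pathlib.PurePosixPath`` semantics):
#
#     path = NodePath("/group/subgroup")
#     path.parts                    # ("/", "group", "subgroup")
#     str(path / "child")           # "/group/subgroup/child"
#     str(NodePath("a/b").parent)   # "a"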
class TreeNode: """ Base class representing a node of a tree, with methods for traversing and altering the tree. This class stores no data, it has only parents and children attributes, and various methods. Stores child nodes in an dict, ensuring that equality checks between trees and order of child nodes is preserved (since python 3.7). Nodes themselves are intrinsically unnamed (do not possess a ._name attribute), but if the node has a parent you can find the key it is stored under via the .name property. The .parent attribute is read-only: to replace the parent using public API you must set this node as the child of a new parent using `new_parent.children[name] = child_node`, or to instead detach from the current parent use `child_node.orphan()`. This class is intended to be subclassed by DataTree, which will overwrite some of the inherited behaviour, in particular to make names an inherent attribute, and allow setting parents directly. The intention is to mirror the class structure of xarray.Variable & xarray.DataArray, where Variable is unnamed but DataArray is (optionally) named. Also allows access to any other node in the tree via unix-like paths, including upwards referencing via '../'. (This class is heavily inspired by the anytree library's NodeMixin class.) """ _parent: Self | None _children: dict[str, Self] def __init__(self, children: Mapping[str, Self] | None = None): """Create a parentless node.""" self._parent = None self._children = {} if children: # shallow copy to avoid modifying arguments in-place (see GH issue #9196) self.children = {name: child.copy() for name, child in children.items()} @property def parent(self) -> Self | None: """Parent of this node.""" return self._parent @parent.setter def parent(self, new_parent: Self) -> None: raise AttributeError( "Cannot set parent attribute directly, you must modify the children of the other node instead using dict-like syntax" ) def _set_parent( self, new_parent: Self | None, child_name: str | None = None ) -> None: # TODO is it possible to refactor in a way that removes this private method? if new_parent is not None and not isinstance(new_parent, TreeNode): raise TypeError( "Parent nodes must be of type DataTree or None, " f"not type {type(new_parent)}" ) old_parent = self._parent if new_parent is not old_parent: self._check_loop(new_parent) self._detach(old_parent) self._attach(new_parent, child_name) def _check_loop(self, new_parent: Self | None) -> None: """Checks that assignment of this new parent will not create a cycle.""" if new_parent is not None: if new_parent is self: raise InvalidTreeError( f"Cannot set parent, as node {self} cannot be a parent of itself." ) if self._is_descendant_of(new_parent): raise InvalidTreeError( "Cannot set parent, as intended parent is already a descendant of this node." 
) def _is_descendant_of(self, node: Self) -> bool: return any(n is self for n in node.parents) def _detach(self, parent: Self | None) -> None: if parent is not None: self._pre_detach(parent) parents_children = parent.children parent._children = { name: child for name, child in parents_children.items() if child is not self } self._parent = None self._post_detach(parent) def _attach(self, parent: Self | None, child_name: str | None = None) -> None: if parent is not None: if child_name is None: raise ValueError( "To directly set parent, child needs a name, but child is unnamed" ) self._pre_attach(parent, child_name) parentchildren = parent._children assert not any(child is self for child in parentchildren), ( "Tree is corrupt." ) parentchildren[child_name] = self self._parent = parent self._post_attach(parent, child_name) else: self._parent = None def orphan(self) -> None: """Detach this node from its parent.""" self._set_parent(new_parent=None) @property def children(self) -> Mapping[str, Self]: """Child nodes of this node, stored under a mapping via their names.""" return Frozen(self._children) @children.setter def children(self, children: Mapping[str, Self]) -> None: self._check_children(children) children = {**children} old_children = self.children del self.children try: self._pre_attach_children(children) for name, child in children.items(): child._set_parent(new_parent=self, child_name=name) self._post_attach_children(children) assert len(self.children) == len(children) except Exception: # if something goes wrong then revert to previous children self.children = old_children raise @children.deleter def children(self) -> None: # TODO this just detaches all the children, it doesn't actually delete them... children = self.children self._pre_detach_children(children) for child in self.children.values(): child.orphan() assert len(self.children) == 0 self._post_detach_children(children) @staticmethod def _check_children(children: Mapping[str, TreeNode]) -> None: """Check children for correct types and for any duplicates.""" if not is_dict_like(children): raise TypeError( "children must be a dict-like mapping from names to node objects" ) seen = set() for name, child in children.items(): if not isinstance(child, TreeNode): raise TypeError( f"Cannot add object {name}. It is of type {type(child)}, " "but can only add children of type DataTree" ) childid = id(child) if childid not in seen: seen.add(childid) else: raise InvalidTreeError( f"Cannot add same node {name} multiple times as different children." ) def __repr__(self) -> str: return f"TreeNode(children={dict(self._children)})" def _pre_detach_children(self, children: Mapping[str, Self]) -> None: """Method call before detaching `children`.""" pass def _post_detach_children(self, children: Mapping[str, Self]) -> None: """Method call after detaching `children`.""" pass def _pre_attach_children(self, children: Mapping[str, Self]) -> None: """Method call before attaching `children`.""" pass def _post_attach_children(self, children: Mapping[str, Self]) -> None: """Method call after attaching `children`.""" pass def copy(self, *, inherit: bool = True, deep: bool = False) -> Self: """ Returns a copy of this subtree. Copies this node and all child nodes. If `deep=True`, a deep copy is made of each of the component variables. Otherwise, a shallow copy of each of the component variable is made, so that the underlying memory region of the new datatree is the same as in the original datatree. 
Parameters ---------- inherit : bool Whether inherited coordinates defined on parents of this node should also be copied onto the new tree. Only relevant if the `parent` of this node is not yet, and "Inherited coordinates" appear in its repr. deep : bool Whether each component variable is loaded into memory and copied onto the new object. Default is False. Returns ------- object : DataTree New object with dimensions, attributes, coordinates, name, encoding, and data of this node and all child nodes copied from original. See Also -------- xarray.Dataset.copy pandas.DataFrame.copy """ return self._copy_subtree(inherit=inherit, deep=deep) def _copy_subtree( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy entire subtree recursively.""" new_tree = self._copy_node(inherit=inherit, deep=deep, memo=memo) for name, child in self.children.items(): # TODO use `.children[name] = ...` once #9477 is implemented new_tree._set( name, child._copy_subtree(inherit=False, deep=deep, memo=memo) ) return new_tree def _copy_node( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy just one node of a tree""" new_empty_node = type(self)() return new_empty_node def __copy__(self) -> Self: return self._copy_subtree(inherit=True, deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy_subtree(inherit=True, deep=True, memo=memo) def _iter_parents(self) -> Iterator[Self]: """Iterate up the tree, starting from the current node's parent.""" node: Self | None = self.parent while node is not None: yield node node = node.parent def iter_lineage(self) -> tuple[Self, ...]: """Iterate up the tree, starting from the current node.""" from warnings import warn warn( "`iter_lineage` has been deprecated, and in the future will raise an error." "Please use `parents` from now on.", DeprecationWarning, stacklevel=2, ) return (self, *self.parents) @property def lineage(self) -> tuple[Self, ...]: """All parent nodes and their parent nodes, starting with the closest.""" from warnings import warn warn( "`lineage` has been deprecated, and in the future will raise an error." "Please use `parents` from now on.", DeprecationWarning, stacklevel=2, ) return self.iter_lineage() @property def parents(self) -> tuple[Self, ...]: """All parent nodes and their parent nodes, starting with the closest.""" return tuple(self._iter_parents()) @property def ancestors(self) -> tuple[Self, ...]: """All parent nodes and their parent nodes, starting with the most distant.""" from warnings import warn warn( "`ancestors` has been deprecated, and in the future will raise an error." "Please use `parents`. Example: `tuple(reversed(node.parents))`", DeprecationWarning, stacklevel=2, ) return (*reversed(self.parents), self) @property def root(self) -> Self: """Root node of the tree""" node = self while node.parent is not None: node = node.parent return node @property def is_root(self) -> bool: """Whether this node is the tree root.""" return self.parent is None @property def is_leaf(self) -> bool: """ Whether this node is a leaf node. Leaf nodes are defined as nodes which have no children. """ return self.children == {} @property def leaves(self) -> tuple[Self, ...]: """ All leaf nodes. Leaf nodes are defined as nodes which have no children. """ return tuple(node for node in self.subtree if node.is_leaf) @property def siblings(self) -> dict[str, Self]: """ Nodes with the same parent as this node. 
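        Returns an empty mapping if this node has no parent.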
""" if self.parent: return { name: child for name, child in self.parent.children.items() if child is not self } else: return {} @property def subtree(self) -> Iterator[Self]: """ Iterate over all nodes in this tree, including both self and all descendants. Iterates breadth-first. See Also -------- DataTree.subtree_with_keys DataTree.descendants group_subtrees """ # https://en.wikipedia.org/wiki/Breadth-first_search#Pseudocode queue = collections.deque([self]) while queue: node = queue.popleft() yield node queue.extend(node.children.values()) @property def subtree_with_keys(self) -> Iterator[tuple[str, Self]]: """ Iterate over relative paths and node pairs for all nodes in this tree. Iterates breadth-first. See Also -------- DataTree.subtree DataTree.descendants group_subtrees """ queue = collections.deque([(NodePath(), self)]) while queue: path, node = queue.popleft() yield str(path), node queue.extend((path / name, child) for name, child in node.children.items()) @property def descendants(self) -> tuple[Self, ...]: """ Child nodes and all their child nodes. Returned in depth-first order. See Also -------- DataTree.subtree """ all_nodes = tuple(self.subtree) this_node, *descendants = all_nodes return tuple(descendants) @property def level(self) -> int: """ Level of this node. Level means number of parent nodes above this node before reaching the root. The root node is at level 0. Returns ------- level : int See Also -------- depth width """ return len(self.parents) @property def depth(self) -> int: """ Maximum level of this tree. Measured from the root, which has a depth of 0. Returns ------- depth : int See Also -------- level width """ return max(node.level for node in self.root.subtree) @property def width(self) -> int: """ Number of nodes at this level in the tree. Includes number of immediate siblings, but also "cousins" in other branches and so-on. Returns ------- depth : int See Also -------- level depth """ return len([node for node in self.root.subtree if node.level == self.level]) def _pre_detach(self, parent: Self) -> None: """Method call before detaching from `parent`.""" pass def _post_detach(self, parent: Self) -> None: """Method call after detaching from `parent`.""" pass def _pre_attach(self, parent: Self, name: str) -> None: """Method call before attaching to `parent`.""" pass def _post_attach(self, parent: Self, name: str) -> None: """Method call after attaching to `parent`.""" pass def get(self, key: str, default: Self | None = None) -> Self | None: """ Return the child node with the specified key. Only looks for the node within the immediate children of this node, not in other nodes of the tree. """ if key in self.children: return self.children[key] else: return default # TODO `._walk` method to be called by both `_get_item` and `_set_item` def _get_item(self, path: str | NodePath) -> Self | DataArray: """ Returns the object lying at the given path. Raises a KeyError if there is no object at the given path. 
""" if isinstance(path, str): path = NodePath(path) if path.root: current_node = self.root root, *parts = list(path.parts) else: current_node = self parts = list(path.parts) for part in parts: if part == "..": if current_node.parent is None: raise KeyError(f"Could not find node at {path}") else: current_node = current_node.parent elif part in ("", "."): pass else: child = current_node.get(part) if child is None: raise KeyError(f"Could not find node at {path}") current_node = child return current_node def _set(self, key: str, val: Any) -> None: """ Set the child node with the specified key to value. Counterpart to the public .get method, and also only works on the immediate node, not other nodes in the tree. """ new_children = {**self.children, key: val} self.children = new_children def _set_item( self, path: str | NodePath, item: Any, new_nodes_along_path: bool = False, allow_overwrite: bool = True, ) -> None: """ Set a new item in the tree, overwriting anything already present at that path. The given value either forms a new node of the tree or overwrites an existing item at that location. Parameters ---------- path item new_nodes_along_path : bool If true, then if necessary new nodes will be created along the given path, until the tree can reach the specified location. allow_overwrite : bool Whether or not to overwrite any existing node at the location given by path. Raises ------ KeyError If node cannot be reached, and new_nodes_along_path=False. Or if a node already exists at the specified path, and allow_overwrite=False. """ if isinstance(path, str): path = NodePath(path) if not path.name: raise ValueError("Can't set an item under a path which has no name") if path.root: # absolute path current_node = self.root root, *parts, name = path.parts else: # relative path current_node = self *parts, name = path.parts if parts: # Walk to location of new node, creating intermediate node objects as we go if necessary for part in parts: if part == "..": if current_node.parent is None: # We can't create a parent if `new_nodes_along_path=True` as we wouldn't know what to name it raise KeyError(f"Could not reach node at path {path}") else: current_node = current_node.parent elif part in ("", "."): pass elif part in current_node.children: current_node = current_node.children[part] elif new_nodes_along_path: # Want child classes (i.e. DataTree) to populate tree with their own types new_node = type(self)() current_node._set(part, new_node) current_node = current_node.children[part] else: raise KeyError(f"Could not reach node at path {path}") if name in current_node.children: # Deal with anything already existing at this location if allow_overwrite: current_node._set(name, item) else: raise KeyError(f"Already a node object at path {path}") else: current_node._set(name, item) def __delitem__(self, key: str) -> None: """Remove a child node from this tree object.""" if key in self.children: child = self._children[key] del self._children[key] child.orphan() else: raise KeyError(key) def same_tree(self, other: Self) -> bool: """True if other node is in the same tree as this node.""" return self.root is other.root AnyNamedNode = TypeVar("AnyNamedNode", bound="NamedNode") def _validate_name(name: str | None) -> None: if name is not None: if not isinstance(name, str): raise TypeError("node name must be a string or None") if "/" in name: raise ValueError("node names cannot contain forward slashes") class NamedNode(TreeNode): """ A TreeNode which knows its own name. 
Implements path-like relationships to other nodes in its tree. """ _name: str | None def __init__( self, name: str | None = None, children: Mapping[str, Self] | None = None, ): super().__init__(children=children) _validate_name(name) self._name = name @property def name(self) -> str | None: """The name of this node.""" return self._name @name.setter def name(self, name: str | None) -> None: if self.parent is not None: raise ValueError( "cannot set the name of a node which already has a parent. " "Consider creating a detached copy of this node via .copy() " "on the parent node." ) _validate_name(name) self._name = name def __repr__(self, level=0): repr_value = "\t" * level + self.__str__() + "\n" for child in self.children: repr_value += self.get(child).__repr__(level + 1) return repr_value def __str__(self) -> str: name_repr = repr(self.name) if self.name is not None else "" return f"NamedNode({name_repr})" def _post_attach(self, parent: Self, name: str) -> None: """Ensures child has name attribute corresponding to key under which it has been stored.""" _validate_name(name) # is this check redundant? self._name = name def _copy_node( self, inherit: bool, deep: bool = False, memo: dict[int, Any] | None = None ) -> Self: """Copy just one node of a tree""" new_node = super()._copy_node(inherit=inherit, deep=deep, memo=memo) new_node._name = self.name return new_node @property def path(self) -> str: """Return the file-like path from the root to this node.""" if self.is_root: return "/" else: root, *ancestors = tuple(reversed(self.parents)) # don't include name of root because (a) root might not have a name & (b) we want path relative to root. names = [*(node.name for node in ancestors), self.name] return "/" + "/".join(names) # type: ignore[arg-type] def relative_to(self, other: Self) -> str: """ Compute the relative path from this node to node `other`. If other is not in this tree, or it's otherwise impossible, raise a ValueError. """ if not self.same_tree(other): raise NotFoundInTreeError( "Cannot find relative path because nodes do not lie within the same tree" ) this_path = NodePath(self.path) if any(other.path == parent.path for parent in (self, *self.parents)): return str(this_path.relative_to(other.path)) else: common_ancestor = self.find_common_ancestor(other) path_to_common_ancestor = other._path_to_ancestor(common_ancestor) return str( path_to_common_ancestor / this_path.relative_to(common_ancestor.path) ) def find_common_ancestor(self, other: Self) -> Self: """ Find the first common ancestor of two nodes in the same tree. Raise ValueError if they are not in the same tree. """ if self is other: return self other_paths = [op.path for op in other.parents] for parent in (self, *self.parents): if parent.path in other_paths: return parent raise NotFoundInTreeError( "Cannot find common ancestor because nodes do not lie within the same tree" ) def _path_to_ancestor(self, ancestor: Self) -> NodePath: """Return the relative path from this node to the given ancestor node""" if not self.same_tree(ancestor): raise NotFoundInTreeError( "Cannot find relative path to ancestor because nodes do not lie within the same tree" ) if ancestor.path not in [a.path for a in (self, *self.parents)]: raise NotFoundInTreeError( "Cannot find relative path to ancestor because given node is not an ancestor of this node" ) parents_paths = [parent.path for parent in (self, *self.parents)] generation_gap = list(parents_paths).index(ancestor.path) path_upwards = "../" * generation_gap if generation_gap > 0 else "." 
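        # ``generation_gap`` counts how many levels separate this node from
        # ``ancestor``; a gap of zero means the ancestor is this node itself,
        # in which case the relative path is simply ".".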
return NodePath(path_upwards) class TreeIsomorphismError(ValueError): """Error raised if two tree objects do not share the same node structure.""" def group_subtrees( *trees: AnyNamedNode, ) -> Iterator[tuple[str, tuple[AnyNamedNode, ...]]]: """Iterate over subtrees grouped by relative paths in breadth-first order. `group_subtrees` allows for applying operations over all nodes of a collection of DataTree objects with nodes matched by their relative paths. Example usage:: outputs = {} for path, (node_a, node_b) in group_subtrees(tree_a, tree_b): outputs[path] = f(node_a, node_b) tree_out = DataTree.from_dict(outputs) Parameters ---------- *trees : Tree Trees to iterate over. Yields ------ A tuple of the relative path and corresponding nodes for each subtree in the inputs. Raises ------ TreeIsomorphismError If trees are not isomorphic, i.e., they have different structures. See also -------- DataTree.subtree DataTree.subtree_with_keys """ if not trees: raise TypeError("must pass at least one tree object") # https://en.wikipedia.org/wiki/Breadth-first_search#Pseudocode queue = collections.deque([(NodePath(), trees)]) while queue: path, active_nodes = queue.popleft() # yield before raising an error, in case the caller chooses to exit # iteration early yield str(path), active_nodes first_node = active_nodes[0] if any( sibling.children.keys() != first_node.children.keys() for sibling in active_nodes[1:] ): path_str = "root node" if not path.parts else f"node {str(path)!r}" child_summary = " vs ".join( str(list(node.children)) for node in active_nodes ) raise TreeIsomorphismError( f"children at {path_str} do not match: {child_summary}" ) for name in first_node.children: child_nodes = tuple(node.children[name] for node in active_nodes) queue.append((path / name, child_nodes)) def zip_subtrees( *trees: AnyNamedNode, ) -> Iterator[tuple[AnyNamedNode, ...]]: """Zip together subtrees aligned by relative path.""" for _, nodes in group_subtrees(*trees): yield nodes xarray-2025.09.0/xarray/core/types.py000066400000000000000000000264731505620616400173340ustar00rootroot00000000000000from __future__ import annotations import datetime from collections.abc import Callable, Collection, Hashable, Iterator, Mapping, Sequence from types import EllipsisType from typing import ( TYPE_CHECKING, Any, Literal, Protocol, Self, SupportsIndex, TypeAlias, TypeVar, Union, overload, runtime_checkable, ) import numpy as np import pandas as pd from numpy._typing import _SupportsDType from numpy.typing import ArrayLike if TYPE_CHECKING: from xarray.backends.common import BackendEntrypoint from xarray.core.common import AbstractArray, DataWithCoords from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.indexes import Index, Indexes from xarray.core.utils import Frozen from xarray.core.variable import IndexVariable, Variable from xarray.groupers import Grouper, Resampler from xarray.structure.alignment import Aligner GroupInput: TypeAlias = ( str | DataArray | IndexVariable | Sequence[Hashable] | Mapping[Any, Grouper] | None ) try: from dask.array import Array as DaskArray except ImportError: DaskArray = np.ndarray # type: ignore[misc, assignment, unused-ignore] try: from cubed import Array as CubedArray except ImportError: CubedArray = np.ndarray try: from zarr import Array as ZarrArray from zarr import Group as ZarrGroup except ImportError: ZarrArray = np.ndarray # type: ignore[misc, 
assignment, unused-ignore] ZarrGroup = Any # type: ignore[misc, assignment, unused-ignore] try: # this is V3 only from zarr.storage import StoreLike as ZarrStoreLike except ImportError: ZarrStoreLike = Any # type: ignore[misc, assignment, unused-ignore] # Anything that can be coerced to a shape tuple _ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] _DTypeLikeNested = Any # TODO: wait for support for recursive types # Xarray requires a Mapping[Hashable, dtype] in many places which # conflicts with numpys own DTypeLike (with dtypes for fields). # https://numpy.org/devdocs/reference/typing.html#numpy.typing.DTypeLike # This is a copy of this DTypeLike that allows only non-Mapping dtypes. DTypeLikeSave = Union[ np.dtype[Any], # default data type (float64) None, # array-scalar types and generic types type[Any], # character codes, type strings or comma-separated fields, e.g., 'float64' str, # (flexible_dtype, itemsize) tuple[_DTypeLikeNested, int], # (fixed_dtype, shape) tuple[_DTypeLikeNested, _ShapeLike], # (base_dtype, new_dtype) tuple[_DTypeLikeNested, _DTypeLikeNested], # because numpy does the same? list[Any], # anything with a dtype attribute _SupportsDType[np.dtype[Any]], ] else: DTypeLikeSave: Any = None # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases try: from cftime import datetime as CFTimeDatetime except ImportError: CFTimeDatetime = np.datetime64 DatetimeLike: TypeAlias = ( pd.Timestamp | datetime.datetime | np.datetime64 | CFTimeDatetime ) class Alignable(Protocol): """Represents any Xarray type that supports alignment. It may be ``Dataset``, ``DataArray`` or ``Coordinates``. This protocol class is needed since those types do not all have a common base class. """ @property def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]: ... @property def sizes(self) -> Mapping[Hashable, int]: ... @property def xindexes(self) -> Indexes[Index]: ... def _reindex_callback( self, aligner: Any, dim_pos_indexers: dict[Hashable, Any], variables: dict[Hashable, Variable], indexes: dict[Hashable, Index], fill_value: Any, exclude_dims: frozenset[Hashable], exclude_vars: frozenset[Hashable], ) -> Self: ... def _overwrite_indexes( self, indexes: Mapping[Any, Index], variables: Mapping[Any, Variable] | None = None, ) -> Self: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[Hashable]: ... def copy( self, deep: bool = False, ) -> Self: ... T_Alignable = TypeVar("T_Alignable", bound="Alignable") T_Aligner = TypeVar("T_Aligner", bound="Aligner") T_Backend = TypeVar("T_Backend", bound="BackendEntrypoint") T_Dataset = TypeVar("T_Dataset", bound="Dataset") T_DataArray = TypeVar("T_DataArray", bound="DataArray") T_Variable = TypeVar("T_Variable", bound="Variable") T_Coordinates = TypeVar("T_Coordinates", bound="Coordinates") T_Array = TypeVar("T_Array", bound="AbstractArray") T_Index = TypeVar("T_Index", bound="Index") # `T_Xarray` is a type variable that can be either "DataArray" or "Dataset". When used # in a function definition, all inputs and outputs annotated with `T_Xarray` must be of # the same concrete type, either "DataArray" or "Dataset". This is generally preferred # over `T_DataArrayOrSet`, given the type system can determine the exact type. T_Xarray = TypeVar("T_Xarray", "DataArray", "Dataset") # `T_DataArrayOrSet` is a type variable that is bounded to either "DataArray" or # "Dataset". Use it for functions that might return either type, but where the exact # type cannot be determined statically using the type system. 
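# For example (an illustrative sketch, not part of this module): a function
# annotated as
#     def roundtrip(obj: T_Xarray) -> T_Xarray: ...
# promises to return the same concrete type it was given, whereas a function
# that may return either a Dataset or a DataArray regardless of its input
# should be annotated with `T_DataArrayOrSet` instead.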
T_DataArrayOrSet = TypeVar("T_DataArrayOrSet", bound=Union["Dataset", "DataArray"]) # For working directly with `DataWithCoords`. It will only allow using methods defined # on `DataWithCoords`. T_DataWithCoords = TypeVar("T_DataWithCoords", bound="DataWithCoords") # Temporary placeholder for indicating an array api compliant type. # hopefully in the future we can narrow this down more: T_DuckArray = TypeVar("T_DuckArray", bound=Any, covariant=True) # noqa: PLC0105 # For typing pandas extension arrays. T_ExtensionArray = TypeVar("T_ExtensionArray", bound=pd.api.extensions.ExtensionArray) ScalarOrArray = Union["ArrayLike", np.generic] VarCompatible = Union["Variable", "ScalarOrArray"] DaCompatible = Union["DataArray", "VarCompatible"] DsCompatible = Union["Dataset", "DaCompatible"] DtCompatible = Union["DataTree", "DsCompatible"] GroupByCompatible = Union["Dataset", "DataArray"] # Don't change to Hashable | Collection[Hashable] # Read: https://github.com/pydata/xarray/issues/6142 Dims = Union[str, Collection[Hashable], EllipsisType, None] # FYI in some cases we don't allow `None`, which this doesn't take account of. # FYI the `str` is for a size string, e.g. "16MB", supported by dask. T_ChunkDim: TypeAlias = str | int | Literal["auto"] | tuple[int, ...] | None # noqa: PYI051 T_ChunkDimFreq: TypeAlias = Union["Resampler", T_ChunkDim] T_ChunksFreq: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDimFreq] # We allow the tuple form of this (though arguably we could transition to named dims only) T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim] T_NormalizedChunks = tuple[tuple[int, ...], ...] DataVars = Mapping[Any, Any] ErrorOptions = Literal["raise", "ignore"] ErrorOptionsWithWarn = Literal["raise", "warn", "ignore"] CompatOptions = Literal[ "identical", "equals", "broadcast_equals", "no_conflicts", "override", "minimal" ] ConcatOptions = Literal["all", "minimal", "different"] CombineAttrsOptions = Union[ Literal["drop", "identical", "no_conflicts", "drop_conflicts", "override"], Callable[..., Any], ] JoinOptions = Literal["outer", "inner", "left", "right", "exact", "override"] Interp1dOptions = Literal[ "linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial", ] InterpolantOptions = Literal[ "barycentric", "krogh", "pchip", "spline", "akima", "makima" ] InterpnOptions = Literal["linear", "nearest", "slinear", "cubic", "quintic", "pchip"] InterpOptions = Union[Interp1dOptions, InterpolantOptions, InterpnOptions] DatetimeUnitOptions = ( Literal["W", "D", "h", "m", "s", "ms", "us", "ΞΌs", "ns", "ps", "fs", "as"] | None ) NPDatetimeUnitOptions = Literal["D", "h", "m", "s", "ms", "us", "ns"] PDDatetimeUnitOptions = Literal["s", "ms", "us", "ns"] QueryEngineOptions = Literal["python", "numexpr"] | None QueryParserOptions = Literal["pandas", "python"] ReindexMethodOptions = Literal["nearest", "pad", "ffill", "backfill", "bfill"] | None PadModeOptions = Literal[ "constant", "edge", "linear_ramp", "maximum", "mean", "median", "minimum", "reflect", "symmetric", "wrap", ] T_PadConstantValues = float | tuple[float, float] T_VarPadConstantValues = T_PadConstantValues | Mapping[Any, T_PadConstantValues] T_DatasetPadConstantValues = ( T_VarPadConstantValues | Mapping[Any, T_VarPadConstantValues] ) PadReflectOptions = Literal["even", "odd"] | None CFCalendar = Literal[ "standard", "gregorian", "proleptic_gregorian", "noleap", "365_day", "360_day", "julian", "all_leap", "366_day", ] CoarsenBoundaryOptions = Literal["exact", "trim", "pad"] SideOptions = Literal["left", 
"right"] InclusiveOptions = Literal["both", "neither", "left", "right"] ScaleOptions = Literal["linear", "symlog", "log", "logit"] | None HueStyleOptions = Literal["continuous", "discrete"] | None AspectOptions = Union[Literal["auto", "equal"], float, None] ExtendOptions = Literal["neither", "both", "min", "max"] | None _T_co = TypeVar("_T_co", covariant=True) class NestedSequence(Protocol[_T_co]): def __len__(self, /) -> int: ... @overload def __getitem__(self, index: int, /) -> _T_co | NestedSequence[_T_co]: ... @overload def __getitem__(self, index: slice, /) -> NestedSequence[_T_co]: ... def __iter__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ... def __reversed__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ... AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True) # this is shamelessly stolen from pandas._typing @runtime_checkable class BaseBuffer(Protocol): @property def mode(self) -> str: # for _get_filepath_or_buffer ... def seek(self, offset: int, whence: int = ..., /) -> int: # with one argument: gzip.GzipFile, bz2.BZ2File # with two arguments: zip.ZipFile, read_sas ... def seekable(self) -> bool: # for bz2.BZ2File ... def tell(self) -> int: # for zip.ZipFile, read_stata, to_stata ... @runtime_checkable class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, n: int = ..., /) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... QuantileMethods = Literal[ "inverted_cdf", "averaged_inverted_cdf", "closest_observation", "interpolated_inverted_cdf", "hazen", "weibull", "linear", "median_unbiased", "normal_unbiased", "lower", "higher", "midpoint", "nearest", ] NetcdfWriteModes = Literal["w", "a"] ZarrWriteModes = Literal["w", "w-", "a", "a-", "r+", "r"] GroupKey = Any GroupIndex = Union[slice, list[int]] GroupIndices = tuple[GroupIndex, ...] Bins = Union[ int, Sequence[int], Sequence[float], Sequence[pd.Timestamp], np.ndarray, pd.Index ] ResampleCompatible: TypeAlias = str | datetime.timedelta | pd.Timedelta | pd.DateOffset xarray-2025.09.0/xarray/core/utils.py000066400000000000000000001164131505620616400173220ustar00rootroot00000000000000"""Internal utilities; not for external use""" # Some functions in this module are derived from functions in pandas. For # reference, here is a copy of the pandas copyright notice: # BSD 3-Clause License # Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2011-2022, Open source contributors. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import annotations import contextlib import difflib import functools import importlib import inspect import io import itertools import math import os import re import sys import warnings from collections.abc import ( Callable, Collection, Container, Hashable, ItemsView, Iterable, Iterator, KeysView, Mapping, MutableMapping, MutableSet, Sequence, ValuesView, ) from collections.abc import ( Set as AbstractSet, ) from enum import Enum from pathlib import Path from types import EllipsisType, ModuleType from typing import ( TYPE_CHECKING, Any, Generic, Literal, TypeGuard, TypeVar, cast, overload, ) import numpy as np import pandas as pd from xarray.namedarray.utils import ( # noqa: F401 ReprObject, drop_missing_dims, either_dict_or_kwargs, infix_dims, is_dask_collection, is_dict_like, is_duck_array, is_duck_dask_array, module_available, to_0d_object_array, ) if TYPE_CHECKING: from xarray.core.types import Dims, ErrorOptionsWithWarn K = TypeVar("K") V = TypeVar("V") T = TypeVar("T") def is_allowed_extension_array_dtype(dtype: Any): return pd.api.types.is_extension_array_dtype(dtype) and not isinstance( # noqa: TID251 dtype, pd.StringDtype ) def is_allowed_extension_array(array: Any) -> bool: return ( hasattr(array, "dtype") and is_allowed_extension_array_dtype(array.dtype) and not isinstance(array, pd.arrays.NumpyExtensionArray) # type: ignore[attr-defined] ) def alias_message(old_name: str, new_name: str) -> str: return f"{old_name} has been deprecated. Use {new_name} instead." def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None: warnings.warn( alias_message(old_name, new_name), FutureWarning, stacklevel=stacklevel ) def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]: assert isinstance(old_name, str) @functools.wraps(obj) def wrapper(*args, **kwargs): alias_warning(old_name, obj.__name__) return obj(*args, **kwargs) wrapper.__doc__ = alias_message(old_name, obj.__name__) return wrapper def did_you_mean( word: Hashable, possibilities: Iterable[Hashable], *, n: int = 10 ) -> str: """ Suggest a few correct words based on a list of possibilities Parameters ---------- word : Hashable Word to compare to a list of possibilities. possibilities : Iterable of Hashable The iterable of Hashable that contains the correct values. n : int, default: 10 Maximum number of suggestions to show. Examples -------- >>> did_you_mean("bluch", ("blech", "gray_r", 1, None, (2, 56))) "Did you mean one of ('blech',)?" >>> did_you_mean("none", ("blech", "gray_r", 1, None, (2, 56))) 'Did you mean one of (None,)?' See also -------- https://en.wikipedia.org/wiki/String_metric """ # Convert all values to string, get_close_matches doesn't handle all hashables: possibilities_str: dict[str, Hashable] = {str(k): k for k in possibilities} msg = "" if len( best_str := difflib.get_close_matches( str(word), list(possibilities_str.keys()), n=n ) ): best = tuple(possibilities_str[k] for k in best_str) msg = f"Did you mean one of {best}?" 
return msg def get_valid_numpy_dtype(array: np.ndarray | pd.Index) -> np.dtype: """Return a numpy compatible dtype from either a numpy array or a pandas.Index. Used for wrapping a pandas.Index as an xarray.Variable. """ if isinstance(array, pd.PeriodIndex): return np.dtype("O") if hasattr(array, "categories"): # category isn't a real numpy dtype dtype = array.categories.dtype if not is_valid_numpy_dtype(dtype): dtype = np.dtype("O") return dtype if not is_valid_numpy_dtype(array.dtype): return np.dtype("O") return array.dtype # type: ignore[return-value] def maybe_coerce_to_str(index, original_coords): """maybe coerce a pandas Index back to a nunpy array of type str pd.Index uses object-dtype to store str - try to avoid this for coords """ from xarray.core import dtypes try: result_type = dtypes.result_type(*original_coords) except TypeError: pass else: if result_type.kind in "SU": index = np.asarray(index, dtype=result_type.type) return index def maybe_wrap_array(original, new_array): """Wrap a transformed array with __array_wrap__ if it can be done safely. This lets us treat arbitrary functions that take and return ndarray objects like ufuncs, as long as they return an array with the same shape. """ # in case func lost array's metadata if isinstance(new_array, np.ndarray) and new_array.shape == original.shape: return original.__array_wrap__(new_array) else: return new_array def equivalent(first: T, second: T) -> bool: """Compare two objects for equivalence (identity or equality), using array_equiv if either object is an ndarray. If both objects are lists, equivalent is sequentially called on all the elements. """ # TODO: refactor to avoid circular import from xarray.core import duck_array_ops if first is second: return True if isinstance(first, np.ndarray) or isinstance(second, np.ndarray): return duck_array_ops.array_equiv(first, second) if isinstance(first, list) or isinstance(second, list): return list_equiv(first, second) # type: ignore[arg-type] return (first == second) or (pd.isnull(first) and pd.isnull(second)) # type: ignore[call-overload] def list_equiv(first: Sequence[T], second: Sequence[T]) -> bool: if len(first) != len(second): return False return all(itertools.starmap(equivalent, zip(first, second, strict=True))) def peek_at(iterable: Iterable[T]) -> tuple[T, Iterator[T]]: """Returns the first value from iterable, as well as a new iterator with the same content as the original iterable """ gen = iter(iterable) peek = next(gen) return peek, itertools.chain([peek], gen) def update_safety_check( first_dict: Mapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> None: """Check the safety of updating one dictionary with another. Raises ValueError if dictionaries have non-compatible values for any key, where compatibility is determined by identity (they are the same item) or the `compat` function. Parameters ---------- first_dict, second_dict : dict-like All items in the second dictionary are checked against for conflicts against items in the first dictionary. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. 
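    Examples
    --------
    An illustrative sketch of the intended behavior (values are arbitrary and
    assume the default ``compat=equivalent``):

    >>> update_safety_check({"a": 1}, {"a": 1, "b": 2})  # compatible, returns None
    >>> update_safety_check({"a": 1}, {"a": 2})
    Traceback (most recent call last):
    ...
    ValueError: unsafe to merge dictionaries without overriding values; conflicting key 'a'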
""" for k, v in second_dict.items(): if k in first_dict and not compat(v, first_dict[k]): raise ValueError( "unsafe to merge dictionaries without " f"overriding values; conflicting key {k!r}" ) def remove_incompatible_items( first_dict: MutableMapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> None: """Remove incompatible items from the first dictionary in-place. Items are retained if their keys are found in both dictionaries and the values are compatible. Parameters ---------- first_dict, second_dict : dict-like Mappings to merge. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. """ for k in list(first_dict): if k not in second_dict or not compat(first_dict[k], second_dict[k]): del first_dict[k] def is_full_slice(value: Any) -> bool: return isinstance(value, slice) and value == slice(None) def is_list_like(value: Any) -> TypeGuard[list | tuple]: return isinstance(value, list | tuple) def _is_scalar(value, include_0d): from xarray.core.variable import NON_NUMPY_SUPPORTED_ARRAY_TYPES if include_0d: include_0d = getattr(value, "ndim", None) == 0 return ( include_0d or isinstance(value, str | bytes) or not ( isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES) or hasattr(value, "__array_function__") or hasattr(value, "__array_namespace__") ) ) def is_scalar(value: Any, include_0d: bool = True) -> TypeGuard[Hashable]: """Whether to treat a value as a scalar. Any non-iterable, string, or 0-D array """ return _is_scalar(value, include_0d) def is_valid_numpy_dtype(dtype: Any) -> bool: try: np.dtype(dtype) except (TypeError, ValueError): return False else: return True def to_0d_array(value: Any) -> np.ndarray: """Given a value, wrap it in a 0-D numpy.ndarray.""" if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0): return np.array(value) else: return to_0d_object_array(value) def dict_equiv( first: Mapping[K, V], second: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> bool: """Test equivalence of two dict-like objects. If any of the values are numpy arrays, compare them correctly. Parameters ---------- first, second : dict-like Dictionaries to compare for equality compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. Returns ------- equals : bool True if the dictionaries are equal """ for k in first: if k not in second or not compat(first[k], second[k]): return False return all(k in first for k in second) def compat_dict_intersection( first_dict: Mapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> MutableMapping[K, V]: """Return the intersection of two dictionaries as a new dictionary. Items are retained if their keys are found in both dictionaries and the values are compatible. Parameters ---------- first_dict, second_dict : dict-like Mappings to merge. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. Returns ------- intersection : dict Intersection of the contents. """ new_dict = dict(first_dict) remove_incompatible_items(new_dict, second_dict, compat) return new_dict def compat_dict_union( first_dict: Mapping[K, V], second_dict: Mapping[K, V], compat: Callable[[V, V], bool] = equivalent, ) -> MutableMapping[K, V]: """Return the union of two dictionaries as a new dictionary. 
An exception is raised if any keys are found in both dictionaries and the values are not compatible. Parameters ---------- first_dict, second_dict : dict-like Mappings to merge. compat : function, optional Binary operator to determine if two values are compatible. By default, checks for equivalence. Returns ------- union : dict union of the contents. """ new_dict = dict(first_dict) update_safety_check(first_dict, second_dict, compat) new_dict.update(second_dict) return new_dict class Frozen(Mapping[K, V]): """Wrapper around an object implementing the mapping interface to make it immutable. If you really want to modify the mapping, the mutable version is saved under the `mapping` attribute. """ __slots__ = ("mapping",) def __init__(self, mapping: Mapping[K, V]): self.mapping = mapping def __getitem__(self, key: K) -> V: return self.mapping[key] def __iter__(self) -> Iterator[K]: return iter(self.mapping) def __len__(self) -> int: return len(self.mapping) def __contains__(self, key: object) -> bool: return key in self.mapping def __repr__(self) -> str: return f"{type(self).__name__}({self.mapping!r})" def FrozenDict(*args, **kwargs) -> Frozen: return Frozen(dict(*args, **kwargs)) class FrozenMappingWarningOnValuesAccess(Frozen[K, V]): """ Class which behaves like a Mapping but warns if the values are accessed. Temporary object to aid in deprecation cycle of `Dataset.dims` (see GH issue #8496). `Dataset.dims` is being changed from returning a mapping of dimension names to lengths to just returning a frozen set of dimension names (to increase consistency with `DataArray.dims`). This class retains backwards compatibility but raises a warning only if the return value of ds.dims is used like a dictionary (i.e. it doesn't raise a warning if used in a way that would also be valid for a FrozenSet, e.g. iteration). """ __slots__ = ("mapping",) def _warn(self) -> None: emit_user_level_warning( "The return type of `Dataset.dims` will be changed to return a set of dimension names in future, " "in order to be more consistent with `DataArray.dims`. To access a mapping from dimension names to lengths, " "please use `Dataset.sizes`.", FutureWarning, ) def __getitem__(self, key: K) -> V: self._warn() return super().__getitem__(key) @overload def get(self, key: K, /) -> V | None: ... @overload def get(self, key: K, /, default: V | T) -> V | T: ... def get(self, key: K, default: T | None = None) -> V | T | None: self._warn() return super().get(key, default) def keys(self) -> KeysView[K]: self._warn() return super().keys() def items(self) -> ItemsView[K, V]: self._warn() return super().items() def values(self) -> ValuesView[V]: self._warn() return super().values() class FilteredMapping(Mapping[K, V]): """Implements the Mapping interface. Uses the wrapped mapping for item lookup and a separate wrapped keys collection for iteration. Can be used to construct a mapping object from another dict-like object without eagerly accessing its items or when a mapping object is expected but only iteration over keys is actually used. Note: keys should be a subset of mapping, but FilteredMapping does not validate consistency of the provided `keys` and `mapping`. It is the caller's responsibility to ensure that they are suitable for the task at hand. 
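    Examples
    --------
    A minimal sketch with arbitrary keys and values:

    >>> fm = FilteredMapping(keys=["a"], mapping={"a": 1, "b": 2})
    >>> dict(fm)
    {'a': 1}
    >>> "b" in fm
    False
    >>> len(fm)
    1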
""" __slots__ = ("keys_", "mapping") def __init__(self, keys: Collection[K], mapping: Mapping[K, V]): self.keys_ = keys # .keys is already a property on Mapping self.mapping = mapping def __getitem__(self, key: K) -> V: if key not in self.keys_: raise KeyError(key) return self.mapping[key] def __iter__(self) -> Iterator[K]: return iter(self.keys_) def __len__(self) -> int: return len(self.keys_) def __repr__(self) -> str: return f"{type(self).__name__}(keys={self.keys_!r}, mapping={self.mapping!r})" class OrderedSet(MutableSet[T]): """A simple ordered set. The API matches the builtin set, but it preserves insertion order of elements, like a dict. Note that, unlike in an OrderedDict, equality tests are not order-sensitive. """ _d: dict[T, None] __slots__ = ("_d",) def __init__(self, values: Iterable[T] | None = None): self._d = {} if values is not None: self.update(values) # Required methods for MutableSet def __contains__(self, value: Hashable) -> bool: return value in self._d def __iter__(self) -> Iterator[T]: return iter(self._d) def __len__(self) -> int: return len(self._d) def add(self, value: T) -> None: self._d[value] = None def discard(self, value: T) -> None: del self._d[value] # Additional methods def update(self, values: Iterable[T]) -> None: self._d.update(dict.fromkeys(values)) def __repr__(self) -> str: return f"{type(self).__name__}({list(self)!r})" class NdimSizeLenMixin: """Mixin class that extends a class that defines a ``shape`` property to one that also defines ``ndim``, ``size`` and ``__len__``. """ __slots__ = () @property def ndim(self: Any) -> int: """ Number of array dimensions. See Also -------- numpy.ndarray.ndim """ return len(self.shape) @property def size(self: Any) -> int: """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the array’s dimensions. See Also -------- numpy.ndarray.size """ return math.prod(self.shape) def __len__(self: Any) -> int: try: return self.shape[0] except IndexError as err: raise TypeError("len() of unsized object") from err class NDArrayMixin(NdimSizeLenMixin): """Mixin class for making wrappers of N-dimensional arrays that conform to the ndarray interface required for the data argument to Variable objects. A subclass should set the `array` property and override one or more of `dtype`, `shape` and `__getitem__`. """ __slots__ = () @property def dtype(self: Any) -> np.dtype: return self.array.dtype @property def shape(self: Any) -> tuple[int, ...]: return self.array.shape def __getitem__(self: Any, key): return self.array[key] def __repr__(self: Any) -> str: return f"{type(self).__name__}(array={self.array!r})" @contextlib.contextmanager def close_on_error(f): """Context manager to ensure that a file opened by xarray is closed if an exception is raised before the user sees the file object. """ try: yield except Exception: f.close() raise def is_remote_uri(path: str) -> bool: """Finds URLs of the form protocol:// or protocol:: This also matches for http[s]://, which were the only remote URLs supported in <=v0.16.2. 
""" return bool(re.search(r"^[a-z][a-z0-9]*(\://|\:\:)", path)) def read_magic_number_from_file(filename_or_obj, count=8) -> bytes: # check byte header to determine file type if not isinstance(filename_or_obj, io.IOBase): raise TypeError(f"cannot read the magic number from {type(filename_or_obj)}") if filename_or_obj.tell() != 0: filename_or_obj.seek(0) magic_number = filename_or_obj.read(count) filename_or_obj.seek(0) return magic_number def try_read_magic_number_from_path(pathlike, count=8) -> bytes | None: if isinstance(pathlike, str) or hasattr(pathlike, "__fspath__"): path = os.fspath(pathlike) try: with open(path, "rb") as f: return read_magic_number_from_file(f, count) except (FileNotFoundError, IsADirectoryError, TypeError): pass return None def try_read_magic_number_from_file_or_path(filename_or_obj, count=8) -> bytes | None: magic_number = try_read_magic_number_from_path(filename_or_obj, count) if magic_number is None: with contextlib.suppress(TypeError): magic_number = read_magic_number_from_file(filename_or_obj, count) return magic_number def is_uniform_spaced(arr, **kwargs) -> bool: """Return True if values of an array are uniformly spaced and sorted. >>> is_uniform_spaced(range(5)) True >>> is_uniform_spaced([-4, 0, 100]) False kwargs are additional arguments to ``np.isclose`` """ arr = np.array(arr, dtype=float) diffs = np.diff(arr) return bool(np.isclose(diffs.min(), diffs.max(), **kwargs)) def hashable(v: Any) -> TypeGuard[Hashable]: """Determine whether `v` can be hashed.""" try: hash(v) except TypeError: return False return True def iterable(v: Any) -> TypeGuard[Iterable[Any]]: """Determine whether `v` is iterable.""" try: iter(v) except TypeError: return False return True def iterable_of_hashable(v: Any) -> TypeGuard[Iterable[Hashable]]: """Determine whether `v` is an Iterable of Hashables.""" try: it = iter(v) except TypeError: return False return all(hashable(elm) for elm in it) def decode_numpy_dict_values(attrs: Mapping[K, V]) -> dict[K, V]: """Convert attribute values from numpy objects to native Python objects, for use in to_dict """ attrs = dict(attrs) for k, v in attrs.items(): if isinstance(v, np.ndarray): attrs[k] = cast(V, v.tolist()) elif isinstance(v, np.generic): attrs[k] = v.item() return attrs def ensure_us_time_resolution(val): """Convert val out of numpy time, for use in to_dict. Needed because of numpy bug GH#7619""" if np.issubdtype(val.dtype, np.datetime64): val = val.astype("datetime64[us]") elif np.issubdtype(val.dtype, np.timedelta64): val = val.astype("timedelta64[us]") return val class HiddenKeyDict(MutableMapping[K, V]): """Acts like a normal dictionary, but hides certain keys.""" __slots__ = ("_data", "_hidden_keys") # ``__init__`` method required to create instance from class. def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]): self._data = data self._hidden_keys = frozenset(hidden_keys) def _raise_if_hidden(self, key: K) -> None: if key in self._hidden_keys: raise KeyError(f"Key `{key!r}` is hidden.") # The next five methods are requirements of the ABC. 
def __setitem__(self, key: K, value: V) -> None: self._raise_if_hidden(key) self._data[key] = value def __getitem__(self, key: K) -> V: self._raise_if_hidden(key) return self._data[key] def __delitem__(self, key: K) -> None: self._raise_if_hidden(key) del self._data[key] def __iter__(self) -> Iterator[K]: for k in self._data: if k not in self._hidden_keys: yield k def __len__(self) -> int: num_hidden = len(self._hidden_keys & self._data.keys()) return len(self._data) - num_hidden def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable: """Get an new dimension name based on new_dim, that is not used in dims. If the same name exists, we add an underscore(s) in the head. Example1: dims: ['a', 'b', 'c'] new_dim: ['_rolling'] -> ['_rolling'] Example2: dims: ['a', 'b', 'c', '_rolling'] new_dim: ['_rolling'] -> ['__rolling'] """ while new_dim in dims: new_dim = "_" + str(new_dim) return new_dim def drop_dims_from_indexers( indexers: Mapping[Any, Any], dims: Iterable[Hashable] | Mapping[Any, int], missing_dims: ErrorOptionsWithWarn, ) -> Mapping[Hashable, Any]: """Depending on the setting of missing_dims, drop any dimensions from indexers that are not present in dims. Parameters ---------- indexers : dict dims : sequence missing_dims : {"raise", "warn", "ignore"} """ if missing_dims == "raise": invalid = indexers.keys() - set(dims) if invalid: raise ValueError( f"Dimensions {invalid} do not exist. Expected one or more of {dims}" ) return indexers elif missing_dims == "warn": # don't modify input indexers = dict(indexers) invalid = indexers.keys() - set(dims) if invalid: warnings.warn( f"Dimensions {invalid} do not exist. Expected one or more of {dims}", stacklevel=2, ) for key in invalid: indexers.pop(key) return indexers elif missing_dims == "ignore": return {key: val for key, val in indexers.items() if key in dims} else: raise ValueError( f"Unrecognised option {missing_dims} for missing_dims argument" ) @overload def parse_dims_as_tuple( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[True] = True, ) -> tuple[Hashable, ...]: ... @overload def parse_dims_as_tuple( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[False], ) -> tuple[Hashable, ...] | EllipsisType | None: ... def parse_dims_as_tuple( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: bool = True, ) -> tuple[Hashable, ...] | EllipsisType | None: """Parse one or more dimensions. A single dimension must be always a str, multiple dimensions can be Hashables. This supports e.g. using a tuple as a dimension. If you supply e.g. a set of dimensions the order cannot be conserved, but for sequences it will be. Parameters ---------- dim : str, Iterable of Hashable, "..." or None Dimension(s) to parse. all_dims : tuple of Hashable All possible dimensions. check_exists: bool, default: True if True, check if dim is a subset of all_dims. replace_none : bool, default: True If True, return all_dims if dim is None or "...". Returns ------- parsed_dims : tuple of Hashable Input dimensions as a tuple. """ if dim is None or dim is ...: if replace_none: return all_dims return dim if isinstance(dim, str): dim = (dim,) if check_exists: _check_dims(set(dim), set(all_dims)) return tuple(dim) @overload def parse_dims_as_set( dim: Dims, all_dims: set[Hashable], *, check_exists: bool = True, replace_none: Literal[True] = True, ) -> set[Hashable]: ... 
@overload def parse_dims_as_set( dim: Dims, all_dims: set[Hashable], *, check_exists: bool = True, replace_none: Literal[False], ) -> set[Hashable] | EllipsisType | None: ... def parse_dims_as_set( dim: Dims, all_dims: set[Hashable], *, check_exists: bool = True, replace_none: bool = True, ) -> set[Hashable] | EllipsisType | None: """Like parse_dims_as_tuple, but returning a set instead of a tuple.""" # TODO: Consider removing parse_dims_as_tuple? if dim is None or dim is ...: if replace_none: return all_dims return dim if isinstance(dim, str): dim = {dim} dim = set(dim) if check_exists: _check_dims(dim, all_dims) return dim @overload def parse_ordered_dims( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[True] = True, ) -> tuple[Hashable, ...]: ... @overload def parse_ordered_dims( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: Literal[False], ) -> tuple[Hashable, ...] | EllipsisType | None: ... def parse_ordered_dims( dim: Dims, all_dims: tuple[Hashable, ...], *, check_exists: bool = True, replace_none: bool = True, ) -> tuple[Hashable, ...] | EllipsisType | None: """Parse one or more dimensions. A single dimension must be always a str, multiple dimensions can be Hashables. This supports e.g. using a tuple as a dimension. An ellipsis ("...") in a sequence of dimensions will be replaced with all remaining dimensions. This only makes sense when the input is a sequence and not e.g. a set. Parameters ---------- dim : str, Sequence of Hashable or "...", "..." or None Dimension(s) to parse. If "..." appears in a Sequence it always gets replaced with all remaining dims all_dims : tuple of Hashable All possible dimensions. check_exists: bool, default: True if True, check if dim is a subset of all_dims. replace_none : bool, default: True If True, return all_dims if dim is None. Returns ------- parsed_dims : tuple of Hashable Input dimensions as a tuple. """ if dim is not None and dim is not ... and not isinstance(dim, str) and ... in dim: dims_set: set[Hashable | EllipsisType] = set(dim) all_dims_set = set(all_dims) if check_exists: _check_dims(dims_set, all_dims_set) if len(all_dims_set) != len(all_dims): raise ValueError("Cannot use ellipsis with repeated dims") dims = tuple(dim) if dims.count(...) > 1: raise ValueError("More than one ellipsis supplied") other_dims = tuple(d for d in all_dims if d not in dims_set) idx = dims.index(...) return dims[:idx] + other_dims + dims[idx + 1 :] else: # mypy cannot resolve that the sequence cannot contain "..." return parse_dims_as_tuple( # type: ignore[call-overload] dim=dim, all_dims=all_dims, check_exists=check_exists, replace_none=replace_none, ) def _check_dims(dim: AbstractSet[Hashable], all_dims: AbstractSet[Hashable]) -> None: wrong_dims = (dim - all_dims) - {...} if wrong_dims: wrong_dims_str = ", ".join(f"'{d}'" for d in wrong_dims) raise ValueError( f"Dimension(s) {wrong_dims_str} do not exist. Expected one or more of {all_dims}" ) _Accessor = TypeVar("_Accessor") class UncachedAccessor(Generic[_Accessor]): """Acts like a property, but on both classes and class instances This class is necessary because some tools (e.g. pydoc and sphinx) inspect classes for which property returns itself and not the accessor. """ def __init__(self, accessor: type[_Accessor]) -> None: self._accessor = accessor @overload def __get__(self, obj: None, cls) -> type[_Accessor]: ... @overload def __get__(self, obj: object, cls) -> _Accessor: ... 
def __get__(self, obj: object | None, cls) -> type[_Accessor] | _Accessor: if obj is None: return self._accessor return self._accessor(obj) # type: ignore[call-arg] # assume it is a valid accessor! # Singleton type, as per https://github.com/python/typing/pull/240 class Default(Enum): token = 0 _default = Default.token def iterate_nested(nested_list): for item in nested_list: if isinstance(item, list): yield from iterate_nested(item) else: yield item def contains_only_chunked_or_numpy(obj) -> bool: """Returns True if xarray object contains only numpy arrays or chunked arrays (i.e. pure dask or cubed). Expects obj to be Dataset or DataArray""" from xarray.core.dataarray import DataArray from xarray.core.indexing import ExplicitlyIndexed from xarray.namedarray.pycompat import is_chunked_array if isinstance(obj, DataArray): obj = obj._to_temp_dataset() return all( isinstance(var._data, ExplicitlyIndexed | np.ndarray) or is_chunked_array(var._data) for var in obj._variables.values() ) def find_stack_level(test_mode=False) -> int: """Find the first place in the stack that is not inside xarray or the Python standard library. This is unless the code emanates from a test, in which case we would prefer to see the xarray source. This function is taken from pandas and modified to exclude standard library paths. Parameters ---------- test_mode : bool Flag used for testing purposes to switch off the detection of test directories in the stack trace. Returns ------- stacklevel : int First level in the stack that is not part of xarray or the Python standard library. """ import xarray as xr pkg_dir = Path(xr.__file__).parent test_dir = pkg_dir / "tests" std_lib_init = sys.modules["os"].__file__ # Mostly to appease mypy; I don't think this can happen... if std_lib_init is None: return 0 std_lib_dir = Path(std_lib_init).parent frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if ( fname.startswith(str(pkg_dir)) and (not fname.startswith(str(test_dir)) or test_mode) ) or ( fname.startswith(str(std_lib_dir)) and "site-packages" not in fname and "dist-packages" not in fname ): frame = frame.f_back n += 1 else: break return n def emit_user_level_warning(message, category=None) -> None: """Emit a warning at the user level by inspecting the stack trace.""" stacklevel = find_stack_level() return warnings.warn(message, category=category, stacklevel=stacklevel) def consolidate_dask_from_array_kwargs( from_array_kwargs: dict[Any, Any], name: str | None = None, lock: bool | None = None, inline_array: bool | None = None, ) -> dict[Any, Any]: """ Merge dask-specific kwargs with arbitrary from_array_kwargs dict. Temporary function, to be deleted once explicitly passing dask-specific kwargs to .chunk() is deprecated. 
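    For illustration (hypothetical call; the helper fills in default values for
    dask-specific kwargs that were not passed):

    >>> consolidate_dask_from_array_kwargs({}, lock=True)
    {'name': None, 'lock': True, 'inline_array': False}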
""" from_array_kwargs = _resolve_doubly_passed_kwarg( from_array_kwargs, kwarg_name="name", passed_kwarg_value=name, default=None, err_msg_dict_name="from_array_kwargs", ) from_array_kwargs = _resolve_doubly_passed_kwarg( from_array_kwargs, kwarg_name="lock", passed_kwarg_value=lock, default=False, err_msg_dict_name="from_array_kwargs", ) from_array_kwargs = _resolve_doubly_passed_kwarg( from_array_kwargs, kwarg_name="inline_array", passed_kwarg_value=inline_array, default=False, err_msg_dict_name="from_array_kwargs", ) return from_array_kwargs def _resolve_doubly_passed_kwarg( kwargs_dict: dict[Any, Any], kwarg_name: str, passed_kwarg_value: str | bool | None, default: bool | None, err_msg_dict_name: str, ) -> dict[Any, Any]: # if in kwargs_dict but not passed explicitly then just pass kwargs_dict through unaltered if kwarg_name in kwargs_dict and passed_kwarg_value is None: pass # if passed explicitly but not in kwargs_dict then use that elif kwarg_name not in kwargs_dict and passed_kwarg_value is not None: kwargs_dict[kwarg_name] = passed_kwarg_value # if in neither then use default elif kwarg_name not in kwargs_dict and passed_kwarg_value is None: kwargs_dict[kwarg_name] = default # if in both then raise else: raise ValueError( f"argument {kwarg_name} cannot be passed both as a keyword argument and within " f"the {err_msg_dict_name} dictionary" ) return kwargs_dict def attempt_import(module: str) -> ModuleType: """Import an optional dependency, and raise an informative error on failure. Parameters ---------- module : str Module to import. For example, ``'zarr'`` or ``'matplotlib.pyplot'``. Returns ------- module : ModuleType The Imported module. Raises ------ ImportError If the module could not be imported. Notes ----- Static type checkers will not be able to infer the type of the returned module, so it is recommended to precede this function with a direct import of the module, guarded by an ``if TYPE_CHECKING`` block, to preserve type checker functionality. See the examples section below for a demonstration. Examples -------- >>> from xarray.core.utils import attempt_import >>> if TYPE_CHECKING: ... import zarr ... else: ... zarr = attempt_import("zarr") ... """ install_mapping = dict(nc_time_axis="nc-time-axis") package_purpose = dict( zarr="for working with Zarr stores", cftime="for working with non-standard calendars", matplotlib="for plotting", hypothesis="for the `xarray.testing.strategies` submodule", ) package_name = module.split(".", maxsplit=1)[0] # e.g. "zarr" from "zarr.storage" install_name = install_mapping.get(package_name, package_name) reason = package_purpose.get(package_name, "") try: return importlib.import_module(module) except ImportError as e: raise ImportError( f"The {install_name} package is required {reason}" " but could not be imported." " Please install it with your package manager (e.g. conda or pip)." ) from e _DEFAULT_NAME = ReprObject("") def result_name(objects: Iterable[Any]) -> Any: # use the same naming heuristics as pandas: # https://github.com/blaze/blaze/issues/458#issuecomment-51936356 names = {getattr(obj, "name", _DEFAULT_NAME) for obj in objects} names.discard(_DEFAULT_NAME) if len(names) == 1: (name,) = names else: name = None return name def _get_func_args(func, param_names): """Use `inspect.signature` to try accessing `func` args. Otherwise, ensure they are provided by user. 
""" try: func_args = inspect.signature(func).parameters except ValueError as err: func_args = {} if not param_names: raise ValueError( "Unable to inspect `func` signature, and `param_names` was not provided." ) from err if param_names: params = param_names else: params = list(func_args)[1:] if any( (p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values() ): raise ValueError( "`param_names` must be provided because `func` takes variable length arguments." ) return params, func_args xarray-2025.09.0/xarray/core/variable.py000066400000000000000000003436601505620616400177550ustar00rootroot00000000000000from __future__ import annotations import copy import itertools import math import numbers import warnings from collections.abc import Callable, Hashable, Mapping, Sequence from functools import partial from types import EllipsisType from typing import TYPE_CHECKING, Any, NoReturn, cast import numpy as np import pandas as pd from numpy.typing import ArrayLike from packaging.version import Version import xarray as xr # only for Dataset and DataArray from xarray.compat.array_api_compat import to_like_array from xarray.computation import ops from xarray.computation.arithmetic import VariableArithmetic from xarray.core import common, dtypes, duck_array_ops, indexing, nputils, utils from xarray.core.common import AbstractArray from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( BasicIndexer, CoordinateTransformIndexingAdapter, OuterIndexer, PandasIndexingAdapter, VectorizedIndexer, as_indexable, ) from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.core.utils import ( OrderedSet, _default, consolidate_dask_from_array_kwargs, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, emit_user_level_warning, ensure_us_time_resolution, infix_dims, is_allowed_extension_array, is_dict_like, is_duck_array, is_duck_dask_array, maybe_coerce_to_str, ) from xarray.namedarray.core import NamedArray, _raise_if_any_duplicate_dimensions from xarray.namedarray.parallelcompat import get_chunked_array_type from xarray.namedarray.pycompat import ( async_to_duck_array, integer_types, is_0d_dask_array, is_chunked_array, to_duck_array, ) from xarray.namedarray.utils import module_available from xarray.util.deprecation_helpers import _deprecate_positional_args, deprecate_dims NON_NUMPY_SUPPORTED_ARRAY_TYPES = ( indexing.ExplicitlyIndexed, pd.Index, pd.api.extensions.ExtensionArray, PandasExtensionArray, ) # https://github.com/python/mypy/issues/224 BASIC_INDEXING_TYPES = integer_types + (slice,) UNSUPPORTED_EXTENSION_ARRAY_TYPES = ( pd.arrays.DatetimeArray, pd.arrays.TimedeltaArray, pd.arrays.NumpyExtensionArray, # type: ignore[attr-defined] ) if TYPE_CHECKING: from xarray.core.types import ( Dims, ErrorOptionsWithWarn, PadModeOptions, PadReflectOptions, QuantileMethods, Self, T_Chunks, T_DuckArray, T_VarPadConstantValues, ) from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint class MissingDimensionsError(ValueError): """Error class used when we can't safely guess a dimension name.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def as_variable( obj: T_DuckArray | Any, name=None, auto_convert: bool = True ) -> Variable | IndexVariable: """Convert an object into a Variable. Parameters ---------- obj : object Object to convert into a Variable. - If the object is already a Variable, return a shallow copy. 
- Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new Variable. - If all else fails, attempt to convert the object into a Variable by unpacking it into the arguments for creating a new Variable. name : str, optional If provided: - `obj` can be a 1D array, which is assumed to label coordinate values along a dimension of this given name. - Variables with name matching one of their dimensions are converted into `IndexVariable` objects. auto_convert : bool, optional For internal use only! If True, convert a "dimension" variable into an IndexVariable object (deprecated). Returns ------- var : Variable The newly created variable. """ from xarray.core.dataarray import DataArray # TODO: consider extending this method to automatically handle Iris and if isinstance(obj, DataArray): # extract the primary Variable from DataArrays obj = obj.variable if isinstance(obj, Variable): obj = obj.copy(deep=False) elif isinstance(obj, tuple): try: dims_, data_, *attrs = obj except ValueError as err: raise ValueError( f"Tuple {obj} is not in the form (dims, data[, attrs])" ) from err if isinstance(data_, DataArray): raise TypeError( f"Variable {name!r}: Using a DataArray object to construct a variable is" " ambiguous, please extract the data using the .data property." ) try: obj = Variable(dims_, data_, *attrs) except (TypeError, ValueError) as error: raise error.__class__( f"Variable {name!r}: Could not convert tuple of form " f"(dims, data[, attrs, encoding]): {obj} to Variable." ) from error elif utils.is_scalar(obj): obj = Variable([], obj) elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None: obj = Variable(obj.name, obj) elif isinstance(obj, set | dict): raise TypeError(f"variable {name!r} has invalid type {type(obj)!r}") elif name is not None: data: T_DuckArray = as_compatible_data(obj) if data.ndim != 1: raise MissingDimensionsError( f"cannot set variable {name!r} with {data.ndim!r}-dimensional data " "without explicit dimension names. Pass a tuple of " "(dims, data) instead." ) obj = Variable(name, data, fastpath=True) else: raise TypeError( f"Variable {name!r}: unable to convert object into a variable without an " f"explicit list of dimensions: {obj!r}" ) if auto_convert and name is not None and name in obj.dims and obj.ndim == 1: # automatically convert the Variable into an Index emit_user_level_warning( f"variable {name!r} with name matching its dimension will not be " "automatically converted into an `IndexVariable` object in the future.", FutureWarning, ) obj = obj.to_index_variable() return obj def _maybe_wrap_data(data): """ Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexingAdapter and LazilyIndexedArray should all pass through unmodified. """ if isinstance(data, pd.Index): return PandasIndexingAdapter(data) if isinstance(data, UNSUPPORTED_EXTENSION_ARRAY_TYPES): return data.to_numpy() if isinstance( data, pd.api.extensions.ExtensionArray ) and is_allowed_extension_array(data): return PandasExtensionArray(data) return data def _possibly_convert_objects(values): """Convert object arrays into datetime64 and timedelta64 according to the pandas convention. For backwards compat, as of 3.0.0 pandas, object dtype inputs are cast to strings by `pandas.Series` but we output them as object dtype with the input metadata preserved as well. 
* datetime.datetime * datetime.timedelta * pd.Timestamp * pd.Timedelta """ as_series = pd.Series(values.ravel(), copy=False) result = np.asarray(as_series).reshape(values.shape) if not result.flags.writeable: # GH8843, pandas copy-on-write mode creates read-only arrays by default try: result.flags.writeable = True except ValueError: result = result.copy() # For why we need this behavior: https://github.com/pandas-dev/pandas/issues/61938 # Object datatype inputs that are strings # will be converted to strings by `pandas.Series`, and as of 3.0.0, lose # `dtype.metadata`. If the roundtrip back to numpy in this function yields an # object array again, the dtype.metadata will be preserved. if ( result.dtype.kind == "O" and values.dtype.kind == "O" and Version(pd.__version__) >= Version("3.0.0dev0") ): result.dtype = values.dtype return result def as_compatible_data( data: T_DuckArray | ArrayLike, fastpath: bool = False ) -> T_DuckArray: """Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xarray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary. """ if fastpath and getattr(data, "ndim", None) is not None: return cast("T_DuckArray", data) from xarray.core.dataarray import DataArray # TODO: do this uwrapping in the Variable/NamedArray constructor instead. if isinstance(data, Variable): return cast("T_DuckArray", data._data) # TODO: do this uwrapping in the DataArray constructor instead. if isinstance(data, DataArray): return cast("T_DuckArray", data._variable._data) def convert_non_numpy_type(data): return cast("T_DuckArray", _maybe_wrap_data(data)) if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return convert_non_numpy_type(data) if isinstance(data, tuple): data = utils.to_0d_object_array(data) # we don't want nested self-described arrays if isinstance(data, pd.Series | pd.DataFrame): if ( isinstance(data, pd.Series) and is_allowed_extension_array(data.array) # Some datetime types are not allowed as well as backing Variable types and not isinstance(data.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES) ): pandas_data = data.array else: pandas_data = data.values # type: ignore[assignment] if isinstance(pandas_data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return convert_non_numpy_type(pandas_data) else: data = pandas_data if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) if mask.any(): dtype, fill_value = dtypes.maybe_promote(data.dtype) data = duck_array_ops.where_method(data, ~mask, fill_value) else: data = np.asarray(data) if isinstance(data, np.matrix): data = np.asarray(data) # immediately return array-like types except `numpy.ndarray` and `numpy` scalars # compare types with `is` instead of `isinstance` to allow `numpy.ndarray` subclasses is_numpy = type(data) is np.ndarray or isinstance(data, np.generic) if not is_numpy and ( hasattr(data, "__array_function__") or hasattr(data, "__array_namespace__") ): return cast("T_DuckArray", data) # anything left will be converted to `numpy.ndarray`, including `numpy` scalars data = np.asarray(data) if data.dtype.kind in "OMm": data = _possibly_convert_objects(data) return _maybe_wrap_data(data) def _as_array_or_item(data): """Return the given values as a numpy array, or as an individual item if it's a 0d datetime64 or timedelta64 array. 
Importantly, this function does not copy data if it is already an ndarray - otherwise, it will not be possible to update Variable values in place. This function mostly exists because 0-dimensional ndarrays with dtype=datetime64 are broken :( https://github.com/numpy/numpy/issues/4337 https://github.com/numpy/numpy/issues/7619 TODO: remove this (replace with np.asarray) once these issues are fixed """ data = np.asarray(data) if data.ndim == 0: kind = data.dtype.kind if kind in "mM": unit, _ = np.datetime_data(data.dtype) if kind == "M": data = np.datetime64(data, unit) elif kind == "m": data = np.timedelta64(data, unit) return data class Variable(NamedArray, AbstractArray, VariableArithmetic): """A netcdf-like variable consisting of dimensions, data and attributes which describe a single Array. A single Variable object is not fully described outside the context of its parent Dataset (if you want such a fully described object, use a DataArray instead). The main functional difference between Variables and numpy arrays is that numerical operations on Variables implement array broadcasting by dimension name. For example, adding an Variable with dimensions `('time',)` to another Variable with dimensions `('space',)` results in a new Variable with dimensions `('time', 'space')`. Furthermore, numpy reduce operations like ``mean`` or ``sum`` are overwritten to take a "dimension" argument instead of an "axis". Variables are light-weight objects used as the building block for datasets. They are more primitive objects, so operations with them provide marginally higher performance than using DataArrays. However, manipulating data in the form of a Dataset or DataArray should almost always be preferred, because they can use more complete metadata in context of coordinate labels. """ __slots__ = ("_attrs", "_data", "_dims", "_encoding") def __init__( self, dims, data: T_DuckArray | ArrayLike, attrs=None, encoding=None, fastpath=False, ): """ Parameters ---------- dims : str or sequence of str Name(s) of the the data dimension(s). Must be either a string (only for 1D data) or a sequence of strings with length equal to the number of dimensions. data : array_like Data array which supports numpy-like data access. attrs : dict_like or None, optional Attributes to assign to the new variable. If None (default), an empty attribute dictionary is initialized. (see FAQ, :ref:`approach to metadata`) encoding : dict_like or None, optional Dictionary specifying how to encode this array's data into a serialized format like netCDF4. Currently used keys (for netCDF) include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'. Well-behaved code to serialize a Variable should ignore unrecognized encoding items. 
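        Examples
        --------
        A small illustrative construction (arbitrary data and attributes):

        >>> var = Variable(("x",), [1, 2, 3], attrs={"units": "m"})
        >>> var.dims, var.shape
        (('x',), (3,))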
""" super().__init__( dims=dims, data=as_compatible_data(data, fastpath=fastpath), attrs=attrs ) self._encoding: dict[Any, Any] | None = None if encoding is not None: self.encoding = encoding def _new( self, dims=_default, data=_default, attrs=_default, ): dims_ = copy.copy(self._dims) if dims is _default else dims if attrs is _default: attrs_ = None if self._attrs is None else self._attrs.copy() else: attrs_ = attrs if data is _default: return type(self)(dims_, copy.copy(self._data), attrs_) else: cls_ = type(self) return cls_(dims_, data, attrs_) @property def _in_memory(self) -> bool: if isinstance( self._data, PandasIndexingAdapter | CoordinateTransformIndexingAdapter ): return self._data._in_memory return isinstance( self._data, np.ndarray | np.number | PandasExtensionArray, ) or ( isinstance(self._data, indexing.MemoryCachedArray) and isinstance(self._data.array, indexing.NumpyIndexingAdapter) ) @property def data(self): """ The Variable's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved. See Also -------- Variable.to_numpy Variable.as_numpy Variable.values """ if isinstance(self._data, PandasExtensionArray): duck_array = self._data.array elif isinstance(self._data, indexing.ExplicitlyIndexed): duck_array = self._data.get_duck_array() elif is_duck_array(self._data): duck_array = self._data else: duck_array = self.values if isinstance(duck_array, PandasExtensionArray): # even though PandasExtensionArray is a duck array, # we should not return the PandasExtensionArray wrapper, # and instead return the underlying data. return duck_array.array return duck_array @data.setter # type: ignore[override,unused-ignore] def data(self, data: T_DuckArray | ArrayLike) -> None: data = as_compatible_data(data) self._check_shape(data) self._data = data def astype( self, dtype, *, order=None, casting=None, subok=None, copy=None, keep_attrs=True, ) -> Self: """ Copy of the Variable object, with data cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. β€˜C’ means C order, β€˜F’ means Fortran order, β€˜A’ means β€˜F’ order if all the arrays are Fortran contiguous, β€˜C’ order otherwise, and β€˜K’ means as close to the order the array elements appear in memory as possible. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to False and the `dtype` requirement is satisfied, the input array is returned instead of a copy. keep_attrs : bool, optional By default, astype keeps attributes. Set to False to remove attributes in the returned object. Returns ------- out : same as object New object with data cast to the specified type. 
Notes ----- The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed through to the ``astype`` method of the underlying array when a value different than ``None`` is supplied. Make sure to only supply these arguments if the underlying array class supports them. See Also -------- numpy.ndarray.astype dask.array.Array.astype sparse.COO.astype """ from xarray.computation.apply_ufunc import apply_ufunc kwargs = dict(order=order, casting=casting, subok=subok, copy=copy) kwargs = {k: v for k, v in kwargs.items() if v is not None} return apply_ufunc( duck_array_ops.astype, self, dtype, kwargs=kwargs, keep_attrs=keep_attrs, dask="allowed", ) def _dask_finalize(self, results, array_func, *args, **kwargs): data = array_func(results, *args, **kwargs) return Variable(self._dims, data, attrs=self._attrs, encoding=self._encoding) @property def values(self) -> np.ndarray: """The variable's data as a numpy.ndarray""" return _as_array_or_item(self._data) @values.setter def values(self, values): self.data = values def to_base_variable(self) -> Variable: """Return this variable as a base xarray.Variable""" return Variable( self._dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_variable = utils.alias(to_base_variable, "to_variable") def to_index_variable(self) -> IndexVariable: """Return this variable as an xarray.IndexVariable""" return IndexVariable( self._dims, self._data, self._attrs, encoding=self._encoding, fastpath=True ) to_coord = utils.alias(to_index_variable, "to_coord") def _to_index(self) -> pd.Index: return self.to_index_variable()._to_index() def to_index(self) -> pd.Index: """Convert this variable to a pandas.Index""" return self.to_index_variable().to_index() def to_dict( self, data: bool | str = "list", encoding: bool = False ) -> dict[str, Any]: """Dictionary representation of variable.""" item: dict[str, Any] = { "dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs), } if data is not False: if data in [True, "list"]: item["data"] = ensure_us_time_resolution(self.to_numpy()).tolist() elif data == "array": item["data"] = ensure_us_time_resolution(self.data) else: msg = 'data argument must be bool, "list", or "array"' raise ValueError(msg) else: item.update({"dtype": str(self.dtype), "shape": self.shape}) if encoding: item["encoding"] = dict(self.encoding) return item def _item_key_to_tuple(self, key): if is_dict_like(key): return tuple(key.get(dim, slice(None)) for dim in self.dims) else: return key def _broadcast_indexes(self, key): """Prepare an indexing key for an indexing operation. Parameters ---------- key : int, slice, array-like, dict or tuple of integer, slice and array-like Any valid input for indexing. Returns ------- dims : tuple Dimension of the resultant variable. indexers : IndexingTuple subclass Tuple of integer, array-like, or slices to use when indexing self._data. The type of this argument indicates the type of indexing to perform, either basic, outer or vectorized. new_order : Optional[Sequence[int]] Optional reordering to do on the result of indexing. If not None, the first len(new_order) indexing should be moved to these positions. 
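        Examples
        --------
        A rough illustration (hypothetical shapes): for a variable with dims
        ``('x', 'y')``, a key like ``{"x": 0}`` takes the basic-indexing path
        and returns ``dims == ('y',)``, while two unlabeled 1-D integer arrays
        keyed on different dimensions take the outer-indexing path.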
""" key = self._item_key_to_tuple(key) # key is a tuple # key is a tuple of full size key = indexing.expanded_indexer(key, self.ndim) # Convert a scalar Variable to a 0d-array key = tuple( k.data if isinstance(k, Variable) and k.ndim == 0 else k for k in key ) # Convert a 0d numpy arrays to an integer # dask 0d arrays are passed through key = tuple( k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key ) if all( (isinstance(k, BASIC_INDEXING_TYPES) and not isinstance(k, bool)) for k in key ): return self._broadcast_indexes_basic(key) self._validate_indexers(key) # Detect it can be mapped as an outer indexer # If all key is unlabeled, or # key can be mapped as an OuterIndexer. if all(not isinstance(k, Variable) for k in key): return self._broadcast_indexes_outer(key) # If all key is 1-dimensional and there are no duplicate labels, # key can be mapped as an OuterIndexer. dims = [] for k, d in zip(key, self.dims, strict=True): if isinstance(k, Variable): if len(k.dims) > 1: return self._broadcast_indexes_vectorized(key) dims.append(k.dims[0]) elif not isinstance(k, integer_types): dims.append(d) if len(set(dims)) == len(dims): return self._broadcast_indexes_outer(key) return self._broadcast_indexes_vectorized(key) def _broadcast_indexes_basic(self, key): dims = tuple( dim for k, dim in zip(key, self.dims, strict=True) if not isinstance(k, integer_types) ) return dims, BasicIndexer(key), None def _validate_indexers(self, key): """Make sanity checks""" for dim, k in zip(self.dims, key, strict=True): if not isinstance(k, BASIC_INDEXING_TYPES): if not isinstance(k, Variable): if not is_duck_array(k): k = np.asarray(k) if k.ndim > 1: raise IndexError( "Unlabeled multi-dimensional array cannot be " f"used for indexing: {k}" ) if k.dtype.kind == "b": if self.shape[self.get_axis_num(dim)] != len(k): raise IndexError( f"Boolean array size {len(k):d} is used to index array " f"with shape {self.shape}." ) if k.ndim > 1: raise IndexError( f"{k.ndim}-dimensional boolean indexing is not supported. " ) if is_duck_dask_array(k.data): raise KeyError( "Indexing with a boolean dask array is not allowed. " "This will result in a dask array of unknown shape. " "Such arrays are unsupported by Xarray." "Please compute the indexer first using .compute()" ) if getattr(k, "dims", (dim,)) != (dim,): raise IndexError( "Boolean indexer should be unlabeled or on the " "same dimension to the indexed array. Indexer is " f"on {k.dims} but the target dimension is {dim}." 
) def _broadcast_indexes_outer(self, key): # drop dim if k is integer or if k is a 0d dask array dims = tuple( k.dims[0] if isinstance(k, Variable) else dim for k, dim in zip(key, self.dims, strict=True) if (not isinstance(k, integer_types) and not is_0d_dask_array(k)) ) new_key = [] for k in key: if isinstance(k, Variable): k = k.data if not isinstance(k, BASIC_INDEXING_TYPES): if not is_duck_array(k): k = np.asarray(k) if k.size == 0: # Slice by empty list; numpy could not infer the dtype k = k.astype(int) elif k.dtype.kind == "b": (k,) = np.nonzero(k) new_key.append(k) return dims, OuterIndexer(tuple(new_key)), None def _broadcast_indexes_vectorized(self, key): variables = [] out_dims_set = OrderedSet() for dim, value in zip(self.dims, key, strict=True): if isinstance(value, slice): out_dims_set.add(dim) else: variable = ( value if isinstance(value, Variable) else as_variable(value, name=dim, auto_convert=False) ) if variable.dims == (dim,): variable = variable.to_index_variable() if variable.dtype.kind == "b": # boolean indexing case (variable,) = variable._nonzero() variables.append(variable) out_dims_set.update(variable.dims) variable_dims = set() for variable in variables: variable_dims.update(variable.dims) slices = [] for i, (dim, value) in enumerate(zip(self.dims, key, strict=True)): if isinstance(value, slice): if dim in variable_dims: # We only convert slice objects to variables if they share # a dimension with at least one other variable. Otherwise, # we can equivalently leave them as slices aknd transpose # the result. This is significantly faster/more efficient # for most array backends. values = np.arange(*value.indices(self.sizes[dim])) variables.insert(i - len(slices), Variable((dim,), values)) else: slices.append((i, value)) try: variables = _broadcast_compat_variables(*variables) except ValueError as err: raise IndexError(f"Dimensions of indexers mismatch: {key}") from err out_key = [variable.data for variable in variables] out_dims = tuple(out_dims_set) slice_positions = set() for i, value in slices: out_key.insert(i, value) new_position = out_dims.index(self.dims[i]) slice_positions.add(new_position) if slice_positions: new_order = [i for i in range(len(out_dims)) if i not in slice_positions] else: new_order = None return out_dims, VectorizedIndexer(tuple(out_key)), new_order def __getitem__(self, key) -> Self: """Return a new Variable object whose contents are consistent with getting the provided key from the underlying data. NB. __getitem__ and __setitem__ implement xarray-style indexing, where if keys are unlabeled arrays, we index the array orthogonally with them. If keys are labeled array (such as Variables), they are broadcasted with our usual scheme and then the array is indexed with the broadcasted key, like numpy's fancy indexing. If you really want to do indexing like `x[x > 0]`, manipulate the numpy array `x.values` directly. """ dims, indexer, new_order = self._broadcast_indexes(key) indexable = as_indexable(self._data) data = indexing.apply_indexer(indexable, indexer) if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def _finalize_indexing_result(self, dims, data) -> Self: """Used by IndexVariable to return IndexVariable objects when possible.""" return self._replace(dims=dims, data=data) def _getitem_with_mask(self, key, fill_value=dtypes.NA): """Index this Variable with -1 remapped to fill_value.""" # TODO(shoyer): expose this method in public API somewhere (isel?) 
and # use it for reindex. # TODO(shoyer): add a sanity check that all other integers are # non-negative # TODO(shoyer): add an optimization, remapping -1 to an adjacent value # that is actually indexed rather than mapping it to the last value # along each axis. if fill_value is dtypes.NA: fill_value = dtypes.get_fill_value(self.dtype) dims, indexer, new_order = self._broadcast_indexes(key) if self.size: if is_duck_dask_array(self._data): # dask's indexing is faster this way; also vindex does not # support negative indices yet: # https://github.com/dask/dask/pull/2967 actual_indexer = indexing.posify_mask_indexer(indexer) else: actual_indexer = indexer indexable = as_indexable(self._data) data = indexing.apply_indexer(indexable, actual_indexer) mask = indexing.create_mask(indexer, self.shape, data) # we need to invert the mask in order to pass data first. This helps # pint to choose the correct unit # TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed mask = to_like_array(mask, data) data = duck_array_ops.where( duck_array_ops.logical_not(mask), data, fill_value ) else: # array cannot be indexed along dimensions of size 0, so just # build the mask directly instead. mask = indexing.create_mask(indexer, self.shape) data = duck_array_ops.broadcast_to(fill_value, getattr(mask, "shape", ())) if new_order: data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order) return self._finalize_indexing_result(dims, data) def __setitem__(self, key, value): """__setitem__ is overloaded to access the underlying numpy values with orthogonal indexing. See __getitem__ for more details. """ dims, index_tuple, new_order = self._broadcast_indexes(key) if not isinstance(value, Variable): value = as_compatible_data(value) if value.ndim > len(dims): raise ValueError( f"shape mismatch: value array of shape {value.shape} could not be " f"broadcast to indexing result with {len(dims)} dimensions" ) if value.ndim == 0: value = Variable((), value) else: value = Variable(dims[-value.ndim :], value) # broadcast to become assignable value = value.set_dims(dims).data if new_order: value = duck_array_ops.asarray(value) value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)] value = duck_array_ops.moveaxis(value, new_order, range(len(new_order))) indexable = as_indexable(self._data) indexing.set_with_indexer(indexable, index_tuple, value) @property def encoding(self) -> dict[Any, Any]: """Dictionary of encodings on this variable.""" if self._encoding is None: encoding: dict[Any, Any] = {} self._encoding = encoding return self._encoding @encoding.setter def encoding(self, value): try: self._encoding = dict(value) except ValueError as err: raise ValueError("encoding must be castable to a dictionary") from err def reset_encoding(self) -> Self: warnings.warn( "reset_encoding is deprecated since 2023.11, use `drop_encoding` instead", stacklevel=2, ) return self.drop_encoding() def drop_encoding(self) -> Self: """Return a new Variable without encoding.""" return self._replace(encoding={}) def _copy( self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None, memo: dict[int, Any] | None = None, ) -> Self: if data is None: data_old = self._data if not isinstance(data_old, indexing.MemoryCachedArray): ndata = data_old else: # don't share caching between copies # TODO: MemoryCachedArray doesn't match the array api: ndata = indexing.MemoryCachedArray(data_old.array) # type: ignore[assignment] if deep: ndata = copy.deepcopy(ndata, memo) else: ndata = as_compatible_data(data) if 
self.shape != ndata.shape: # type: ignore[attr-defined] raise ValueError( f"Data shape {ndata.shape} must match shape of object {self.shape}" # type: ignore[attr-defined] ) attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs) encoding = ( copy.deepcopy(self._encoding, memo) if deep else copy.copy(self._encoding) ) # note: dims is already an immutable tuple return self._replace(data=ndata, attrs=attrs, encoding=encoding) def _replace( self, dims=_default, data=_default, attrs=_default, encoding=_default, ) -> Self: if dims is _default: dims = copy.copy(self._dims) if data is _default: data = copy.copy(self.data) if attrs is _default: attrs = copy.copy(self._attrs) if encoding is _default: encoding = copy.copy(self._encoding) return type(self)(dims, data, attrs, encoding, fastpath=True) def load(self, **kwargs) -> Self: """Trigger loading data into memory and return this variable. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original variable is modified and returned. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. Returns ------- object : Variable Same object but with lazy data as an in-memory array. See Also -------- dask.array.compute Variable.compute Variable.load_async DataArray.load Dataset.load """ self._data = to_duck_array(self._data, **kwargs) return self async def load_async(self, **kwargs) -> Self: """Trigger and await asynchronous loading of data into memory and return this variable. Data will be computed and/or loaded from disk or a remote source. Unlike ``.compute``, the original variable is modified and returned. Only works when opening data lazily from IO storage backends which support lazy asynchronous loading. Otherwise will raise a NotImplementedError. Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. Returns ------- object : Variable Same object but with lazy data as an in-memory array. See Also -------- dask.array.compute Variable.load Variable.compute DataArray.load_async Dataset.load_async """ self._data = await async_to_duck_array(self._data, **kwargs) return self def compute(self, **kwargs) -> Self: """Trigger loading data into memory and return a new variable. Data will be computed and/or loaded from disk or a remote source. The original variable is left unaltered. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. Returns ------- object : Variable New object with the data as an in-memory array. 
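A minimal usage sketch (the dimension name ``x`` and the values are illustrative only; note that, unlike ``load``, the original variable is left unaltered):

>>> v = Variable(("x",), [1, 2, 3])
>>> loaded = v.compute()  # returns a new Variable with in-memory data; ``v`` is unchanged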
See Also -------- dask.array.compute Variable.load Variable.load_async DataArray.compute Dataset.compute """ new = self.copy(deep=False) return new.load(**kwargs) def _shuffle( self, indices: list[list[int]], dim: Hashable, chunks: T_Chunks ) -> Self: # TODO (dcherian): consider making this public API array = self._data if is_chunked_array(array): chunkmanager = get_chunked_array_type(array) return self._replace( data=chunkmanager.shuffle( array, indexer=indices, axis=self.get_axis_num(dim), chunks=chunks, ) ) else: return self.isel({dim: np.concatenate(indices)}) def isel( self, indexers: Mapping[Any, Any] | None = None, missing_dims: ErrorOptionsWithWarn = "raise", **indexers_kwargs: Any, ) -> Self: """Return a new array indexed along the specified dimension(s). Parameters ---------- **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- obj : Array object A new Array with the selected data and dimensions. In general, the new variable's data will be a view of this variable's data, unless numpy fancy indexing was triggered by using an array indexer, in which case the data will be a copy. """ indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel") indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims) key = tuple(indexers.get(dim, slice(None)) for dim in self.dims) return self[key] def squeeze(self, dim=None): """Return a new object with squeezed data. Parameters ---------- dim : None or str or tuple of str, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. Returns ------- squeezed : same type as caller This object, but with all or a subset of the dimensions of length 1 removed. See Also -------- numpy.squeeze """ dims = common.get_squeeze_dims(self, dim) return self.isel(dict.fromkeys(dims, 0)) def _shift_one_dim(self, dim, count, fill_value=dtypes.NA): axis = self.get_axis_num(dim) if count > 0: keep = slice(None, -count) elif count < 0: keep = slice(-count, None) else: keep = slice(None) trimmed_data = self[(slice(None),) * axis + (keep,)].data if fill_value is dtypes.NA: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype width = min(abs(count), self.shape[axis]) dim_pad = (width, 0) if count >= 0 else (0, width) pads = [(0, 0) if d != dim else dim_pad for d in self.dims] data = duck_array_ops.pad( duck_array_ops.astype(trimmed_data, dtype), pads, mode="constant", constant_values=fill_value, ) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return self._replace(data=data) def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): """ Return a new Variable with shifted data. Parameters ---------- shifts : mapping of the form {dim: offset} Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left.
fill_value : scalar, optional Value to use for newly missing values **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but shifted data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift") result = self for dim, count in shifts.items(): result = result._shift_one_dim(dim, count, fill_value=fill_value) return result def _pad_options_dim_to_index( self, pad_option: Mapping[Any, int | float | tuple[int, int] | tuple[float, float]], fill_with_shape=False, ): # change number values to a tuple of two of those values for k, v in pad_option.items(): if isinstance(v, numbers.Number): pad_option[k] = (v, v) if fill_with_shape: return [ pad_option.get(d, (n, n)) for d, n in zip(self.dims, self.data.shape, strict=True) ] return [pad_option.get(d, (0, 0)) for d in self.dims] def pad( self, pad_width: Mapping[Any, int | tuple[int, int]] | None = None, mode: PadModeOptions = "constant", stat_length: ( int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None ) = None, constant_values: T_VarPadConstantValues | None = None, end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None, reflect_type: PadReflectOptions = None, keep_attrs: bool | None = None, **pad_width_kwargs: Any, ): """ Return a new Variable with padded data. Parameters ---------- pad_width : mapping of hashable to tuple of int Mapping with the form of {dim: (pad_before, pad_after)} describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad mode : str, default: "constant" See numpy / Dask docs stat_length : int, tuple or mapping of hashable to tuple Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. constant_values : scalar, tuple or mapping of hashable to scalar or tuple Used in 'constant'. The values to set the padded values for each axis. end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. reflect_type : {"even", "odd"}, optional Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. Returns ------- padded : Variable Variable with the same dimensions and attributes but padded data. 
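A brief illustrative sketch (dimension name and values are arbitrary; pad widths can also be passed as a mapping via ``pad_width``):

>>> v = Variable(("x",), [1, 2, 3])
>>> padded = v.pad(x=(1, 2), constant_values=0)  # one value before, two after, along "x"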
""" pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad") # change default behaviour of pad with mode constant if mode == "constant" and ( constant_values is None or constant_values is dtypes.NA ): dtype, constant_values = dtypes.maybe_promote(self.dtype) else: dtype = self.dtype # create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty if isinstance(stat_length, dict): stat_length = self._pad_options_dim_to_index( stat_length, fill_with_shape=True ) if isinstance(constant_values, dict): constant_values = self._pad_options_dim_to_index(constant_values) if isinstance(end_values, dict): end_values = self._pad_options_dim_to_index(end_values) # workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303 if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]: stat_length = [(n, n) for n in self.data.shape] # type: ignore[assignment] pad_width_by_index = self._pad_options_dim_to_index(pad_width) # create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty pad_option_kwargs: dict[str, Any] = {} if stat_length is not None: pad_option_kwargs["stat_length"] = stat_length if constant_values is not None: pad_option_kwargs["constant_values"] = constant_values if end_values is not None: pad_option_kwargs["end_values"] = end_values if reflect_type is not None: pad_option_kwargs["reflect_type"] = reflect_type array = duck_array_ops.pad( duck_array_ops.astype(self.data, dtype, copy=False), pad_width_by_index, mode=mode, **pad_option_kwargs, ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) attrs = self._attrs if keep_attrs else None return type(self)(self.dims, array, attrs=attrs) def _roll_one_dim(self, dim, count): axis = self.get_axis_num(dim) count %= self.shape[axis] if count != 0: indices = [slice(-count, None), slice(None, -count)] else: indices = [slice(None)] arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices] data = duck_array_ops.concatenate(arrays, axis) if is_duck_dask_array(data): # chunked data should come out with the same chunks; this makes # it feasible to combine shifted and unshifted data # TODO: remove this once dask.array automatically aligns chunks data = data.rechunk(self.data.chunks) return self._replace(data=data) def roll(self, shifts=None, **shifts_kwargs): """ Return a new Variable with rolld data. Parameters ---------- shifts : mapping of hashable to int Integer offset to roll along each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns ------- shifted : Variable Variable with the same dimensions and attributes but rolled data. """ shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll") result = self for dim, count in shifts.items(): result = result._roll_one_dim(dim, count) return result @deprecate_dims def transpose( self, *dim: Hashable | EllipsisType, missing_dims: ErrorOptionsWithWarn = "raise", ) -> Self: """Return a new Variable object with transposed dimensions. Parameters ---------- *dim : Hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Variable: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- transposed : Variable The returned object has transposed data and dimensions with the same attributes as the original. Notes ----- This operation returns a view of this variable's data. It is lazy for dask-backed Variables but not for numpy-backed Variables. See Also -------- numpy.transpose """ if len(dim) == 0: dim = self.dims[::-1] else: dim = tuple(infix_dims(dim, self.dims, missing_dims)) if len(dim) < 2 or dim == self.dims: # no need to transpose if only one dimension # or dims are in same order return self.copy(deep=False) axes = self.get_axis_num(dim) data = as_indexable(self._data).transpose(axes) return self._replace(dims=dim, data=data) @property def T(self) -> Self: return self.transpose() @deprecate_dims def set_dims(self, dim, shape=None): """Return a new variable with given set of dimensions. This method might be used to attach new dimension(s) to variable. When possible, this operation does not copy this variable's data. Parameters ---------- dim : str or sequence of str or dict Dimensions to include on the new variable. If a dict, values are used to provide the sizes of new dimensions; otherwise, new dimensions are inserted with length 1. Returns ------- Variable """ if isinstance(dim, str): dim = [dim] if shape is None and is_dict_like(dim): shape = tuple(dim.values()) missing_dims = set(self.dims) - set(dim) if missing_dims: raise ValueError( f"new dimensions {dim!r} must be a superset of " f"existing dimensions {self.dims!r}" ) self_dims = set(self.dims) expanded_dims = tuple(d for d in dim if d not in self_dims) + self.dims if self.dims == expanded_dims: # don't use broadcast_to unless necessary so the result remains # writeable if possible expanded_data = self.data elif shape is None or all( s == 1 for s, e in zip(shape, dim, strict=True) if e not in self_dims ): # "Trivial" broadcasting, i.e. 
simply inserting a new dimension # This is typically easier for duck arrays to implement # than the full "broadcast_to" semantics indexer = (None,) * (len(expanded_dims) - self.ndim) + (...,) expanded_data = self.data[indexer] else: # elif shape is not None: dims_map = dict(zip(dim, shape, strict=True)) tmp_shape = tuple(dims_map[d] for d in expanded_dims) expanded_data = duck_array_ops.broadcast_to(self._data, tmp_shape) expanded_var = Variable( expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True ) return expanded_var.transpose(*dim) def _stack_once(self, dim: list[Hashable], new_dim: Hashable): if not set(dim) <= set(self.dims): raise ValueError(f"invalid existing dimensions: {dim}") if new_dim in self.dims: raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if len(dim) == 0: # don't stack return self.copy(deep=False) other_dims = [d for d in self.dims if d not in dim] dim_order = other_dims + list(dim) reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + (-1,) new_data = duck_array_ops.reshape(reordered.data, new_shape) new_dims = reordered.dims[: len(other_dims)] + (new_dim,) return type(self)( new_dims, new_data, self._attrs, self._encoding, fastpath=True ) @partial(deprecate_dims, old_name="dimensions") def stack(self, dim=None, **dim_kwargs): """ Stack any number of existing dim into a single new dimension. New dim will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Parameters ---------- dim : mapping of hashable to tuple of hashable Mapping of form new_name=(dim1, dim2, ...) describing the names of new dim, and the existing dim that they replace. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- stacked : Variable Variable with the same attributes but stacked data. See Also -------- Variable.unstack """ dim = either_dict_or_kwargs(dim, dim_kwargs, "stack") result = self for new_dim, dims in dim.items(): result = result._stack_once(dims, new_dim) return result def _unstack_once_full(self, dim: Mapping[Any, int], old_dim: Hashable) -> Self: """ Unstacks the variable without needing an index. Unlike `_unstack_once`, this function requires the existing dimension to contain the full product of the new dimensions. """ new_dim_names = tuple(dim.keys()) new_dim_sizes = tuple(dim.values()) if old_dim not in self.dims: raise ValueError(f"invalid existing dimension: {old_dim}") if set(new_dim_names).intersection(self.dims): raise ValueError( "cannot create a new dimension with the same " "name as an existing dimension" ) if math.prod(new_dim_sizes) != self.sizes[old_dim]: raise ValueError( "the product of the new dimension sizes must " "equal the size of the old dimension" ) other_dims = [d for d in self.dims if d != old_dim] dim_order = other_dims + [old_dim] reordered = self.transpose(*dim_order) new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes new_data = duck_array_ops.reshape(reordered.data, new_shape) new_dims = reordered.dims[: len(other_dims)] + new_dim_names return type(self)( new_dims, new_data, self._attrs, self._encoding, fastpath=True ) def _unstack_once( self, index: pd.MultiIndex, dim: Hashable, fill_value=dtypes.NA, sparse: bool = False, ) -> Variable: """ Unstacks this variable given an index to unstack and the name of the dimension to which the index refers. 
""" reordered = self.transpose(..., dim) new_dim_sizes = [lev.size for lev in index.levels] new_dim_names = index.names indexer = index.codes # Potentially we could replace `len(other_dims)` with just `-1` other_dims = [d for d in self.dims if d != dim] new_shape = tuple(list(reordered.shape[: len(other_dims)]) + new_dim_sizes) new_dims = reordered.dims[: len(other_dims)] + tuple(new_dim_names) create_template: Callable if fill_value is dtypes.NA: is_missing_values = math.prod(new_shape) > math.prod(self.shape) if is_missing_values: dtype, fill_value = dtypes.maybe_promote(self.dtype) create_template = partial( duck_array_ops.full_like, fill_value=fill_value ) else: dtype = self.dtype fill_value = dtypes.get_fill_value(dtype) create_template = duck_array_ops.empty_like else: dtype = self.dtype create_template = partial(duck_array_ops.full_like, fill_value=fill_value) if sparse: # unstacking a dense multitindexed array to a sparse array from sparse import COO codes = zip(*index.codes, strict=True) if reordered.ndim == 1: indexes = codes else: sizes = itertools.product(*[range(s) for s in reordered.shape[:-1]]) tuple_indexes = itertools.product(sizes, codes) indexes = (list(itertools.chain(*x)) for x in tuple_indexes) # type: ignore[assignment] data = COO( coords=np.array(list(indexes)).T, data=self.data.astype(dtype).ravel(), fill_value=fill_value, shape=new_shape, sorted=index.is_monotonic_increasing, ) else: data = create_template(self.data, shape=new_shape, dtype=dtype) # Indexer is a list of lists of locations. Each list is the locations # on the new dimension. This is robust to the data being sparse; in that # case the destinations will be NaN / zero. data[(..., *indexer)] = reordered return self.to_base_variable()._replace(dims=new_dims, data=data) @partial(deprecate_dims, old_name="dimensions") def unstack(self, dim=None, **dim_kwargs) -> Variable: """ Unstack an existing dimension into multiple new dimensions. New dimensions will be added at the end, and the order of the data along each new dimension will be in contiguous (C) order. Note that unlike ``DataArray.unstack`` and ``Dataset.unstack``, this method requires the existing dimension to contain the full product of the new dimensions. Parameters ---------- dim : mapping of hashable to mapping of hashable to int Mapping of the form old_dim={dim1: size1, ...} describing the names of existing dimensions, and the new dimensions and sizes that they map to. **dim_kwargs The keyword arguments form of ``dim``. One of dim or dim_kwargs must be provided. Returns ------- unstacked : Variable Variable with the same attributes but unstacked data. See Also -------- Variable.stack DataArray.unstack Dataset.unstack """ dim = either_dict_or_kwargs(dim, dim_kwargs, "unstack") result = self for old_dim, dims in dim.items(): result = result._unstack_once_full(dims, old_dim) return result def fillna(self, value): return ops.fillna(self, value) def where(self, cond, other=dtypes.NA): return ops.where_method(self, cond, other) def clip(self, min=None, max=None): """ Return an array whose values are limited to ``[min, max]``. At least one of max or min must be given. Refer to `numpy.clip` for full documentation. 
See Also -------- numpy.clip : equivalent function """ from xarray.computation.apply_ufunc import apply_ufunc xp = duck_array_ops.get_array_namespace(self.data) return apply_ufunc(xp.clip, self, min, max, dask="allowed") def reduce( # type: ignore[override] self, func: Callable[..., Any], dim: Dims = None, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs, ) -> Variable: """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. axis : int or Sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ keep_attrs_ = ( _get_keep_attrs(default=False) if keep_attrs is None else keep_attrs ) # Note that the call order for Variable.mean is # Variable.mean -> NamedArray.mean -> Variable.reduce # -> NamedArray.reduce result = super().reduce( func=func, dim=dim, axis=axis, keepdims=keepdims, **kwargs ) # return Variable always to support IndexVariable return Variable( result.dims, result._data, attrs=result._attrs if keep_attrs_ else None ) @classmethod def concat( cls, variables, dim="concat_dim", positions=None, shortcut=False, combine_attrs="override", ): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"}, default: "override" String indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. 
- "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ from xarray.structure.merge import merge_attrs if not isinstance(dim, str): (dim,) = dim.dims # can't do this lazily: we need to loop through variables at least # twice variables = list(variables) first_var = variables[0] first_var_dims = first_var.dims arrays = [v._data for v in variables] if dim in first_var_dims: axis = first_var.get_axis_num(dim) dims = first_var_dims data = duck_array_ops.concatenate(arrays, axis=axis) if positions is not None: # TODO: deprecate this option -- we don't need it for groupby # any more. indices = nputils.inverse_permutation(np.concatenate(positions)) data = duck_array_ops.take(data, indices, axis=axis) else: axis = 0 dims = (dim,) + first_var_dims data = duck_array_ops.stack(arrays, axis=axis) attrs = merge_attrs( [var.attrs for var in variables], combine_attrs=combine_attrs ) encoding = dict(first_var.encoding) if not shortcut: for var in variables: if var.dims != first_var_dims: raise ValueError( f"Variable has dimensions {tuple(var.dims)} but first Variable has dimensions {tuple(first_var_dims)}" ) return cls(dims, data, attrs, encoding, fastpath=True) def equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the same dimensions and values; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for Variables does element-wise comparisons (like numpy.ndarrays). """ other = getattr(other, "variable", other) try: return self.dims == other.dims and ( self._data is other._data or equiv(self.data, other.data) ) except (TypeError, AttributeError): return False def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv): """True if two Variables have the values after being broadcast against each other; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. """ try: self, other = broadcast_variables(self, other) except (ValueError, AttributeError): return False return self.equals(other, equiv=equiv) def identical(self, other, equiv=duck_array_ops.array_equiv): """Like equals, but also checks attributes.""" try: return utils.dict_equiv(self.attrs, other.attrs) and self.equals( other, equiv=equiv ) except (TypeError, AttributeError): return False def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv): """True if the intersection of two Variable's non-null data is equal; otherwise false. Variables can thus still be equal if there are locations where either, or both, contain NaN values. """ return self.broadcast_equals(other, equiv=equiv) def quantile( self, q: ArrayLike, dim: str | Sequence[Hashable] | None = None, method: QuantileMethods = "linear", keep_attrs: bool | None = None, skipna: bool | None = None, interpolation: QuantileMethods | None = None, ) -> Self: """Compute the qth quantile of the data along the specified dimension. Returns the qth quantiles(s) of the array elements. Parameters ---------- q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. 
method : str, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points. The options sorted by their R type as summarized in the H&F paper [1]_ are: 1. "inverted_cdf" 2. "averaged_inverted_cdf" 3. "closest_observation" 4. "interpolated_inverted_cdf" 5. "hazen" 6. "weibull" 7. "linear" (default) 8. "median_unbiased" 9. "normal_unbiased" The first three methods are discontiuous. The following discontinuous variations of the default "linear" (7.) option are also available: * "lower" * "higher" * "midpoint" * "nearest" See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument was previously called "interpolation", renamed in accordance with numpy version 1.22.0. keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- quantiles : Variable If `q` is a single quantile, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the quantile and a quantile dimension is added to the return array. The other dimensions are the dimensions that remain after the reduction of the array. See Also -------- numpy.nanquantile, pandas.Series.quantile, Dataset.quantile DataArray.quantile References ---------- .. [1] R. J. Hyndman and Y. Fan, "Sample quantiles in statistical packages," The American Statistician, 50(4), pp. 361-365, 1996 """ from xarray.computation.apply_ufunc import apply_ufunc if interpolation is not None: warnings.warn( "The `interpolation` argument to quantile was renamed to `method`.", FutureWarning, stacklevel=2, ) if method != "linear": raise TypeError("Cannot pass interpolation and method keywords!") method = interpolation if skipna or (skipna is None and self.dtype.kind in "cfO"): _quantile_func = nputils.nanquantile else: _quantile_func = duck_array_ops.quantile if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) scalar = utils.is_scalar(q) q = np.atleast_1d(np.asarray(q, dtype=np.float64)) if dim is None: dim = self.dims if utils.is_scalar(dim): dim = [dim] xp = duck_array_ops.get_array_namespace(self.data) def _wrapper(npa, **kwargs): # move quantile axis to end. required for apply_ufunc return xp.moveaxis(_quantile_func(npa, **kwargs), 0, -1) # jax requires hashable axis = tuple(range(-1, -1 * len(dim) - 1, -1)) kwargs = {"q": q, "axis": axis, "method": method} result = apply_ufunc( _wrapper, self, input_core_dims=[dim], exclude_dims=set(dim), output_core_dims=[["quantile"]], output_dtypes=[np.float64], dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}), dask="allowed" if module_available("dask", "2024.11.0") else "parallelized", kwargs=kwargs, ) # for backward compatibility result = result.transpose("quantile", ...) if scalar: result = result.squeeze("quantile") if keep_attrs: result.attrs = self._attrs return result def rank(self, dim, pct=False): """Ranks the data. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. 
NaNs in the input array are returned as NaNs. The `bottleneck` library is required. Parameters ---------- dim : str Dimension over which to compute rank. pct : bool, optional If True, compute percentage ranks, otherwise compute integer ranks. Returns ------- ranked : Variable See Also -------- Dataset.rank, DataArray.rank """ # This could / should arguably be implemented at the DataArray & Dataset level if not OPTIONS["use_bottleneck"]: raise RuntimeError( "rank requires bottleneck to be enabled." " Call `xr.set_options(use_bottleneck=True)` to enable it." ) import bottleneck as bn func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata ranked = xr.apply_ufunc( func, self, input_core_dims=[[dim]], output_core_dims=[[dim]], dask="parallelized", kwargs=dict(axis=-1), ).transpose(*self.dims) if pct: count = self.notnull().sum(dim) ranked /= count return ranked @_deprecate_positional_args("v2024.11.0") def rolling_window( self, dim, window, window_dim, *, center=False, fill_value=dtypes.NA, **kwargs, ): """ Make a rolling_window along dim and add a new_dim to the last place. Parameters ---------- dim : str Dimension over which to compute rolling_window. For nd-rolling, should be list of dimensions. window : int Window size of the rolling For nd-rolling, should be list of integers. window_dim : str New name of the window dimension. For nd-rolling, should be list of strings. center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. fill_value value to be filled. **kwargs Keyword arguments that should be passed to the underlying array type's ``sliding_window_view`` function. Returns ------- Variable that is a view of the original array with a added dimension of size w. The return dim: self.dims + (window_dim, ) The return shape: self.shape + (window, ) See Also -------- numpy.lib.stride_tricks.sliding_window_view dask.array.lib.stride_tricks.sliding_window_view Examples -------- >>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4))) >>> v.rolling_window("b", 3, "window_dim") Size: 192B array([[[nan, nan, 0.], [nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.]], [[nan, nan, 4.], [nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.]]]) >>> v.rolling_window("b", 3, "window_dim", center=True) Size: 192B array([[[nan, 0., 1.], [ 0., 1., 2.], [ 1., 2., 3.], [ 2., 3., nan]], [[nan, 4., 5.], [ 4., 5., 6.], [ 5., 6., 7.], [ 6., 7., nan]]]) """ if fill_value is dtypes.NA: # np.nan is passed dtype, fill_value = dtypes.maybe_promote(self.dtype) var = duck_array_ops.astype(self, dtype, copy=False) else: dtype = self.dtype var = self if utils.is_scalar(dim): for name, arg in zip( ["window", "window_dim", "center"], [window, window_dim, center], strict=True, ): if not utils.is_scalar(arg): raise ValueError( f"Expected {name}={arg!r} to be a scalar like 'dim'." ) dim = (dim,) # dim is now a list nroll = len(dim) if utils.is_scalar(window): window = [window] * nroll if utils.is_scalar(window_dim): window_dim = [window_dim] * nroll if utils.is_scalar(center): center = [center] * nroll if ( len(dim) != len(window) or len(dim) != len(window_dim) or len(dim) != len(center) ): raise ValueError( "'dim', 'window', 'window_dim', and 'center' must be the same length. " f"Received dim={dim!r}, window={window!r}, window_dim={window_dim!r}," f" and center={center!r}." 
) pads = {} for d, win, cent in zip(dim, window, center, strict=True): if cent: start = win // 2 # 10 -> 5, 9 -> 4 end = win - 1 - start pads[d] = (start, end) else: pads[d] = (win - 1, 0) padded = var.pad(pads, mode="constant", constant_values=fill_value) axis = self.get_axis_num(dim) new_dims = self.dims + tuple(window_dim) return Variable( new_dims, duck_array_ops.sliding_window_view( padded.data, window_shape=window, axis=axis, **kwargs ), ) def coarsen( self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs ): """ Apply reduction function. """ windows = {k: v for k, v in windows.items() if k in self.dims} if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) if keep_attrs: _attrs = self.attrs else: _attrs = None if not windows: return self._replace(attrs=_attrs) reshaped, axes = self.coarsen_reshape(windows, boundary, side) if isinstance(func, str): name = func func = getattr(duck_array_ops, name, None) if func is None: raise NameError(f"{name} is not a valid method.") return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs) def coarsen_reshape(self, windows, boundary, side): """ Construct a reshaped-array for coarsen """ if not is_dict_like(boundary): boundary = dict.fromkeys(windows.keys(), boundary) if not is_dict_like(side): side = dict.fromkeys(windows.keys(), side) # remove unrelated dimensions boundary = {k: v for k, v in boundary.items() if k in windows} side = {k: v for k, v in side.items() if k in windows} for d, window in windows.items(): if window <= 0: raise ValueError( f"window must be > 0. Given {window} for dimension {d}" ) variable = self for d, window in windows.items(): # trim or pad the object size = variable.shape[self._get_axis_num(d)] n = int(size / window) if boundary[d] == "exact": if n * window != size: raise ValueError( f"Could not coarsen a dimension of size {size} with " f"window {window} and boundary='exact'. Try a different 'boundary' option." ) elif boundary[d] == "trim": if side[d] == "left": variable = variable.isel({d: slice(0, window * n)}) else: excess = size - window * n variable = variable.isel({d: slice(excess, None)}) elif boundary[d] == "pad": # pad pad = window * n - size if pad < 0: pad += window if side[d] == "left": pad_width = {d: (0, pad)} else: pad_width = {d: (pad, 0)} variable = variable.pad(pad_width, mode="constant") else: raise TypeError( f"{boundary[d]} is invalid for boundary. Valid option is 'exact', " "'trim' and 'pad'" ) shape = [] axes = [] axis_count = 0 for i, d in enumerate(variable.dims): if d in windows: size = variable.shape[i] shape.extend((int(size / windows[d]), windows[d])) axis_count += 1 axes.append(i + axis_count) else: shape.append(variable.shape[i]) return duck_array_ops.reshape(variable.data, shape), tuple(axes) def isnull(self, keep_attrs: bool | None = None): """Test each value in the array for whether it is a missing value. Returns ------- isnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.isnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var Size: 24B array([ 1., nan, 3.]) >>> var.isnull() Size: 3B array([False, True, False]) """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.isnull, self, dask="allowed", keep_attrs=keep_attrs, ) def notnull(self, keep_attrs: bool | None = None): """Test each value in the array for whether it is not a missing value. 
Returns ------- notnull : Variable Same type and shape as object, but the dtype of the data is bool. See Also -------- pandas.notnull Examples -------- >>> var = xr.Variable("x", [1, np.nan, 3]) >>> var Size: 24B array([ 1., nan, 3.]) >>> var.notnull() Size: 3B array([ True, False, True]) """ from xarray.computation.apply_ufunc import apply_ufunc if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) return apply_ufunc( duck_array_ops.notnull, self, dask="allowed", keep_attrs=keep_attrs, ) @property def imag(self) -> Variable: """ The imaginary part of the variable. See Also -------- numpy.ndarray.imag """ return self._new(data=self.data.imag) @property def real(self) -> Variable: """ The real part of the variable. See Also -------- numpy.ndarray.real """ return self._new(data=self.data.real) def __array_wrap__(self, obj, context=None, return_scalar=False): return Variable(self.dims, obj) def _unary_op(self, f, *args, **kwargs): keep_attrs = kwargs.pop("keep_attrs", None) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) with np.errstate(all="ignore"): result = self.__array_wrap__(f(self.data, *args, **kwargs)) if keep_attrs: result.attrs = self.attrs return result def _binary_op(self, other, f, reflexive=False): if isinstance(other, xr.DataTree | xr.DataArray | xr.Dataset): return NotImplemented if reflexive and issubclass(type(self), type(other)): other_data, self_data, dims = _broadcast_compat_data(other, self) else: self_data, other_data, dims = _broadcast_compat_data(self, other) keep_attrs = _get_keep_attrs(default=False) attrs = self._attrs if keep_attrs else None with np.errstate(all="ignore"): new_data = ( f(self_data, other_data) if not reflexive else f(other_data, self_data) ) result = Variable(dims, new_data, attrs=attrs) return result def _inplace_binary_op(self, other, f): if isinstance(other, xr.Dataset): raise TypeError("cannot add a Dataset to a Variable in-place") self_data, other_data, dims = _broadcast_compat_data(self, other) if dims != self.dims: raise ValueError("dimensions cannot change for in-place operations") with np.errstate(all="ignore"): self.values = f(self_data, other_data) return self def _to_numeric(self, offset=None, datetime_unit=None, dtype=float): """A (private) method to convert datetime array to numeric dtype See duck_array_ops.datetime_to_numeric """ numeric_array = duck_array_ops.datetime_to_numeric( self.data, offset, datetime_unit, dtype ) return type(self)(self.dims, numeric_array, self._attrs) def _unravel_argminmax( self, argminmax: str, dim: Dims, axis: int | None, keep_attrs: bool | None, skipna: bool | None, ) -> Variable | dict[Hashable, Variable]: """Apply argmin or argmax over one or more dimensions, returning the result as a dict of DataArray that can be passed directly to isel. """ if dim is None and axis is None: warnings.warn( "Behaviour of argmin/argmax with neither dim nor axis argument will " "change to return a dict of indices of each dimension. 
To get a " "single, flat index, please use np.argmin(da.data) or " "np.argmax(da.data) instead of da.argmin() or da.argmax().", DeprecationWarning, stacklevel=3, ) argminmax_func = getattr(duck_array_ops, argminmax) if dim is ...: # In future, should do this also when (dim is None and axis is None) dim = self.dims if ( dim is None or axis is not None or not isinstance(dim, Sequence) or isinstance(dim, str) ): # Return int index if single dimension is passed, and is not part of a # sequence return self.reduce( argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna ) # Get a name for the new dimension that does not conflict with any existing # dimension newdimname = "_unravel_argminmax_dim_0" count = 1 while newdimname in self.dims: newdimname = f"_unravel_argminmax_dim_{count}" count += 1 stacked = self.stack({newdimname: dim}) result_dims = stacked.dims[:-1] reduce_shape = tuple(self.sizes[d] for d in dim) result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna) result_unravelled_indices = duck_array_ops.unravel_index( result_flat_indices.data, reduce_shape ) result = { d: Variable(dims=result_dims, data=i) for d, i in zip(dim, result_unravelled_indices, strict=True) } if keep_attrs is None: keep_attrs = _get_keep_attrs(default=False) if keep_attrs: for v in result.values(): v.attrs = self.attrs return result def argmin( self, dim: Dims = None, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Variable | dict[Hashable, Variable]: """Index or indices of the minimum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple minima, the indices of the first one found will be returned. Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the minimum. By default, finds minimum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See Also -------- DataArray.argmin, DataArray.idxmin """ return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna) def argmax( self, dim: Dims = None, axis: int | None = None, keep_attrs: bool | None = None, skipna: bool | None = None, ) -> Variable | dict[Hashable, Variable]: """Index or indices of the maximum of the Variable over one or more dimensions. If a sequence is passed to 'dim', then result returned as dict of Variables, which can be passed directly to isel(). If a single str is passed to 'dim' then returns a Variable with dtype int. If there are multiple maxima, the indices of the first one found will be returned. 
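For example (a minimal sketch; dimension names and values are arbitrary), passing a list of dimensions returns a dict of indices that can be fed directly to ``isel``:

>>> v = Variable(("x", "y"), [[1, 5], [3, 2]])
>>> idx = v.argmax(dim=["x", "y"])  # for this data, indices 0 along "x" and 1 along "y"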
Parameters ---------- dim : "...", str, Iterable of Hashable or None, optional The dimensions over which to find the maximum. By default, finds maximum over all dimensions - for now returning an int for backward compatibility, but this is deprecated, in future will return a dict with indices for all dimensions; to return a dict with all dimensions now, pass '...'. axis : int, optional Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments can be supplied. keep_attrs : bool, optional If True, the attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. skipna : bool, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). Returns ------- result : Variable or dict of Variable See Also -------- DataArray.argmax, DataArray.idxmax """ return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna) def _as_sparse(self, sparse_format=_default, fill_value=_default) -> Variable: """ Use sparse-array as backend. """ from xarray.namedarray._typing import _default as _default_named if sparse_format is _default: sparse_format = _default_named if fill_value is _default: fill_value = _default_named out = super()._as_sparse(sparse_format, fill_value) return cast("Variable", out) def _to_dense(self) -> Variable: """ Change backend from sparse to np.array. """ out = super()._to_dense() return cast("Variable", out) def chunk( # type: ignore[override] self, chunks: T_Chunks = {}, # noqa: B006 # even though it's technically unsafe, it is being used intentionally here (#4667) name: str | None = None, lock: bool | None = None, inline_array: bool | None = None, chunked_array_type: str | ChunkManagerEntrypoint[Any] | None = None, from_array_kwargs: Any = None, **chunks_kwargs: Any, ) -> Self: """Coerce this array's data into a dask array with the given chunks. If this variable is a non-dask array, it will be converted to a dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. name : str, optional Used to generate the name for this array in the internal dask graph. Does not need to be unique. lock : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already a dask array. inline_array : bool, default: False Passed on to :py:func:`dask.array.from_array`, if the array is not already a dask array. chunked_array_type: str, optional Which chunked array type to coerce this array's data to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`.
Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided. Returns ------- chunked : xarray.Variable See Also -------- Variable.chunks Variable.chunksizes xarray.unify_chunks dask.array.from_array """ if from_array_kwargs is None: from_array_kwargs = {} # TODO deprecate passing these dask-specific arguments explicitly. In future just pass everything via from_array_kwargs _from_array_kwargs = consolidate_dask_from_array_kwargs( from_array_kwargs, name=name, lock=lock, inline_array=inline_array, ) return super().chunk( chunks=chunks, chunked_array_type=chunked_array_type, from_array_kwargs=_from_array_kwargs, **chunks_kwargs, ) class IndexVariable(Variable): """Wrapper for accommodating a pandas.Index in an xarray.Variable. IndexVariable preserve loaded values in the form of a pandas.Index instead of a NumPy array. Hence, their values are immutable and must always be one- dimensional. They also have a name property, which is the name of their sole dimension unless another name is given. """ __slots__ = () # TODO: PandasIndexingAdapter doesn't match the array api: _data: PandasIndexingAdapter # type: ignore[assignment] def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False): super().__init__(dims, data, attrs, encoding, fastpath) if self.ndim != 1: raise ValueError(f"{type(self).__name__} objects must be 1-dimensional") # Unlike in Variable, always eagerly load values into memory if not isinstance(self._data, PandasIndexingAdapter): self._data = PandasIndexingAdapter(self._data) def __dask_tokenize__(self) -> object: from dask.base import normalize_token # Don't waste time converting pd.Index to np.ndarray return normalize_token( (type(self), self._dims, self._data.array, self._attrs or None) ) def load(self): # data is already loaded into memory for IndexVariable return self async def load_async(self): # data is already loaded into memory for IndexVariable return self # https://github.com/python/mypy/issues/1465 @Variable.data.setter # type: ignore[attr-defined] def data(self, data): raise ValueError( f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." ) @Variable.values.setter # type: ignore[attr-defined] def values(self, values): raise ValueError( f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. " f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate." ) def chunk( self, chunks={}, # noqa: B006 # even though it's unsafe, it is being used intentionally here (#4667) name=None, lock=False, inline_array=False, chunked_array_type=None, from_array_kwargs=None, ): # Dummy - do not chunk. This method is invoked e.g. 
by Dataset.chunk() return self.copy(deep=False) def _as_sparse(self, sparse_format=_default, fill_value=_default): # Dummy return self.copy(deep=False) def _to_dense(self): # Dummy return self.copy(deep=False) def _finalize_indexing_result(self, dims, data): if getattr(data, "ndim", 0) != 1: # returns Variable rather than IndexVariable if multi-dimensional return Variable(dims, data, self._attrs, self._encoding) else: return self._replace(dims=dims, data=data) def __setitem__(self, key, value): raise TypeError(f"{type(self).__name__} values cannot be modified") @classmethod def concat( cls, variables, dim="concat_dim", positions=None, shortcut=False, combine_attrs="override", ): """Specialized version of Variable.concat for IndexVariable objects. This exists because we want to avoid converting Index objects to NumPy arrays, if possible. """ from xarray.structure.merge import merge_attrs if not isinstance(dim, str): (dim,) = dim.dims variables = list(variables) first_var = variables[0] if any(not isinstance(v, cls) for v in variables): raise TypeError( "IndexVariable.concat requires that all input " "variables be IndexVariable objects" ) indexes = [v._data.array for v in variables] if not indexes: data = [] else: data = indexes[0].append(indexes[1:]) if positions is not None: indices = nputils.inverse_permutation(np.concatenate(positions)) data = data.take(indices) # keep as str if possible as pandas.Index uses object (converts to numpy array) data = maybe_coerce_to_str(data, variables) attrs = merge_attrs( [var.attrs for var in variables], combine_attrs=combine_attrs ) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError("inconsistent dimensions") return cls(first_var.dims, data, attrs) def copy(self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None): """Returns a copy of this object. `deep` is ignored since data is stored in the form of pandas.Index, which is already immutable. Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, default: True Deep is ignored when data is given. Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. Returns ------- object : Variable New object with dimensions, attributes, encodings, and optionally data copied from original. 
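Examples
--------
A minimal sketch; the coordinate values below are made up for illustration:

>>> x = xr.IndexVariable("x", [1, 2, 3])
>>> copied = x.copy()  # `deep` has no practical effect: the pandas.Index data is immutable
>>> replaced = x.copy(data=[10, 20, 30])  # same dims/attrs/encoding, new values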
""" if data is None: ndata = self._data if deep: ndata = copy.deepcopy(ndata, None) else: ndata = as_compatible_data(data) if self.shape != ndata.shape: # type: ignore[attr-defined] raise ValueError( f"Data shape {ndata.shape} must match shape of object {self.shape}" # type: ignore[attr-defined] ) attrs = copy.deepcopy(self._attrs) if deep else copy.copy(self._attrs) encoding = copy.deepcopy(self._encoding) if deep else copy.copy(self._encoding) return self._replace(data=ndata, attrs=attrs, encoding=encoding) def equals(self, other, equiv=None): # if equiv is specified, super up if equiv is not None: return super().equals(other, equiv) # otherwise use the native index equals, rather than looking at _data other = getattr(other, "variable", other) try: return self.dims == other.dims and self._data_equals(other) except (TypeError, AttributeError): return False def _data_equals(self, other): return self._to_index().equals(other._to_index()) def to_index_variable(self) -> IndexVariable: """Return this variable as an xarray.IndexVariable""" return self.copy(deep=False) to_coord = utils.alias(to_index_variable, "to_coord") def _to_index(self) -> pd.Index: # n.b. creating a new pandas.Index from an old pandas.Index is # basically free as pandas.Index objects are immutable. # n.b.2. this method returns the multi-index instance for # a pandas multi-index level variable. assert self.ndim == 1 index = self._data.array if isinstance(index, pd.MultiIndex): # set default names for multi-index unnamed levels so that # we can safely rename dimension / coordinate later valid_level_names = [ name or f"{self.dims[0]}_level_{i}" for i, name in enumerate(index.names) ] index = index.set_names(valid_level_names) else: index = index.set_names(self.name) return index def to_index(self) -> pd.Index: """Convert this variable to a pandas.Index""" index = self._to_index() level = getattr(self._data, "level", None) if level is not None: # return multi-index level converted to a single index return index.get_level_values(level) else: return index @property def level_names(self) -> list[Hashable | None] | None: """Return MultiIndex level names or None if this IndexVariable has no MultiIndex. """ index = self.to_index() if isinstance(index, pd.MultiIndex): return list(index.names) else: return None def get_level_variable(self, level): """Return a new IndexVariable from a given MultiIndex level.""" if self.level_names is None: raise ValueError(f"IndexVariable {self.name!r} has no MultiIndex") index = self.to_index() return type(self)(self.dims, index.get_level_values(level)) @property def name(self) -> Hashable: return self.dims[0] @name.setter def name(self, value) -> NoReturn: raise AttributeError("cannot modify name of IndexVariable in-place") def _inplace_binary_op(self, other, f): raise TypeError( "Values of an IndexVariable are immutable and can not be modified inplace" ) def _unified_dims(variables): # validate dimensions all_dims = {} for var in variables: var_dims = var.dims _raise_if_any_duplicate_dimensions(var_dims, err_context="Broadcasting") for d, s in zip(var_dims, var.shape, strict=True): if d not in all_dims: all_dims[d] = s elif all_dims[d] != s: raise ValueError( "operands cannot be broadcast together " f"with mismatched lengths for dimension {d!r}: {(all_dims[d], s)}" ) return all_dims def _broadcast_compat_variables(*variables): """Create broadcast compatible variables, with the same dimensions. 
Unlike the result of broadcast_variables(), some variables may have dimensions of size 1 instead of the size of the broadcast dimension. """ dims = tuple(_unified_dims(variables)) return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables) def broadcast_variables(*variables: Variable) -> tuple[Variable, ...]: """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple( var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables ) def _broadcast_compat_data(self, other): if not OPTIONS["arithmetic_broadcast"] and ( (isinstance(other, Variable) and self.dims != other.dims) or (is_duck_array(other) and self.ndim != other.ndim) ): raise ValueError( "Broadcasting is necessary but automatic broadcasting is disabled via " "global option `'arithmetic_broadcast'`. " "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting." ) if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]): # `other` satisfies the necessary Variable API for broadcast_variables new_self, new_other = _broadcast_compat_variables(self, other) self_data = new_self.data other_data = new_other.data dims = new_self.dims else: # rely on numpy broadcasting rules self_data = self.data other_data = other dims = self.dims return self_data, other_data, dims def concat( variables, dim="concat_dim", positions=None, shortcut=False, combine_attrs="override", ): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"}, default: "override" String indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. 
Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ variables = list(variables) if all(isinstance(v, IndexVariable) for v in variables): return IndexVariable.concat(variables, dim, positions, shortcut, combine_attrs) else: return Variable.concat(variables, dim, positions, shortcut, combine_attrs) def calculate_dimensions(variables: Mapping[Any, Variable]) -> dict[Hashable, int]: """Calculate the dimensions corresponding to a set of variables. Returns dictionary mapping from dimension names to sizes. Raises ValueError if any of the dimension sizes conflict. """ dims: dict[Hashable, int] = {} last_used = {} scalar_vars = {k for k, v in variables.items() if not v.dims} for k, var in variables.items(): for dim, size in zip(var.dims, var.shape, strict=True): if dim in scalar_vars: raise ValueError( f"dimension {dim!r} already exists as a scalar variable" ) if dim not in dims: dims[dim] = size last_used[dim] = k elif dims[dim] != size: raise ValueError( f"conflicting sizes for dimension {dim!r}: " f"length {size} on {k!r} and length {dims[dim]} on {last_used!r}" ) return dims xarray-2025.09.0/xarray/groupers.py000066400000000000000000001160051505620616400170750ustar00rootroot00000000000000""" This module provides Grouper objects that encapsulate the "factorization" process - conversion of value we are grouping by to integer codes (one per group). """ from __future__ import annotations import datetime import functools import itertools import operator from abc import ABC, abstractmethod from collections import defaultdict from collections.abc import Hashable, Mapping, Sequence from dataclasses import dataclass, field from itertools import chain, pairwise from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np import pandas as pd from numpy.typing import ArrayLike from xarray.coding.cftime_offsets import BaseCFTimeOffset, _new_to_legacy_freq from xarray.coding.cftimeindex import CFTimeIndex from xarray.compat.toolzcompat import sliding_window from xarray.computation.apply_ufunc import apply_ufunc from xarray.core.common import ( _contains_cftime_datetimes, _contains_datetime_like_objects, ) from xarray.core.coordinates import Coordinates, coordinates_from_variable from xarray.core.dataarray import DataArray from xarray.core.duck_array_ops import array_all, isnull from xarray.core.formatting import first_n_items from xarray.core.groupby import T_Group, _DummyGroup from xarray.core.indexes import safe_cast_to_index from xarray.core.resample_cftime import CFTimeGrouper from xarray.core.types import ( Bins, DatetimeLike, GroupIndices, ResampleCompatible, Self, SideOptions, ) from xarray.core.variable import Variable from xarray.namedarray.pycompat import is_chunked_array __all__ = [ "BinGrouper", "EncodedGroups", "Grouper", "Resampler", "SeasonGrouper", "SeasonResampler", "TimeResampler", "UniqueGrouper", ] RESAMPLE_DIM = "__resample_dim__" @dataclass(init=False) class EncodedGroups: """ Dataclass for storing intermediate values for GroupBy operation. Returned by the ``factorize`` method on Grouper objects. Attributes ---------- codes : DataArray Same shape as the DataArray to group by. Values consist of a unique integer code for each group. full_index : pd.Index Pandas Index for the group coordinate containing unique group labels. This can differ from ``unique_coord`` in the case of resampling and binning, where certain groups in the output need not be present in the input. 
group_indices : tuple of int or slice or list of int, optional List of indices of array elements belonging to each group. Inferred if not provided. unique_coord : Variable, optional Unique group values present in dataset. Inferred if not provided """ codes: DataArray full_index: pd.Index group_indices: GroupIndices = field(init=False, repr=False) unique_coord: Variable | _DummyGroup = field(init=False, repr=False) coords: Coordinates = field(init=False, repr=False) def __init__( self, codes: DataArray, full_index: pd.Index, group_indices: GroupIndices | None = None, unique_coord: Variable | _DummyGroup | None = None, coords: Coordinates | None = None, ): from xarray.core.groupby import _codes_to_group_indices assert isinstance(codes, DataArray) if codes.name is None: raise ValueError("Please set a name on the array you are grouping by.") self.codes = codes assert isinstance(full_index, pd.Index) self.full_index = full_index if group_indices is None: if not is_chunked_array(codes.data): self.group_indices = tuple( g for g in _codes_to_group_indices( codes.data.ravel(), len(full_index) ) if g ) else: # We will not use this when grouping by a chunked array self.group_indices = tuple() else: self.group_indices = group_indices if unique_coord is None: unique_codes = np.sort(pd.unique(codes.data)) # Skip the -1 sentinel unique_codes = unique_codes[unique_codes >= 0] unique_values = full_index[unique_codes] self.unique_coord = Variable( dims=codes.name, data=unique_values, attrs=codes.attrs ) else: self.unique_coord = unique_coord if coords is None: assert not isinstance(self.unique_coord, _DummyGroup) self.coords = coordinates_from_variable(self.unique_coord) else: self.coords = coords class Grouper(ABC): """Abstract base class for Grouper objects that allow specializing GroupBy instructions.""" @abstractmethod def factorize(self, group: T_Group) -> EncodedGroups: """ Creates intermediates necessary for GroupBy. Parameters ---------- group : DataArray DataArray we are grouping by. Returns ------- EncodedGroups """ pass @abstractmethod def reset(self) -> Self: """ Creates a new version of this Grouper clearing any caches. """ pass class Resampler(Grouper): """ Abstract base class for Grouper objects that allow specializing resampling-type GroupBy instructions. Currently only used for TimeResampler, but could be used for SpaceResampler in the future. """ def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]: """ Compute chunk sizes for this resampler. This method should be implemented by subclasses to provide appropriate chunking behavior for their specific resampling strategy. Parameters ---------- variable : Variable The variable being chunked. dim : Hashable The name of the dimension being chunked. Returns ------- tuple[int, ...] A tuple of chunk sizes for the dimension. """ raise NotImplementedError("Subclasses must implement compute_chunks method") @dataclass class UniqueGrouper(Grouper): """ Grouper object for grouping by a categorical variable. Parameters ---------- labels: array-like, optional Group labels to aggregate on. This is required when grouping by a chunked array type (e.g. dask or cubed) since it is used to construct the coordinate on the output. Grouped operations will only be run on the specified group labels. Any group that is not present in ``labels`` will be ignored. 
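Examples
--------
An illustrative sketch; the dataset and group labels below are hypothetical:

>>> ds = xr.Dataset(
...     {"temp": ("x", [10.0, 11.0, 12.0, 13.0])},
...     coords={"letters": ("x", ["a", "b", "a", "b"])},
... )
>>> result = ds.groupby(letters=UniqueGrouper()).sum()

When the ``letters`` variable is chunked (e.g. backed by dask), the expected
group labels must be provided up front, for example
``UniqueGrouper(labels=["a", "b"])``.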
""" _group_as_index: pd.Index | None = field(default=None, repr=False, init=False) labels: ArrayLike | None = field(default=None) @property def group_as_index(self) -> pd.Index: """Caches the group DataArray as a pandas Index.""" if self._group_as_index is None: if self.group.ndim == 1: self._group_as_index = self.group.to_index() else: self._group_as_index = pd.Index(np.array(self.group).ravel()) return self._group_as_index def reset(self) -> Self: return type(self)() def factorize(self, group: T_Group) -> EncodedGroups: self.group = group if is_chunked_array(group.data) and self.labels is None: raise ValueError( "When grouping by a dask array, `labels` must be passed using " "a UniqueGrouper object." ) if self.labels is not None: return self._factorize_given_labels(group) index = self.group_as_index is_unique_and_monotonic = isinstance(self.group, _DummyGroup) or ( index.is_unique and (index.is_monotonic_increasing or index.is_monotonic_decreasing) ) is_dimension = self.group.dims == (self.group.name,) can_squeeze = is_dimension and is_unique_and_monotonic if can_squeeze: return self._factorize_dummy() else: return self._factorize_unique() def _factorize_given_labels(self, group: T_Group) -> EncodedGroups: codes = apply_ufunc( _factorize_given_labels, group, kwargs={"labels": self.labels}, dask="parallelized", output_dtypes=[np.int64], keep_attrs=True, ) return EncodedGroups( codes=codes, full_index=pd.Index(self.labels), # type: ignore[arg-type] unique_coord=Variable( dims=codes.name, data=self.labels, attrs=self.group.attrs, ), ) def _factorize_unique(self) -> EncodedGroups: # look through group to find the unique values sort = not isinstance(self.group_as_index, pd.MultiIndex) unique_values, codes_ = unique_value_groups(self.group_as_index, sort=sort) if array_all(codes_ == -1): raise ValueError( "Failed to group data. Are you grouping by a variable that is all NaN?" ) codes = self.group.copy(data=codes_.reshape(self.group.shape), deep=False) unique_coord = Variable( dims=codes.name, data=unique_values, attrs=self.group.attrs ) full_index = ( unique_values if isinstance(unique_values, pd.MultiIndex) else pd.Index(unique_values) ) return EncodedGroups( codes=codes, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) def _factorize_dummy(self) -> EncodedGroups: size = self.group.size # no need to factorize # use slices to do views instead of fancy indexing # equivalent to: group_indices = group_indices.reshape(-1, 1) group_indices: GroupIndices = tuple(slice(i, i + 1) for i in range(size)) size_range = np.arange(size) full_index: pd.Index unique_coord: _DummyGroup | Variable if isinstance(self.group, _DummyGroup): codes = self.group.to_dataarray().copy(data=size_range) unique_coord = self.group full_index = pd.RangeIndex(self.group.size) coords = Coordinates() else: codes = self.group.copy(data=size_range, deep=False) unique_coord = self.group.variable.to_base_variable() full_index = self.group_as_index if isinstance(full_index, pd.MultiIndex): coords = Coordinates.from_pandas_multiindex( full_index, dim=self.group.name ) else: if TYPE_CHECKING: assert isinstance(unique_coord, Variable) coords = coordinates_from_variable(unique_coord) return EncodedGroups( codes=codes, group_indices=group_indices, full_index=full_index, unique_coord=unique_coord, coords=coords, ) @dataclass class BinGrouper(Grouper): """ Grouper object for binning numeric data. Attributes ---------- bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. 
* int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or False, default None Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. If True, raises an error. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. """ bins: Bins # The rest are copied from pandas right: bool = True labels: Any = None precision: int = 3 include_lowest: bool = False duplicates: Literal["raise", "drop"] = "raise" def reset(self) -> Self: return type(self)( bins=self.bins, right=self.right, labels=self.labels, precision=self.precision, include_lowest=self.include_lowest, duplicates=self.duplicates, ) def __post_init__(self) -> None: if array_all(isnull(self.bins)): raise ValueError("All bin edges are NaN.") def _cut(self, data): return pd.cut( np.asarray(data).ravel(), bins=self.bins, right=self.right, labels=self.labels, precision=self.precision, include_lowest=self.include_lowest, duplicates=self.duplicates, retbins=True, ) def _pandas_cut_wrapper(self, data, **kwargs): binned, bins = self._cut(data) if isinstance(self.bins, int): # we are running eagerly, update self.bins with actual edges instead self.bins = bins return binned.codes.reshape(data.shape) def factorize(self, group: T_Group) -> EncodedGroups: if isinstance(group, _DummyGroup): group = DataArray(group.data, dims=group.dims, name=group.name) by_is_chunked = is_chunked_array(group.data) if isinstance(self.bins, int) and by_is_chunked: raise ValueError( f"Bin edges must be provided when grouping by chunked arrays. 
Received {self.bins=!r} instead" ) codes = apply_ufunc( self._pandas_cut_wrapper, group, dask="parallelized", keep_attrs=True, output_dtypes=[np.int64], ) if not by_is_chunked and array_all(codes == -1): raise ValueError( f"None of the data falls within bins with edges {self.bins!r}" ) new_dim_name = f"{group.name}_bins" codes.name = new_dim_name # This seems silly, but it lets us have Pandas handle the complexity # of `labels`, `precision`, and `include_lowest`, even when group is a chunked array # Pandas ignores labels when IntervalIndex is passed if self.labels is None or not isinstance(self.bins, pd.IntervalIndex): dummy, _ = self._cut(np.array([0]).astype(group.dtype)) full_index = dummy.categories else: full_index = pd.Index(self.labels) if not by_is_chunked: uniques = np.sort(pd.unique(codes.data.ravel())) unique_values = full_index[uniques[uniques != -1]] else: unique_values = full_index unique_coord = Variable( dims=new_dim_name, data=unique_values, attrs=group.attrs ) return EncodedGroups( codes=codes, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) @dataclass(repr=False) class TimeResampler(Resampler): """ Grouper object specialized to resampling the time coordinate. Attributes ---------- freq : str, datetime.timedelta, pandas.Timestamp, or pandas.DateOffset Frequency to resample to. See `Pandas frequency aliases `_ for a list of possible values. closed : {"left", "right"}, optional Side of each interval to treat as closed. label : {"left", "right"}, optional Side of each interval to use for labeling. origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pandas.Timestamp, datetime.datetime, numpy.datetime64, or cftime.datetime, default 'start_day' The datetime on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a datetime is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day offset : pd.Timedelta, datetime.timedelta, or str, default is None An offset timedelta added to the origin. 
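Examples
--------
A minimal sketch of passing this grouper to ``resample``; the time axis below
is made up for illustration:

>>> time = xr.date_range("2001-01-01", periods=365, freq="D")
>>> da = xr.DataArray(list(range(365)), dims="time", coords={"time": time})
>>> monthly = da.resample(time=TimeResampler(freq="MS", closed="left")).mean()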
""" freq: ResampleCompatible closed: SideOptions | None = field(default=None) label: SideOptions | None = field(default=None) origin: str | DatetimeLike = field(default="start_day") offset: pd.Timedelta | datetime.timedelta | str | None = field(default=None) index_grouper: CFTimeGrouper | pd.Grouper = field(init=False, repr=False) group_as_index: pd.Index = field(init=False, repr=False) def reset(self) -> Self: return type(self)( freq=self.freq, closed=self.closed, label=self.label, origin=self.origin, offset=self.offset, ) def _init_properties(self, group: T_Group) -> None: group_as_index = safe_cast_to_index(group) offset = self.offset if not group_as_index.is_monotonic_increasing: # TODO: sort instead of raising an error raise ValueError("Index must be monotonic for resampling") if isinstance(group_as_index, CFTimeIndex): self.index_grouper = CFTimeGrouper( freq=self.freq, closed=self.closed, label=self.label, origin=self.origin, offset=offset, ) else: if isinstance(self.freq, BaseCFTimeOffset): raise ValueError( "'BaseCFTimeOffset' resample frequencies are only supported " "when resampling a 'CFTimeIndex'" ) self.index_grouper = pd.Grouper( # TODO remove once requiring pandas >= 2.2 freq=_new_to_legacy_freq(self.freq), closed=self.closed, label=self.label, origin=self.origin, offset=offset, ) self.group_as_index = group_as_index def _get_index_and_items(self) -> tuple[pd.Index, pd.Series, np.ndarray]: first_items, codes = self.first_items() full_index = first_items.index if first_items.isnull().any(): first_items = first_items.dropna() full_index = full_index.rename("__resample_dim__") return full_index, first_items, codes def first_items(self) -> tuple[pd.Series, np.ndarray]: from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.resample_cftime import CFTimeGrouper if isinstance(self.index_grouper, CFTimeGrouper): return self.index_grouper.first_items( cast(CFTimeIndex, self.group_as_index) ) else: s = pd.Series(np.arange(self.group_as_index.size), self.group_as_index) grouped = s.groupby(self.index_grouper) first_items = grouped.first() counts = grouped.count() # This way we generate codes for the final output index: full_index. # So for _flox_reduce we avoid one reindex and copy by avoiding # _maybe_reindex codes = np.repeat(np.arange(len(first_items)), counts) return first_items, codes def factorize(self, group: T_Group) -> EncodedGroups: self._init_properties(group) full_index, first_items, codes_ = self._get_index_and_items() sbins = first_items.values.astype(np.int64) group_indices: GroupIndices = tuple( list(itertools.starmap(slice, pairwise(sbins))) + [slice(sbins[-1], None)] ) unique_coord = Variable( dims=group.name, data=first_items.index, attrs=group.attrs ) codes = group.copy(data=codes_.reshape(group.shape), deep=False) return EncodedGroups( codes=codes, group_indices=group_indices, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]: """ Compute chunk sizes for this time resampler. This method is used during chunking operations to determine appropriate chunk sizes for the given variable when using this resampler. Parameters ---------- name : Hashable The name of the dimension being chunked. variable : Variable The variable being chunked. Returns ------- tuple[int, ...] A tuple of chunk sizes for the dimension. 
""" from xarray.core.dataarray import DataArray if not _contains_datetime_like_objects(variable): raise ValueError( f"Computing chunks with {type(self)!r} only supported for datetime variables. " f"Received variable with dtype {variable.dtype!r} instead." ) chunks = ( DataArray( np.ones(variable.shape, dtype=int), dims=(dim,), coords={dim: variable}, ) .resample({dim: self}) .sum() ) # When bins (binning) or time periods are missing (resampling) # we can end up with NaNs. Drop them. if chunks.dtype.kind == "f": chunks = chunks.dropna(dim).astype(int) chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist()) return chunks_tuple def _factorize_given_labels(data: np.ndarray, labels: np.ndarray) -> np.ndarray: # Copied from flox sorter = np.argsort(labels) is_sorted = array_all(sorter == np.arange(sorter.size)) codes = np.searchsorted(labels, data, sorter=sorter) mask = ~np.isin(data, labels) | isnull(data) | (codes == len(labels)) # codes is the index in to the sorted array. # if we didn't want sorting, unsort it back if not is_sorted: codes[codes == len(labels)] = -1 codes = sorter[(codes,)] codes[mask] = -1 return codes def unique_value_groups( ar, sort: bool = True ) -> tuple[np.ndarray | pd.Index, np.ndarray]: """Group an array by its unique values. Parameters ---------- ar : array-like Input array. This will be flattened if it is not already 1-D. sort : bool, default: True Whether or not to sort unique values. Returns ------- values : np.ndarray Sorted, unique values as returned by `np.unique`. indices : list of lists of int Each element provides the integer indices in `ar` with values given by the corresponding value in `unique_values`. """ inverse, values = pd.factorize(ar, sort=sort) if isinstance(values, pd.MultiIndex): values.names = ar.names return values, inverse def season_to_month_tuple(seasons: Sequence[str]) -> tuple[tuple[int, ...], ...]: """ >>> season_to_month_tuple(["DJF", "MAM", "JJA", "SON"]) ((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)) >>> season_to_month_tuple(["DJFM", "MAMJ", "JJAS", "SOND"]) ((12, 1, 2, 3), (3, 4, 5, 6), (6, 7, 8, 9), (9, 10, 11, 12)) >>> season_to_month_tuple(["DJFM", "SOND"]) ((12, 1, 2, 3), (9, 10, 11, 12)) """ initials = "JFMAMJJASOND" starts = { "".join(s): i + 1 for s, i in zip(sliding_window(2, initials + "J"), range(12), strict=True) } result: list[tuple[int, ...]] = [] for i, season in enumerate(seasons): if len(season) == 1: if i < len(seasons) - 1: suffix = seasons[i + 1][0] else: suffix = seasons[0][0] else: suffix = season[1] start = starts[season[0] + suffix] month_append = [] for i in range(len(season[1:])): elem = start + i + 1 month_append.append(elem - 12 * (elem > 12)) result.append((start,) + tuple(month_append)) return tuple(result) def inds_to_season_string(asints: tuple[tuple[int, ...], ...]) -> tuple[str, ...]: inits = "JFMAMJJASOND" return tuple("".join([inits[i_ - 1] for i_ in t]) for t in asints) def is_sorted_periodic(lst): """Used to verify that seasons provided to SeasonResampler are in order.""" n = len(lst) # Find the wraparound point where the list decreases wrap_point = -1 for i in range(1, n): if lst[i] < lst[i - 1]: wrap_point = i break # If no wraparound point is found, the list is already sorted if wrap_point == -1: return True # Check if both parts around the wrap point are sorted for i in range(1, wrap_point): if lst[i] < lst[i - 1]: return False for i in range(wrap_point + 1, n): if lst[i] < lst[i - 1]: return False # Check wraparound condition return lst[-1] <= lst[0] @dataclass(kw_only=True, 
frozen=True) class SeasonsGroup: seasons: tuple[str, ...] # tuple[integer months] corresponding to each season inds: tuple[tuple[int, ...], ...] # integer code for each season, this is not simply range(len(seasons)) # when the seasons have overlaps codes: Sequence[int] def find_independent_seasons(seasons: Sequence[str]) -> Sequence[SeasonsGroup]: """ Iterates though a list of seasons e.g. ["DJF", "FMA", ...], and splits that into multiple sequences of non-overlapping seasons. >>> find_independent_seasons( ... ["DJF", "FMA", "AMJ", "JJA", "ASO", "OND"] ... ) # doctest: +NORMALIZE_WHITESPACE [SeasonsGroup(seasons=('DJF', 'AMJ', 'ASO'), inds=((12, 1, 2), (4, 5, 6), (8, 9, 10)), codes=[0, 2, 4]), SeasonsGroup(seasons=('FMA', 'JJA', 'OND'), inds=((2, 3, 4), (6, 7, 8), (10, 11, 12)), codes=[1, 3, 5])] >>> find_independent_seasons(["DJF", "MAM", "JJA", "SON"]) [SeasonsGroup(seasons=('DJF', 'MAM', 'JJA', 'SON'), inds=((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)), codes=[0, 1, 2, 3])] """ season_inds = season_to_month_tuple(seasons) grouped = defaultdict(list) codes = defaultdict(list) seen: set[tuple[int, ...]] = set() # This is quadratic, but the number of seasons is at most 12 for i, current in enumerate(season_inds): # Start with a group if current not in seen: grouped[i].append(current) codes[i].append(i) seen.add(current) # Loop through remaining groups, and look for overlaps for j, second in enumerate(season_inds[i:]): if not (set(chain(*grouped[i])) & set(second)) and second not in seen: grouped[i].append(second) codes[i].append(j + i) seen.add(second) if len(seen) == len(seasons): break # found all non-overlapping groups for this row start over grouped_ints = tuple(tuple(idx) for idx in grouped.values() if idx) return [ SeasonsGroup(seasons=inds_to_season_string(inds), inds=inds, codes=codes) for inds, codes in zip(grouped_ints, codes.values(), strict=False) ] @dataclass class SeasonGrouper(Grouper): """Allows grouping using a custom definition of seasons. Parameters ---------- seasons: sequence of str List of strings representing seasons. E.g. ``"JF"`` or ``"JJA"`` etc. Overlapping seasons are allowed (e.g. ``["DJFM", "MAMJ", "JJAS", "SOND"]``) Examples -------- >>> SeasonGrouper(["JF", "MAM", "JJAS", "OND"]) SeasonGrouper(seasons=['JF', 'MAM', 'JJAS', 'OND']) The ordering is preserved >>> SeasonGrouper(["MAM", "JJAS", "OND", "JF"]) SeasonGrouper(seasons=['MAM', 'JJAS', 'OND', 'JF']) Overlapping seasons are allowed >>> SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"]) SeasonGrouper(seasons=['DJFM', 'MAMJ', 'JJAS', 'SOND']) """ seasons: Sequence[str] # drop_incomplete: bool = field(default=True) # TODO def factorize(self, group: T_Group) -> EncodedGroups: if TYPE_CHECKING: assert not isinstance(group, _DummyGroup) if not _contains_datetime_like_objects(group.variable): raise ValueError( "SeasonGrouper can only be used to group by datetime-like arrays." ) months = group.dt.month.data seasons_groups = find_independent_seasons(self.seasons) codes_ = np.full((len(seasons_groups),) + group.shape, -1, dtype=np.int8) group_indices: list[list[int]] = [[]] * len(self.seasons) for axis_index, seasgroup in enumerate(seasons_groups): for season_tuple, code in zip( seasgroup.inds, seasgroup.codes, strict=False ): mask = np.isin(months, season_tuple) codes_[axis_index, mask] = code (indices,) = mask.nonzero() group_indices[code] = indices.tolist() if np.all(codes_ == -1): raise ValueError( "Failed to group data. Are you grouping by a variable that is all NaN?" 
) needs_dummy_dim = len(seasons_groups) > 1 codes = DataArray( dims=(("__season_dim__",) if needs_dummy_dim else tuple()) + group.dims, data=codes_ if needs_dummy_dim else codes_.squeeze(), attrs=group.attrs, name="season", ) unique_coord = Variable("season", self.seasons, attrs=group.attrs) full_index = pd.Index(self.seasons) return EncodedGroups( codes=codes, group_indices=tuple(group_indices), unique_coord=unique_coord, full_index=full_index, ) def reset(self) -> Self: return type(self)(self.seasons) @dataclass class SeasonResampler(Resampler): """Allows grouping using a custom definition of seasons. Parameters ---------- seasons: Sequence[str] An ordered list of seasons. drop_incomplete: bool Whether to drop seasons that are not completely included in the data. For example, if a time series starts in Jan-2001, and seasons includes `"DJF"` then observations from Jan-2001, and Feb-2001 are ignored in the grouping since Dec-2000 isn't present. Examples -------- >>> SeasonResampler(["JF", "MAM", "JJAS", "OND"]) SeasonResampler(seasons=['JF', 'MAM', 'JJAS', 'OND'], drop_incomplete=True) >>> SeasonResampler(["DJFM", "AM", "JJA", "SON"]) SeasonResampler(seasons=['DJFM', 'AM', 'JJA', 'SON'], drop_incomplete=True) """ seasons: Sequence[str] drop_incomplete: bool = field(default=True, kw_only=True) season_inds: Sequence[Sequence[int]] = field(init=False, repr=False) season_tuples: Mapping[str, Sequence[int]] = field(init=False, repr=False) def __post_init__(self): self.season_inds = season_to_month_tuple(self.seasons) all_inds = functools.reduce(operator.add, self.season_inds) if len(all_inds) > len(set(all_inds)): raise ValueError( f"Overlapping seasons are not allowed. Received {self.seasons!r}" ) self.season_tuples = dict(zip(self.seasons, self.season_inds, strict=True)) if not is_sorted_periodic(list(itertools.chain(*self.season_inds))): raise ValueError( "Resampling is only supported with sorted seasons. " f"Provided seasons {self.seasons!r} are not sorted." ) def factorize(self, group: T_Group) -> EncodedGroups: if group.ndim != 1: raise ValueError( "SeasonResampler can only be used to resample by 1D arrays." ) if not isinstance(group, DataArray) or not _contains_datetime_like_objects( group.variable ): raise ValueError( "SeasonResampler can only be used to group by datetime-like DataArrays." ) seasons = self.seasons season_inds = self.season_inds season_tuples = self.season_tuples nstr = max(len(s) for s in seasons) year = group.dt.year.astype(int) month = group.dt.month.astype(int) season_label = np.full(group.shape, "", dtype=f"U{nstr}") # offset years for seasons with December and January for season_str, season_ind in zip(seasons, season_inds, strict=True): season_label[month.isin(season_ind)] = season_str if "DJ" in season_str: after_dec = season_ind[season_str.index("D") + 1 :] # important: this is assuming non-overlapping seasons year[month.isin(after_dec)] -= 1 # Allow users to skip one or more months? # present_seasons is a mask that is True for months that are requested in the output present_seasons = season_label != "" if present_seasons.all(): # avoid copies if we can. 
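# Indexing with slice(None) below returns views of the underlying arrays,
# instead of the copies that boolean-mask indexing would make when
# constructing the grouping DataFrame.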
present_seasons = slice(None) frame = pd.DataFrame( data={ "index": np.arange(group[present_seasons].size), "month": month[present_seasons], }, index=pd.MultiIndex.from_arrays( [year.data[present_seasons], season_label[present_seasons]], names=["year", "season"], ), ) agged = ( frame["index"] .groupby(["year", "season"], sort=False) .agg(["first", "count"]) ) first_items = agged["first"] counts = agged["count"] index_class: type[CFTimeIndex | pd.DatetimeIndex] if _contains_cftime_datetimes(group.data): index_class = CFTimeIndex datetime_class = type(first_n_items(group.data, 1).item()) else: index_class = pd.DatetimeIndex datetime_class = datetime.datetime # these are the seasons that are present unique_coord = index_class( [ datetime_class(year=year, month=season_tuples[season][0], day=1) for year, season in first_items.index ] ) # This sorted call is a hack. It's hard to figure out how # to start the iteration for arbitrary season ordering # for example "DJF" as first entry or last entry # So we construct the largest possible index and slice it to the # range present in the data. complete_index = index_class( sorted( [ datetime_class(year=y, month=m, day=1) for y, m in itertools.product( range(year[0].item(), year[-1].item() + 1), [s[0] for s in season_inds], ) ] ) ) # all years and seasons def get_label(year, season): month, *_ = season_tuples[season] return f"{year}-{month:02d}-01" unique_codes = np.arange(len(unique_coord)) valid_season_mask = season_label != "" first_valid_season, last_valid_season = season_label[valid_season_mask][[0, -1]] first_year, last_year = year.data[[0, -1]] if self.drop_incomplete: if month.data[valid_season_mask][0] != season_tuples[first_valid_season][0]: if "DJ" in first_valid_season: first_year += 1 first_valid_season = seasons[ (seasons.index(first_valid_season) + 1) % len(seasons) ] unique_codes -= 1 if ( month.data[valid_season_mask][-1] != season_tuples[last_valid_season][-1] ): last_valid_season = seasons[seasons.index(last_valid_season) - 1] if "DJ" in last_valid_season: last_year -= 1 unique_codes[-1] = -1 first_label = get_label(first_year, first_valid_season) last_label = get_label(last_year, last_valid_season) slicer = complete_index.slice_indexer(first_label, last_label) full_index = complete_index[slicer] final_codes = np.full(group.data.size, -1) final_codes[present_seasons] = np.repeat(unique_codes, counts) codes = group.copy(data=final_codes, deep=False) return EncodedGroups(codes=codes, full_index=full_index) def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]: """ Compute chunk sizes for this season resampler. This method is used during chunking operations to determine appropriate chunk sizes for the given variable when using this resampler. Parameters ---------- name : Hashable The name of the dimension being chunked. variable : Variable The variable being chunked. Returns ------- tuple[int, ...] A tuple of chunk sizes for the dimension. """ from xarray.core.dataarray import DataArray if not _contains_datetime_like_objects(variable): raise ValueError( f"Computing chunks with {type(self)!r} only supported for datetime variables. " f"Received variable with dtype {variable.dtype!r} instead." ) if len("".join(self.seasons)) != 12: raise ValueError( "Cannot rechunk with a SeasonResampler that does not cover all 12 months. " f"Received `seasons={self.seasons!r}`." 
) # Create a temporary resampler that ignores drop_incomplete for chunking # This prevents data from being silently dropped during chunking resampler_for_chunking = type(self)(seasons=self.seasons, drop_incomplete=False) chunks = ( DataArray( np.ones(variable.shape, dtype=int), dims=(dim,), coords={dim: variable}, ) .resample({dim: resampler_for_chunking}) .sum() ) # When bins (binning) or time periods are missing (resampling) # we can end up with NaNs. Drop them. if chunks.dtype.kind == "f": chunks = chunks.dropna(dim).astype(int) chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist()) return chunks_tuple def reset(self) -> Self: return type(self)(seasons=self.seasons, drop_incomplete=self.drop_incomplete) xarray-2025.09.0/xarray/indexes/000077500000000000000000000000001505620616400163115ustar00rootroot00000000000000xarray-2025.09.0/xarray/indexes/__init__.py000066400000000000000000000011141505620616400204170ustar00rootroot00000000000000"""Xarray index objects for label-based selection and alignment of Dataset / DataArray objects. """ from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.indexes import ( CoordinateTransformIndex, Index, PandasIndex, PandasMultiIndex, ) from xarray.indexes.nd_point_index import NDPointIndex, TreeAdapter from xarray.indexes.range_index import RangeIndex __all__ = [ "CoordinateTransform", "CoordinateTransformIndex", "Index", "NDPointIndex", "PandasIndex", "PandasMultiIndex", "RangeIndex", "TreeAdapter", ] xarray-2025.09.0/xarray/indexes/nd_point_index.py000066400000000000000000000320761505620616400216740ustar00rootroot00000000000000from __future__ import annotations import abc from collections.abc import Hashable, Iterable, Mapping from typing import TYPE_CHECKING, Any, Generic, TypeVar import numpy as np from xarray.core.dataarray import DataArray from xarray.core.indexes import Index from xarray.core.indexing import IndexSelResult from xarray.core.utils import is_scalar from xarray.core.variable import Variable from xarray.structure.alignment import broadcast if TYPE_CHECKING: from scipy.spatial import KDTree from xarray.core.types import Self class TreeAdapter(abc.ABC): """Lightweight adapter abstract class for plugging in 3rd-party structures like :py:class:`scipy.spatial.KDTree` or :py:class:`sklearn.neighbors.KDTree` into :py:class:`~xarray.indexes.NDPointIndex`. """ @abc.abstractmethod def __init__(self, points: np.ndarray, *, options: Mapping[str, Any]): """ Parameters ---------- points : ndarray of shape (n_points, n_coordinates) Two-dimensional array of points/samples (rows) and their corresponding coordinate labels (columns) to index. """ ... @abc.abstractmethod def query(self, points: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """Query points. Parameters ---------- points: ndarray of shape (n_points, n_coordinates) Two-dimensional array of points/samples (rows) and their corresponding coordinate labels (columns) to query. Returns ------- distances : ndarray of shape (n_points) Distances to the nearest neighbors. indices : ndarray of shape (n_points) Indices of the nearest neighbors in the array of the indexed points. """ ... def equals(self, other: Self) -> bool: """Check equality with another TreeAdapter of the same kind. Parameters ---------- other : The other TreeAdapter object to compare with this object. 
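Returns
-------
bool
    True if this adapter is considered equal to ``other``, False otherwise.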
""" raise NotImplementedError class ScipyKDTreeAdapter(TreeAdapter): """:py:class:`scipy.spatial.KDTree` adapter for :py:class:`~xarray.indexes.NDPointIndex`.""" _kdtree: KDTree def __init__(self, points: np.ndarray, options: Mapping[str, Any]): from scipy.spatial import KDTree self._kdtree = KDTree(points, **options) def query(self, points: np.ndarray) -> tuple[np.ndarray, np.ndarray]: return self._kdtree.query(points) # type: ignore[return-value,unused-ignore] def equals(self, other: Self) -> bool: return np.array_equal(self._kdtree.data, other._kdtree.data) def get_points(coords: Iterable[Variable | Any]) -> np.ndarray: """Re-arrange data from a sequence of xarray coordinate variables or labels into a 2-d array of shape (n_points, n_coordinates). """ data = [c.values if isinstance(c, Variable | DataArray) else c for c in coords] return np.stack([np.ravel(d) for d in data]).T T_TreeAdapter = TypeVar("T_TreeAdapter", bound=TreeAdapter) class NDPointIndex(Index, Generic[T_TreeAdapter]): """Xarray index for irregular, n-dimensional data. This index may be associated with a set of coordinate variables representing the arbitrary location of data points in an n-dimensional space. All coordinates must have the same shape and dimensions. The number of associated coordinate variables must correspond to the number of dimensions of the space. This index supports label-based selection (nearest neighbor lookup). It also has limited support for alignment. By default, this index relies on :py:class:`scipy.spatial.KDTree` for fast lookup. Do not use :py:meth:`~xarray.indexes.NDPointIndex.__init__` directly. Instead use :py:meth:`xarray.Dataset.set_xindex` or :py:meth:`xarray.DataArray.set_xindex` to create and set the index from existing coordinates (see the example below). Examples -------- An example using a dataset with 2-dimensional coordinates. >>> xx = [[1.0, 2.0], [3.0, 0.0]] >>> yy = [[11.0, 21.0], [29.0, 9.0]] >>> ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}) >>> ds Size: 64B Dimensions: (y: 2, x: 2) Coordinates: xx (y, x) float64 32B 1.0 2.0 3.0 0.0 yy (y, x) float64 32B 11.0 21.0 29.0 9.0 Dimensions without coordinates: y, x Data variables: *empty* Creation of a NDPointIndex from the "xx" and "yy" coordinate variables: >>> ds = ds.set_xindex(("xx", "yy"), xr.indexes.NDPointIndex) >>> ds Size: 64B Dimensions: (y: 2, x: 2) Coordinates: * xx (y, x) float64 32B 1.0 2.0 3.0 0.0 * yy (y, x) float64 32B 11.0 21.0 29.0 9.0 Dimensions without coordinates: y, x Data variables: *empty* Indexes: β”Œ xx NDPointIndex (ScipyKDTreeAdapter) β”” yy Point-wise (nearest-neighbor) data selection using Xarray's advanced indexing, i.e., using arbitrary dimension(s) for the Variable objects passed as labels: >>> ds.sel( ... xx=xr.Variable("points", [1.9, 0.1]), ... yy=xr.Variable("points", [13.0, 8.0]), ... method="nearest", ... 
) Size: 32B Dimensions: (points: 2) Coordinates: xx (points) float64 16B 1.0 0.0 yy (points) float64 16B 11.0 9.0 Dimensions without coordinates: points Data variables: *empty* Data selection with scalar labels: >>> ds.sel(xx=1.9, yy=13.0, method="nearest") Size: 16B Dimensions: () Coordinates: xx float64 8B 1.0 yy float64 8B 11.0 Data variables: *empty* Data selection with broadcasting the input labels: >>> ds.sel(xx=1.9, yy=xr.Variable("points", [13.0, 8.0]), method="nearest") Size: 32B Dimensions: (points: 2) Coordinates: xx (points) float64 16B 1.0 0.0 yy (points) float64 16B 11.0 9.0 Dimensions without coordinates: points Data variables: *empty* >>> da = xr.DataArray( ... [[45.1, 53.3], [65.4, 78.2]], ... coords={"u": [1.9, 0.1], "v": [13.0, 8.0]}, ... dims=("u", "v"), ... ) >>> ds.sel(xx=da.u, yy=da.v, method="nearest") Size: 64B Dimensions: (u: 2, v: 2) Coordinates: xx (u, v) float64 32B 1.0 0.0 1.0 0.0 yy (u, v) float64 32B 11.0 9.0 11.0 9.0 Dimensions without coordinates: u, v Data variables: *empty* Data selection with array-like labels (implicit dimensions): >>> ds.sel(xx=[[1.9], [0.1]], yy=[[13.0], [8.0]], method="nearest") Size: 32B Dimensions: (y: 2, x: 1) Coordinates: xx (y, x) float64 16B 1.0 0.0 yy (y, x) float64 16B 11.0 9.0 Dimensions without coordinates: y, x Data variables: *empty* """ _tree_obj: T_TreeAdapter _coord_names: tuple[Hashable, ...] _dims: tuple[Hashable, ...] _shape: tuple[int, ...] def __init__( self, tree_obj: T_TreeAdapter, *, coord_names: tuple[Hashable, ...], dims: tuple[Hashable, ...], shape: tuple[int, ...], ): # this constructor is "private" assert isinstance(tree_obj, TreeAdapter) self._tree_obj = tree_obj assert len(coord_names) == len(dims) == len(shape) self._coord_names = coord_names self._dims = dims self._shape = shape @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> Self: if len({var.dims for var in variables.values()}) > 1: var_names = ",".join(vn for vn in variables) raise ValueError( f"variables {var_names} must all have the same dimensions and the same shape" ) var0 = next(iter(variables.values())) if len(variables) != len(var0.dims): raise ValueError( f"the number of variables {len(variables)} doesn't match " f"the number of dimensions {len(var0.dims)}" ) opts = dict(options) tree_adapter_cls: type[T_TreeAdapter] = opts.pop("tree_adapter_cls", None) if tree_adapter_cls is None: tree_adapter_cls = ScipyKDTreeAdapter points = get_points(variables.values()) return cls( tree_adapter_cls(points, options=opts), coord_names=tuple(variables), dims=var0.dims, shape=var0.shape, ) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> dict[Any, Variable]: if variables is not None: for var in variables.values(): # maybe re-sync variable dimensions with the index object # returned by NDPointIndex.rename() if var.dims != self._dims: var.dims = self._dims return dict(**variables) else: return {} def equals( self, other: Index, *, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, NDPointIndex): return False if type(self._tree_obj) is not type(other._tree_obj): return False return self._tree_obj.equals(other._tree_obj) def _get_dim_indexers( self, indices: np.ndarray, label_dims: tuple[Hashable, ...], label_shape: tuple[int, ...], ) -> dict[Hashable, Variable]: """Returns dimension indexers based on the query results (indices) and the original label dimensions and shape. 1. Unravel the flat indices returned from the query 2. 
Reshape the unraveled indices according to indexers shapes 3. Wrap the indices in xarray.Variable objects. """ dim_indexers = {} u_indices = list(np.unravel_index(indices.ravel(), self._shape)) for dim, ind in zip(self._dims, u_indices, strict=False): dim_indexers[dim] = Variable(label_dims, ind.reshape(label_shape)) return dim_indexers def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: if method != "nearest": raise ValueError( "NDPointIndex only supports selection with method='nearest'" ) missing_labels = set(self._coord_names) - set(labels) if missing_labels: missing_labels_str = ",".join([f"{name}" for name in missing_labels]) raise ValueError(f"missing labels for coordinate(s): {missing_labels_str}.") # maybe convert labels into xarray DataArray objects xr_labels: dict[Any, DataArray] = {} for name, lbl in labels.items(): if isinstance(lbl, DataArray): xr_labels[name] = lbl elif isinstance(lbl, Variable): xr_labels[name] = DataArray(lbl) elif is_scalar(lbl): xr_labels[name] = DataArray(lbl, dims=()) elif np.asarray(lbl).ndim == len(self._dims): xr_labels[name] = DataArray(lbl, dims=self._dims) else: raise ValueError( "invalid label value. NDPointIndex only supports advanced (point-wise) indexing " "with the following label value kinds:\n" "- xarray.DataArray or xarray.Variable objects\n" "- scalar values\n" "- unlabelled array-like objects with the same number of dimensions " f"than the {self._coord_names} coordinate variables ({len(self._dims)})" ) # broadcast xarray labels against one another and determine labels shape and dimensions broadcasted = broadcast(*xr_labels.values()) label_dims = broadcasted[0].dims label_shape = broadcasted[0].shape xr_labels = dict(zip(xr_labels, broadcasted, strict=True)) # get and return dimension indexers points = get_points(xr_labels[name] for name in self._coord_names) _, indices = self._tree_obj.query(points) dim_indexers = self._get_dim_indexers(indices, label_dims, label_shape) return IndexSelResult(dim_indexers=dim_indexers) def rename( self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable], ) -> Self: if not set(self._coord_names) & set(name_dict) and not set(self._dims) & set( dims_dict ): return self new_coord_names = tuple(name_dict.get(n, n) for n in self._coord_names) new_dims = tuple(dims_dict.get(d, d) for d in self._dims) return type(self)( self._tree_obj, coord_names=new_coord_names, dims=new_dims, shape=self._shape, ) def _repr_inline_(self, max_width: int) -> str: tree_obj_type = self._tree_obj.__class__.__name__ return f"{self.__class__.__name__} ({tree_obj_type})" xarray-2025.09.0/xarray/indexes/range_index.py000066400000000000000000000325661505620616400211620ustar00rootroot00000000000000import math from collections.abc import Hashable, Mapping from typing import Any import numpy as np import pandas as pd from xarray.core import duck_array_ops from xarray.core.coordinate_transform import CoordinateTransform from xarray.core.dataarray import DataArray from xarray.core.indexes import CoordinateTransformIndex, Index, PandasIndex from xarray.core.indexing import IndexSelResult from xarray.core.variable import Variable class RangeCoordinateTransform(CoordinateTransform): """1-dimensional coordinate transform representing a simple bounded interval with evenly spaced, floating-point values. 
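Positions ``i`` in ``range(size)`` map to labels ``start + i * step``, with
``step = (stop - start) / size``; the interval is therefore half-open
(``start`` is included, ``stop`` is excluded).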
""" start: float stop: float _step: float | None __slots__ = ("_step", "start", "stop") def __init__( self, start: float, stop: float, size: int, coord_name: Hashable, dim: str, dtype: Any = None, ): if dtype is None: dtype = np.dtype(np.float64) super().__init__([coord_name], {dim: size}, dtype=dtype) self.start = start self.stop = stop self._step = None # Will be calculated by property @property def coord_name(self) -> Hashable: return self.coord_names[0] @property def dim(self) -> str: return self.dims[0] @property def size(self) -> int: return self.dim_size[self.dim] @property def step(self) -> float: if self._step is not None: return self._step if self.size > 0: return (self.stop - self.start) / self.size else: # For empty arrays, default to 1.0 return 1.0 def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]: positions = dim_positions[self.dim] labels = self.start + positions * self.step return {self.coord_name: labels} def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]: labels = coord_labels[self.coord_name] positions = (labels - self.start) / self.step return {self.dim: positions} def equals( self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None ) -> bool: if not isinstance(other, RangeCoordinateTransform): return False return ( self.start == other.start and self.stop == other.stop and self.size == other.size ) def slice(self, sl: slice) -> "RangeCoordinateTransform": new_range = range(self.size)[sl] new_size = len(new_range) new_start = self.start + new_range.start * self.step new_stop = self.start + new_range.stop * self.step result = type(self)( new_start, new_stop, new_size, self.coord_name, self.dim, dtype=self.dtype, ) if new_size == 0: # For empty slices, preserve step from parent result._step = self.step return result class RangeIndex(CoordinateTransformIndex): """Xarray index implementing a simple bounded 1-dimension interval with evenly spaced, monotonic floating-point values. This index is memory-saving, i.e., the values of its associated coordinate variable are not materialized in memory. Do not use :py:meth:`~xarray.indexes.RangeIndex.__init__` directly. Instead use :py:meth:`~xarray.indexes.RangeIndex.arange` or :py:meth:`~xarray.indexes.RangeIndex.linspace`, which are similar to :py:func:`numpy.arange` and :py:func:`numpy.linspace`. In the case of a monotonic integer range, it is better using a :py:class:`~xarray.indexes.PandasIndex` that wraps a :py:class:`pandas.RangeIndex`. """ transform: RangeCoordinateTransform def __init__(self, transform: RangeCoordinateTransform): super().__init__(transform) @classmethod def arange( cls, start: float | None = None, stop: float | None = None, step: float | None = None, *, coord_name: Hashable | None = None, dim: str, dtype: Any = None, ) -> "RangeIndex": """Create a new RangeIndex from given start, stop and step values. ``RangeIndex.arange`` can be called with a varying number of positional arguments: - ``RangeIndex.arange(stop)``: the index is within the half-open interval [0, stop) (in other words, the interval including start but excluding stop). - ``RangeIndex.arange(start, stop)``: the index is within the half-open interval [start, stop). - ``RangeIndex.arange(start, stop, step)``: the index is within the half-open interval [start, stop), with spacing between values given by step. .. note:: When using a non-integer step, such as 0.1, it is often better to use :py:meth:`~xarray.indexes.RangeIndex.linspace`. .. 
note:: ``RangeIndex.arange(start=4.0)`` returns a range index in the [0.0, 4.0) interval, i.e., ``start`` is interpreted as ``stop`` even when it is given as a unique keyword argument. Parameters ---------- start : float, optional Start of interval. The interval includes this value. The default start value is 0. If ``stop`` is not given, the value given here is interpreted as the end of the interval. stop : float End of interval. In general the interval does not include this value, except floating point round-off affects the size of the dimension. step : float, optional Spacing between values (default: 1.0). coord_name : Hashable, optional Name of the (lazy) coordinate variable that will be created and associated with the new index. If ``None``, the coordinate is named as the dimension name. dim : str Dimension name. dtype : dtype, optional The dtype of the coordinate variable (default: float64). Examples -------- >>> from xarray.indexes import RangeIndex >>> index = RangeIndex.arange(0.0, 1.0, 0.2, dim="x") >>> ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) >>> ds Size: 40B Dimensions: (x: 5) Coordinates: * x (x) float64 40B 0.0 0.2 0.4 0.6 0.8 Data variables: *empty* Indexes: x RangeIndex (start=0, stop=1, step=0.2) """ if stop is None: if start is None: raise TypeError("RangeIndex.arange() requires stop to be specified") else: stop = start start = None if start is None: start = 0.0 if step is None: step = 1.0 if coord_name is None: coord_name = dim size = math.ceil((stop - start) / step) transform = RangeCoordinateTransform( start, stop, size, coord_name, dim, dtype=dtype ) return cls(transform) @classmethod def linspace( cls, start: float, stop: float, num: int = 50, endpoint: bool = True, *, coord_name: Hashable | None = None, dim: str, dtype: Any = None, ) -> "RangeIndex": """Create a new RangeIndex from given start / stop values and number of values. Parameters ---------- start : float Start of interval. The interval includes this value. stop : float, optional End of interval. The interval includes this value if ``endpoint=True``. num : float, optional Number of values in the interval, i.e., dimension size (default: 50). endpoint : bool, optional If True (default), the ``stop`` value is included in the interval. coord_name : Hashable, optional Name of the (lazy) coordinate variable that will be created and associated with the new index. If ``None``, the coordinate is named as the dimension name. dim : str Dimension name. dtype : dtype, optional The dtype of the coordinate variable (default: float64). Examples -------- >>> from xarray.indexes import RangeIndex >>> index = RangeIndex.linspace(0.0, 1.0, 5, dim="x") >>> ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) >>> ds Size: 40B Dimensions: (x: 5) Coordinates: * x (x) float64 40B 0.0 0.25 0.5 0.75 1.0 Data variables: *empty* Indexes: x RangeIndex (start=0, stop=1.25, step=0.25) """ if coord_name is None: coord_name = dim if endpoint: stop += (stop - start) / (num - 1) transform = RangeCoordinateTransform( start, stop, num, coord_name, dim, dtype=dtype ) return cls(transform) @classmethod def from_variables( cls, variables: Mapping[Any, Variable], *, options: Mapping[str, Any], ) -> "RangeIndex": raise NotImplementedError( "cannot create a new RangeIndex from an existing coordinate. 
Use instead " "either `RangeIndex.arange()` or `RangeIndex.linspace()` together with " "`Coordinates.from_xindex()`" ) @property def start(self) -> float: """Returns the start of the interval (the interval includes this value).""" return self.transform.start @property def stop(self) -> float: """Returns the end of the interval (the interval does not include this value).""" return self.transform.stop @property def step(self) -> float: """Returns the spacing between values.""" return self.transform.step @property def coord_name(self) -> Hashable: return self.transform.coord_names[0] @property def dim(self) -> str: return self.transform.dims[0] @property def size(self) -> int: return self.transform.dim_size[self.dim] def isel( self, indexers: Mapping[Any, int | slice | np.ndarray | Variable] ) -> Index | None: idxer = indexers[self.dim] if isinstance(idxer, slice): return RangeIndex(self.transform.slice(idxer)) elif (isinstance(idxer, Variable) and idxer.ndim > 1) or duck_array_ops.ndim( idxer ) == 0: return None else: values = self.transform.forward({self.dim: np.asarray(idxer)})[ self.coord_name ] if isinstance(idxer, Variable): new_dim = idxer.dims[0] else: new_dim = self.dim pd_index = pd.Index(values, name=self.coord_name) return PandasIndex(pd_index, new_dim, coord_dtype=values.dtype) def sel( self, labels: dict[Any, Any], method=None, tolerance=None ) -> IndexSelResult: label = labels[self.dim] if method != "nearest": raise ValueError("RangeIndex only supports selection with method='nearest'") # TODO: for RangeIndex it might not be too hard to support tolerance if tolerance is not None: raise ValueError( "RangeIndex doesn't support selection with a given tolerance value yet" ) if isinstance(label, slice): if label.step is None: # continuous interval slice indexing (preserves the index) positions = self.transform.reverse( {self.coord_name: np.array([label.start, label.stop])} ) pos = np.round(positions[self.dim]).astype("int") new_start = max(pos[0], 0) new_stop = min(pos[1], self.size) return IndexSelResult({self.dim: slice(new_start, new_stop)}) else: # otherwise convert to basic (array) indexing label = np.arange(label.start, label.stop, label.step) # support basic indexing (in the 1D case basic vs. 
vectorized indexing # are pretty much similar) unwrap_xr = False if not isinstance(label, Variable | DataArray): # basic indexing -> either scalar or 1-d array try: var = Variable("_", label) except ValueError: var = Variable((), label) labels = {self.dim: var} unwrap_xr = True result = super().sel(labels, method=method, tolerance=tolerance) if unwrap_xr: dim_indexers = {self.dim: result.dim_indexers[self.dim].values} result = IndexSelResult(dim_indexers) return result def to_pandas_index(self) -> pd.Index: values = self.transform.generate_coords() return pd.Index(values[self.dim]) def _repr_inline_(self, max_width) -> str: params_fmt = ( f"start={self.start:.3g}, stop={self.stop:.3g}, step={self.step:.3g}" ) return f"{self.__class__.__name__} ({params_fmt})" def __repr__(self) -> str: params_fmt = ( f"start={self.start:.3g}, stop={self.stop:.3g}, step={self.step:.3g}, " f"size={self.size}, coord_name={self.coord_name!r}, dim={self.dim!r}" ) return f"{self.__class__.__name__} ({params_fmt})" xarray-2025.09.0/xarray/namedarray/000077500000000000000000000000001505620616400167755ustar00rootroot00000000000000xarray-2025.09.0/xarray/namedarray/__init__.py000066400000000000000000000000001505620616400210740ustar00rootroot00000000000000xarray-2025.09.0/xarray/namedarray/_aggregations.py000066400000000000000000000731311505620616400221650ustar00rootroot00000000000000"""Mixin classes with reduction operations.""" # This file was generated using xarray.util.generate_aggregations. Do not edit manually. from __future__ import annotations from collections.abc import Callable, Sequence from typing import Any from xarray.core import duck_array_ops from xarray.core.types import Dims, Self class NamedArrayAggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.count() Size: 8B array(5) """ return self.reduce( duck_array_ops.count, dim=dim, **kwargs, ) def all( self, dim: Dims = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. 
**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray( ... "x", np.array([True, True, True, True, True, False], dtype=bool) ... ) >>> na Size: 6B array([ True, True, True, True, True, False]) >>> na.all() Size: 1B array(False) """ return self.reduce( duck_array_ops.array_all, dim=dim, **kwargs, ) def any( self, dim: Dims = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray( ... "x", np.array([True, True, True, True, True, False], dtype=bool) ... ) >>> na Size: 6B array([ True, True, True, True, True, False]) >>> na.any() Size: 1B array(True) """ return self.reduce( duck_array_ops.array_any, dim=dim, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.max() Size: 8B array(3.) Use ``skipna`` to control whether NaNs are ignored. 
>>> na.max(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.min() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> na.min(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.mean() Size: 8B array(1.6) Use ``skipna`` to control whether NaNs are ignored. >>> na.mean(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." 
or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.prod() Size: 8B array(0.) Use ``skipna`` to control whether NaNs are ignored. >>> na.prod(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> na.prod(skipna=True, min_count=2) Size: 8B array(0.) """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. 
Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.sum() Size: 8B array(8.) Use ``skipna`` to control whether NaNs are ignored. >>> na.sum(skipna=False) Size: 8B array(nan) Specify ``min_count`` for finer control over when NaNs are ignored. >>> na.sum(skipna=True, min_count=2) Size: 8B array(8.) """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.std() Size: 8B array(1.0198039) Use ``skipna`` to control whether NaNs are ignored. >>> na.std(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> na.std(skipna=True, ddof=1) Size: 8B array(1.14017543) """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : NamedArray New NamedArray with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.var() Size: 8B array(1.04) Use ``skipna`` to control whether NaNs are ignored. >>> na.var(skipna=False) Size: 8B array(nan) Specify ``ddof=1`` for an unbiased estimate. >>> na.var(skipna=True, ddof=1) Size: 8B array(1.3) """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.median() Size: 8B array(2.) Use ``skipna`` to control whether NaNs are ignored. >>> na.median(skipna=False) Size: 8B array(nan) """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : NamedArray New NamedArray with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumsum NamedArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.cumsum() Size: 48B array([1., 3., 6., 6., 8., 8.]) Use ``skipna`` to control whether NaNs are ignored. >>> na.cumsum(skipna=False) Size: 48B array([ 1., 3., 6., 6., 8., nan]) """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this NamedArray's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : NamedArray New NamedArray with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumprod NamedArray.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray("x", np.array([1, 2, 3, 0, 2, np.nan])) >>> na Size: 48B array([ 1., 2., 3., 0., 2., nan]) >>> na.cumprod() Size: 48B array([1., 2., 6., 0., 0., 0.]) Use ``skipna`` to control whether NaNs are ignored. 
>>> na.cumprod(skipna=False) Size: 48B array([ 1., 2., 6., 0., 0., nan]) """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, **kwargs, ) xarray-2025.09.0/xarray/namedarray/_array_api.py000066400000000000000000000135231505620616400214610ustar00rootroot00000000000000from __future__ import annotations from types import ModuleType from typing import Any import numpy as np from xarray.namedarray._typing import ( Default, _arrayapi, _Axes, _Axis, _default, _Dim, _DType, _ScalarType, _ShapeType, _SupportsImag, _SupportsReal, ) from xarray.namedarray.core import NamedArray def _get_data_namespace(x: NamedArray[Any, Any]) -> ModuleType: if isinstance(x._data, _arrayapi): return x._data.__array_namespace__() return np # %% Creation Functions def astype( x: NamedArray[_ShapeType, Any], dtype: _DType, /, *, copy: bool = True ) -> NamedArray[_ShapeType, _DType]: """ Copies an array to a specified data type irrespective of Type Promotion Rules rules. Parameters ---------- x : NamedArray Array to cast. dtype : _DType Desired data type. copy : bool, optional Specifies whether to copy an array when the specified dtype matches the data type of the input array x. If True, a newly allocated array must always be returned. If False and the specified dtype matches the data type of the input array, the input array must be returned; otherwise, a newly allocated array must be returned. Default: True. Returns ------- out : NamedArray An array having the specified data type. The returned array must have the same shape as x. Examples -------- >>> narr = NamedArray(("x",), np.asarray([1.5, 2.5])) >>> narr Size: 16B array([1.5, 2.5]) >>> astype(narr, np.dtype(np.int32)) Size: 8B array([1, 2], dtype=int32) """ if isinstance(x._data, _arrayapi): xp = x._data.__array_namespace__() return x._new(data=xp.astype(x._data, dtype, copy=copy)) # np.astype doesn't exist yet: return x._new(data=x._data.astype(dtype, copy=copy)) # type: ignore[attr-defined] # %% Elementwise Functions def imag( x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] /, ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the imaginary component of a complex number for each element x_i of the input array x. Parameters ---------- x : NamedArray Input array. Should have a complex floating-point data type. Returns ------- out : NamedArray An array containing the element-wise results. The returned array must have a floating-point data type with the same floating-point precision as x (e.g., if x is complex64, the returned array must have the floating-point data type float32). Examples -------- >>> narr = NamedArray(("x",), np.asarray([1.0 + 2j, 2 + 4j])) >>> imag(narr) Size: 16B array([2., 4.]) """ xp = _get_data_namespace(x) out = x._new(data=xp.imag(x._data)) return out def real( x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] /, ) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]: """ Returns the real component of a complex number for each element x_i of the input array x. Parameters ---------- x : NamedArray Input array. Should have a complex floating-point data type. Returns ------- out : NamedArray An array containing the element-wise results. The returned array must have a floating-point data type with the same floating-point precision as x (e.g., if x is complex64, the returned array must have the floating-point data type float32). 
Examples -------- >>> narr = NamedArray(("x",), np.asarray([1.0 + 2j, 2 + 4j])) >>> real(narr) Size: 16B array([1., 2.]) """ xp = _get_data_namespace(x) out = x._new(data=xp.real(x._data)) return out # %% Manipulation functions def expand_dims( x: NamedArray[Any, _DType], /, *, dim: _Dim | Default = _default, axis: _Axis = 0, ) -> NamedArray[Any, _DType]: """ Expands the shape of an array by inserting a new dimension of size one at the position specified by dims. Parameters ---------- x : Array to expand. dim : Dimension name. New dimension will be stored in the axis position. axis : (Not recommended) Axis position (zero-based). Default is 0. Returns ------- out : An expanded output array having the same data type as x. Examples -------- >>> x = NamedArray(("x", "y"), np.asarray([[1.0, 2.0], [3.0, 4.0]])) >>> expand_dims(x) Size: 32B array([[[1., 2.], [3., 4.]]]) >>> expand_dims(x, dim="z") Size: 32B array([[[1., 2.], [3., 4.]]]) """ xp = _get_data_namespace(x) dims = x.dims if dim is _default: dim = f"dim_{len(dims)}" d = list(dims) d.insert(axis, dim) out = x._new(dims=tuple(d), data=xp.expand_dims(x._data, axis=axis)) return out def permute_dims(x: NamedArray[Any, _DType], axes: _Axes) -> NamedArray[Any, _DType]: """ Permutes the dimensions of an array. Parameters ---------- x : Array to permute. axes : Permutation of the dimensions of x. Returns ------- out : An array with permuted dimensions. The returned array must have the same data type as x. """ dims = x.dims new_dims = tuple(dims[i] for i in axes) if isinstance(x._data, _arrayapi): xp = _get_data_namespace(x) out = x._new(dims=new_dims, data=xp.permute_dims(x._data, axes)) else: out = x._new(dims=new_dims, data=x._data.transpose(axes)) # type: ignore[attr-defined] return out xarray-2025.09.0/xarray/namedarray/_typing.py000066400000000000000000000176131505620616400210300ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from enum import Enum from types import EllipsisType, ModuleType from typing import ( TYPE_CHECKING, Any, Final, Literal, Protocol, SupportsIndex, TypeVar, Union, overload, runtime_checkable, ) import numpy as np try: from typing import TypeAlias except ImportError: if TYPE_CHECKING: raise else: Self: Any = None # Singleton type, as per https://github.com/python/typing/pull/240 class Default(Enum): token: Final = 0 _default = Default.token # https://stackoverflow.com/questions/74633074/how-to-type-hint-a-generic-numpy-array _T_co = TypeVar("_T_co", covariant=True) _dtype = np.dtype _DType = TypeVar("_DType", bound=np.dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` _ScalarType = TypeVar("_ScalarType", bound=np.generic) _ScalarType_co = TypeVar("_ScalarType_co", bound=np.generic, covariant=True) # A protocol for anything with the dtype attribute @runtime_checkable class _SupportsDType(Protocol[_DType_co]): @property def dtype(self) -> _DType_co: ... _DTypeLike = Union[ np.dtype[_ScalarType], type[_ScalarType], _SupportsDType[np.dtype[_ScalarType]], ] # For unknown shapes Dask uses np.nan, array_api uses None: _IntOrUnknown = int _Shape = tuple[_IntOrUnknown, ...] _ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] _ShapeType = TypeVar("_ShapeType", bound=Any) _ShapeType_co = TypeVar("_ShapeType_co", bound=Any, covariant=True) _Axis = int _Axes = tuple[_Axis, ...] 
_AxisLike = Union[_Axis, _Axes] _Chunks = tuple[_Shape, ...] _NormalizedChunks = tuple[tuple[int, ...], ...] # FYI in some cases we don't allow `None`, which this doesn't take account of. # # FYI the `str` is for a size string, e.g. "16MB", supported by dask. T_ChunkDim: TypeAlias = str | int | Literal["auto"] | tuple[int, ...] | None # noqa: PYI051 # We allow the tuple form of this (though arguably we could transition to named dims only) T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim] _Dim = Hashable _Dims = tuple[_Dim, ...] _DimsLike = Union[str, Iterable[_Dim]] # https://data-apis.org/array-api/latest/API_specification/indexing.html # TODO: np.array_api was bugged and didn't allow (None,), but should! # https://github.com/numpy/numpy/pull/25022 # https://github.com/data-apis/array-api/pull/674 _IndexKey = Union[int, slice, EllipsisType] _IndexKeys = tuple[_IndexKey, ...] # tuple[Union[_IndexKey, None], ...] _IndexKeyLike = Union[_IndexKey, _IndexKeys] _AttrsLike = Union[Mapping[Any, Any], None] class _SupportsReal(Protocol[_T_co]): @property def real(self) -> _T_co: ... class _SupportsImag(Protocol[_T_co]): @property def imag(self) -> _T_co: ... @runtime_checkable class _array(Protocol[_ShapeType_co, _DType_co]): """ Minimal duck array named array uses. Corresponds to np.ndarray. """ @property def shape(self) -> _Shape: ... @property def dtype(self) -> _DType_co: ... @runtime_checkable class _arrayfunction( _array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Duck array supporting NEP 18. Corresponds to np.ndarray. """ @overload def __getitem__( self, key: _arrayfunction[Any, Any] | tuple[_arrayfunction[Any, Any], ...], / ) -> _arrayfunction[Any, _DType_co]: ... @overload def __getitem__(self, key: _IndexKeyLike, /) -> Any: ... def __getitem__( self, key: ( _IndexKeyLike | _arrayfunction[Any, Any] | tuple[_arrayfunction[Any, Any], ...] ), /, ) -> _arrayfunction[Any, _DType_co] | Any: ... @overload def __array__( self, dtype: None = ..., /, *, copy: bool | None = ... ) -> np.ndarray[Any, _DType_co]: ... @overload def __array__( self, dtype: _DType, /, *, copy: bool | None = ... ) -> np.ndarray[Any, _DType]: ... def __array__( self, dtype: _DType | None = ..., /, *, copy: bool | None = ... ) -> np.ndarray[Any, _DType] | np.ndarray[Any, _DType_co]: ... # TODO: Should return the same subclass but with a new dtype generic. # https://github.com/python/typing/issues/548 def __array_ufunc__( self, ufunc: Any, method: Any, *inputs: Any, **kwargs: Any, ) -> Any: ... # TODO: Should return the same subclass but with a new dtype generic. # https://github.com/python/typing/issues/548 def __array_function__( self, func: Callable[..., Any], types: Iterable[type], args: Iterable[Any], kwargs: Mapping[str, Any], ) -> Any: ... @property def imag(self) -> _arrayfunction[_ShapeType_co, Any]: ... @property def real(self) -> _arrayfunction[_ShapeType_co, Any]: ... @runtime_checkable class _arrayapi(_array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co]): """ Duck array supporting NEP 47. Corresponds to np.ndarray. """ def __getitem__( self, key: ( _IndexKeyLike | Any ), # TODO: Any should be _arrayapi[Any, _dtype[np.integer]] /, ) -> _arrayapi[Any, Any]: ... def __array_namespace__(self) -> ModuleType: ... 
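# A minimal usage sketch of the runtime-checkable protocols defined above:
# ``isinstance`` against a ``runtime_checkable`` Protocol verifies only that the
# required attributes and methods are present, not their signatures::
#
#     >>> isinstance(np.zeros(3), _arrayfunction)  # ndarray: has shape, dtype, __array_function__, ...
#     True
#     >>> isinstance([1, 2, 3], _arrayfunction)  # plain list: lacks shape and dtype
#     False
#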
# NamedArray can most likely use both __array_function__ and __array_namespace__: _arrayfunction_or_api = (_arrayfunction, _arrayapi) duckarray = Union[ _arrayfunction[_ShapeType_co, _DType_co], _arrayapi[_ShapeType_co, _DType_co] ] # Corresponds to np.typing.NDArray: DuckArray = _arrayfunction[Any, np.dtype[_ScalarType_co]] @runtime_checkable class _chunkedarray( _array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Minimal chunked duck array. Corresponds to np.ndarray. """ @property def chunks(self) -> _Chunks: ... @runtime_checkable class _chunkedarrayfunction( _arrayfunction[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Chunked duck array supporting NEP 18. Corresponds to np.ndarray. """ @property def chunks(self) -> _Chunks: ... @runtime_checkable class _chunkedarrayapi( _arrayapi[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Chunked duck array supporting NEP 47. Corresponds to np.ndarray. """ @property def chunks(self) -> _Chunks: ... # NamedArray can most likely use both __array_function__ and __array_namespace__: _chunkedarrayfunction_or_api = (_chunkedarrayfunction, _chunkedarrayapi) chunkedduckarray = Union[ _chunkedarrayfunction[_ShapeType_co, _DType_co], _chunkedarrayapi[_ShapeType_co, _DType_co], ] @runtime_checkable class _sparsearray( _array[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Minimal sparse duck array. Corresponds to np.ndarray. """ def todense(self) -> np.ndarray[Any, _DType_co]: ... @runtime_checkable class _sparsearrayfunction( _arrayfunction[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Sparse duck array supporting NEP 18. Corresponds to np.ndarray. """ def todense(self) -> np.ndarray[Any, _DType_co]: ... @runtime_checkable class _sparsearrayapi( _arrayapi[_ShapeType_co, _DType_co], Protocol[_ShapeType_co, _DType_co] ): """ Sparse duck array supporting NEP 47. Corresponds to np.ndarray. """ def todense(self) -> np.ndarray[Any, _DType_co]: ... 
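# A minimal sketch of consuming code that relies only on the sparse protocols
# defined above; the pydata/sparse ``COO`` class mentioned in the comment is an
# assumption for illustration, not a dependency of this module::
#
#     >>> def densify_if_sparse(x):
#     ...     # Structural check: any object exposing shape/dtype/todense passes.
#     ...     if isinstance(x, (_sparsearrayfunction, _sparsearrayapi)):
#     ...         return x.todense()
#     ...     return x
#     >>> densify_if_sparse(np.eye(2))  # a plain ndarray has no todense -> returned unchanged
#     array([[1., 0.],
#            [0., 1.]])
#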
# NamedArray can most likely use both __array_function__ and __array_namespace__: _sparsearrayfunction_or_api = (_sparsearrayfunction, _sparsearrayapi) sparseduckarray = Union[ _sparsearrayfunction[_ShapeType_co, _DType_co], _sparsearrayapi[_ShapeType_co, _DType_co], ] ErrorOptions = Literal["raise", "ignore"] ErrorOptionsWithWarn = Literal["raise", "warn", "ignore"] xarray-2025.09.0/xarray/namedarray/core.py000066400000000000000000001161251505620616400203050ustar00rootroot00000000000000from __future__ import annotations import copy import math import warnings from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from itertools import starmap from types import EllipsisType from typing import ( TYPE_CHECKING, Any, Generic, Literal, TypeVar, cast, overload, ) import numpy as np # TODO: get rid of this after migrating this class to array API from xarray.core import dtypes, formatting, formatting_html from xarray.core.indexing import ( ExplicitlyIndexed, ImplicitToExplicitIndexingAdapter, OuterIndexer, ) from xarray.namedarray._aggregations import NamedArrayAggregations from xarray.namedarray._typing import ( ErrorOptionsWithWarn, _arrayapi, _arrayfunction_or_api, _chunkedarray, _default, _dtype, _DType_co, _ScalarType_co, _ShapeType_co, _sparsearrayfunction_or_api, _SupportsImag, _SupportsReal, ) from xarray.namedarray.parallelcompat import guess_chunkmanager from xarray.namedarray.pycompat import to_numpy from xarray.namedarray.utils import ( either_dict_or_kwargs, infix_dims, is_dict_like, is_duck_dask_array, to_0d_object_array, ) if TYPE_CHECKING: from numpy.typing import ArrayLike, NDArray from xarray.core.types import Dims, T_Chunks from xarray.namedarray._typing import ( Default, _AttrsLike, _Chunks, _Dim, _Dims, _DimsLike, _DType, _IntOrUnknown, _ScalarType, _Shape, _ShapeType, duckarray, ) from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint try: from dask.typing import ( Graph, NestedKeys, PostComputeCallable, PostPersistCallable, SchedulerGetCallable, ) except ImportError: Graph: Any # type: ignore[no-redef] NestedKeys: Any # type: ignore[no-redef] SchedulerGetCallable: Any # type: ignore[no-redef] PostComputeCallable: Any # type: ignore[no-redef] PostPersistCallable: Any # type: ignore[no-redef] from typing import Self T_NamedArray = TypeVar("T_NamedArray", bound="_NamedArray[Any]") T_NamedArrayInteger = TypeVar( "T_NamedArrayInteger", bound="_NamedArray[np.integer[Any]]" ) @overload def _new( x: NamedArray[Any, _DType_co], dims: _DimsLike | Default = ..., data: duckarray[_ShapeType, _DType] = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType, _DType]: ... @overload def _new( x: NamedArray[_ShapeType_co, _DType_co], dims: _DimsLike | Default = ..., data: Default = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType_co, _DType_co]: ... def _new( x: NamedArray[Any, _DType_co], dims: _DimsLike | Default = _default, data: duckarray[_ShapeType, _DType] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> NamedArray[_ShapeType, _DType] | NamedArray[Any, _DType_co]: """ Create a new array with new typing information. Parameters ---------- x : NamedArray Array to create a new array from dims : Iterable of Hashable, optional Name(s) of the dimension(s). Will copy the dims from x by default. data : duckarray, optional The actual data that populates the array. Should match the shape specified by `dims`. Will copy the data from x by default. 
attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Will copy the attrs from x by default. """ dims_ = copy.copy(x._dims) if dims is _default else dims attrs_: Mapping[Any, Any] | None if attrs is _default: attrs_ = None if x._attrs is None else x._attrs.copy() else: attrs_ = attrs if data is _default: return type(x)(dims_, copy.copy(x._data), attrs_) else: cls_ = cast("type[NamedArray[_ShapeType, _DType]]", type(x)) return cls_(dims_, data, attrs_) @overload def from_array( dims: _DimsLike, data: duckarray[_ShapeType, _DType], attrs: _AttrsLike = ..., ) -> NamedArray[_ShapeType, _DType]: ... @overload def from_array( dims: _DimsLike, data: ArrayLike, attrs: _AttrsLike = ..., ) -> NamedArray[Any, Any]: ... def from_array( dims: _DimsLike, data: duckarray[_ShapeType, _DType] | ArrayLike, attrs: _AttrsLike = None, ) -> NamedArray[_ShapeType, _DType] | NamedArray[Any, Any]: """ Create a Named array from an array-like object. Parameters ---------- dims : str or iterable of str Name(s) of the dimension(s). data : T_DuckArray or ArrayLike The actual data that populates the array. Should match the shape specified by `dims`. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Default is None, meaning no attributes will be stored. """ if isinstance(data, NamedArray): raise TypeError( "Array is already a Named array. Use 'data.data' to retrieve the data array" ) # TODO: dask.array.ma.MaskedArray also exists, better way? if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) # type: ignore[no-untyped-call] if mask.any(): # TODO: requires refactoring/vendoring xarray.core.dtypes and # xarray.core.duck_array_ops raise NotImplementedError("MaskedArray is not supported yet") return NamedArray(dims, data, attrs) if isinstance(data, _arrayfunction_or_api) and not isinstance(data, np.generic): return NamedArray(dims, data, attrs) if isinstance(data, tuple): return NamedArray(dims, to_0d_object_array(data), attrs) # validate whether the data is valid data types. return NamedArray(dims, np.asarray(data), attrs) class NamedArray(NamedArrayAggregations, Generic[_ShapeType_co, _DType_co]): """ A wrapper around duck arrays with named dimensions and attributes which describe a single Array. Numeric operations on this object implement array broadcasting and dimension alignment based on dimension names, rather than axis order. Parameters ---------- dims : str or iterable of hashable Name(s) of the dimension(s). data : array-like or duck-array The actual data that populates the array. Should match the shape specified by `dims`. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Default is None, meaning no attributes will be stored. Raises ------ ValueError If the `dims` length does not match the number of data dimensions (ndim). Examples -------- >>> data = np.array([1.5, 2, 3], dtype=float) >>> narr = NamedArray(("x",), data, {"units": "m"}) # TODO: Better name than narr? 
""" __slots__ = ("_attrs", "_data", "_dims") _data: duckarray[Any, _DType_co] _dims: _Dims _attrs: dict[Any, Any] | None def __init__( self, dims: _DimsLike, data: duckarray[Any, _DType_co], attrs: _AttrsLike = None, ): self._data = data self._dims = self._parse_dimensions(dims) self._attrs = dict(attrs) if attrs else None def __init_subclass__(cls, **kwargs: Any) -> None: if NamedArray in cls.__bases__ and (cls._new == NamedArray._new): # Type hinting does not work for subclasses unless _new is # overridden with the correct class. raise TypeError( "Subclasses of `NamedArray` must override the `_new` method." ) super().__init_subclass__(**kwargs) @overload def _new( self, dims: _DimsLike | Default = ..., data: duckarray[_ShapeType, _DType] = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType, _DType]: ... @overload def _new( self, dims: _DimsLike | Default = ..., data: Default = ..., attrs: _AttrsLike | Default = ..., ) -> NamedArray[_ShapeType_co, _DType_co]: ... def _new( self, dims: _DimsLike | Default = _default, data: duckarray[Any, _DType] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> NamedArray[_ShapeType, _DType] | NamedArray[_ShapeType_co, _DType_co]: """ Create a new array with new typing information. _new has to be reimplemented each time NamedArray is subclassed, otherwise type hints will not be correct. The same is likely true for methods that relied on _new. Parameters ---------- dims : Iterable of Hashable, optional Name(s) of the dimension(s). Will copy the dims from x by default. data : duckarray, optional The actual data that populates the array. Should match the shape specified by `dims`. Will copy the data from x by default. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Will copy the attrs from x by default. """ return _new(self, dims, data, attrs) def _replace( self, dims: _DimsLike | Default = _default, data: duckarray[_ShapeType_co, _DType_co] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> Self: """ Create a new array with the same typing information. The types for each argument cannot change, use self._new if that is a risk. Parameters ---------- dims : Iterable of Hashable, optional Name(s) of the dimension(s). Will copy the dims from x by default. data : duckarray, optional The actual data that populates the array. Should match the shape specified by `dims`. Will copy the data from x by default. attrs : dict, optional A dictionary containing any additional information or attributes you want to store with the array. Will copy the attrs from x by default. """ return cast("Self", self._new(dims, data, attrs)) def _copy( self, deep: bool = True, data: duckarray[_ShapeType_co, _DType_co] | None = None, memo: dict[int, Any] | None = None, ) -> Self: if data is None: ndata = self._data if deep: ndata = copy.deepcopy(ndata, memo=memo) else: ndata = data self._check_shape(ndata) attrs = ( copy.deepcopy(self._attrs, memo=memo) if deep else copy.copy(self._attrs) ) return self._replace(data=ndata, attrs=attrs) def __copy__(self) -> Self: return self._copy(deep=False) def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: return self._copy(deep=True, memo=memo) def copy( self, deep: bool = True, data: duckarray[_ShapeType_co, _DType_co] | None = None, ) -> Self: """Returns a copy of this object. If `deep=True`, the data array is loaded into memory and copied onto the new object. 
Dimensions, attributes and encodings are always copied. Use `data` to create a new object with the same structure as original but entirely new data. Parameters ---------- deep : bool, default: True Whether the data array is loaded into memory and copied onto the new object. Default is True. data : array_like, optional Data to use in the new object. Must have same shape as original. When `data` is used, `deep` is ignored. Returns ------- object : NamedArray New object with dimensions, attributes, and optionally data copied from original. """ return self._copy(deep=deep, data=data) @property def ndim(self) -> int: """ Number of array dimensions. See Also -------- numpy.ndarray.ndim """ return len(self.shape) @property def size(self) -> _IntOrUnknown: """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the array’s dimensions. See Also -------- numpy.ndarray.size """ return math.prod(self.shape) def __len__(self) -> _IntOrUnknown: try: return self.shape[0] except Exception as exc: raise TypeError("len() of unsized object") from exc @property def dtype(self) -> _DType_co: """ Data-type of the array’s elements. See Also -------- ndarray.dtype numpy.dtype """ return self._data.dtype @property def shape(self) -> _Shape: """ Get the shape of the array. Returns ------- shape : tuple of ints Tuple of array dimensions. See Also -------- numpy.ndarray.shape """ return self._data.shape @property def nbytes(self) -> _IntOrUnknown: """ Total bytes consumed by the elements of the data array. If the underlying data array does not include ``nbytes``, estimates the bytes consumed based on the ``size`` and ``dtype``. """ from xarray.namedarray._array_api import _get_data_namespace if hasattr(self._data, "nbytes"): return self._data.nbytes # type: ignore[no-any-return] if hasattr(self.dtype, "itemsize"): itemsize = self.dtype.itemsize elif isinstance(self._data, _arrayapi): xp = _get_data_namespace(self) if xp.isdtype(self.dtype, "bool"): itemsize = 1 elif xp.isdtype(self.dtype, "integral"): itemsize = xp.iinfo(self.dtype).bits // 8 else: itemsize = xp.finfo(self.dtype).bits // 8 else: raise TypeError( "cannot compute the number of bytes (no array API nor nbytes / itemsize)" ) return self.size * itemsize @property def dims(self) -> _Dims: """Tuple of dimension names with which this NamedArray is associated.""" return self._dims @dims.setter def dims(self, value: _DimsLike) -> None: self._dims = self._parse_dimensions(value) def _parse_dimensions(self, dims: _DimsLike) -> _Dims: dims = (dims,) if isinstance(dims, str) else tuple(dims) if len(dims) != self.ndim: raise ValueError( f"dimensions {dims} must have the same length as the " f"number of data dimensions, ndim={self.ndim}" ) if len(set(dims)) < len(dims): repeated_dims = {d for d in dims if dims.count(d) > 1} warnings.warn( f"Duplicate dimension names present: dimensions {repeated_dims} appear more than once in dims={dims}. " "We do not yet support duplicate dimension names, but we do allow initial construction of the object. " "We recommend you rename the dims immediately to become distinct, as most xarray functionality is likely to fail silently if you do not. " "To rename the dimensions you will need to set the ``.dims`` attribute of each variable, ``e.g. 
var.dims=('x0', 'x1')``.", UserWarning, stacklevel=2, ) return dims @property def attrs(self) -> dict[Any, Any]: """Dictionary of local attributes on this NamedArray.""" if self._attrs is None: self._attrs = {} return self._attrs @attrs.setter def attrs(self, value: Mapping[Any, Any]) -> None: self._attrs = dict(value) if value else None def _check_shape(self, new_data: duckarray[Any, _DType_co]) -> None: if new_data.shape != self.shape: raise ValueError( f"replacement data must match the {self.__class__.__name__}'s shape. " f"replacement data has shape {new_data.shape}; {self.__class__.__name__} has shape {self.shape}" ) @property def data(self) -> duckarray[Any, _DType_co]: """ The NamedArray's data as an array. The underlying array type (e.g. dask, sparse, pint) is preserved. """ return self._data @data.setter def data(self, data: duckarray[Any, _DType_co]) -> None: self._check_shape(data) self._data = data @property def imag( self: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] ) -> NamedArray[_ShapeType, _dtype[_ScalarType]]: """ The imaginary part of the array. See Also -------- numpy.ndarray.imag """ if isinstance(self._data, _arrayapi): from xarray.namedarray._array_api import imag return imag(self) return self._new(data=self._data.imag) @property def real( self: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] ) -> NamedArray[_ShapeType, _dtype[_ScalarType]]: """ The real part of the array. See Also -------- numpy.ndarray.real """ if isinstance(self._data, _arrayapi): from xarray.namedarray._array_api import real return real(self) return self._new(data=self._data.real) def __dask_tokenize__(self) -> object: # Use v.data, instead of v._data, in order to cope with the wrappers # around NetCDF and the like from dask.base import normalize_token return normalize_token((type(self), self._dims, self.data, self._attrs or None)) def __dask_graph__(self) -> Graph | None: if is_duck_dask_array(self._data): return self._data.__dask_graph__() else: # TODO: Should this method just raise instead? 
# raise NotImplementedError("Method requires self.data to be a dask array") return None def __dask_keys__(self) -> NestedKeys: if is_duck_dask_array(self._data): return self._data.__dask_keys__() else: raise AttributeError("Method requires self.data to be a dask array.") def __dask_layers__(self) -> Sequence[str]: if is_duck_dask_array(self._data): return self._data.__dask_layers__() else: raise AttributeError("Method requires self.data to be a dask array.") @property def __dask_optimize__( self, ) -> Callable[..., dict[Any, Any]]: if is_duck_dask_array(self._data): return self._data.__dask_optimize__ # type: ignore[no-any-return] else: raise AttributeError("Method requires self.data to be a dask array.") @property def __dask_scheduler__(self) -> SchedulerGetCallable: if is_duck_dask_array(self._data): return self._data.__dask_scheduler__ else: raise AttributeError("Method requires self.data to be a dask array.") def __dask_postcompute__( self, ) -> tuple[PostComputeCallable, tuple[Any, ...]]: if is_duck_dask_array(self._data): array_func, array_args = self._data.__dask_postcompute__() # type: ignore[no-untyped-call] return self._dask_finalize, (array_func,) + array_args else: raise AttributeError("Method requires self.data to be a dask array.") def __dask_postpersist__( self, ) -> tuple[ Callable[ [Graph, PostPersistCallable[Any], Any, Any], Self, ], tuple[Any, ...], ]: if is_duck_dask_array(self._data): a: tuple[PostPersistCallable[Any], tuple[Any, ...]] a = self._data.__dask_postpersist__() # type: ignore[no-untyped-call] array_func, array_args = a return self._dask_finalize, (array_func,) + array_args else: raise AttributeError("Method requires self.data to be a dask array.") def _dask_finalize( self, results: Graph, array_func: PostPersistCallable[Any], *args: Any, **kwargs: Any, ) -> Self: data = array_func(results, *args, **kwargs) return type(self)(self._dims, data, attrs=self._attrs) @overload def get_axis_num(self, dim: str) -> int: ... # type: ignore [overload-overlap] @overload def get_axis_num(self, dim: Iterable[Hashable]) -> tuple[int, ...]: ... @overload def get_axis_num(self, dim: Hashable) -> int: ... def get_axis_num(self, dim: Hashable | Iterable[Hashable]) -> int | tuple[int, ...]: """Return axis number(s) corresponding to dimension(s) in this array. Parameters ---------- dim : str or iterable of str Dimension name(s) for which to lookup axes. Returns ------- int or tuple of int Axis number or numbers corresponding to the given dimensions. """ if not isinstance(dim, str) and isinstance(dim, Iterable): return tuple(self._get_axis_num(d) for d in dim) else: return self._get_axis_num(dim) def _get_axis_num(self: Any, dim: Hashable) -> int: _raise_if_any_duplicate_dimensions(self.dims) try: return self.dims.index(dim) # type: ignore[no-any-return] except ValueError as err: raise ValueError( f"{dim!r} not found in array dimensions {self.dims!r}" ) from err @property def chunks(self) -> _Chunks | None: """ Tuple of block lengths for this NamedArray's data, in order of dimensions, or None if the underlying data is not a dask array. See Also -------- NamedArray.chunk NamedArray.chunksizes xarray.unify_chunks """ data = self._data if isinstance(data, _chunkedarray): return data.chunks else: return None @property def chunksizes( self, ) -> Mapping[_Dim, _Shape]: """ Mapping from dimension names to block lengths for this NamedArray's data. If this NamedArray does not contain chunked arrays, the mapping will be empty. 
Cannot be modified directly, but can be modified by calling .chunk(). Differs from NamedArray.chunks because it returns a mapping of dimensions to chunk shapes instead of a tuple of chunk shapes. See Also -------- NamedArray.chunk NamedArray.chunks xarray.unify_chunks """ data = self._data if isinstance(data, _chunkedarray): return dict(zip(self.dims, data.chunks, strict=True)) else: return {} @property def sizes(self) -> dict[_Dim, _IntOrUnknown]: """Ordered mapping from dimension names to lengths.""" return dict(zip(self.dims, self.shape, strict=True)) def chunk( self, chunks: T_Chunks = {}, # noqa: B006 # even though it's unsafe, it is being used intentionally here (#4667) chunked_array_type: str | ChunkManagerEntrypoint[Any] | None = None, from_array_kwargs: Any = None, **chunks_kwargs: Any, ) -> Self: """Coerce this array's data into a dask array with the given chunks. If this variable is a non-dask array, it will be converted to dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If neither chunks is not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. chunked_array_type: str, optional Which chunked array type to coerce this datasets' arrays to. Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system. Experimental API that should not be relied upon. from_array_kwargs: dict, optional Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg. For example, with dask as the default chunked array type, this method would pass additional kwargs to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon. **chunks_kwargs : {dim: chunks, ...}, optional The keyword arguments form of ``chunks``. One of chunks or chunks_kwargs must be provided. Returns ------- chunked : xarray.Variable See Also -------- Variable.chunks Variable.chunksizes xarray.unify_chunks dask.array.from_array """ if from_array_kwargs is None: from_array_kwargs = {} if chunks is None: warnings.warn( "None value for 'chunks' is deprecated. " "It will raise an error in the future. Use instead '{}'", category=FutureWarning, stacklevel=2, ) chunks = {} if isinstance(chunks, float | str | int | tuple | list): # TODO we shouldn't assume here that other chunkmanagers can handle these types # TODO should we call normalize_chunks here? pass # dask.array.from_array can handle these directly else: chunks = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") if is_dict_like(chunks): # This method of iteration allows for duplicated dimension names, GH8579 chunks = { dim_number: chunks[dim] for dim_number, dim in enumerate(self.dims) if dim in chunks } chunkmanager = guess_chunkmanager(chunked_array_type) data_old = self._data if chunkmanager.is_chunked_array(data_old): data_chunked = chunkmanager.rechunk(data_old, chunks) # type: ignore[arg-type] else: ndata: duckarray[Any, Any] if not isinstance(data_old, ExplicitlyIndexed): ndata = data_old else: # Unambiguously handle array storage backends (like NetCDF4 and h5py) # that can't handle general array indexing. 
For example, in netCDF4 you # can do "outer" indexing along two dimensions independent, which works # differently from how NumPy handles it. # da.from_array works by using lazy indexing with a tuple of slices. # Using OuterIndexer is a pragmatic choice: dask does not yet handle # different indexing types in an explicit way: # https://github.com/dask/dask/issues/2883 ndata = ImplicitToExplicitIndexingAdapter(data_old, OuterIndexer) # type: ignore[assignment] if is_dict_like(chunks): chunks = tuple(starmap(chunks.get, enumerate(ndata.shape))) data_chunked = chunkmanager.from_array(ndata, chunks, **from_array_kwargs) # type: ignore[arg-type] return self._replace(data=data_chunked) def to_numpy(self) -> np.ndarray[Any, Any]: """Coerces wrapped data to numpy and returns a numpy.ndarray""" # TODO an entrypoint so array libraries can choose coercion method? return to_numpy(self._data) def as_numpy(self) -> Self: """Coerces wrapped data into a numpy array, returning a Variable.""" return self._replace(data=self.to_numpy()) def reduce( self, func: Callable[..., Any], dim: Dims = None, axis: int | Sequence[int] | None = None, keepdims: bool = False, **kwargs: Any, ) -> NamedArray[Any, Any]: """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. By default `func` is applied over all dimensions. axis : int or Sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. 
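Examples
--------
A minimal, illustrative sketch (``np.sum`` stands in here for any
``func(x, axis=axis, **kwargs)``-style reduction):

>>> data = np.arange(6).reshape(2, 3)
>>> array = xr.NamedArray(("x", "y"), data)
>>> array.reduce(np.sum, dim="y").dims
('x',)
>>> array.reduce(np.sum, dim="y").to_numpy()
array([ 3, 12])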
""" if dim == ...: dim = None if dim is not None and axis is not None: raise ValueError("cannot supply both 'axis' and 'dim' arguments") if dim is not None: axis = self.get_axis_num(dim) with warnings.catch_warnings(): warnings.filterwarnings( "ignore", r"Mean of empty slice", category=RuntimeWarning ) if axis is not None: if isinstance(axis, tuple) and len(axis) == 1: # unpack axis for the benefit of functions # like np.argmin which can't handle tuple arguments axis = axis[0] data = func(self.data, axis=axis, **kwargs) else: data = func(self.data, **kwargs) if getattr(data, "shape", ()) == self.shape: dims = self.dims else: removed_axes: Iterable[int] if axis is None: removed_axes = range(self.ndim) else: removed_axes = np.atleast_1d(axis) % self.ndim if keepdims: # Insert np.newaxis for removed dims slices = tuple( np.newaxis if i in removed_axes else slice(None, None) for i in range(self.ndim) ) if getattr(data, "shape", None) is None: # Reduce has produced a scalar value, not an array-like data = np.asanyarray(data)[slices] else: data = data[slices] dims = self.dims else: dims = tuple( adim for n, adim in enumerate(self.dims) if n not in removed_axes ) # Return NamedArray to handle IndexVariable when data is nD return from_array(dims, data, attrs=self._attrs) def _nonzero(self: T_NamedArrayInteger) -> tuple[T_NamedArrayInteger, ...]: """Equivalent numpy's nonzero but returns a tuple of NamedArrays.""" # TODO: we should replace dask's native nonzero # after https://github.com/dask/dask/issues/1076 is implemented. # TODO: cast to ndarray and back to T_DuckArray is a workaround nonzeros = np.nonzero(cast("NDArray[np.integer[Any]]", self.data)) _attrs = self.attrs return tuple( cast("T_NamedArrayInteger", self._new((dim,), nz, _attrs)) for nz, dim in zip(nonzeros, self.dims, strict=True) ) def __repr__(self) -> str: return formatting.array_repr(self) def _repr_html_(self) -> str: return formatting_html.array_repr(self) def _as_sparse( self, sparse_format: Literal["coo"] | Default = _default, fill_value: ArrayLike | Default = _default, ) -> NamedArray[Any, _DType_co]: """ Use sparse-array as backend. """ import sparse from xarray.namedarray._array_api import astype # TODO: what to do if dask-backended? if fill_value is _default: dtype, fill_value = dtypes.maybe_promote(self.dtype) else: dtype = dtypes.result_type(self.dtype, fill_value) if sparse_format is _default: sparse_format = "coo" try: as_sparse = getattr(sparse, f"as_{sparse_format.lower()}") except AttributeError as exc: raise ValueError(f"{sparse_format} is not a valid sparse format") from exc data = as_sparse(astype(self, dtype).data, fill_value=fill_value) return self._new(data=data) def _to_dense(self) -> NamedArray[Any, _DType_co]: """ Change backend from sparse to np.array. """ if isinstance(self._data, _sparsearrayfunction_or_api): data_dense: np.ndarray[Any, _DType_co] = self._data.todense() return self._new(data=data_dense) else: raise TypeError("self.data is not a sparse array") def permute_dims( self, *dim: Iterable[_Dim] | EllipsisType, missing_dims: ErrorOptionsWithWarn = "raise", ) -> NamedArray[Any, _DType_co]: """Return a new object with transposed dimensions. Parameters ---------- *dim : Hashable, optional By default, reverse the order of the dimensions. Otherwise, reorder the dimensions to this order. 
missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the NamedArray: - "raise": raise an exception - "warn": raise a warning, and ignore the missing dimensions - "ignore": ignore the missing dimensions Returns ------- NamedArray The returned NamedArray has permuted dimensions and data with the same attributes as the original. See Also -------- numpy.transpose """ from xarray.namedarray._array_api import permute_dims if not dim: dims = self.dims[::-1] else: dims = tuple(infix_dims(dim, self.dims, missing_dims)) # type: ignore[arg-type] if len(dims) < 2 or dims == self.dims: # no need to transpose if only one dimension # or dims are in same order return self.copy(deep=False) axes = self.get_axis_num(dims) assert isinstance(axes, tuple) return permute_dims(self, axes) @property def T(self) -> NamedArray[Any, _DType_co]: """Return a new object with transposed dimensions.""" if self.ndim != 2: raise ValueError( f"x.T requires x to have 2 dimensions, got {self.ndim}. Use x.permute_dims() to permute dimensions." ) return self.permute_dims() def broadcast_to( self, dim: Mapping[_Dim, int] | None = None, **dim_kwargs: Any ) -> NamedArray[Any, _DType_co]: """ Broadcast the NamedArray to a new shape. New dimensions are not allowed. This method allows for the expansion of the array's dimensions to a specified shape. It handles both positional and keyword arguments for specifying the dimensions to broadcast. An error is raised if new dimensions are attempted to be added. Parameters ---------- dim : dict, str, sequence of str, optional Dimensions to broadcast the array to. If a dict, keys are dimension names and values are the new sizes. If a string or sequence of strings, existing dimensions are matched with a size of 1. **dim_kwargs : Any Additional dimensions specified as keyword arguments. Each keyword argument specifies the name of an existing dimension and its size. Returns ------- NamedArray A new NamedArray with the broadcasted dimensions. Examples -------- >>> data = np.asarray([[1.0, 2.0], [3.0, 4.0]]) >>> array = xr.NamedArray(("x", "y"), data) >>> array.sizes {'x': 2, 'y': 2} >>> broadcasted = array.broadcast_to(x=2, y=2) >>> broadcasted.sizes {'x': 2, 'y': 2} """ from xarray.core import duck_array_ops combined_dims = either_dict_or_kwargs(dim, dim_kwargs, "broadcast_to") # Check that no new dimensions are added if new_dims := set(combined_dims) - set(self.dims): raise ValueError( f"Cannot add new dimensions: {new_dims}. Only existing dimensions are allowed. " "Use `expand_dims` method to add new dimensions." ) # Create a dictionary of the current dimensions and their sizes current_shape = self.sizes # Update the current shape with the new dimensions, keeping the order of the original dimensions broadcast_shape = {d: current_shape.get(d, 1) for d in self.dims} broadcast_shape |= combined_dims # Ensure the dimensions are in the correct order ordered_dims = list(broadcast_shape.keys()) ordered_shape = tuple(broadcast_shape[d] for d in ordered_dims) data = duck_array_ops.broadcast_to(self._data, ordered_shape) # type: ignore[no-untyped-call] # TODO: use array-api-compat function return self._new(data=data, dims=ordered_dims) def expand_dims( self, dim: _Dim | Default = _default, ) -> NamedArray[Any, _DType_co]: """ Expand the dimensions of the NamedArray. This method adds new dimensions to the object. The new dimensions are added at the beginning of the array. 
Parameters ---------- dim : Hashable, optional Dimension name to expand the array to. This dimension will be added at the beginning of the array. Returns ------- NamedArray A new NamedArray with expanded dimensions. Examples -------- >>> data = np.asarray([[1.0, 2.0], [3.0, 4.0]]) >>> array = xr.NamedArray(("x", "y"), data) # expand dimensions by specifying a new dimension name >>> expanded = array.expand_dims(dim="z") >>> expanded.dims ('z', 'x', 'y') """ from xarray.namedarray._array_api import expand_dims return expand_dims(self, dim=dim) _NamedArray = NamedArray[Any, np.dtype[_ScalarType_co]] def _raise_if_any_duplicate_dimensions( dims: _Dims, err_context: str = "This function" ) -> None: if len(set(dims)) < len(dims): repeated_dims = {d for d in dims if dims.count(d) > 1} raise ValueError( f"{err_context} cannot handle duplicate dimensions, but dimensions {repeated_dims} appear more than once on this object's dims: {dims}" ) xarray-2025.09.0/xarray/namedarray/daskmanager.py000066400000000000000000000174651505620616400216410ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Iterable, Sequence from typing import TYPE_CHECKING, Any import numpy as np from xarray.core.indexing import ImplicitToExplicitIndexingAdapter from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint, T_ChunkedArray from xarray.namedarray.utils import is_duck_dask_array, module_available if TYPE_CHECKING: from xarray.namedarray._typing import ( T_Chunks, _DType_co, _NormalizedChunks, duckarray, ) try: from dask.array import Array as DaskArray except ImportError: DaskArray = np.ndarray[Any, Any] dask_available = module_available("dask") class DaskManager(ChunkManagerEntrypoint["DaskArray"]): array_cls: type[DaskArray] available: bool = dask_available def __init__(self) -> None: # TODO can we replace this with a class attribute instead? from dask.array import Array self.array_cls = Array def is_chunked_array(self, data: duckarray[Any, Any]) -> bool: return is_duck_dask_array(data) def chunks(self, data: Any) -> _NormalizedChunks: return data.chunks # type: ignore[no-any-return] def normalize_chunks( self, chunks: T_Chunks | _NormalizedChunks, shape: tuple[int, ...] | None = None, limit: int | None = None, dtype: _DType_co | None = None, previous_chunks: _NormalizedChunks | None = None, ) -> Any: """Called by open_dataset""" from dask.array.core import normalize_chunks return normalize_chunks( chunks, shape=shape, limit=limit, dtype=dtype, previous_chunks=previous_chunks, ) # type: ignore[no-untyped-call] def from_array( self, data: Any, chunks: T_Chunks | _NormalizedChunks, **kwargs: Any ) -> DaskArray | Any: import dask.array as da if isinstance(data, ImplicitToExplicitIndexingAdapter): # lazily loaded backend array classes should use NumPy array operations. 
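# Passing ``meta=np.ndarray`` tells dask to treat each block as a plain
# NumPy array and (presumably) avoids dask slicing the wrapped lazy
# backend array just to infer the block type.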
kwargs["meta"] = np.ndarray return da.from_array( data, chunks, **kwargs, ) # type: ignore[no-untyped-call] def compute( self, *data: Any, **kwargs: Any ) -> tuple[np.ndarray[Any, _DType_co], ...]: from dask.array import compute return compute(*data, **kwargs) # type: ignore[no-untyped-call, no-any-return] def persist(self, *data: Any, **kwargs: Any) -> tuple[DaskArray | Any, ...]: from dask import persist return persist(*data, **kwargs) # type: ignore[no-untyped-call, no-any-return] @property def array_api(self) -> Any: from dask import array as da return da def reduction( self, arr: T_ChunkedArray, func: Callable[..., Any], combine_func: Callable[..., Any] | None = None, aggregate_func: Callable[..., Any] | None = None, axis: int | Sequence[int] | None = None, dtype: _DType_co | None = None, keepdims: bool = False, ) -> DaskArray | Any: from dask.array import reduction return reduction( arr, chunk=func, combine=combine_func, aggregate=aggregate_func, axis=axis, dtype=dtype, keepdims=keepdims, ) # type: ignore[no-untyped-call] def scan( self, func: Callable[..., Any], binop: Callable[..., Any], ident: float, arr: T_ChunkedArray, axis: int | None = None, dtype: _DType_co | None = None, **kwargs: Any, ) -> DaskArray | Any: from dask.array.reductions import cumreduction return cumreduction( func, binop, ident, arr, axis=axis, dtype=dtype, **kwargs, ) # type: ignore[no-untyped-call] def apply_gufunc( self, func: Callable[..., Any], signature: str, *args: Any, axes: Sequence[tuple[int, ...]] | None = None, axis: int | None = None, keepdims: bool = False, output_dtypes: Sequence[_DType_co] | None = None, output_sizes: dict[str, int] | None = None, vectorize: bool | None = None, allow_rechunk: bool = False, meta: tuple[np.ndarray[Any, _DType_co], ...] | None = None, **kwargs: Any, ) -> Any: from dask.array.gufunc import apply_gufunc return apply_gufunc( func, signature, *args, axes=axes, axis=axis, keepdims=keepdims, output_dtypes=output_dtypes, output_sizes=output_sizes, vectorize=vectorize, allow_rechunk=allow_rechunk, meta=meta, **kwargs, ) # type: ignore[no-untyped-call] def map_blocks( self, func: Callable[..., Any], *args: Any, dtype: _DType_co | None = None, chunks: tuple[int, ...] | None = None, drop_axis: int | Sequence[int] | None = None, new_axis: int | Sequence[int] | None = None, **kwargs: Any, ) -> Any: from dask.array import map_blocks # pass through name, meta, token as kwargs return map_blocks( func, *args, dtype=dtype, chunks=chunks, drop_axis=drop_axis, new_axis=new_axis, **kwargs, ) # type: ignore[no-untyped-call] def blockwise( self, func: Callable[..., Any], out_ind: Iterable[Any], *args: Any, # can't type this as mypy assumes args are all same type, but dask blockwise args alternate types name: str | None = None, token: Any | None = None, dtype: _DType_co | None = None, adjust_chunks: dict[Any, Callable[..., Any]] | None = None, new_axes: dict[Any, int] | None = None, align_arrays: bool = True, concatenate: bool | None = None, meta: tuple[np.ndarray[Any, _DType_co], ...] 
| None = None, **kwargs: Any, ) -> DaskArray | Any: from dask.array import blockwise return blockwise( func, out_ind, *args, name=name, token=token, dtype=dtype, adjust_chunks=adjust_chunks, new_axes=new_axes, align_arrays=align_arrays, concatenate=concatenate, meta=meta, **kwargs, ) # type: ignore[no-untyped-call] def unify_chunks( self, *args: Any, # can't type this as mypy assumes args are all same type, but dask unify_chunks args alternate types **kwargs: Any, ) -> tuple[dict[str, _NormalizedChunks], list[DaskArray]]: from dask.array.core import unify_chunks return unify_chunks(*args, **kwargs) # type: ignore[no-any-return, no-untyped-call] def store( self, sources: Any | Sequence[Any], targets: Any, **kwargs: Any, ) -> Any: from dask.array import store return store( sources=sources, targets=targets, **kwargs, ) def shuffle( self, x: DaskArray, indexer: list[list[int]], axis: int, chunks: T_Chunks ) -> DaskArray: import dask.array if not module_available("dask", minversion="2024.08.1"): raise ValueError( "This method is very inefficient on dask<2024.08.1. Please upgrade." ) if chunks is None: chunks = "auto" if chunks != "auto": raise NotImplementedError("Only chunks='auto' is supported at present.") return dask.array.shuffle(x, indexer, axis, chunks="auto") xarray-2025.09.0/xarray/namedarray/dtypes.py000066400000000000000000000127111505620616400206610ustar00rootroot00000000000000from __future__ import annotations import functools from typing import Any, Literal, TypeGuard import numpy as np from xarray.namedarray import utils # Use as a sentinel value to indicate a dtype appropriate NA value. NA = utils.ReprObject("") @functools.total_ordering class AlwaysGreaterThan: def __gt__(self, other: object) -> Literal[True]: return True def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) @functools.total_ordering class AlwaysLessThan: def __lt__(self, other: object) -> Literal[True]: return True def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) # Equivalence to np.inf (-np.inf) for object-type INF = AlwaysGreaterThan() NINF = AlwaysLessThan() # Pairs of types that, if both found, should be promoted to object dtype # instead of following NumPy's own type-promotion rules. These type promotion # rules match pandas instead. For reference, see the NumPy type hierarchy: # https://numpy.org/doc/stable/reference/arrays.scalars.html PROMOTE_TO_OBJECT: tuple[tuple[type[np.generic], type[np.generic]], ...] = ( (np.number, np.character), # numpy promotes to character (np.bool_, np.character), # numpy promotes to character (np.bytes_, np.str_), # numpy promotes to unicode ) def maybe_promote(dtype: np.dtype[np.generic]) -> tuple[np.dtype[np.generic], Any]: """Simpler equivalent of pandas.core.common._maybe_promote Parameters ---------- dtype : np.dtype Returns ------- dtype : Promoted dtype that can hold missing values. fill_value : Valid missing value for the promoted dtype. """ # N.B. 
these casting rules should match pandas dtype_: np.typing.DTypeLike fill_value: Any if np.issubdtype(dtype, np.floating): dtype_ = dtype fill_value = np.nan elif np.issubdtype(dtype, np.timedelta64): # See https://github.com/numpy/numpy/issues/10685 # np.timedelta64 is a subclass of np.integer # Check np.timedelta64 before np.integer fill_value = np.timedelta64("NaT") dtype_ = dtype elif np.issubdtype(dtype, np.integer): dtype_ = np.float32 if dtype.itemsize <= 2 else np.float64 fill_value = np.nan elif np.issubdtype(dtype, np.complexfloating): dtype_ = dtype fill_value = np.nan + np.nan * 1j elif np.issubdtype(dtype, np.datetime64): dtype_ = dtype fill_value = np.datetime64("NaT") else: dtype_ = object fill_value = np.nan dtype_out = np.dtype(dtype_) fill_value = dtype_out.type(fill_value) return dtype_out, fill_value NAT_TYPES = {np.datetime64("NaT").dtype, np.timedelta64("NaT").dtype} def get_fill_value(dtype: np.dtype[np.generic]) -> Any: """Return an appropriate fill value for this dtype. Parameters ---------- dtype : np.dtype Returns ------- fill_value : Missing value corresponding to this dtype. """ _, fill_value = maybe_promote(dtype) return fill_value def get_pos_infinity( dtype: np.dtype[np.generic], max_for_int: bool = False ) -> float | complex | AlwaysGreaterThan: """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype max_for_int : bool Return np.iinfo(dtype).max instead of np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if issubclass(dtype.type, np.floating): return np.inf if issubclass(dtype.type, np.integer): return np.iinfo(dtype.type).max if max_for_int else np.inf if issubclass(dtype.type, np.complexfloating): return np.inf + 1j * np.inf return INF def get_neg_infinity( dtype: np.dtype[np.generic], min_for_int: bool = False ) -> float | complex | AlwaysLessThan: """Return an appropriate positive infinity for this dtype. Parameters ---------- dtype : np.dtype min_for_int : bool Return np.iinfo(dtype).min instead of -np.inf Returns ------- fill_value : positive infinity value corresponding to this dtype. """ if issubclass(dtype.type, np.floating): return -np.inf if issubclass(dtype.type, np.integer): return np.iinfo(dtype.type).min if min_for_int else -np.inf if issubclass(dtype.type, np.complexfloating): return -np.inf - 1j * np.inf return NINF def is_datetime_like( dtype: np.dtype[np.generic], ) -> TypeGuard[np.datetime64 | np.timedelta64]: """Check if a dtype is a subclass of the numpy datetime types""" return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64) def result_type( *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike, ) -> np.dtype[np.generic]: """Like np.result_type, but with type promotion rules matching pandas. Examples of changed behavior: number + string -> object (not string) bytes + unicode -> object (not unicode) Parameters ---------- *arrays_and_dtypes : list of arrays and dtypes The dtype is extracted from both numpy and dask arrays. Returns ------- numpy.dtype for the result. 
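Examples
--------
Illustrative sketch (assumes ``result_type`` and ``np`` are in scope):

>>> result_type(np.array([1]), np.array(["a"]))
dtype('O')
>>> result_type(np.array([1.0]), np.array([2]))
dtype('float64')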
""" types = {np.result_type(t).type for t in arrays_and_dtypes} for left, right in PROMOTE_TO_OBJECT: if any(issubclass(t, left) for t in types) and any( issubclass(t, right) for t in types ): return np.dtype(object) return np.result_type(*arrays_and_dtypes) xarray-2025.09.0/xarray/namedarray/parallelcompat.py000066400000000000000000000646751505620616400223710ustar00rootroot00000000000000""" The code in this module is an experiment in going from N=1 to N=2 parallel computing frameworks in xarray. It could later be used as the basis for a public interface allowing any N frameworks to interoperate with xarray, but for now it is just a private experiment. """ from __future__ import annotations import functools from abc import ABC, abstractmethod from collections.abc import Callable, Iterable, Sequence from importlib.metadata import EntryPoint, entry_points from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeVar import numpy as np from xarray.core.options import OPTIONS from xarray.core.utils import emit_user_level_warning from xarray.namedarray.pycompat import is_chunked_array if TYPE_CHECKING: from xarray.namedarray._typing import ( T_Chunks, _Chunks, _DType, _DType_co, _NormalizedChunks, _ShapeType, duckarray, ) class ChunkedArrayMixinProtocol(Protocol): def rechunk(self, chunks: Any, **kwargs: Any) -> Any: ... @property def dtype(self) -> np.dtype[Any]: ... @property def chunks(self) -> _NormalizedChunks: ... def compute( self, *data: Any, **kwargs: Any ) -> tuple[np.ndarray[Any, _DType_co], ...]: ... T_ChunkedArray = TypeVar("T_ChunkedArray", bound=ChunkedArrayMixinProtocol) KNOWN_CHUNKMANAGERS = { "dask": "dask", "cubed": "cubed-xarray", "arkouda": "arkouda-xarray", } @functools.lru_cache(maxsize=1) def list_chunkmanagers() -> dict[str, ChunkManagerEntrypoint[Any]]: """ Return a dictionary of available chunk managers and their ChunkManagerEntrypoint subclass objects. Returns ------- chunkmanagers : dict Dictionary whose values are registered ChunkManagerEntrypoint subclass instances, and whose values are the strings under which they are registered. """ entrypoints = entry_points(group="xarray.chunkmanagers") return load_chunkmanagers(entrypoints) def load_chunkmanagers( entrypoints: Sequence[EntryPoint], ) -> dict[str, ChunkManagerEntrypoint[Any]]: """Load entrypoints and instantiate chunkmanagers only once.""" loaded_entrypoints = {} for entrypoint in entrypoints: try: loaded_entrypoints[entrypoint.name] = entrypoint.load() except ModuleNotFoundError as e: emit_user_level_warning( f"Failed to load chunk manager entrypoint {entrypoint.name} due to {e}. Skipping.", ) available_chunkmanagers = { name: chunkmanager() for name, chunkmanager in loaded_entrypoints.items() if chunkmanager.available } return available_chunkmanagers def guess_chunkmanager( manager: str | ChunkManagerEntrypoint[Any] | None, ) -> ChunkManagerEntrypoint[Any]: """ Get namespace of chunk-handling methods, guessing from what's available. If the name of a specific ChunkManager is given (e.g. "dask"), then use that. Else use whatever is installed, defaulting to dask if there are multiple options. 
""" available_chunkmanagers = list_chunkmanagers() if manager is None: if len(available_chunkmanagers) == 1: # use the only option available manager = next(iter(available_chunkmanagers.keys())) else: # use the one in options (default dask) manager = OPTIONS["chunk_manager"] if isinstance(manager, str): if manager not in available_chunkmanagers and manager in KNOWN_CHUNKMANAGERS: raise ImportError( f"chunk manager {manager!r} is not available." f" Please make sure {KNOWN_CHUNKMANAGERS[manager]!r} is installed" " and importable." ) elif len(available_chunkmanagers) == 0: raise ImportError( "no chunk managers available. Try installing `dask` or another package" " that provides a chunk manager." ) elif manager not in available_chunkmanagers: raise ValueError( f"unrecognized chunk manager {manager!r} - must be one of the installed" f" chunk managers: {list(available_chunkmanagers)}" ) return available_chunkmanagers[manager] elif isinstance(manager, ChunkManagerEntrypoint): # already a valid ChunkManager so just pass through return manager else: raise TypeError( "manager must be a string or instance of ChunkManagerEntrypoint," f" but received type {type(manager)}" ) def get_chunked_array_type(*args: Any) -> ChunkManagerEntrypoint[Any]: """ Detects which parallel backend should be used for given set of arrays. Also checks that all arrays are of same chunking type (i.e. not a mix of cubed and dask). """ # TODO this list is probably redundant with something inside xarray.apply_ufunc ALLOWED_NON_CHUNKED_TYPES = {int, float, np.ndarray} chunked_arrays = [ a for a in args if is_chunked_array(a) and type(a) not in ALLOWED_NON_CHUNKED_TYPES ] # Asserts all arrays are the same type (or numpy etc.) chunked_array_types = {type(a) for a in chunked_arrays} if len(chunked_array_types) > 1: raise TypeError( f"Mixing chunked array types is not supported, but received multiple types: {chunked_array_types}" ) elif len(chunked_array_types) == 0: raise TypeError("Expected a chunked array but none were found") # iterate over defined chunk managers, seeing if each recognises this array type chunked_arr = chunked_arrays[0] chunkmanagers = list_chunkmanagers() selected = [ chunkmanager for chunkmanager in chunkmanagers.values() if chunkmanager.is_chunked_array(chunked_arr) ] if not selected: raise TypeError( f"Could not find a Chunk Manager which recognises type {type(chunked_arr)}" ) elif len(selected) >= 2: raise TypeError(f"Multiple ChunkManagers recognise type {type(chunked_arr)}") else: return selected[0] class ChunkManagerEntrypoint(ABC, Generic[T_ChunkedArray]): """ Interface between a particular parallel computing framework and xarray. This abstract base class must be subclassed by libraries implementing chunked array types, and registered via the ``chunkmanagers`` entrypoint. Abstract methods on this class must be implemented, whereas non-abstract methods are only required in order to enable a subset of xarray functionality, and by default will raise a ``NotImplementedError`` if called. Attributes ---------- array_cls Type of the array class this parallel computing framework provides. Parallel frameworks need to provide an array class that supports the array API standard. This attribute is used for array instance type checking at runtime. 
""" array_cls: type[T_ChunkedArray] available: bool = True @abstractmethod def __init__(self) -> None: """Used to set the array_cls attribute at import time.""" raise NotImplementedError() def is_chunked_array(self, data: duckarray[Any, Any]) -> bool: """ Check if the given object is an instance of this type of chunked array. Compares against the type stored in the array_cls attribute by default. Parameters ---------- data : Any Returns ------- is_chunked : bool See Also -------- dask.is_dask_collection """ return isinstance(data, self.array_cls) @abstractmethod def chunks(self, data: T_ChunkedArray) -> _NormalizedChunks: """ Return the current chunks of the given array. Returns chunks explicitly as a tuple of tuple of ints. Used internally by xarray objects' .chunks and .chunksizes properties. Parameters ---------- data : chunked array Returns ------- chunks : tuple[tuple[int, ...], ...] See Also -------- dask.array.Array.chunks cubed.Array.chunks """ raise NotImplementedError() @abstractmethod def normalize_chunks( self, chunks: _Chunks | _NormalizedChunks, shape: _ShapeType | None = None, limit: int | None = None, dtype: _DType | None = None, previous_chunks: _NormalizedChunks | None = None, ) -> _NormalizedChunks: """ Normalize given chunking pattern into an explicit tuple of tuples representation. Exposed primarily because different chunking backends may want to make different decisions about how to automatically chunk along dimensions not given explicitly in the input chunks. Called internally by xarray.open_dataset. Parameters ---------- chunks : tuple, int, dict, or string The chunks to be normalized. shape : Tuple[int] The shape of the array limit : int (optional) The maximum block size to target in bytes, if freedom is given to choose dtype : np.dtype previous_chunks : Tuple[Tuple[int]], optional Chunks from a previous array that we should use for inspiration when rechunking dimensions automatically. See Also -------- dask.array.core.normalize_chunks """ raise NotImplementedError() @abstractmethod def from_array( self, data: duckarray[Any, Any], chunks: _Chunks, **kwargs: Any ) -> T_ChunkedArray: """ Create a chunked array from a non-chunked numpy-like array. Generally input should have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing. Called when the .chunk method is called on an xarray object that is not already chunked. Also called within open_dataset (when chunks is not None) to create a chunked array from an xarray lazily indexed array. Parameters ---------- data : array_like chunks : int, tuple How to chunk the array. See Also -------- dask.array.from_array cubed.from_array """ raise NotImplementedError() def rechunk( self, data: T_ChunkedArray, chunks: _NormalizedChunks | tuple[int, ...] | _Chunks, **kwargs: Any, ) -> Any: """ Changes the chunking pattern of the given array. Called when the .chunk method is called on an xarray object that is already chunked. Parameters ---------- data : dask array Array to be rechunked. chunks : int, tuple, dict or str, optional The new block dimensions to create. -1 indicates the full size of the corresponding dimension. Default is "auto" which automatically determines chunk sizes. Returns ------- chunked array See Also -------- dask.array.Array.rechunk cubed.Array.rechunk """ return data.rechunk(chunks, **kwargs) @abstractmethod def compute( self, *data: T_ChunkedArray | Any, **kwargs: Any ) -> tuple[np.ndarray[Any, _DType_co], ...]: """ Computes one or more chunked arrays, returning them as eager numpy arrays. 
Called anytime something needs to computed, including multiple arrays at once. Used by `.compute`, `.persist`, `.values`. Parameters ---------- *data : object Any number of objects. If an object is an instance of the chunked array type, it is computed and the in-memory result returned as a numpy array. All other types should be passed through unchanged. Returns ------- objs The input, but with all chunked arrays now computed. See Also -------- dask.compute cubed.compute """ raise NotImplementedError() def shuffle( self, x: T_ChunkedArray, indexer: list[list[int]], axis: int, chunks: T_Chunks ) -> T_ChunkedArray: raise NotImplementedError() def persist( self, *data: T_ChunkedArray | Any, **kwargs: Any ) -> tuple[T_ChunkedArray | Any, ...]: """ Persist one or more chunked arrays in memory. Parameters ---------- *data : object Any number of objects. If an object is an instance of the chunked array type, it is persisted as a chunked array in memory. All other types should be passed through unchanged. Returns ------- objs The input, but with all chunked arrays now persisted in memory. See Also -------- dask.persist """ raise NotImplementedError() @property def array_api(self) -> Any: """ Return the array_api namespace following the python array API standard. See https://data-apis.org/array-api/latest/ . Currently used to access the array API function ``full_like``, which is called within the xarray constructors ``xarray.full_like``, ``xarray.ones_like``, ``xarray.zeros_like``, etc. See Also -------- dask.array cubed.array_api """ raise NotImplementedError() def reduction( self, arr: T_ChunkedArray, func: Callable[..., Any], combine_func: Callable[..., Any] | None = None, aggregate_func: Callable[..., Any] | None = None, axis: int | Sequence[int] | None = None, dtype: _DType_co | None = None, keepdims: bool = False, ) -> T_ChunkedArray: """ A general version of array reductions along one or more axes. Used inside some reductions like nanfirst, which is used by ``groupby.first``. Parameters ---------- arr : chunked array Data to be reduced along one or more axes. func : Callable(x_chunk, axis, keepdims) First function to be executed when resolving the dask graph. This function is applied in parallel to all original chunks of x. See below for function parameters. combine_func : Callable(x_chunk, axis, keepdims), optional Function used for intermediate recursive aggregation (see split_every below). If omitted, it defaults to aggregate_func. aggregate_func : Callable(x_chunk, axis, keepdims) Last function to be executed, producing the final output. It is always invoked, even when the reduced Array counts a single chunk along the reduced axes. axis : int or sequence of ints, optional Axis or axes to aggregate upon. If omitted, aggregate along all axes. dtype : np.dtype data type of output. This argument was previously optional, but leaving as ``None`` will now raise an exception. keepdims : boolean, optional Whether the reduction function should preserve the reduced axes, leaving them at size ``output_size``, or remove them. Returns ------- chunked array See Also -------- dask.array.reduction cubed.core.reduction """ raise NotImplementedError() def scan( self, func: Callable[..., Any], binop: Callable[..., Any], ident: float, arr: T_ChunkedArray, axis: int | None = None, dtype: _DType_co | None = None, **kwargs: Any, ) -> T_ChunkedArray: """ General version of a 1D scan, also known as a cumulative array reduction. Used in ``ffill`` and ``bfill`` in xarray. 
Parameters ---------- func: callable Cumulative function like np.cumsum or np.cumprod binop: callable Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul`` ident: Number Associated identity like ``np.cumsum->0`` or ``np.cumprod->1`` arr: dask Array axis: int, optional dtype: dtype Returns ------- Chunked array See also -------- dask.array.cumreduction """ raise NotImplementedError() @abstractmethod def apply_gufunc( self, func: Callable[..., Any], signature: str, *args: Any, axes: Sequence[tuple[int, ...]] | None = None, keepdims: bool = False, output_dtypes: Sequence[_DType_co] | None = None, vectorize: bool | None = None, **kwargs: Any, ) -> Any: """ Apply a generalized ufunc or similar python function to arrays. ``signature`` determines if the function consumes or produces core dimensions. The remaining dimensions in given input arrays (``*args``) are considered loop dimensions and are required to broadcast naturally against each other. In other terms, this function is like ``np.vectorize``, but for the blocks of chunked arrays. If the function itself shall also be vectorized use ``vectorize=True`` for convenience. Called inside ``xarray.apply_ufunc``, which is called internally for most xarray operations. Therefore this method must be implemented for the vast majority of xarray computations to be supported. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on input arrays (``*args``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, ``output_core_dims`` has to be set as well. signature: string Specifies what core dimensions are consumed and produced by ``func``. According to the specification of numpy.gufunc signature [2]_ *args : numeric Input arrays or scalars to the callable function. axes: List of tuples, optional, keyword only A list of tuples with indices of axes a generalized ufunc should operate on. For instance, for a signature of ``"(i,j),(j,k)->(i,k)"`` appropriate for matrix multiplication, the base elements are two-dimensional matrices and these are taken to be stored in the two last axes of each argument. The corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``. For simplicity, for generalized ufuncs that operate on 1-dimensional arrays (vectors), a single integer is accepted instead of a single-element tuple, and for generalized ufuncs for which all outputs are scalars, the output tuples can be omitted. keepdims: bool, optional, keyword only If this is set to True, axes which are reduced over will be left in the result as a dimension with size one, so that the result will broadcast correctly against the inputs. This option can only be used for generalized ufuncs that operate on inputs that all have the same number of core dimensions and with outputs that have no core dimensions , i.e., with signatures like ``"(i),(i)->()"`` or ``"(m,m)->()"``. If used, the location of the dimensions in the output can be controlled with axes and axis. output_dtypes : Optional, dtype or list of dtypes, keyword only Valid numpy dtype specification or list thereof. If not given, a call of ``func`` with a small set of data is performed in order to try to automatically determine the output dtypes. 
vectorize: bool, keyword only If set to ``True``, ``np.vectorize`` is applied to ``func`` for convenience. Defaults to ``False``. **kwargs : dict Extra keyword arguments to pass to `func` Returns ------- Single chunked array or tuple of chunked arrays See Also -------- dask.array.gufunc.apply_gufunc cubed.apply_gufunc References ---------- .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html """ raise NotImplementedError() def map_blocks( self, func: Callable[..., Any], *args: Any, dtype: _DType_co | None = None, chunks: tuple[int, ...] | None = None, drop_axis: int | Sequence[int] | None = None, new_axis: int | Sequence[int] | None = None, **kwargs: Any, ) -> Any: """ Map a function across all blocks of a chunked array. Called in elementwise operations, but notably not (currently) called within xarray.map_blocks. Parameters ---------- func : callable Function to apply to every block in the array. If ``func`` accepts ``block_info=`` or ``block_id=`` as keyword arguments, these will be passed dictionaries containing information about input and output chunks/arrays during computation. See examples for details. args : dask arrays or other objects dtype : np.dtype, optional The ``dtype`` of the output array. It is recommended to provide this. If not provided, will be inferred by applying the function to a small set of fake data. chunks : tuple, optional Chunk shape of resulting blocks if the function does not preserve shape. If not provided, the resulting array is assumed to have the same block structure as the first input array. drop_axis : number or iterable, optional Dimensions lost by the function. new_axis : number or iterable, optional New dimensions created by the function. Note that these are applied after ``drop_axis`` (if present). **kwargs : Other keyword arguments to pass to function. Values must be constants (not dask.arrays) See Also -------- dask.array.map_blocks cubed.map_blocks """ raise NotImplementedError() def blockwise( self, func: Callable[..., Any], out_ind: Iterable[Any], *args: Any, # can't type this as mypy assumes args are all same type, but dask blockwise args alternate types adjust_chunks: dict[Any, Callable[..., Any]] | None = None, new_axes: dict[Any, int] | None = None, align_arrays: bool = True, **kwargs: Any, ) -> Any: """ Tensor operation: Generalized inner and outer products. A broad class of blocked algorithms and patterns can be specified with a concise multi-index notation. The ``blockwise`` function applies an in-memory function across multiple blocks of multiple inputs in a variety of ways. Many chunked array operations are special cases of blockwise including elementwise, broadcasting, reductions, tensordot, and transpose. Currently only called explicitly in xarray when performing multidimensional interpolation. Parameters ---------- func : callable Function to apply to individual tuples of blocks out_ind : iterable Block pattern of the output, something like 'ijk' or (1, 2, 3) *args : sequence of Array, index pairs You may also pass literal arguments, accompanied by None index e.g. (x, 'ij', y, 'jk', z, 'i', some_literal, None) **kwargs : dict Extra keyword arguments to pass to function adjust_chunks : dict Dictionary mapping index to function to be applied to chunk sizes new_axes : dict, keyword only New indexes and their dimension lengths align_arrays: bool Whether or not to align chunks along equally sized dimensions when multiple arrays are provided. 
This allows for larger chunks in some arrays to be broken into smaller ones that match chunk sizes in other arrays such that they are compatible for block function mapping. If this is false, then an error will be thrown if arrays do not already have the same number of blocks in each dimension. See Also -------- dask.array.blockwise cubed.core.blockwise """ raise NotImplementedError() def unify_chunks( self, *args: Any, # can't type this as mypy assumes args are all same type, but dask unify_chunks args alternate types **kwargs: Any, ) -> tuple[dict[str, _NormalizedChunks], list[T_ChunkedArray]]: """ Unify chunks across a sequence of arrays. Called by xarray.unify_chunks. Parameters ---------- *args: sequence of Array, index pairs Sequence like (x, 'ij', y, 'jk', z, 'i') See Also -------- dask.array.core.unify_chunks cubed.core.unify_chunks """ raise NotImplementedError() def store( self, sources: T_ChunkedArray | Sequence[T_ChunkedArray], targets: Any, **kwargs: dict[str, Any], ) -> Any: """ Store chunked arrays in array-like objects, overwriting data in target. This stores chunked arrays into object that supports numpy-style setitem indexing (e.g. a Zarr Store). Allows storing values chunk by chunk so that it does not have to fill up memory. For best performance you likely want to align the block size of the storage target with the block size of your array. Used when writing to any registered xarray I/O backend. Parameters ---------- sources: Array or collection of Arrays targets: array-like or collection of array-likes These should support setitem syntax ``target[10:20] = ...``. If sources is a single item, targets must be a single item; if sources is a collection of arrays, targets must be a matching collection. kwargs: Parameters passed to compute/persist (only used if compute=True) See Also -------- dask.array.store cubed.store """ raise NotImplementedError() xarray-2025.09.0/xarray/namedarray/pycompat.py000066400000000000000000000126341505620616400212110ustar00rootroot00000000000000from __future__ import annotations from importlib import import_module from types import ModuleType from typing import TYPE_CHECKING, Any, Literal import numpy as np from packaging.version import Version from xarray.core.utils import is_scalar from xarray.namedarray.utils import is_duck_array, is_duck_dask_array integer_types = (int, np.integer) if TYPE_CHECKING: ModType = Literal["dask", "pint", "cupy", "sparse", "cubed", "numbagg"] DuckArrayTypes = tuple[type[Any], ...] # TODO: improve this? maybe Generic from xarray.namedarray._typing import _DType, _ShapeType, duckarray class DuckArrayModule: """ Solely for internal isinstance and version checks. 
Motivated by having to only import pint when required (as pint currently imports xarray) https://github.com/pydata/xarray/pull/5561#discussion_r664815718 """ module: ModuleType | None version: Version type: DuckArrayTypes available: bool def __init__(self, mod: ModType) -> None: duck_array_module: ModuleType | None duck_array_version: Version duck_array_type: DuckArrayTypes try: duck_array_module = import_module(mod) duck_array_version = Version(duck_array_module.__version__) if mod == "dask": duck_array_type = (import_module("dask.array").Array,) elif mod == "pint": duck_array_type = (duck_array_module.Quantity,) elif mod == "cupy": duck_array_type = (duck_array_module.ndarray,) elif mod == "sparse": duck_array_type = (duck_array_module.SparseArray,) elif mod == "cubed": duck_array_type = (duck_array_module.Array,) # Not a duck array module, but using this system regardless, to get lazy imports elif mod == "numbagg": duck_array_type = () else: raise NotImplementedError except (ImportError, AttributeError): # pragma: no cover duck_array_module = None duck_array_version = Version("0.0.0") duck_array_type = () self.module = duck_array_module self.version = duck_array_version self.type = duck_array_type self.available = duck_array_module is not None _cached_duck_array_modules: dict[ModType, DuckArrayModule] = {} def _get_cached_duck_array_module(mod: ModType) -> DuckArrayModule: if mod not in _cached_duck_array_modules: duckmod = DuckArrayModule(mod) _cached_duck_array_modules[mod] = duckmod return duckmod else: return _cached_duck_array_modules[mod] def array_type(mod: ModType) -> DuckArrayTypes: """Quick wrapper to get the array class of the module.""" return _get_cached_duck_array_module(mod).type def mod_version(mod: ModType) -> Version: """Quick wrapper to get the version of the module.""" return _get_cached_duck_array_module(mod).version def is_chunked_array(x: duckarray[Any, Any]) -> bool: return is_duck_dask_array(x) or (is_duck_array(x) and hasattr(x, "chunks")) def is_0d_dask_array(x: duckarray[Any, Any]) -> bool: return is_duck_dask_array(x) and is_scalar(x) def to_numpy( data: duckarray[Any, Any], **kwargs: dict[str, Any] ) -> np.ndarray[Any, np.dtype[Any]]: from xarray.core.indexing import ExplicitlyIndexed from xarray.namedarray.parallelcompat import get_chunked_array_type try: # for tests only at the moment return data.to_numpy() # type: ignore[no-any-return,union-attr] except AttributeError: pass if isinstance(data, ExplicitlyIndexed): data = data.get_duck_array() # type: ignore[no-untyped-call] # TODO first attempt to call .to_numpy() once some libraries implement it if is_chunked_array(data): chunkmanager = get_chunked_array_type(data) data, *_ = chunkmanager.compute(data, **kwargs) if isinstance(data, array_type("cupy")): data = data.get() # pint has to be imported dynamically as pint imports xarray if isinstance(data, array_type("pint")): data = data.magnitude if isinstance(data, array_type("sparse")): data = data.todense() data = np.asarray(data) return data def to_duck_array(data: Any, **kwargs: dict[str, Any]) -> duckarray[_ShapeType, _DType]: from xarray.core.indexing import ( ExplicitlyIndexed, ImplicitToExplicitIndexingAdapter, ) from xarray.namedarray.parallelcompat import get_chunked_array_type if is_chunked_array(data): chunkmanager = get_chunked_array_type(data) loaded_data, *_ = chunkmanager.compute(data, **kwargs) # type: ignore[var-annotated] return loaded_data if isinstance(data, ExplicitlyIndexed | ImplicitToExplicitIndexingAdapter): return 
data.get_duck_array() # type: ignore[no-untyped-call, no-any-return] elif is_duck_array(data): return data else: return np.asarray(data) # type: ignore[return-value] async def async_to_duck_array( data: Any, **kwargs: dict[str, Any] ) -> duckarray[_ShapeType, _DType]: from xarray.core.indexing import ( ExplicitlyIndexed, ImplicitToExplicitIndexingAdapter, ) if isinstance(data, ExplicitlyIndexed | ImplicitToExplicitIndexingAdapter): return await data.async_get_duck_array() # type: ignore[union-attr, no-any-return] else: return to_duck_array(data, **kwargs) xarray-2025.09.0/xarray/namedarray/utils.py000066400000000000000000000152241505620616400205130ustar00rootroot00000000000000from __future__ import annotations import importlib import warnings from collections.abc import Hashable, Iterable, Iterator, Mapping from functools import lru_cache from typing import TYPE_CHECKING, Any, TypeVar, cast import numpy as np from packaging.version import Version from xarray.namedarray._typing import ErrorOptionsWithWarn, _DimsLike if TYPE_CHECKING: from typing import TypeGuard from numpy.typing import NDArray try: from dask.array.core import Array as DaskArray from dask.typing import DaskCollection except ImportError: DaskArray = NDArray # type: ignore[assignment, misc] DaskCollection: Any = NDArray # type: ignore[no-redef] from xarray.namedarray._typing import _Dim, duckarray K = TypeVar("K") V = TypeVar("V") T = TypeVar("T") @lru_cache def module_available(module: str, minversion: str | None = None) -> bool: """Checks whether a module is installed without importing it. Use this for a lightweight check and lazy imports. Parameters ---------- module : str Name of the module. minversion : str, optional Minimum version of the module Returns ------- available : bool Whether the module is installed. """ if importlib.util.find_spec(module) is None: return False if minversion is not None: version = importlib.metadata.version(module) return Version(version) >= Version(minversion) return True def is_dask_collection(x: object) -> TypeGuard[DaskCollection]: if module_available("dask"): from dask.base import is_dask_collection # use is_dask_collection function instead of dask.typing.DaskCollection # see https://github.com/pydata/xarray/pull/8241#discussion_r1476276023 return is_dask_collection(x) return False def is_duck_array(value: Any) -> TypeGuard[duckarray[Any, Any]]: # TODO: replace is_duck_array with runtime checks via _arrayfunction_or_api protocol on # python 3.12 and higher (see https://github.com/pydata/xarray/issues/8696#issuecomment-1924588981) if isinstance(value, np.ndarray): return True return ( hasattr(value, "ndim") and hasattr(value, "shape") and hasattr(value, "dtype") and ( (hasattr(value, "__array_function__") and hasattr(value, "__array_ufunc__")) or hasattr(value, "__array_namespace__") ) ) def is_duck_dask_array(x: duckarray[Any, Any]) -> TypeGuard[DaskArray]: return is_duck_array(x) and is_dask_collection(x) def to_0d_object_array( value: object, ) -> NDArray[np.object_]: """Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.""" result = np.empty((), dtype=object) result[()] = value return result def is_dict_like(value: Any) -> TypeGuard[Mapping[Any, Any]]: return hasattr(value, "keys") and hasattr(value, "__getitem__") def drop_missing_dims( supplied_dims: Iterable[_Dim], dims: Iterable[_Dim], missing_dims: ErrorOptionsWithWarn, ) -> _DimsLike: """Depending on the setting of missing_dims, drop any dimensions from supplied_dims that are not present in dims. 
Parameters ---------- supplied_dims : Iterable of Hashable dims : Iterable of Hashable missing_dims : {"raise", "warn", "ignore"} """ if missing_dims == "raise": supplied_dims_set = {val for val in supplied_dims if val is not ...} if invalid := supplied_dims_set - set(dims): raise ValueError( f"Dimensions {invalid} do not exist. Expected one or more of {dims}" ) return supplied_dims elif missing_dims == "warn": if invalid := set(supplied_dims) - set(dims): warnings.warn( f"Dimensions {invalid} do not exist. Expected one or more of {dims}", stacklevel=2, ) return [val for val in supplied_dims if val in dims or val is ...] elif missing_dims == "ignore": return [val for val in supplied_dims if val in dims or val is ...] else: raise ValueError( f"Unrecognised option {missing_dims} for missing_dims argument" ) def infix_dims( dims_supplied: Iterable[_Dim], dims_all: Iterable[_Dim], missing_dims: ErrorOptionsWithWarn = "raise", ) -> Iterator[_Dim]: """ Resolves a supplied list containing an ellipsis representing other items, to a generator with the 'realized' list of all items """ if ... in dims_supplied: dims_all_list = list(dims_all) if len(set(dims_all)) != len(dims_all_list): raise ValueError("Cannot use ellipsis with repeated dims") if list(dims_supplied).count(...) > 1: raise ValueError("More than one ellipsis supplied") other_dims = [d for d in dims_all if d not in dims_supplied] existing_dims = drop_missing_dims(dims_supplied, dims_all, missing_dims) for d in existing_dims: if d is ...: yield from other_dims else: yield d else: existing_dims = drop_missing_dims(dims_supplied, dims_all, missing_dims) if set(existing_dims) ^ set(dims_all): raise ValueError( f"{dims_supplied} must be a permuted list of {dims_all}, unless `...` is included" ) yield from existing_dims def either_dict_or_kwargs( pos_kwargs: Mapping[Any, T] | None, kw_kwargs: Mapping[str, T], func_name: str, ) -> Mapping[Hashable, T]: if pos_kwargs is None or pos_kwargs == {}: # Need an explicit cast to appease mypy due to invariance; see # https://github.com/python/mypy/issues/6228 return cast(Mapping[Hashable, T], kw_kwargs) if not is_dict_like(pos_kwargs): raise ValueError(f"the first argument to .{func_name} must be a dictionary") if kw_kwargs: raise ValueError( f"cannot specify both keyword and positional arguments to .{func_name}" ) return pos_kwargs class ReprObject: """Object that prints as the given value, for use with sentinel values.""" __slots__ = ("_value",) _value: str def __init__(self, value: str): self._value = value def __repr__(self) -> str: return self._value def __eq__(self, other: ReprObject | Any) -> bool: # TODO: What type can other be? ArrayLike? 
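# Note: two ReprObjects compare equal only when both wrap the same string;
# any other operand yields False rather than NotImplemented, so Python will
# not fall back to the other operand's ``__eq__``.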
        return self._value == other._value if isinstance(other, ReprObject) else False

    def __hash__(self) -> int:
        return hash((type(self), self._value))

    def __dask_tokenize__(self) -> object:
        from dask.base import normalize_token

        return normalize_token((type(self), self._value))
xarray-2025.09.0/xarray/plot/000077500000000000000000000000001505620616400156305ustar00rootroot00000000000000xarray-2025.09.0/xarray/plot/__init__.py000066400000000000000000000010761505620616400177450ustar00rootroot00000000000000"""
Use this module directly:
    import xarray.plot as xplt

Or use the methods on a DataArray or Dataset:
    DataArray.plot._____
    Dataset.plot._____
"""

from xarray.plot.dataarray_plot import (
    contour,
    contourf,
    hist,
    imshow,
    line,
    pcolormesh,
    plot,
    step,
    surface,
)
from xarray.plot.dataset_plot import scatter
from xarray.plot.facetgrid import FacetGrid

__all__ = [
    "FacetGrid",
    "contour",
    "contourf",
    "hist",
    "imshow",
    "line",
    "pcolormesh",
    "plot",
    "scatter",
    "step",
    "surface",
]
xarray-2025.09.0/xarray/plot/accessor.py000066400000000000000000001243151505620616400200120ustar00rootroot00000000000000from __future__ import annotations

import functools
from collections.abc import Hashable, Iterable
from typing import TYPE_CHECKING, Any, Literal, NoReturn, overload

import numpy as np

# Accessor methods have the same name as plotting methods, so we need a different namespace
from xarray.plot import dataarray_plot, dataset_plot

if TYPE_CHECKING:
    from matplotlib.axes import Axes
    from matplotlib.collections import LineCollection, PathCollection, QuadMesh
    from matplotlib.colors import Normalize
    from matplotlib.container import BarContainer
    from matplotlib.contour import QuadContourSet
    from matplotlib.image import AxesImage
    from matplotlib.patches import Polygon
    from matplotlib.quiver import Quiver
    from mpl_toolkits.mplot3d.art3d import Line3D, Poly3DCollection
    from numpy.typing import ArrayLike

    from xarray.core.dataarray import DataArray
    from xarray.core.dataset import Dataset
    from xarray.core.types import AspectOptions, HueStyleOptions, ScaleOptions
    from xarray.plot.facetgrid import FacetGrid


class DataArrayPlotAccessor:
    """
    Enables use of xarray.plot functions as attributes on a DataArray.
For example, DataArray.plot.imshow """ _da: DataArray __slots__ = ("_da",) __doc__ = dataarray_plot.plot.__doc__ def __init__(self, darray: DataArray) -> None: self._da = darray # Should return Any such that the user does not run into problems # with the many possible return values @functools.wraps(dataarray_plot.plot, assigned=("__doc__", "__annotations__")) def __call__(self, **kwargs) -> Any: return dataarray_plot.plot(self._da, **kwargs) @functools.wraps(dataarray_plot.hist) def hist( self, *args, **kwargs ) -> tuple[np.ndarray, np.ndarray, BarContainer | Polygon]: return dataarray_plot.hist(self._da, *args, **kwargs) @overload def line( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> list[Line3D]: ... @overload def line( self, *args: Any, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @overload def line( self, *args: Any, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.line, assigned=("__doc__",)) def line(self, *args, **kwargs) -> list[Line3D] | FacetGrid[DataArray]: return dataarray_plot.line(self._da, *args, **kwargs) @overload def step( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive **kwargs: Any, ) -> list[Line3D]: ... @overload def step( self, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
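    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). The overloads above encode the
    # accessor's return-type contract: with neither ``row`` nor ``col`` the call
    # returns a list of matplotlib line artists, with either it returns a FacetGrid.
    # Per the implementation in dataarray_plot.step further below, the call is
    # forwarded to ``line`` with ``drawstyle="steps-" + where``. The DataArray here
    # is hypothetical:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     da = xr.DataArray(np.random.randn(12), dims="x", coords={"x": np.arange(12)})
    #     artists = da.plot.step(where="mid")   # list of line artists
    #
    # Passing ``row=`` or ``col=`` on a suitable multi-dimensional DataArray would
    # instead return a FacetGrid, as the second and third overloads indicate.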
@overload def step( self, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.step, assigned=("__doc__",)) def step(self, *args, **kwargs) -> list[Line3D] | FacetGrid[DataArray]: return dataarray_plot.step(self._da, *args, **kwargs) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs, ) -> PathCollection: ... @overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs, ) -> FacetGrid[DataArray]: ... 
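    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). For scatter, ``hue`` and ``markersize``
    # map additional coordinates onto colour and point size; the coordinate names
    # below are hypothetical:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     da = xr.DataArray(
    #         np.random.randn(50),
    #         dims="obs",
    #         coords={
    #             "depth": ("obs", np.random.rand(50)),
    #             "temp": ("obs", 20 + np.random.randn(50)),
    #         },
    #         name="salinity",
    #     )
    #     pc = da.plot.scatter(x="depth", hue="temp")   # PathCollection (no row/col)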
@overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.scatter, assigned=("__doc__",)) def scatter(self, *args, **kwargs) -> PathCollection | FacetGrid[DataArray]: return dataarray_plot.scatter(self._da, *args, **kwargs) @overload def imshow( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> AxesImage: ... @overload def imshow( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
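    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). imshow expects evenly spaced 2-D
    # data; as the overloads above indicate, adding ``col=`` (or ``row=``) switches
    # the return type from an AxesImage to a FacetGrid. Hypothetical example:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     da = xr.DataArray(np.random.rand(3, 20, 30), dims=("band", "y", "x"))
    #     img = da.isel(band=0).plot.imshow()   # AxesImage
    #     fg = da.plot.imshow(col="band")       # FacetGrid, one panel per band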
@overload def imshow( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.imshow, assigned=("__doc__",)) def imshow(self, *args, **kwargs) -> AxesImage | FacetGrid[DataArray]: return dataarray_plot.imshow(self._da, *args, **kwargs) @overload def contour( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... @overload def contour( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def contour( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.contour, assigned=("__doc__",)) def contour(self, *args, **kwargs) -> QuadContourSet | FacetGrid[DataArray]: return dataarray_plot.contour(self._da, *args, **kwargs) @overload def contourf( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... @overload def contourf( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
@overload def contourf( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid: ... @functools.wraps(dataarray_plot.contourf, assigned=("__doc__",)) def contourf(self, *args, **kwargs) -> QuadContourSet | FacetGrid[DataArray]: return dataarray_plot.contourf(self._da, *args, **kwargs) @overload def pcolormesh( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadMesh: ... @overload def pcolormesh( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
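    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). pcolormesh is what DataArray.plot()
    # dispatches to for 2-D data (see the dispatch table in dataarray_plot.plot),
    # and unlike imshow it tolerates unevenly spaced coordinates. Hypothetical
    # example:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     y = np.array([0.0, 1.0, 3.0, 6.0])   # uneven spacing is fine here
    #     da = xr.DataArray(np.random.rand(4, 5), dims=("y", "x"), coords={"y": y})
    #     mesh = da.plot.pcolormesh()           # QuadMesh (no row/col)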
@overload def pcolormesh( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @functools.wraps(dataarray_plot.pcolormesh, assigned=("__doc__",)) def pcolormesh(self, *args, **kwargs) -> QuadMesh | FacetGrid[DataArray]: return dataarray_plot.pcolormesh(self._da, *args, **kwargs) @overload def surface( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> Poly3DCollection: ... @overload def surface( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid: ... 
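    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). Per the return annotations above,
    # surface yields a Poly3DCollection (an mplot3d artist) rather than a 2-D
    # artist, but it is invoked like the other 2-D plotting methods. Hypothetical
    # example:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     da = xr.DataArray(np.random.rand(20, 30), dims=("y", "x"))
    #     surf = da.plot.surface()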
@overload def surface( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap=None, center=None, robust: bool = False, extend=None, levels=None, infer_intervals=None, colors=None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid: ... @functools.wraps(dataarray_plot.surface, assigned=("__doc__",)) def surface(self, *args, **kwargs) -> Poly3DCollection: return dataarray_plot.surface(self._da, *args, **kwargs) class DatasetPlotAccessor: """ Enables use of xarray.plot functions as attributes on a Dataset. For example, Dataset.plot.scatter """ _ds: Dataset __slots__ = ("_ds",) def __init__(self, dataset: Dataset) -> None: self._ds = dataset def __call__(self, *args, **kwargs) -> NoReturn: raise ValueError( "Dataset.plot cannot be called directly. Use " "an explicit plot method, e.g. ds.plot.scatter(...)" ) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs: Any, ) -> PathCollection: ... 
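    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). Unlike the DataArray accessor,
    # Dataset.plot cannot be called directly (see ``__call__`` above); an explicit
    # method such as scatter is required, with ``x`` and ``y`` naming data
    # variables. Hypothetical example:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     ds = xr.Dataset(
    #         {
    #             "a": ("obs", np.random.randn(100)),
    #             "b": ("obs", np.random.randn(100)),
    #             "w": ("obs", np.random.rand(100)),
    #         }
    #     )
    #     pc = ds.plot.scatter(x="a", y="b", hue="w")   # PathCollection (no row/col)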
@overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def scatter( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap=None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend=None, levels=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @functools.wraps(dataset_plot.scatter, assigned=("__doc__",)) def scatter(self, *args, **kwargs) -> PathCollection | FacetGrid[Dataset]: return dataset_plot.scatter(self._ds, *args, **kwargs) @overload def quiver( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> Quiver: ... 
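    # --- Illustrative usage sketch (editorial addition, not upstream code; kept as
    # comments so the class body is unchanged). quiver needs the vector components
    # ``u`` and ``v`` (data variables) plus ``x`` and ``y`` coordinates; per the
    # overloads above it returns a matplotlib Quiver unless ``row``/``col`` is
    # given. Hypothetical example:
    #
    #     import numpy as np
    #     import xarray as xr
    #
    #     ds = xr.Dataset(
    #         {
    #             "u": (("y", "x"), np.random.randn(4, 5)),
    #             "v": (("y", "x"), np.random.randn(4, 5)),
    #         },
    #         coords={"x": np.arange(5), "y": np.arange(4)},
    #     )
    #     q = ds.plot.quiver(x="x", y="y", u="u", v="v")
    #
    # streamplot (further below) takes the same x/y/u/v arguments but draws
    # streamlines and returns a LineCollection, per its annotations.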
@overload def quiver( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def quiver( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @functools.wraps(dataset_plot.quiver, assigned=("__doc__",)) def quiver(self, *args, **kwargs) -> Quiver | FacetGrid[Dataset]: return dataset_plot.quiver(self._ds, *args, **kwargs) @overload def streamplot( # type: ignore[misc,unused-ignore] # None is hashable :( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> LineCollection: ... 
@overload def streamplot( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def streamplot( self, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals=None, center=None, levels=None, robust: bool | None = None, colors=None, extend=None, cmap=None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @functools.wraps(dataset_plot.streamplot, assigned=("__doc__",)) def streamplot(self, *args, **kwargs) -> LineCollection | FacetGrid[Dataset]: return dataset_plot.streamplot(self._ds, *args, **kwargs) xarray-2025.09.0/xarray/plot/dataarray_plot.py000066400000000000000000002510711505620616400212160ustar00rootroot00000000000000from __future__ import annotations import functools import warnings from collections.abc import Callable, Hashable, Iterable, MutableMapping from typing import TYPE_CHECKING, Any, Literal, Union, cast, overload import numpy as np import pandas as pd from xarray.core.utils import attempt_import from xarray.plot.facetgrid import _easy_facetgrid from xarray.plot.utils import ( _LINEWIDTH_RANGE, _MARKERSIZE_RANGE, _add_colorbar, _add_legend, _assert_valid_xy, _determine_guide, _ensure_plottable, _guess_coords_to_plot, _infer_interval_breaks, _infer_xy_labels, _Normalize, _process_cmap_cbar_kwargs, _rescale_imshow_rgb, _resolve_intervals_1dplot, _resolve_intervals_2dplot, _set_concise_date, _update_axes, get_axis, label_from_attrs, ) from xarray.structure.alignment import broadcast from xarray.structure.concat import concat if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.collections import PathCollection, QuadMesh from matplotlib.colors import Colormap, Normalize from matplotlib.container import BarContainer from matplotlib.contour import QuadContourSet from matplotlib.image import AxesImage from matplotlib.patches import Polygon from mpl_toolkits.mplot3d.art3d import Line3D, Poly3DCollection from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.types import ( AspectOptions, ExtendOptions, HueStyleOptions, ScaleOptions, T_DataArray, ) from xarray.plot.facetgrid import FacetGrid _styles: dict[str, Any] = { # Add a white border 
to make it easier seeing overlapping markers: "scatter.edgecolors": "w", } def _infer_line_data( darray: DataArray, x: Hashable | None, y: Hashable | None, hue: Hashable | None ) -> tuple[DataArray, DataArray, DataArray | None, str]: ndims = len(darray.dims) if x is not None and y is not None: raise ValueError("Cannot specify both x and y kwargs for line plots.") if x is not None: _assert_valid_xy(darray, x, "x") if y is not None: _assert_valid_xy(darray, y, "y") if ndims == 1: huename = None hueplt = None huelabel = "" if x is not None: xplt = darray[x] yplt = darray elif y is not None: xplt = darray yplt = darray[y] else: # Both x & y are None dim = darray.dims[0] xplt = darray[dim] yplt = darray else: if x is None and y is None and hue is None: raise ValueError("For 2D inputs, please specify either hue, x or y.") if y is None: if hue is not None: _assert_valid_xy(darray, hue, "hue") xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue) xplt = darray[xname] if xplt.ndim > 1: if huename in darray.dims: otherindex = 1 if darray.dims.index(huename) == 0 else 0 otherdim = darray.dims[otherindex] yplt = darray.transpose(otherdim, huename, transpose_coords=False) xplt = xplt.transpose(otherdim, huename, transpose_coords=False) else: raise ValueError( "For 2D inputs, hue must be a dimension" " i.e. one of " + repr(darray.dims) ) else: (xdim,) = darray[xname].dims (huedim,) = darray[huename].dims yplt = darray.transpose(xdim, huedim) else: yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue) yplt = darray[yname] if yplt.ndim > 1: if huename in darray.dims: otherindex = 1 if darray.dims.index(huename) == 0 else 0 otherdim = darray.dims[otherindex] xplt = darray.transpose(otherdim, huename, transpose_coords=False) yplt = yplt.transpose(otherdim, huename, transpose_coords=False) else: raise ValueError( "For 2D inputs, hue must be a dimension" " i.e. one of " + repr(darray.dims) ) else: (ydim,) = darray[yname].dims (huedim,) = darray[huename].dims xplt = darray.transpose(ydim, huedim) huelabel = label_from_attrs(darray[huename]) hueplt = darray[huename] return xplt, yplt, hueplt, huelabel def _prepare_plot1d_data( darray: T_DataArray, coords_to_plot: MutableMapping[str, Hashable], plotfunc_name: str | None = None, _is_facetgrid: bool = False, ) -> dict[str, T_DataArray]: """ Prepare data for usage with plt.scatter. Parameters ---------- darray : T_DataArray Base DataArray. coords_to_plot : MutableMapping[str, Hashable] Coords that will be plotted. plotfunc_name : str | None Name of the plotting function that will be used. Returns ------- plts : dict[str, T_DataArray] Dict of DataArrays that will be sent to matplotlib. Examples -------- >>> # Make sure int coords are plotted: >>> a = xr.DataArray( ... data=[1, 2], ... coords={1: ("x", [0, 1], {"units": "s"})}, ... dims=("x",), ... name="a", ... ) >>> plts = xr.plot.dataarray_plot._prepare_plot1d_data( ... a, coords_to_plot={"x": 1, "z": None, "hue": None, "size": None} ... ) >>> # Check which coords to plot: >>> print({k: v.name for k, v in plts.items()}) {'y': 'a', 'x': 1} """ # If there are more than 1 dimension in the array than stack all the # dimensions so the plotter can plot anything: if darray.ndim > 1: # When stacking dims the lines will continue connecting. 
For floats # this can be solved by adding a nan element in between the flattening # points: dims_T = [] if np.issubdtype(darray.dtype, np.floating): for v in ["z", "x"]: dim = coords_to_plot.get(v, None) if (dim is not None) and (dim in darray.dims): darray_nan = np.nan * darray.isel({dim: -1}) darray = concat( [darray, darray_nan], dim=dim, coords="minimal", compat="override", join="exact", ) dims_T.append(coords_to_plot[v]) # Lines should never connect to the same coordinate when stacked, # transpose to avoid this as much as possible: darray = darray.transpose(..., *dims_T) # Array is now ready to be stacked: darray = darray.stack(_stacked_dim=darray.dims) # Broadcast together all the chosen variables: plts = dict(y=darray) plts.update( {k: darray.coords[v] for k, v in coords_to_plot.items() if v is not None} ) plts = dict(zip(plts.keys(), broadcast(*(plts.values())), strict=True)) return plts # return type is Any due to the many different possibilities def plot( darray: DataArray, *, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, ax: Axes | None = None, hue: Hashable | None = None, subplot_kws: dict[str, Any] | None = None, **kwargs: Any, ) -> Any: """ Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`. Calls xarray plotting function based on the dimensions of the squeezed DataArray. =============== =========================== Dimensions Plotting function =============== =========================== 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Parameters ---------- darray : DataArray row : Hashable or None, optional If passed, make row faceted plots on this dimension name. col : Hashable or None, optional If passed, make column faceted plots on this dimension name. col_wrap : int or None, optional Use together with ``col`` to wrap faceted plots. ax : matplotlib axes object, optional Axes on which to plot. By default, use the current axes. Mutually exclusive with ``size``, ``figsize`` and facets. hue : Hashable or None, optional If passed, make faceted line plots with hue on this dimension name. subplot_kws : dict, optional Dictionary of keyword arguments for Matplotlib subplots (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`). **kwargs : optional Additional keyword arguments for Matplotlib. See Also -------- xarray.DataArray.squeeze """ darray = darray.squeeze( d for d, s in darray.sizes.items() if s == 1 and d not in (row, col, hue) ).compute() plot_dims = set(darray.dims) plot_dims.discard(row) plot_dims.discard(col) plot_dims.discard(hue) ndims = len(plot_dims) plotfunc: Callable if ndims == 0 or darray.size == 0: raise TypeError("No numeric data to plot.") if ndims in (1, 2): if row or col: kwargs["subplot_kws"] = subplot_kws kwargs["row"] = row kwargs["col"] = col kwargs["col_wrap"] = col_wrap if ndims == 1: plotfunc = line kwargs["hue"] = hue elif ndims == 2: if hue: plotfunc = line kwargs["hue"] = hue else: plotfunc = pcolormesh kwargs["subplot_kws"] = subplot_kws else: if row or col or hue: raise ValueError( "Only 1d and 2d plots are supported for facets in xarray. " "See the package `Seaborn` for more options." 
) plotfunc = hist kwargs["ax"] = ax return plotfunc(darray, **kwargs) @overload def line( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, *args: Any, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> list[Line3D]: ... @overload def line( darray: T_DataArray, *args: Any, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def line( darray: T_DataArray, *args: Any, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... # This function signature should not change so that it can use # matplotlib format strings def line( darray: T_DataArray, *args: Any, row: Hashable | None = None, col: Hashable | None = None, figsize: Iterable[float] | None = None, aspect: AspectOptions = None, size: float | None = None, ax: Axes | None = None, hue: Hashable | None = None, x: Hashable | None = None, y: Hashable | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, add_legend: bool = True, _labels: bool = True, **kwargs: Any, ) -> list[Line3D] | FacetGrid[T_DataArray]: """ Line plot of DataArray values. Wraps :py:func:`matplotlib:matplotlib.pyplot.plot`. Parameters ---------- darray : DataArray Either 1D or 2D. If 2D, one of ``hue``, ``x`` or ``y`` must be provided. row : Hashable, optional If passed, make row faceted plots on this dimension name. col : Hashable, optional If passed, make column faceted plots on this dimension name. figsize : tuple, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. 
aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the *width* in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size: *height* (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axes on which to plot. By default, the current is used. Mutually exclusive with ``size`` and ``figsize``. hue : Hashable, optional Dimension or coordinate for which you want multiple lines plotted. If plotting against a 2D coordinate, ``hue`` must be a dimension. x, y : Hashable, optional Dimension, coordinate or multi-index level for *x*, *y* axis. Only one of these may be specified. The other will be used for values from the DataArray on which this plot method is called. xincrease : bool or None, optional Should the values on the *x* axis be increasing from left to right? if ``None``, use the default for the Matplotlib function. yincrease : bool or None, optional Should the values on the *y* axis be increasing from top to bottom? if ``None``, use the default for the Matplotlib function. xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional Specifies scaling for the *x*- and *y*-axis, respectively. xticks, yticks : array-like, optional Specify tick locations for *x*- and *y*-axis. xlim, ylim : tuple[float, float], optional Specify *x*- and *y*-axis limits. add_legend : bool, default: True Add legend with *y* axis coordinates (2D inputs only). *args, **kwargs : optional Additional arguments to :py:func:`matplotlib:matplotlib.pyplot.plot`. Returns ------- primitive : list of Line3D or FacetGrid When either col or row is given, returns a FacetGrid, otherwise a list of matplotlib Line3D objects. """ # Handle facetgrids first if row or col: allargs = locals().copy() allargs.update(allargs.pop("kwargs")) allargs.pop("darray") return _easy_facetgrid(darray, line, kind="line", **allargs) ndims = len(darray.dims) if ndims == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") if ndims > 2: raise ValueError( "Line plots are for 1- or 2-dimensional DataArrays. " f"Passed DataArray has {ndims} " "dimensions" ) # The allargs dict passed to _easy_facetgrid above contains args if args == (): args = kwargs.pop("args", ()) else: assert "args" not in kwargs ax = get_axis(figsize, size, aspect, ax) xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue) # Remove pd.Intervals if contained in xplt.values and/or yplt.values. 
xplt_val, yplt_val, x_suffix, y_suffix, kwargs = _resolve_intervals_1dplot( xplt.to_numpy(), yplt.to_numpy(), kwargs ) xlabel = label_from_attrs(xplt, extra=x_suffix) ylabel = label_from_attrs(yplt, extra=y_suffix) _ensure_plottable(xplt_val, yplt_val) primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs) if _labels: if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.set_title(darray._title_for_slice()) if darray.ndim == 2 and add_legend: assert hueplt is not None ax.legend(handles=primitive, labels=list(hueplt.to_numpy()), title=hue_label) if isinstance(xplt.dtype, np.dtype) and np.issubdtype(xplt.dtype, np.datetime64): # type: ignore[redundant-expr] _set_concise_date(ax, axis="x") _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return primitive @overload def step( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive **kwargs: Any, ) -> list[Line3D]: ... @overload def step( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @overload def step( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid **kwargs: Any, ) -> FacetGrid[DataArray]: ... def step( darray: DataArray, *args: Any, where: Literal["pre", "post", "mid"] = "pre", drawstyle: str | None = None, ds: str | None = None, row: Hashable | None = None, col: Hashable | None = None, **kwargs: Any, ) -> list[Line3D] | FacetGrid[DataArray]: """ Step plot of DataArray values. Similar to :py:func:`matplotlib:matplotlib.pyplot.step`. Parameters ---------- where : {'pre', 'post', 'mid'}, default: 'pre' Define where the steps should be placed: - ``'pre'``: The y value is continued constantly to the left from every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the value ``y[i]``. - ``'post'``: The y value is continued constantly to the right from every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the value ``y[i]``. - ``'mid'``: Steps occur half-way between the *x* positions. Note that this parameter is ignored if one coordinate consists of :py:class:`pandas.Interval` values, e.g. as a result of :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual boundaries of the interval are used. drawstyle, ds : str or None, optional Additional drawstyle. Only use one of drawstyle and ds. row : Hashable, optional If passed, make row faceted plots on this dimension name. col : Hashable, optional If passed, make column faceted plots on this dimension name. *args, **kwargs : optional Additional arguments for :py:func:`xarray.plot.line`. Returns ------- primitive : list of Line3D or FacetGrid When either col or row is given, returns a FacetGrid, otherwise a list of matplotlib Line3D objects. 
""" if where not in {"pre", "post", "mid"}: raise ValueError("'where' argument to step must be 'pre', 'post' or 'mid'") if ds is not None: if drawstyle is None: drawstyle = ds else: raise TypeError("ds and drawstyle are mutually exclusive") if drawstyle is None: drawstyle = "" drawstyle = "steps-" + where + drawstyle return line(darray, *args, drawstyle=drawstyle, col=col, row=row, **kwargs) def hist( darray: DataArray, *args: Any, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, xincrease: bool | None = None, yincrease: bool | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, **kwargs: Any, ) -> tuple[np.ndarray, np.ndarray, BarContainer | Polygon]: """ Histogram of DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.hist`. Plots *N*-dimensional arrays by first flattening the array. Parameters ---------- darray : DataArray Can have any number of dimensions. figsize : Iterable of float, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the *width* in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size: *height* (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axes on which to plot. By default, use the current axes. Mutually exclusive with ``size`` and ``figsize``. xincrease : bool or None, optional Should the values on the *x* axis be increasing from left to right? if ``None``, use the default for the Matplotlib function. yincrease : bool or None, optional Should the values on the *y* axis be increasing from top to bottom? if ``None``, use the default for the Matplotlib function. xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional Specifies scaling for the *x*- and *y*-axis, respectively. xticks, yticks : array-like, optional Specify tick locations for *x*- and *y*-axis. xlim, ylim : tuple[float, float], optional Specify *x*- and *y*-axis limits. **kwargs : optional Additional keyword arguments to :py:func:`matplotlib:matplotlib.pyplot.hist`. """ assert len(args) == 0 if darray.ndim == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") ax = get_axis(figsize, size, aspect, ax) no_nan_arr = np.ravel(darray.to_numpy()) no_nan = no_nan_arr[pd.notnull(no_nan_arr)] n, bins, patches = cast( tuple[np.ndarray, np.ndarray, Union["BarContainer", "Polygon"]], ax.hist(no_nan, **kwargs), ) ax.set_title(darray._title_for_slice()) ax.set_xlabel(label_from_attrs(darray)) _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return n, bins, patches def _plot1d(plotfunc): """Decorator for common 1d plotting logic.""" commondoc = """ Parameters ---------- darray : DataArray Must be 2 dimensional, unless creating faceted plots. x : Hashable or None, optional Coordinate for x axis. If None use darray.dims[1]. y : Hashable or None, optional Coordinate for y axis. If None use darray.dims[0]. z : Hashable or None, optional If specified plot 3D and use this coordinate for *z* axis. hue : Hashable or None, optional Dimension or coordinate for which you want multiple lines plotted. 
markersize: Hashable or None, optional scatter only. Variable by which to vary size of scattered points. linewidth: Hashable or None, optional Variable by which to vary linewidth. row : Hashable, optional If passed, make row faceted plots on this dimension name. col : Hashable, optional If passed, make column faceted plots on this dimension name. col_wrap : int, optional Use together with ``col`` to wrap faceted plots ax : matplotlib axes object, optional If None, uses the current axis. Not applicable when using facets. figsize : Iterable[float] or None, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. xincrease : bool or None, default: True Should the values on the x axes be increasing from left to right? if None, use the default for the matplotlib function. yincrease : bool or None, default: True Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. add_legend : bool or None, optional If True use xarray metadata to add a legend. add_colorbar : bool or None, optional If True add a colorbar. add_labels : bool or None, optional If True use xarray metadata to label axes add_title : bool or None, optional If True use xarray metadata to add a title subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies to FacetGrid plotting. xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the x-axes. yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the y-axes. xticks : ArrayLike or None, optional Specify tick locations for x-axes. yticks : ArrayLike or None, optional Specify tick locations for y-axes. xlim : tuple[float, float] or None, optional Specify x-axes limits. ylim : tuple[float, float] or None, optional Specify y-axes limits. cmap : matplotlib colormap name or colormap, optional The mapping from data values to color space. Either a Matplotlib colormap name or object. If not provided, this will be either ``'viridis'`` (if the function infers a sequential dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset). See :doc:`Choosing Colormaps in Matplotlib ` for more information. If *seaborn* is installed, ``cmap`` may also be a `seaborn color palette `_. Note: if ``cmap`` is a seaborn color palette, ``levels`` must also be specified. vmin : float or None, optional Lower value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. vmax : float or None, optional Upper value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. 
norm : matplotlib.colors.Normalize, optional If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding kwarg must be ``None``. extend : {'neither', 'both', 'min', 'max'}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits. levels : int or array-like, optional Split the colormap (``cmap``) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. **kwargs : optional Additional arguments to wrapped matplotlib function Returns ------- artist : The same type of primitive artist that the wrapped matplotlib function returns """ # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps( plotfunc, assigned=("__module__", "__name__", "__qualname__", "__doc__") ) def newplotfunc( darray: DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> Any: # All 1d plots in xarray share this function signature. # Method signature below should be consistent. if TYPE_CHECKING: import matplotlib.pyplot as plt else: plt = attempt_import("matplotlib.pyplot") if subplot_kws is None: subplot_kws = dict() # Handle facetgrids first if row or col: if z is not None: subplot_kws.update(projection="3d") allargs = locals().copy() allargs.update(allargs.pop("kwargs")) allargs.pop("darray") allargs.pop("plt") allargs["plotfunc"] = globals()[plotfunc.__name__] return _easy_facetgrid(darray, kind="plot1d", **allargs) if darray.ndim == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") # The allargs dict passed to _easy_facetgrid above contains args if args == (): args = kwargs.pop("args", ()) if args: assert "args" not in kwargs # TODO: Deprecated since 2022.10: msg = "Using positional arguments is deprecated for plot methods, use keyword arguments instead." assert x is None x = args[0] if len(args) > 1: assert y is None y = args[1] if len(args) > 2: assert z is None z = args[2] if len(args) > 3: assert hue is None hue = args[3] if len(args) > 4: raise ValueError(msg) else: warnings.warn(msg, DeprecationWarning, stacklevel=2) del args if hue_style is not None: # TODO: Not used since 2022.10. Deprecated since 2023.07. 
warnings.warn( ( "hue_style is no longer used for plot1d plots " "and the argument will eventually be removed. " "Convert numbers to string for a discrete hue " "and use add_legend or add_colorbar to control which guide to display." ), DeprecationWarning, stacklevel=2, ) _is_facetgrid = kwargs.pop("_is_facetgrid", False) if plotfunc.__name__ == "scatter": size_ = kwargs.pop("_size", markersize) size_r = _MARKERSIZE_RANGE # Remove any nulls, .where(m, drop=True) doesn't work when m is # a dask array, so load the array to memory. # It will have to be loaded to memory at some point anyway: darray = darray.compute() darray = darray.where(darray.notnull(), drop=True) else: size_ = kwargs.pop("_size", linewidth) size_r = _LINEWIDTH_RANGE # Get data to plot: coords_to_plot: MutableMapping[str, Hashable | None] = dict( x=x, z=z, hue=hue, size=size_ ) if not _is_facetgrid: # Guess what coords to use if some of the values in coords_to_plot are None: coords_to_plot = _guess_coords_to_plot(darray, coords_to_plot, kwargs) plts = _prepare_plot1d_data(darray, coords_to_plot, plotfunc.__name__) xplt = plts.pop("x", None) yplt = plts.pop("y", None) zplt = plts.pop("z", None) kwargs.update(zplt=zplt) hueplt = plts.pop("hue", None) sizeplt = plts.pop("size", None) # Handle size and hue: hueplt_norm = _Normalize(data=hueplt) kwargs.update(hueplt=hueplt_norm.values) sizeplt_norm = _Normalize( data=sizeplt, width=size_r, _is_facetgrid=_is_facetgrid ) kwargs.update(sizeplt=sizeplt_norm.values) cmap_params_subset = kwargs.pop("cmap_params_subset", {}) cbar_kwargs = kwargs.pop("cbar_kwargs", {}) if hueplt_norm.data is not None: if not hueplt_norm.data_is_numeric: # Map hue values back to its original value: cbar_kwargs.update(format=hueplt_norm.format, ticks=hueplt_norm.ticks) levels = kwargs.get("levels", hueplt_norm.levels) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, cast("DataArray", hueplt_norm.values).data, **locals(), ) # subset that can be passed to scatter, hist2d if not cmap_params_subset: ckw = {vv: cmap_params[vv] for vv in ("vmin", "vmax", "norm", "cmap")} cmap_params_subset.update(**ckw) with plt.rc_context(_styles): if z is not None: import mpl_toolkits if ax is None: subplot_kws.update(projection="3d") ax = get_axis(figsize, size, aspect, ax, **subplot_kws) assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) # Using 30, 30 minimizes rotation of the plot. 
Making it easier to # build on your intuition from 2D plots: ax.view_init(azim=30, elev=30, vertical_axis="y") else: ax = get_axis(figsize, size, aspect, ax, **subplot_kws) primitive = plotfunc( xplt, yplt, ax=ax, add_labels=add_labels, **cmap_params_subset, **kwargs, ) if np.any(np.asarray(add_labels)) and add_title: ax.set_title(darray._title_for_slice()) add_colorbar_, add_legend_ = _determine_guide( hueplt_norm, sizeplt_norm, add_colorbar, add_legend, plotfunc_name=plotfunc.__name__, ) if add_colorbar_: if "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(hueplt_norm.data) _add_colorbar( primitive, ax, kwargs.get("cbar_ax"), cbar_kwargs, cmap_params ) if add_legend_: if plotfunc.__name__ in ["scatter", "line"]: _add_legend( ( hueplt_norm if add_legend or not add_colorbar_ else _Normalize(None) ), sizeplt_norm, primitive, legend_ax=ax, plotfunc=plotfunc.__name__, ) else: hueplt_norm_values: list[np.ndarray | None] if hueplt_norm.data is not None: hueplt_norm_values = list(hueplt_norm.data.to_numpy()) else: hueplt_norm_values = [hueplt_norm.data] if plotfunc.__name__ == "hist": ax.legend( handles=primitive[-1], labels=hueplt_norm_values, title=label_from_attrs(hueplt_norm.data), ) else: ax.legend( handles=primitive, labels=hueplt_norm_values, title=label_from_attrs(hueplt_norm.data), ) _update_axes( ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim ) return primitive # we want to actually expose the signature of newplotfunc # and not the copied **kwargs from the plotfunc which # functools.wraps adds, so delete the wrapped attr del newplotfunc.__wrapped__ return newplotfunc def _add_labels( add_labels: bool | Iterable[bool], darrays: Iterable[DataArray | None], suffixes: Iterable[str], ax: Axes, ) -> None: """Set x, y, z labels.""" add_labels = [add_labels] * 3 if isinstance(add_labels, bool) else add_labels axes: tuple[Literal["x", "y", "z"], ...] = ("x", "y", "z") for axis, add_label, darray, suffix in zip( axes, add_labels, darrays, suffixes, strict=True ): if darray is None: continue if add_label: label = label_from_attrs(darray, extra=suffix) if label is not None: getattr(ax, f"set_{axis}label")(label) if np.issubdtype(darray.dtype, np.datetime64): _set_concise_date(ax, axis=axis) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> PathCollection: ... 
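# A minimal usage sketch for the ``scatter`` wrapper whose overloads appear here
# (kept as a comment so it never runs at import time and the overload group stays
# intact). The data and names below are hypothetical example values, not part of
# this module:
#
#     import numpy as np
#     import xarray as xr
#
#     da = xr.DataArray(
#         np.random.randn(4, 50),
#         dims=("w", "t"),
#         coords={"w": [10, 20, 30, 40], "t": np.linspace(0, 1, 50)},
#         name="A",
#     )
#     # Only ``x`` is named, so the array's own values end up on the other axis;
#     # ``hue`` maps the "w" coordinate onto color, and a legend or colorbar is
#     # chosen automatically (see _determine_guide above).
#     da.plot.scatter(x="t", hue="w")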
@overload def scatter( darray: T_DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> FacetGrid[T_DataArray]: ... @overload def scatter( darray: T_DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs, ) -> FacetGrid[T_DataArray]: ... @_plot1d def scatter( xplt: DataArray | None, yplt: DataArray | None, ax: Axes, add_labels: bool | Iterable[bool] = True, **kwargs, ) -> PathCollection: """Scatter variables against each other. Wraps :py:func:`matplotlib:matplotlib.pyplot.scatter`. 
""" if "u" in kwargs or "v" in kwargs: raise ValueError("u, v are not allowed in scatter plots.") zplt: DataArray | None = kwargs.pop("zplt", None) hueplt: DataArray | None = kwargs.pop("hueplt", None) sizeplt: DataArray | None = kwargs.pop("sizeplt", None) if hueplt is not None: kwargs.update(c=hueplt.to_numpy().ravel()) if sizeplt is not None: kwargs.update(s=sizeplt.to_numpy().ravel()) plts_or_none = (xplt, yplt, zplt) _add_labels(add_labels, plts_or_none, ("", "", ""), ax) xplt_np = None if xplt is None else xplt.to_numpy().ravel() yplt_np = None if yplt is None else yplt.to_numpy().ravel() zplt_np = None if zplt is None else zplt.to_numpy().ravel() plts_np = tuple(p for p in (xplt_np, yplt_np, zplt_np) if p is not None) if len(plts_np) == 3: import mpl_toolkits assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) return ax.scatter(xplt_np, yplt_np, zplt_np, **kwargs) if len(plts_np) == 2: return ax.scatter(plts_np[0], plts_np[1], **kwargs) raise ValueError("At least two variables required for a scatter plot.") def _plot2d(plotfunc): """Decorator for common 2d plotting logic.""" commondoc = """ Parameters ---------- darray : DataArray Must be two-dimensional, unless creating faceted plots. x : Hashable or None, optional Coordinate for *x* axis. If ``None``, use ``darray.dims[1]``. y : Hashable or None, optional Coordinate for *y* axis. If ``None``, use ``darray.dims[0]``. figsize : Iterable or float or None, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. size : scalar, optional If provided, create a new figure for the plot with the given size: *height* (in inches) of each plot. See also: ``aspect``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the *width* in inches. Only used if a ``size`` is provided. ax : matplotlib axes object, optional Axes on which to plot. By default, use the current axes. Mutually exclusive with ``size`` and ``figsize``. row : Hashable or None, optional If passed, make row faceted plots on this dimension name. col : Hashable or None, optional If passed, make column faceted plots on this dimension name. col_wrap : int, optional Use together with ``col`` to wrap faceted plots. xincrease : None, True, or False, optional Should the values on the *x* axis be increasing from left to right? If ``None``, use the default for the Matplotlib function. yincrease : None, True, or False, optional Should the values on the *y* axis be increasing from top to bottom? If ``None``, use the default for the Matplotlib function. add_colorbar : bool, optional Add colorbar to axes. add_labels : bool, optional Use xarray metadata to label axes. vmin : float or None, optional Lower value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. vmax : float or None, optional Upper value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. 
cmap : matplotlib colormap name or colormap, optional The mapping from data values to color space. If not provided, this will be either be ``'viridis'`` (if the function infers a sequential dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset). See :doc:`Choosing Colormaps in Matplotlib ` for more information. If *seaborn* is installed, ``cmap`` may also be a `seaborn color palette `_. Note: if ``cmap`` is a seaborn color palette and the plot type is not ``'contour'`` or ``'contourf'``, ``levels`` must also be specified. center : float or False, optional The value at which to center the colormap. Passing this value implies use of a diverging colormap. Setting it to ``False`` prevents use of a diverging colormap. robust : bool, optional If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. extend : {'neither', 'both', 'min', 'max'}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits. levels : int or array-like, optional Split the colormap (``cmap``) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. infer_intervals : bool, optional Only applies to pcolormesh. If ``True``, the coordinate intervals are passed to pcolormesh. If ``False``, the original coordinates are used (this can be useful for certain map projections). The default is to always infer intervals, unless the mesh is irregular and plotted on a map projection. colors : str or array-like of color-like, optional A single color or a sequence of colors. If the plot type is not ``'contour'`` or ``'contourf'``, the ``levels`` argument is required. subplot_kws : dict, optional Dictionary of keyword arguments for Matplotlib subplots. Only used for 2D and faceted plots. (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`). cbar_ax : matplotlib axes object, optional Axes in which to draw the colorbar. cbar_kwargs : dict, optional Dictionary of keyword arguments to pass to the colorbar (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`). xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the x-axes. yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional Specifies scaling for the y-axes. xticks : ArrayLike or None, optional Specify tick locations for x-axes. yticks : ArrayLike or None, optional Specify tick locations for y-axes. xlim : tuple[float, float] or None, optional Specify x-axes limits. ylim : tuple[float, float] or None, optional Specify y-axes limits. norm : matplotlib.colors.Normalize, optional If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding kwarg must be ``None``. **kwargs : optional Additional keyword arguments to wrapped Matplotlib function. Returns ------- artist : The same type of primitive artist that the wrapped Matplotlib function returns. 
""" # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps( plotfunc, assigned=("__module__", "__name__", "__qualname__", "__doc__") ) def newplotfunc( darray: DataArray, *args: Any, x: Hashable | None = None, y: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> Any: # All 2d plots in xarray share this function signature. if args: # TODO: Deprecated since 2022.10: msg = "Using positional arguments is deprecated for plot methods, use keyword arguments instead." assert x is None x = args[0] if len(args) > 1: assert y is None y = args[1] if len(args) > 2: raise ValueError(msg) else: warnings.warn(msg, DeprecationWarning, stacklevel=2) del args # Decide on a default for the colorbar before facetgrids if add_colorbar is None: add_colorbar = True if plotfunc.__name__ == "contour" or ( plotfunc.__name__ == "surface" and cmap is None ): add_colorbar = False imshow_rgb = plotfunc.__name__ == "imshow" and darray.ndim == ( 3 + (row is not None) + (col is not None) ) if imshow_rgb: # Don't add a colorbar when showing an image with explicit colors add_colorbar = False # Matplotlib does not support normalising RGB data, so do it here. # See eg. https://github.com/matplotlib/matplotlib/pull/10220 if robust or vmax is not None or vmin is not None: darray = _rescale_imshow_rgb(darray.as_numpy(), vmin, vmax, robust) vmin, vmax, robust = None, None, False if subplot_kws is None: subplot_kws = dict() if plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid"): if ax is None: # TODO: Importing Axes3D is no longer necessary in matplotlib >= 3.2. 
# Remove when minimum requirement of matplotlib is 3.2: from mpl_toolkits.mplot3d import Axes3D # delete so it does not end up in locals() del Axes3D # Need to create a "3d" Axes instance for surface plots subplot_kws["projection"] = "3d" # In facet grids, shared axis labels don't make sense for surface plots sharex = False sharey = False # Handle facetgrids first if row or col: allargs = locals().copy() del allargs["darray"] del allargs["imshow_rgb"] allargs.update(allargs.pop("kwargs")) # Need the decorated plotting function allargs["plotfunc"] = globals()[plotfunc.__name__] return _easy_facetgrid(darray, kind="dataarray", **allargs) if darray.ndim == 0 or darray.size == 0: # TypeError to be consistent with pandas raise TypeError("No numeric data to plot.") if ( plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid") and ax is not None ): import mpl_toolkits if not isinstance(ax, mpl_toolkits.mplot3d.Axes3D): raise ValueError( "If ax is passed to surface(), it must be created with " 'projection="3d"' ) rgb = kwargs.pop("rgb", None) if rgb is not None and plotfunc.__name__ != "imshow": raise ValueError('The "rgb" keyword is only valid for imshow()') elif rgb is not None and not imshow_rgb: raise ValueError( 'The "rgb" keyword is only valid for imshow()' "with a three-dimensional array (per facet)" ) xlab, ylab = _infer_xy_labels( darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb ) xval = darray[xlab] yval = darray[ylab] if xval.ndim > 1 or yval.ndim > 1 or plotfunc.__name__ == "surface": # Passing 2d coordinate values, need to ensure they are transposed the same # way as darray. # Also surface plots always need 2d coordinates xval = xval.broadcast_like(darray) yval = yval.broadcast_like(darray) dims = darray.dims else: dims = (yval.dims[0], xval.dims[0]) # May need to transpose for correct x, y labels # xlab may be the name of a coord, we have to check for dim names if imshow_rgb: # For RGB[A] images, matplotlib requires the color dimension # to be last. In Xarray the order should be unimportant, so # we transpose to (y, x, color) to make this work. yx_dims = (ylab, xlab) dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims) if dims != darray.dims: darray = darray.transpose(*dims, transpose_coords=True) # better to pass the ndarrays directly to plotting functions xvalnp = xval.to_numpy() yvalnp = yval.to_numpy() # Pass the data as a masked ndarray too zval = darray.to_masked_array(copy=False) # Replace pd.Intervals if contained in xval or yval. xplt, xlab_extra = _resolve_intervals_2dplot(xvalnp, plotfunc.__name__) yplt, ylab_extra = _resolve_intervals_2dplot(yvalnp, plotfunc.__name__) _ensure_plottable(xplt, yplt, zval) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, zval.data, **locals(), _is_facetgrid=kwargs.pop("_is_facetgrid", False), ) if "contour" in plotfunc.__name__: # extend is a keyword argument only for contour and contourf, but # passing it to the colorbar is sufficient for imshow and # pcolormesh kwargs["extend"] = cmap_params["extend"] kwargs["levels"] = cmap_params["levels"] # if colors == a single color, matplotlib draws dashed negative # contours. 
we lose this feature if we pass cmap and not colors if colors is not None: cmap_params["cmap"] = None kwargs["colors"] = colors if "pcolormesh" == plotfunc.__name__: kwargs["infer_intervals"] = infer_intervals kwargs["xscale"] = xscale kwargs["yscale"] = yscale if "imshow" == plotfunc.__name__ and isinstance(aspect, str): # forbid usage of mpl strings raise ValueError("plt.imshow's `aspect` kwarg is not available in xarray") ax = get_axis(figsize, size, aspect, ax, **subplot_kws) primitive = plotfunc( xplt, yplt, zval, ax=ax, cmap=cmap_params["cmap"], vmin=cmap_params["vmin"], vmax=cmap_params["vmax"], norm=cmap_params["norm"], **kwargs, ) # Label the plot with metadata if add_labels: ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra)) ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra)) ax.set_title(darray._title_for_slice()) if plotfunc.__name__ == "surface": import mpl_toolkits assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) ax.set_zlabel(label_from_attrs(darray)) if add_colorbar: if add_labels and "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(darray) cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params) elif cbar_ax is not None or cbar_kwargs: # inform the user about keywords which aren't used raise ValueError( "cbar_ax and cbar_kwargs can't be used with add_colorbar=False." ) # origin kwarg overrides yincrease if "origin" in kwargs: yincrease = None _update_axes( ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim ) if np.issubdtype(xplt.dtype, np.datetime64): _set_concise_date(ax, "x") return primitive # we want to actually expose the signature of newplotfunc # and not the copied **kwargs from the plotfunc which # functools.wraps adds, so delete the wrapped attr del newplotfunc.__wrapped__ return newplotfunc @overload def imshow( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> AxesImage: ... 
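# Usage sketch for the 2d wrappers handled by ``_plot2d`` above (hypothetical
# example data; commented out so importing this module is unaffected). It mirrors
# the overloads around it: without ``row``/``col`` a Matplotlib artist is
# returned, with them a FacetGrid:
#
#     import numpy as np
#     import xarray as xr
#
#     da = xr.DataArray(
#         np.random.rand(3, 20, 30),
#         dims=("time", "y", "x"),
#         coords={"time": [0, 1, 2], "y": np.arange(20), "x": np.arange(30)},
#         name="field",
#     )
#     da.isel(time=0).plot.imshow()           # -> AxesImage
#     da.plot.imshow(col="time", col_wrap=2)  # -> FacetGrid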
@overload def imshow( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def imshow( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def imshow( x: np.ndarray, y: np.ndarray, z: np.ma.core.MaskedArray, ax: Axes, **kwargs: Any ) -> AxesImage: """ Image plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.imshow`. While other plot methods require the DataArray to be strictly two-dimensional, ``imshow`` also accepts a 3D array where some dimension can be interpreted as RGB or RGBA color channels and allows this dimension to be specified via the kwarg ``rgb=``. Unlike :py:func:`matplotlib:matplotlib.pyplot.imshow`, which ignores ``vmin``/``vmax`` for RGB(A) data, xarray *will* use ``vmin`` and ``vmax`` for RGB(A) data by applying a single scaling factor and offset to all bands. Passing ``robust=True`` infers ``vmin`` and ``vmax`` :ref:`in the usual way `. Additionally the y-axis is not inverted by default, you can restore the matplotlib behavior by setting `yincrease=False`. .. note:: This function needs uniformly spaced coordinates to properly label the axes. Call :py:meth:`DataArray.plot` to check. The pixels are centered on the coordinates. For example, if the coordinate value is 3.2, then the pixels for those coordinates will be centered on 3.2. 
""" if x.ndim != 1 or y.ndim != 1: raise ValueError( "imshow requires 1D coordinates, try using pcolormesh or contour(f)" ) def _center_pixels(x): """Center the pixels on the coordinates.""" if np.issubdtype(x.dtype, str): # When using strings as inputs imshow converts it to # integers. Choose extent values which puts the indices in # in the center of the pixels: return 0 - 0.5, len(x) - 0.5 try: # Center the pixels assuming uniform spacing: xstep = 0.5 * (x[1] - x[0]) except IndexError: # Arbitrary default value, similar to matplotlib behaviour: xstep = 0.1 return x[0] - xstep, x[-1] + xstep # Center the pixels: left, right = _center_pixels(x) top, bottom = _center_pixels(y) defaults: dict[str, Any] = {"origin": "upper", "interpolation": "nearest"} if not hasattr(ax, "projection"): # not for cartopy geoaxes defaults["aspect"] = "auto" # Allow user to override these defaults defaults.update(kwargs) if defaults["origin"] == "upper": defaults["extent"] = [left, right, bottom, top] else: defaults["extent"] = [left, right, top, bottom] if z.ndim == 3: # matplotlib imshow uses black for missing data, but Xarray makes # missing data transparent. We therefore add an alpha channel if # there isn't one, and set it to transparent where data is masked. if z.shape[-1] == 3: safe_dtype = np.promote_types(z.dtype, np.uint8) alpha = np.ma.ones(z.shape[:2] + (1,), dtype=safe_dtype) if np.issubdtype(z.dtype, np.integer): alpha[:] = 255 z = np.ma.concatenate((z, alpha), axis=2) else: z = z.copy() z[np.any(z.mask, axis=-1), -1] = 0 primitive = ax.imshow(z, **defaults) # If x or y are strings the ticklabels have been replaced with # integer indices. Replace them back to strings: for axis, v in [("x", x), ("y", y)]: if np.issubdtype(v.dtype, str): getattr(ax, f"set_{axis}ticks")(np.arange(len(v))) getattr(ax, f"set_{axis}ticklabels")(v) return primitive @overload def contour( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... 
@overload def contour( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def contour( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def contour( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, **kwargs: Any ) -> QuadContourSet: """ Contour plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.contour`. 
""" primitive = ax.contour(x, y, z, **kwargs) return primitive @overload def contourf( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadContourSet: ... @overload def contourf( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def contourf( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... 
@_plot2d def contourf( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, **kwargs: Any ) -> QuadContourSet: """ Filled contour plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.contourf`. """ primitive = ax.contourf(x, y, z, **kwargs) return primitive @overload def pcolormesh( # type: ignore[misc,unused-ignore] # None is hashable :( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> QuadMesh: ... @overload def pcolormesh( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... 
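# Usage sketch for ``pcolormesh`` (hypothetical example data, comment only). By
# default cell edges are inferred from the coordinate midpoints
# (``infer_intervals``); passing ``infer_intervals=False`` uses the coordinates
# as given, which can matter for certain map projections:
#
#     import numpy as np
#     import xarray as xr
#
#     da = xr.DataArray(
#         np.random.randn(20, 30),
#         dims=("lat", "lon"),
#         coords={"lat": np.linspace(40, 60, 20), "lon": np.linspace(-20, 20, 30)},
#         name="t2m",
#     )
#     da.plot.pcolormesh(x="lon", y="lat")
#     da.plot.pcolormesh(x="lon", y="lat", infer_intervals=False)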
@overload def pcolormesh( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def pcolormesh( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, xscale: ScaleOptions | None = None, yscale: ScaleOptions | None = None, infer_intervals=None, **kwargs: Any, ) -> QuadMesh: """ Pseudocolor plot of 2D DataArray. Wraps :py:func:`matplotlib:matplotlib.pyplot.pcolormesh`. """ # decide on a default for infer_intervals (GH781) x = np.asarray(x) if infer_intervals is None: if hasattr(ax, "projection"): if len(x.shape) == 1: infer_intervals = True else: infer_intervals = False else: infer_intervals = True if any(np.issubdtype(k.dtype, str) for k in (x, y)): # do not infer intervals if any axis contains str ticks, see #6775 infer_intervals = False if infer_intervals and ( (np.shape(x)[0] == np.shape(z)[1]) or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])) ): if x.ndim == 1: x = _infer_interval_breaks(x, check_monotonic=True, scale=xscale) else: # we have to infer the intervals on both axes x = _infer_interval_breaks(x, axis=1, scale=xscale) x = _infer_interval_breaks(x, axis=0, scale=xscale) if infer_intervals and (np.shape(y)[0] == np.shape(z)[0]): if y.ndim == 1: y = _infer_interval_breaks(y, check_monotonic=True, scale=yscale) else: # we have to infer the intervals on both axes y = _infer_interval_breaks(y, axis=1, scale=yscale) y = _infer_interval_breaks(y, axis=0, scale=yscale) ax.grid(False) primitive = ax.pcolormesh(x, y, z, **kwargs) # by default, pcolormesh picks "round" values for bounds # this results in ugly looking plots with lots of surrounding whitespace if not hasattr(ax, "projection") and x.ndim == 1 and y.ndim == 1: # not a cartopy geoaxis ax.set_xlim(x[0], x[-1]) ax.set_ylim(y[0], y[-1]) return primitive @overload def surface( darray: DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = 
None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> Poly3DCollection: ... @overload def surface( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @overload def surface( darray: T_DataArray, x: Hashable | None = None, y: Hashable | None = None, *, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_colorbar: bool | None = None, add_labels: bool = True, vmin: float | None = None, vmax: float | None = None, cmap: str | Colormap | None = None, center: float | Literal[False] | None = None, robust: bool = False, extend: ExtendOptions = None, levels: ArrayLike | None = None, infer_intervals=None, colors: str | ArrayLike | None = None, subplot_kws: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cbar_kwargs: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, norm: Normalize | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArray]: ... @_plot2d def surface( x: np.ndarray, y: np.ndarray, z: np.ndarray, ax: Axes, **kwargs: Any ) -> Poly3DCollection: """ Surface plot of 2D DataArray. Wraps :py:meth:`matplotlib:mpl_toolkits.mplot3d.axes3d.Axes3D.plot_surface`. 
""" import mpl_toolkits assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) primitive = ax.plot_surface(x, y, z, **kwargs) return primitive xarray-2025.09.0/xarray/plot/dataset_plot.py000066400000000000000000000742261505620616400207000ustar00rootroot00000000000000from __future__ import annotations import functools import inspect import warnings from collections.abc import Callable, Hashable, Iterable from typing import TYPE_CHECKING, Any, TypeVar, overload from xarray.plot import dataarray_plot from xarray.plot.facetgrid import _easy_facetgrid from xarray.plot.utils import ( _add_colorbar, _get_nice_quiver_magnitude, _infer_meta_data, _process_cmap_cbar_kwargs, get_axis, ) from xarray.structure.alignment import broadcast if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.collections import LineCollection, PathCollection from matplotlib.colors import Colormap, Normalize from matplotlib.quiver import Quiver from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( AspectOptions, ExtendOptions, HueStyleOptions, ScaleOptions, ) from xarray.plot.facetgrid import FacetGrid def _dsplot(plotfunc): commondoc = """ Parameters ---------- ds : Dataset x : Hashable or None, optional Variable name for x-axis. y : Hashable or None, optional Variable name for y-axis. u : Hashable or None, optional Variable name for the *u* velocity (in *x* direction). quiver/streamplot plots only. v : Hashable or None, optional Variable name for the *v* velocity (in *y* direction). quiver/streamplot plots only. hue: Hashable or None, optional Variable by which to color scatter points or arrows. hue_style: {'continuous', 'discrete'} or None, optional How to use the ``hue`` variable: - ``'continuous'`` -- continuous color scale (default for numeric ``hue`` variables) - ``'discrete'`` -- a color for each unique value, using the default color cycle (default for non-numeric ``hue`` variables) row : Hashable or None, optional If passed, make row faceted plots on this dimension name. col : Hashable or None, optional If passed, make column faceted plots on this dimension name. col_wrap : int, optional Use together with ``col`` to wrap faceted plots. ax : matplotlib axes object or None, optional If ``None``, use the current axes. Not applicable when using facets. figsize : Iterable[float] or None, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. aspect : "auto", "equal", scalar or None, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. sharex : bool or None, optional If True all subplots share the same x-axis. sharey : bool or None, optional If True all subplots share the same y-axis. add_guide: bool or None, optional Add a guide that depends on ``hue_style``: - ``'continuous'`` -- build a colorbar - ``'discrete'`` -- build a legend subplot_kws : dict or None, optional Dictionary of keyword arguments for Matplotlib subplots (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`). Only applies to FacetGrid plotting. cbar_kwargs : dict, optional Dictionary of keyword arguments to pass to the colorbar (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`). cbar_ax : matplotlib axes object, optional Axes in which to draw the colorbar. 
cmap : matplotlib colormap name or colormap, optional The mapping from data values to color space. Either a Matplotlib colormap name or object. If not provided, this will be either ``'viridis'`` (if the function infers a sequential dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset). See :doc:`Choosing Colormaps in Matplotlib ` for more information. If *seaborn* is installed, ``cmap`` may also be a `seaborn color palette `_. Note: if ``cmap`` is a seaborn color palette, ``levels`` must also be specified. vmin : float or None, optional Lower value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. vmax : float or None, optional Upper value to anchor the colormap, otherwise it is inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting `vmin` or `vmax` will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. norm : matplotlib.colors.Normalize, optional If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding kwarg must be ``None``. infer_intervals: bool | None If True the intervals are inferred. center : float, optional The value at which to center the colormap. Passing this value implies use of a diverging colormap. Setting it to ``False`` prevents use of a diverging colormap. robust : bool, optional If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. colors : str or array-like of color-like, optional A single color or a list of colors. The ``levels`` argument is required. extend : {'neither', 'both', 'min', 'max'}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits. levels : int or array-like, optional Split the colormap (``cmap``) into discrete color intervals. If an integer is provided, "nice" levels are chosen based on the data range: this can imply that the final number of levels is not exactly the expected one. Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to setting ``levels=np.linspace(vmin, vmax, N)``. **kwargs : optional Additional keyword arguments to wrapped Matplotlib function. 
""" # Build on the original docstring plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}" @functools.wraps( plotfunc, assigned=("__module__", "__name__", "__qualname__", "__doc__") ) def newplotfunc( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, sharex: bool = True, sharey: bool = True, add_guide: bool | None = None, subplot_kws: dict[str, Any] | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> Any: if args: # TODO: Deprecated since 2022.10: msg = "Using positional arguments is deprecated for plot methods, use keyword arguments instead." assert x is None x = args[0] if len(args) > 1: assert y is None y = args[1] if len(args) > 2: assert u is None u = args[2] if len(args) > 3: assert v is None v = args[3] if len(args) > 4: assert hue is None hue = args[4] if len(args) > 5: raise ValueError(msg) else: warnings.warn(msg, DeprecationWarning, stacklevel=2) del args _is_facetgrid = kwargs.pop("_is_facetgrid", False) if _is_facetgrid: # facetgrid call meta_data = kwargs.pop("meta_data") else: meta_data = _infer_meta_data( ds, x, y, hue, hue_style, add_guide, funcname=plotfunc.__name__ ) hue_style = meta_data["hue_style"] # handle facetgrids first if col or row: allargs = locals().copy() allargs["plotfunc"] = globals()[plotfunc.__name__] allargs["data"] = ds # remove kwargs to avoid passing the information twice for arg in ["meta_data", "kwargs", "ds"]: del allargs[arg] return _easy_facetgrid(kind="dataset", **allargs, **kwargs) figsize = kwargs.pop("figsize", None) ax = get_axis(figsize, size, aspect, ax) if hue_style == "continuous" and hue is not None: if _is_facetgrid: cbar_kwargs = meta_data["cbar_kwargs"] cmap_params = meta_data["cmap_params"] else: cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( plotfunc, ds[hue].values, **locals() ) # subset that can be passed to scatter, hist2d cmap_params_subset = { vv: cmap_params[vv] for vv in ["vmin", "vmax", "norm", "cmap"] } else: cmap_params_subset = {} if (u is not None or v is not None) and plotfunc.__name__ not in ( "quiver", "streamplot", ): raise ValueError("u, v are only allowed for quiver or streamplot plots.") primitive = plotfunc( ds=ds, x=x, y=y, ax=ax, u=u, v=v, hue=hue, hue_style=hue_style, cmap_params=cmap_params_subset, **kwargs, ) if _is_facetgrid: # if this was called from Facetgrid.map_dataset, return primitive # finish here. 
Else, make labels if meta_data.get("xlabel", None): ax.set_xlabel(meta_data.get("xlabel")) if meta_data.get("ylabel", None): ax.set_ylabel(meta_data.get("ylabel")) if meta_data["add_legend"]: ax.legend(handles=primitive, title=meta_data.get("hue_label", None)) if meta_data["add_colorbar"]: cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs if "label" not in cbar_kwargs: cbar_kwargs["label"] = meta_data.get("hue_label", None) _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params) if meta_data["add_quiverkey"]: magnitude = _get_nice_quiver_magnitude(ds[u], ds[v]) units = ds[u].attrs.get("units", "") ax.quiverkey( primitive, X=0.85, Y=0.9, U=magnitude, label=f"{magnitude}\n{units}", labelpos="E", coordinates="figure", ) if plotfunc.__name__ in ("quiver", "streamplot"): title = ds[u]._title_for_slice() else: title = ds[x]._title_for_slice() ax.set_title(title) return primitive # we want to actually expose the signature of newplotfunc # and not the copied **kwargs from the plotfunc which # functools.wraps adds, so delete the wrapped attr del newplotfunc.__wrapped__ return newplotfunc @overload def quiver( # type: ignore[misc,unused-ignore] # None is hashable :( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> Quiver: ... @overload def quiver( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... 
@overload def quiver( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @_dsplot def quiver( ds: Dataset, x: Hashable, y: Hashable, ax: Axes, u: Hashable, v: Hashable, **kwargs: Any, ) -> Quiver: """Quiver plot of Dataset variables. Wraps :py:func:`matplotlib:matplotlib.pyplot.quiver`. """ import matplotlib as mpl if x is None or y is None or u is None or v is None: raise ValueError("Must specify x, y, u, v for quiver plots.") dx, dy, du, dv = broadcast(ds[x], ds[y], ds[u], ds[v]) args = [dx.values, dy.values, du.values, dv.values] hue = kwargs.pop("hue") cmap_params = kwargs.pop("cmap_params") if hue: args.append(ds[hue].values) # TODO: Fix this by always returning a norm with vmin, vmax in cmap_params if not cmap_params["norm"]: cmap_params["norm"] = mpl.colors.Normalize( cmap_params.pop("vmin"), cmap_params.pop("vmax") ) kwargs.pop("hue_style") kwargs.setdefault("pivot", "middle") hdl = ax.quiver(*args, **kwargs, **cmap_params) return hdl @overload def streamplot( # type: ignore[misc,unused-ignore] # None is hashable :( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: None = None, # no wrap -> primitive row: None = None, # no wrap -> primitive ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> LineCollection: ... 
@overload def streamplot( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable, # wrap -> FacetGrid row: Hashable | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @overload def streamplot( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, u: Hashable | None = None, v: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, col: Hashable | None = None, row: Hashable, # wrap -> FacetGrid ax: Axes | None = None, figsize: Iterable[float] | None = None, size: float | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: AspectOptions = None, subplot_kws: dict[str, Any] | None = None, add_guide: bool | None = None, cbar_kwargs: dict[str, Any] | None = None, cbar_ax: Axes | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, infer_intervals: bool | None = None, center: float | None = None, levels: ArrayLike | None = None, robust: bool | None = None, colors: str | ArrayLike | None = None, extend: ExtendOptions = None, cmap: str | Colormap | None = None, **kwargs: Any, ) -> FacetGrid[Dataset]: ... @_dsplot def streamplot( ds: Dataset, x: Hashable, y: Hashable, ax: Axes, u: Hashable, v: Hashable, **kwargs: Any, ) -> LineCollection: """Plot streamlines of Dataset variables. Wraps :py:func:`matplotlib:matplotlib.pyplot.streamplot`. """ import matplotlib as mpl if x is None or y is None or u is None or v is None: raise ValueError("Must specify x, y, u, v for streamplot plots.") # Matplotlib's streamplot has strong restrictions on what x and y can be, so need to # get arrays transposed the 'right' way around. 'x' cannot vary within 'rows', so # the dimension of x must be the second dimension. 'y' cannot vary with 'columns' so # the dimension of y must be the first dimension. If x and y are both 2d, assume the # user has got them right already. 
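    # Illustrative layout (hypothetical names): if ds[x] has dims ("x",) and
    # ds[y] has dims ("y",), the broadcast arrays below end up transposed to
    # ("y", "x"), so that x varies along columns and y along rows, as
    # matplotlib's streamplot expects.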
xdim = ds[x].dims[0] if len(ds[x].dims) == 1 else None ydim = ds[y].dims[0] if len(ds[y].dims) == 1 else None if xdim is not None and ydim is None: ydims = set(ds[y].dims) - {xdim} if len(ydims) == 1: ydim = next(iter(ydims)) if ydim is not None and xdim is None: xdims = set(ds[x].dims) - {ydim} if len(xdims) == 1: xdim = next(iter(xdims)) dx, dy, du, dv = broadcast(ds[x], ds[y], ds[u], ds[v]) if xdim is not None and ydim is not None: # Need to ensure the arrays are transposed correctly dx = dx.transpose(ydim, xdim) dy = dy.transpose(ydim, xdim) du = du.transpose(ydim, xdim) dv = dv.transpose(ydim, xdim) hue = kwargs.pop("hue") cmap_params = kwargs.pop("cmap_params") if hue: if xdim is not None and ydim is not None: ds[hue] = ds[hue].transpose(ydim, xdim) kwargs["color"] = ds[hue].values # TODO: Fix this by always returning a norm with vmin, vmax in cmap_params if not cmap_params["norm"]: cmap_params["norm"] = mpl.colors.Normalize( cmap_params.pop("vmin"), cmap_params.pop("vmax") ) kwargs.pop("hue_style") hdl = ax.streamplot( dx.values, dy.values, du.values, dv.values, **kwargs, **cmap_params ) # Return .lines so colorbar creation works properly return hdl.lines F = TypeVar("F", bound=Callable) def _update_doc_to_dataset(dataarray_plotfunc: Callable) -> Callable[[F], F]: """ Add a common docstring by reusing the DataArray one. TODO: Reduce code duplication. * The goal is to reduce code duplication by moving all Dataset specific plots to the DataArray side and use this thin wrapper to handle the conversion between Dataset and DataArray. * Improve docstring handling, maybe reword the DataArray versions to explain Datasets better. Parameters ---------- dataarray_plotfunc : Callable Function that returns a finished plot primitive. """ # Build on the original docstring da_doc = dataarray_plotfunc.__doc__ if da_doc is None: raise NotImplementedError("DataArray plot method requires a docstring") da_str = """ Parameters ---------- darray : DataArray """ ds_str = """ The `y` DataArray will be used as base, any other variables are added as coords. Parameters ---------- ds : Dataset """ # TODO: improve this? 
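    # Illustrative effect (a sketch): a DataArray docstring section such as
    #
    #     Parameters
    #     ----------
    #     darray : DataArray
    #
    # is rewritten to the Dataset form above (``ds : Dataset`` plus the note
    # that the ``y`` variable is used as the base), and any remaining
    # occurrences of "darray" are replaced with "ds".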
if da_str in da_doc: ds_doc = da_doc.replace(da_str, ds_str).replace("darray", "ds") else: ds_doc = da_doc @functools.wraps(dataarray_plotfunc) def wrapper(dataset_plotfunc: F) -> F: dataset_plotfunc.__doc__ = ds_doc return dataset_plotfunc return wrapper # type: ignore[return-value] def _normalize_args( plotmethod: str, args: tuple[Any, ...], kwargs: dict[str, Any] ) -> dict[str, Any]: from xarray.core.dataarray import DataArray # Determine positional arguments keyword by inspecting the # signature of the plotmethod: locals_ = dict( inspect.signature(getattr(DataArray().plot, plotmethod)) .bind(*args, **kwargs) .arguments.items() ) locals_.update(locals_.pop("kwargs", {})) return locals_ def _temp_dataarray(ds: Dataset, y: Hashable, locals_: dict[str, Any]) -> DataArray: """Create a temporary datarray with extra coords.""" from xarray.core.dataarray import DataArray coords = dict(ds[y].coords) dims = set(ds[y].dims) # Add extra coords to the DataArray from valid kwargs, if using all # kwargs there is a risk that we add unnecessary dataarrays as # coords straining RAM further for example: # ds.both and extend="both" would add ds.both to the coords: valid_coord_kwargs = {"x", "z", "markersize", "hue", "row", "col", "u", "v"} coord_kwargs = locals_.keys() & valid_coord_kwargs for k in coord_kwargs: key = locals_[k] darray = ds.get(key) if darray is not None: coords[key] = darray dims.update(darray.dims) # Trim dataset from unnecessary dims: ds_trimmed = ds.drop_dims(ds.sizes.keys() - dims) # TODO: Use ds.dims in the future # The dataarray has to include all the dims. Broadcast to that shape # and add the additional coords: _y = ds[y].broadcast_like(ds_trimmed) return DataArray(_y, coords=coords) @overload def scatter( # type: ignore[misc,unused-ignore] # None is hashable :( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: None = None, # no wrap -> primitive col: None = None, # no wrap -> primitive col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> PathCollection: ... 
@overload def scatter( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable, # wrap -> FacetGrid col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... @overload def scatter( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable, # wrap -> FacetGrid col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> FacetGrid[DataArray]: ... 
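# Usage sketch for the Dataset scatter wrapper below (illustrative only; the
# tutorial dataset and variable names are just one possible example):
#
#     import xarray as xr
#     ds = xr.tutorial.scatter_example_dataset()
#     ds.plot.scatter(x="A", y="B", hue="y", markersize="z")
#
# Internally the Dataset is converted to a temporary DataArray (extra
# variables become coordinates) and DataArray.plot.scatter does the drawing.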
@_update_doc_to_dataset(dataarray_plot.scatter) def scatter( ds: Dataset, *args: Any, x: Hashable | None = None, y: Hashable | None = None, z: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, figsize: Iterable[float] | None = None, size: float | None = None, aspect: float | None = None, ax: Axes | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, xincrease: bool | None = True, yincrease: bool | None = True, add_legend: bool | None = None, add_colorbar: bool | None = None, add_labels: bool | Iterable[bool] = True, add_title: bool = True, subplot_kws: dict[str, Any] | None = None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: ArrayLike | None = None, ylim: ArrayLike | None = None, cmap: str | Colormap | None = None, vmin: float | None = None, vmax: float | None = None, norm: Normalize | None = None, extend: ExtendOptions = None, levels: ArrayLike | None = None, **kwargs: Any, ) -> PathCollection | FacetGrid[DataArray]: """Scatter plot Dataset data variables against each other.""" locals_ = locals() del locals_["ds"] locals_.update(locals_.pop("kwargs", {})) da = _temp_dataarray(ds, y, locals_) return da.plot.scatter(*locals_.pop("args", ()), **locals_) xarray-2025.09.0/xarray/plot/facetgrid.py000066400000000000000000001121341505620616400201340ustar00rootroot00000000000000from __future__ import annotations import functools import itertools import warnings from collections.abc import Callable, Hashable, Iterable, MutableMapping from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, cast import numpy as np from xarray.core.formatting import format_item from xarray.core.types import HueStyleOptions, T_DataArrayOrSet from xarray.plot.utils import ( _LINEWIDTH_RANGE, _MARKERSIZE_RANGE, _add_legend, _determine_guide, _get_nice_quiver_magnitude, _guess_coords_to_plot, _infer_xy_labels, _Normalize, _parse_size, _process_cmap_cbar_kwargs, label_from_attrs, ) if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.cm import ScalarMappable from matplotlib.colorbar import Colorbar from matplotlib.figure import Figure from matplotlib.legend import Legend from matplotlib.quiver import QuiverKey from matplotlib.text import Annotation from xarray.core.dataarray import DataArray # Overrides axes.labelsize, xtick.major.size, ytick.major.size # from mpl.rcParams _FONTSIZE = "small" # For major ticks on x, y axes _NTICKS = 5 def _nicetitle(coord, value, maxchar, template): """ Put coord, value in template and truncate at maxchar """ prettyvalue = format_item(value, quote_strings=False) title = template.format(coord=coord, value=prettyvalue) if len(title) > maxchar: title = title[: (maxchar - 3)] + "..." return title T_FacetGrid = TypeVar("T_FacetGrid", bound="FacetGrid") class FacetGrid(Generic[T_DataArrayOrSet]): """ Initialize the Matplotlib figure and FacetGrid object. The :class:`FacetGrid` is an object that links a xarray DataArray to a Matplotlib figure with a particular structure. In particular, :class:`FacetGrid` is used to draw plots with multiple axes, where each axes shows the same relationship conditioned on different levels of some dimension. It's possible to condition on up to two variables by assigning variables to the rows and columns of the grid. 
The general approach to plotting here is called "small multiples", where the same kind of plot is repeated multiple times, and the specific use of small multiples to display the same relationship conditioned on one or more other variables is often called a "trellis plot". The basic workflow is to initialize the :class:`FacetGrid` object with the DataArray and the variable names that are used to structure the grid. Then plotting functions can be applied to each subset by calling :meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`. Attributes ---------- axs : ndarray of matplotlib.axes.Axes Array containing axes in corresponding position, as returned from :py:func:`matplotlib.pyplot.subplots`. col_labels : list of matplotlib.text.Annotation Column titles. row_labels : list of matplotlib.text.Annotation Row titles. fig : matplotlib.figure.Figure The figure containing all the axes. name_dicts : ndarray of dict Array containing dictionaries mapping coordinate names to values. ``None`` is used as a sentinel value for axes that should remain empty, i.e., sometimes the rightmost grid positions in the bottom row. """ data: T_DataArrayOrSet name_dicts: np.ndarray fig: Figure axs: np.ndarray row_names: list[np.ndarray] col_names: list[np.ndarray] figlegend: Legend | None quiverkey: QuiverKey | None cbar: Colorbar | None _single_group: bool | Hashable _nrow: int _row_var: Hashable | None _ncol: int _col_var: Hashable | None _col_wrap: int | None row_labels: list[Annotation | None] col_labels: list[Annotation | None] _x_var: None _y_var: None _hue_var: DataArray | None _cmap_extend: Any | None _mappables: list[ScalarMappable] _finalized: bool def __init__( self, data: T_DataArrayOrSet, col: Hashable | None = None, row: Hashable | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, figsize: Iterable[float] | None = None, aspect: float = 1, size: float = 3, subplot_kws: dict[str, Any] | None = None, ) -> None: """ Parameters ---------- data : DataArray or Dataset DataArray or Dataset to be plotted. row, col : str Dimension names that define subsets of the data, which will be drawn on separate facets in the grid. col_wrap : int, optional "Wrap" the grid the for the column variable after this number of columns, adding rows if ``col_wrap`` is less than the number of facets. sharex : bool, optional If true, the facets will share *x* axes. sharey : bool, optional If true, the facets will share *y* axes. figsize : Iterable of float or None, optional A tuple (width, height) of the figure in inches. If set, overrides ``size`` and ``aspect``. aspect : scalar, default: 1 Aspect ratio of each facet, so that ``aspect * size`` gives the width of each facet in inches. size : scalar, default: 3 Height (in inches) of each facet. See also: ``aspect``. subplot_kws : dict, optional Dictionary of keyword arguments for Matplotlib subplots (:py:func:`matplotlib.pyplot.subplots`). """ import matplotlib.pyplot as plt # Handle corner case of nonunique coordinates rep_col = col is not None and not data[col].to_index().is_unique rep_row = row is not None and not data[row].to_index().is_unique if rep_col or rep_row: raise ValueError( "Coordinates used for faceting cannot " "contain repeated (nonunique) values." 
) # single_group is the grouping variable, if there is exactly one single_group: bool | Hashable if col and row: single_group = False nrow = len(data[row]) ncol = len(data[col]) nfacet = nrow * ncol if col_wrap is not None: warnings.warn( "Ignoring col_wrap since both col and row were passed", stacklevel=2 ) elif row and not col: single_group = row elif not row and col: single_group = col else: raise ValueError("Pass a coordinate name as an argument for row or col") # Compute grid shape if single_group: nfacet = len(data[single_group]) if col: # idea - could add heuristic for nice shapes like 3x4 ncol = nfacet if row: ncol = 1 if col_wrap is not None: # Overrides previous settings ncol = col_wrap nrow = int(np.ceil(nfacet / ncol)) # Set the subplot kwargs subplot_kws = {} if subplot_kws is None else subplot_kws if figsize is None: # Calculate the base figure size with extra horizontal space for a # colorbar cbar_space = 1 figsize = (ncol * size * aspect + cbar_space, nrow * size) fig, axs = plt.subplots( nrow, ncol, sharex=sharex, sharey=sharey, squeeze=False, figsize=figsize, subplot_kw=subplot_kws, ) # Set up the lists of names for the row and column facet variables col_names = list(data[col].to_numpy()) if col else [] row_names = list(data[row].to_numpy()) if row else [] if single_group: full: list[dict[Hashable, Any] | None] = [ {single_group: x} for x in data[single_group].to_numpy() ] empty: list[dict[Hashable, Any] | None] = [ None for x in range(nrow * ncol - len(full)) ] name_dict_list = full + empty else: rowcols = itertools.product(row_names, col_names) name_dict_list = [{row: r, col: c} for r, c in rowcols] name_dicts = np.array(name_dict_list).reshape(nrow, ncol) # Set up the class attributes # --------------------------- # First the public API self.data = data self.name_dicts = name_dicts self.fig = fig self.axs = axs self.row_names = row_names self.col_names = col_names # guides self.figlegend = None self.quiverkey = None self.cbar = None # Next the private variables self._single_group = single_group self._nrow = nrow self._row_var = row self._ncol = ncol self._col_var = col self._col_wrap = col_wrap self.row_labels = [None] * nrow self.col_labels = [None] * ncol self._x_var = None self._y_var = None self._hue_var = None self._cmap_extend = None self._mappables = [] self._finalized = False @property def axes(self) -> np.ndarray: warnings.warn( ( "self.axes is deprecated since 2022.11 in order to align with " "matplotlibs plt.subplots, use self.axs instead." ), DeprecationWarning, stacklevel=2, ) return self.axs @axes.setter def axes(self, axs: np.ndarray) -> None: warnings.warn( ( "self.axes is deprecated since 2022.11 in order to align with " "matplotlibs plt.subplots, use self.axs instead." ), DeprecationWarning, stacklevel=2, ) self.axs = axs @property def _left_axes(self) -> np.ndarray: return self.axs[:, 0] @property def _bottom_axes(self) -> np.ndarray: return self.axs[-1, :] def map_dataarray( self: T_FacetGrid, func: Callable, x: Hashable | None, y: Hashable | None, **kwargs: Any, ) -> T_FacetGrid: """ Apply a plotting function to a 2d facet's subset of the data. 
This is more convenient and less general than ``FacetGrid.map`` Parameters ---------- func : callable A plotting function with the same signature as a 2d xarray plotting method such as `xarray.plot.imshow` x, y : string Names of the coordinates to plot on x, y axes **kwargs additional keyword arguments to func Returns ------- self : FacetGrid object """ if kwargs.get("cbar_ax") is not None: raise ValueError("cbar_ax not supported by FacetGrid.") cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( func, self.data.to_numpy(), **kwargs ) self._cmap_extend = cmap_params.get("extend") # Order is important func_kwargs = { k: v for k, v in kwargs.items() if k not in {"cmap", "colors", "cbar_kwargs", "levels"} } func_kwargs.update(cmap_params) # to avoid redundant calling, colorbar and labelling is instead handled # by `_finalize_grid` at the end func_kwargs["add_colorbar"] = False if func.__name__ != "surface": func_kwargs["add_labels"] = False # Get x, y labels for the first subplot x, y = _infer_xy_labels( darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, imshow=func.__name__ == "imshow", rgb=kwargs.get("rgb"), ) for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] mappable = func( subset, x=x, y=y, ax=ax, **func_kwargs, _is_facetgrid=True ) self._mappables.append(mappable) xlabel = label_from_attrs(self.data[x]) ylabel = label_from_attrs(self.data[y]) self._finalize_grid(xlabel, ylabel) if kwargs.get("add_colorbar", True): self.add_colorbar(**cbar_kwargs) return self def map_plot1d( self: T_FacetGrid, func: Callable, x: Hashable | None, y: Hashable | None, *, z: Hashable | None = None, hue: Hashable | None = None, markersize: Hashable | None = None, linewidth: Hashable | None = None, **kwargs: Any, ) -> T_FacetGrid: """ Apply a plotting function to a 1d facet's subset of the data. This is more convenient and less general than ``FacetGrid.map`` Parameters ---------- func : A plotting function with the same signature as a 1d xarray plotting method such as `xarray.plot.scatter` x, y : Names of the coordinates to plot on x, y axes **kwargs additional keyword arguments to func Returns ------- self : FacetGrid object """ # Copy data to allow converting categoricals to integers and storing # them in self.data. It is not possible to copy in the init # unfortunately as there are tests that relies on self.data being # mutable (test_names_appear_somewhere()). Maybe something to deprecate # not sure how much that is used outside these tests. self.data = self.data.copy() if kwargs.get("cbar_ax") is not None: raise ValueError("cbar_ax not supported by FacetGrid.") if func.__name__ == "scatter": size_ = kwargs.pop("_size", markersize) size_r = _MARKERSIZE_RANGE else: size_ = kwargs.pop("_size", linewidth) size_r = _LINEWIDTH_RANGE # Guess what coords to use if some of the values in coords_to_plot are None: coords_to_plot: MutableMapping[str, Hashable | None] = dict( x=x, z=z, hue=hue, size=size_ ) coords_to_plot = _guess_coords_to_plot(self.data, coords_to_plot, kwargs) # Handle hues: hue = coords_to_plot["hue"] hueplt = self.data.coords[hue] if hue else None # TODO: _infer_line_data2 ? hueplt_norm = _Normalize(hueplt) self._hue_var = hueplt cbar_kwargs = kwargs.pop("cbar_kwargs", {}) if hueplt_norm.data is not None: if not hueplt_norm.data_is_numeric: # TODO: Ticks seems a little too hardcoded, since it will always # show all the values. 
But maybe it's ok, since plotting hundreds # of categorical data isn't that meaningful anyway. cbar_kwargs.update(format=hueplt_norm.format, ticks=hueplt_norm.ticks) kwargs.update(levels=hueplt_norm.levels) cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( func, cast("DataArray", hueplt_norm.values).data, cbar_kwargs=cbar_kwargs, **kwargs, ) self._cmap_extend = cmap_params.get("extend") else: cmap_params = {} # Handle sizes: size_ = coords_to_plot["size"] sizeplt = self.data.coords[size_] if size_ else None sizeplt_norm = _Normalize(data=sizeplt, width=size_r) if sizeplt_norm.data is not None: self.data[size_] = sizeplt_norm.values # Add kwargs that are sent to the plotting function, # order is important ??? func_kwargs = { k: v for k, v in kwargs.items() if k not in {"cmap", "colors", "cbar_kwargs", "levels"} } func_kwargs.update(cmap_params) # Annotations will be handled later, skip those parts in the plotfunc: func_kwargs["add_colorbar"] = False func_kwargs["add_legend"] = False func_kwargs["add_title"] = False add_labels_ = np.zeros(self.axs.shape + (3,), dtype=bool) if kwargs.get("z") is not None: # 3d plots looks better with all labels. 3d plots can't sharex either so it # is easy to get lost while rotating the plots: add_labels_[:] = True else: # Subplots should have labels on the left and bottom edges only: add_labels_[-1, :, 0] = True # x add_labels_[:, 0, 1] = True # y # add_labels_[:, :, 2] = True # z # Set up the lists of names for the row and column facet variables: if self._single_group: full = tuple( {self._single_group: x} for x in range(self.data[self._single_group].size) ) empty = tuple(None for x in range(self._nrow * self._ncol - len(full))) name_d = full + empty else: rowcols = itertools.product( range(self.data[self._row_var].size), range(self.data[self._col_var].size), ) name_d = tuple({self._row_var: r, self._col_var: c} for r, c in rowcols) name_dicts = np.array(name_d).reshape(self._nrow, self._ncol) # Plot the data for each subplot: for add_lbls, d, ax in zip( add_labels_.reshape((self.axs.size, -1)), name_dicts.flat, self.axs.flat, strict=True, ): func_kwargs["add_labels"] = add_lbls # None is the sentinel value if d is not None: subset = self.data.isel(d) mappable = func( subset, x=x, y=y, ax=ax, hue=hue, _size=size_, **func_kwargs, _is_facetgrid=True, ) self._mappables.append(mappable) # Add titles and some touch ups: self._finalize_grid() self._set_lims() add_colorbar, add_legend = _determine_guide( hueplt_norm, sizeplt_norm, kwargs.get("add_colorbar"), kwargs.get("add_legend"), # kwargs.get("add_guide", None), # kwargs.get("hue_style", None), ) if add_legend: use_legend_elements = func.__name__ != "hist" if use_legend_elements: self.add_legend( use_legend_elements=use_legend_elements, hueplt_norm=hueplt_norm if not add_colorbar else _Normalize(None), sizeplt_norm=sizeplt_norm, primitive=self._mappables, legend_ax=self.fig, plotfunc=func.__name__, ) else: self.add_legend(use_legend_elements=use_legend_elements) if add_colorbar: # Colorbar is after legend so it correctly fits the plot: if "label" not in cbar_kwargs: cbar_kwargs["label"] = label_from_attrs(hueplt_norm.data) self.add_colorbar(**cbar_kwargs) return self def map_dataarray_line( self: T_FacetGrid, func: Callable, x: Hashable | None, y: Hashable | None, hue: Hashable | None, add_legend: bool = True, _labels=None, **kwargs: Any, ) -> T_FacetGrid: from xarray.plot.dataarray_plot import _infer_line_data for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value 
if d is not None: subset = self.data.loc[d] mappable = func( subset, x=x, y=y, ax=ax, hue=hue, add_legend=False, _labels=False, **kwargs, ) self._mappables.append(mappable) xplt, yplt, hueplt, huelabel = _infer_line_data( darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue ) xlabel = label_from_attrs(xplt) ylabel = label_from_attrs(yplt) self._hue_var = hueplt self._finalize_grid(xlabel, ylabel) if add_legend and hueplt is not None and huelabel is not None: self.add_legend(label=huelabel) return self def map_dataset( self: T_FacetGrid, func: Callable, x: Hashable | None = None, y: Hashable | None = None, hue: Hashable | None = None, hue_style: HueStyleOptions = None, add_guide: bool | None = None, **kwargs: Any, ) -> T_FacetGrid: from xarray.plot.dataset_plot import _infer_meta_data kwargs["add_guide"] = False if kwargs.get("markersize"): kwargs["size_mapping"] = _parse_size( self.data[kwargs["markersize"]], kwargs.pop("size_norm", None) ) meta_data = _infer_meta_data( self.data, x, y, hue, hue_style, add_guide, funcname=func.__name__ ) kwargs["meta_data"] = meta_data if hue and meta_data["hue_style"] == "continuous": cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs( func, self.data[hue].to_numpy(), **kwargs ) kwargs["meta_data"]["cmap_params"] = cmap_params kwargs["meta_data"]["cbar_kwargs"] = cbar_kwargs kwargs["_is_facetgrid"] = True if func.__name__ == "quiver" and "scale" not in kwargs: raise ValueError("Please provide scale.") # TODO: come up with an algorithm for reasonable scale choice for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] maybe_mappable = func( ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs ) # TODO: this is needed to get legends to work. 
# but maybe_mappable is a list in that case :/ self._mappables.append(maybe_mappable) self._finalize_grid(meta_data["xlabel"], meta_data["ylabel"]) if hue: hue_label = meta_data.pop("hue_label", None) self._hue_label = hue_label if meta_data["add_legend"]: self._hue_var = meta_data["hue"] self.add_legend(label=hue_label) elif meta_data["add_colorbar"]: self.add_colorbar(label=hue_label, **cbar_kwargs) if meta_data["add_quiverkey"]: self.add_quiverkey(kwargs["u"], kwargs["v"]) return self def _finalize_grid(self, *axlabels: Hashable) -> None: """Finalize the annotations and layout.""" if not self._finalized: self.set_axis_labels(*axlabels) self.set_titles() self.fig.tight_layout() for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True): if namedict is None: ax.set_visible(False) self._finalized = True def _adjust_fig_for_guide(self, guide) -> None: # Draw the plot to set the bounding boxes correctly if hasattr(self.fig.canvas, "get_renderer"): renderer = self.fig.canvas.get_renderer() else: raise RuntimeError("MPL backend has no renderer") self.fig.draw(renderer) # Calculate and set the new width of the figure so the legend fits guide_width = guide.get_window_extent(renderer).width / self.fig.dpi figure_width = self.fig.get_figwidth() total_width = figure_width + guide_width self.fig.set_figwidth(total_width) # Draw the plot again to get the new transformations self.fig.draw(renderer) # Now calculate how much space we need on the right side guide_width = guide.get_window_extent(renderer).width / self.fig.dpi space_needed = guide_width / total_width + 0.02 # margin = .01 # _space_needed = margin + space_needed right = 1 - space_needed # Place the subplot axes to give space for the legend self.fig.subplots_adjust(right=right) def add_legend( self, *, label: str | None = None, use_legend_elements: bool = False, **kwargs: Any, ) -> None: if use_legend_elements: self.figlegend = _add_legend(**kwargs) else: assert self._hue_var is not None self.figlegend = self.fig.legend( handles=self._mappables[-1], labels=list(self._hue_var.to_numpy()), title=label if label is not None else label_from_attrs(self._hue_var), loc=kwargs.pop("loc", "center right"), **kwargs, ) self._adjust_fig_for_guide(self.figlegend) def add_colorbar(self, **kwargs: Any) -> None: """Draw a colorbar.""" kwargs = kwargs.copy() if self._cmap_extend is not None: kwargs.setdefault("extend", self._cmap_extend) # dont pass extend as kwarg if it is in the mappable if hasattr(self._mappables[-1], "extend"): kwargs.pop("extend", None) if "label" not in kwargs: from xarray import DataArray assert isinstance(self.data, DataArray) kwargs.setdefault("label", label_from_attrs(self.data)) self.cbar = self.fig.colorbar( self._mappables[-1], ax=list(self.axs.flat), **kwargs ) def add_quiverkey(self, u: Hashable, v: Hashable, **kwargs: Any) -> None: kwargs = kwargs.copy() magnitude = _get_nice_quiver_magnitude(self.data[u], self.data[v]) units = self.data[u].attrs.get("units", "") self.quiverkey = self.axs.flat[-1].quiverkey( self._mappables[-1], X=0.8, Y=0.9, U=magnitude, label=f"{magnitude}\n{units}", labelpos="E", coordinates="figure", ) # TODO: does not work because self.quiverkey.get_window_extent(renderer) = 0 # https://github.com/matplotlib/matplotlib/issues/18530 # self._adjust_fig_for_guide(self.quiverkey.text) def _get_largest_lims(self) -> dict[str, tuple[float, float]]: """ Get largest limits in the facetgrid. 
Returns ------- lims_largest : dict[str, tuple[float, float]] Dictionary with the largest limits along each axis. Examples -------- >>> ds = xr.tutorial.scatter_example_dataset(seed=42) >>> fg = ds.plot.scatter(x="A", y="B", hue="y", row="x", col="w") >>> round(fg._get_largest_lims()["x"][0], 3) np.float64(-0.334) """ lims_largest: dict[str, tuple[float, float]] = dict( x=(np.inf, -np.inf), y=(np.inf, -np.inf), z=(np.inf, -np.inf) ) for axis in ("x", "y", "z"): # Find the plot with the largest xlim values: lower, upper = lims_largest[axis] for ax in self.axs.flat: get_lim: Callable[[], tuple[float, float]] | None = getattr( ax, f"get_{axis}lim", None ) if get_lim: lower_new, upper_new = get_lim() lower, upper = (min(lower, lower_new), max(upper, upper_new)) lims_largest[axis] = (lower, upper) return lims_largest def _set_lims( self, x: tuple[float, float] | None = None, y: tuple[float, float] | None = None, z: tuple[float, float] | None = None, ) -> None: """ Set the same limits for all the subplots in the facetgrid. Parameters ---------- x : tuple[float, float] or None, optional x axis limits. y : tuple[float, float] or None, optional y axis limits. z : tuple[float, float] or None, optional z axis limits. Examples -------- >>> ds = xr.tutorial.scatter_example_dataset(seed=42) >>> fg = ds.plot.scatter(x="A", y="B", hue="y", row="x", col="w") >>> fg._set_lims(x=(-0.3, 0.3), y=(0, 2), z=(0, 4)) >>> fg.axs[0, 0].get_xlim(), fg.axs[0, 0].get_ylim() ((np.float64(-0.3), np.float64(0.3)), (np.float64(0.0), np.float64(2.0))) """ lims_largest = self._get_largest_lims() # Set limits: for ax in self.axs.flat: for (axis, data_limit), parameter_limit in zip( lims_largest.items(), (x, y, z), strict=True ): set_lim = getattr(ax, f"set_{axis}lim", None) if set_lim: set_lim(data_limit if parameter_limit is None else parameter_limit) def set_axis_labels(self, *axlabels: Hashable) -> None: """Set axis labels on the left column and bottom row of the grid.""" from xarray.core.dataarray import DataArray for var, axis in zip(axlabels, ["x", "y", "z"], strict=False): if var is not None: if isinstance(var, DataArray): getattr(self, f"set_{axis}labels")(label_from_attrs(var)) else: getattr(self, f"set_{axis}labels")(str(var)) def _set_labels( self, axis: str, axes: Iterable, label: str | None = None, **kwargs ) -> None: if label is None: label = label_from_attrs(self.data[getattr(self, f"_{axis}_var")]) for ax in axes: getattr(ax, f"set_{axis}label")(label, **kwargs) def set_xlabels(self, label: str | None = None, **kwargs: Any) -> None: """Label the x axis on the bottom row of the grid.""" self._set_labels("x", self._bottom_axes, label, **kwargs) def set_ylabels(self, label: str | None = None, **kwargs: Any) -> None: """Label the y axis on the left column of the grid.""" self._set_labels("y", self._left_axes, label, **kwargs) def set_zlabels(self, label: str | None = None, **kwargs: Any) -> None: """Label the z axis.""" self._set_labels("z", self._left_axes, label, **kwargs) def set_titles( self, template: str = "{coord} = {value}", maxchar: int = 30, size=None, **kwargs, ) -> None: """ Draw titles either above each facet or on the grid margins. 
Parameters ---------- template : str, default: "{coord} = {value}" Template for plot titles containing {coord} and {value} maxchar : int, default: 30 Truncate titles at maxchar **kwargs : keyword args additional arguments to matplotlib.text Returns ------- self: FacetGrid object """ import matplotlib as mpl if size is None: size = mpl.rcParams["axes.labelsize"] nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template) if self._single_group: for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # Only label the ones with data if d is not None: coord, value = list(d.items()).pop() title = nicetitle(coord, value) ax.set_title(title, size=size, **kwargs) else: # The row titles on the right edge of the grid for index, (ax, row_name, handle) in enumerate( zip(self.axs[:, -1], self.row_names, self.row_labels, strict=True) ): title = nicetitle(coord=self._row_var, value=row_name) if not handle: self.row_labels[index] = ax.annotate( title, xy=(1.02, 0.5), xycoords="axes fraction", rotation=270, ha="left", va="center", **kwargs, ) else: handle.set_text(title) handle.update(kwargs) # The column titles on the top row for index, (ax, col_name, handle) in enumerate( zip(self.axs[0, :], self.col_names, self.col_labels, strict=True) ): title = nicetitle(coord=self._col_var, value=col_name) if not handle: self.col_labels[index] = ax.set_title(title, size=size, **kwargs) else: handle.set_text(title) handle.update(kwargs) def set_ticks( self, max_xticks: int = _NTICKS, max_yticks: int = _NTICKS, fontsize: str | int = _FONTSIZE, ) -> None: """ Set and control tick behavior. Parameters ---------- max_xticks, max_yticks : int, optional Maximum number of labeled ticks to plot on x, y axes fontsize : string or int Font size as used by matplotlib text Returns ------- self : FacetGrid object """ from matplotlib.ticker import MaxNLocator # Both are necessary x_major_locator = MaxNLocator(nbins=max_xticks) y_major_locator = MaxNLocator(nbins=max_yticks) for ax in self.axs.flat: ax.xaxis.set_major_locator(x_major_locator) ax.yaxis.set_major_locator(y_major_locator) for tick in itertools.chain( ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks() ): tick.label1.set_fontsize(fontsize) def map( self: T_FacetGrid, func: Callable, *args: Hashable, **kwargs: Any ) -> T_FacetGrid: """ Apply a plotting function to each facet's subset of the data. Parameters ---------- func : callable A plotting function that takes data and keyword arguments. It must plot to the currently active matplotlib Axes and take a `color` keyword argument. If faceting on the `hue` dimension, it must also take a `label` keyword argument. *args : Hashable Column names in self.data that identify variables with data to plot. The data for each variable is passed to `func` in the order the variables are specified in the call. **kwargs : keyword arguments All keyword arguments are passed to the plotting function. Returns ------- self : FacetGrid object """ import matplotlib.pyplot as plt for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True): if namedict is not None: data = self.data.loc[namedict] plt.sca(ax) innerargs = [data[a].to_numpy() for a in args] maybe_mappable = func(*innerargs, **kwargs) # TODO: better way to verify that an artist is mappable? 
# https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522 if maybe_mappable and hasattr(maybe_mappable, "autoscale_None"): self._mappables.append(maybe_mappable) self._finalize_grid(*args[:2]) return self def _easy_facetgrid( data: T_DataArrayOrSet, plotfunc: Callable, kind: Literal["line", "dataarray", "dataset", "plot1d"], x: Hashable | None = None, y: Hashable | None = None, row: Hashable | None = None, col: Hashable | None = None, col_wrap: int | None = None, sharex: bool = True, sharey: bool = True, aspect: float | None = None, size: float | None = None, subplot_kws: dict[str, Any] | None = None, ax: Axes | None = None, figsize: Iterable[float] | None = None, **kwargs: Any, ) -> FacetGrid[T_DataArrayOrSet]: """ Convenience method to call xarray.plot.FacetGrid from 2d plotting methods kwargs are the arguments to 2d plotting method """ if ax is not None: raise ValueError("Can't use axes when making faceted plots.") if aspect is None: aspect = 1 if size is None: size = 3 elif figsize is not None: raise ValueError("cannot provide both `figsize` and `size` arguments") if kwargs.get("z") is not None: # 3d plots doesn't support sharex, sharey, reset to mpl defaults: sharex = False sharey = False g = FacetGrid( data=data, col=col, row=row, col_wrap=col_wrap, sharex=sharex, sharey=sharey, figsize=figsize, aspect=aspect, size=size, subplot_kws=subplot_kws, ) if kind == "line": return g.map_dataarray_line(plotfunc, x, y, **kwargs) if kind == "dataarray": return g.map_dataarray(plotfunc, x, y, **kwargs) if kind == "plot1d": return g.map_plot1d(plotfunc, x, y, **kwargs) if kind == "dataset": return g.map_dataset(plotfunc, x, y, **kwargs) raise ValueError( f"kind must be one of `line`, `dataarray`, `dataset` or `plot1d`, got {kind}" ) xarray-2025.09.0/xarray/plot/utils.py000066400000000000000000001664151505620616400173570ustar00rootroot00000000000000from __future__ import annotations import itertools import textwrap import warnings from collections.abc import ( Callable, Hashable, Iterable, Mapping, MutableMapping, Sequence, ) from datetime import date, datetime from inspect import getfullargspec from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np import pandas as pd from xarray.core.indexes import PandasMultiIndex from xarray.core.options import OPTIONS from xarray.core.utils import ( attempt_import, is_scalar, module_available, ) from xarray.namedarray.pycompat import DuckArrayModule nc_time_axis_available = module_available("nc_time_axis") try: import cftime except ImportError: cftime = None if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.colors import Normalize from matplotlib.ticker import FuncFormatter from numpy.typing import ArrayLike from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import AspectOptions, ScaleOptions try: import matplotlib.pyplot as plt except ImportError: plt: Any = None # type: ignore[no-redef] ROBUST_PERCENTILE = 2.0 # copied from seaborn _MARKERSIZE_RANGE = (18.0, 36.0, 72.0) _LINEWIDTH_RANGE = (1.5, 1.5, 6.0) def _determine_extend(calc_data, vmin, vmax): extend_min = calc_data.min() < vmin extend_max = calc_data.max() > vmax if extend_min and extend_max: return "both" elif extend_min: return "min" elif extend_max: return "max" else: return "neither" def _build_discrete_cmap(cmap, levels, extend, filled): """ Build a discrete colormap and normalization of the data. 
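    For example (illustrative), ``_build_discrete_cmap("viridis", [0, 1, 2, 4],
    "neither", True)`` returns a 3-color ``ListedColormap`` together with a
    ``BoundaryNorm`` over those level boundaries.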
""" import matplotlib as mpl if len(levels) == 1: levels = [levels[0], levels[0]] if not filled: # non-filled contour plots extend = "max" if extend == "both": ext_n = 2 elif extend in ["min", "max"]: ext_n = 1 else: ext_n = 0 n_colors = len(levels) + ext_n - 1 pal = _color_palette(cmap, n_colors) new_cmap, cnorm = mpl.colors.from_levels_and_colors(levels, pal, extend=extend) # copy the old cmap name, for easier testing new_cmap.name = getattr(cmap, "name", cmap) # copy colors to use for bad, under, and over values in case they have been # set to non-default values try: # matplotlib<3.2 only uses bad color for masked values bad = cmap(np.ma.masked_invalid([np.nan]))[0] except TypeError: # cmap was a str or list rather than a color-map object, so there are # no bad, under or over values to check or copy pass else: under = cmap(-np.inf) over = cmap(np.inf) new_cmap.set_bad(bad) # Only update under and over if they were explicitly changed by the user # (i.e. are different from the lowest or highest values in cmap). Otherwise # leave unchanged so new_cmap uses its default values (its own lowest and # highest values). if under != cmap(0): new_cmap.set_under(under) if over != cmap(cmap.N - 1): new_cmap.set_over(over) return new_cmap, cnorm def _color_palette(cmap, n_colors): import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap colors_i = np.linspace(0, 1.0, n_colors) if isinstance(cmap, list | tuple): # expand or truncate the list of colors to n_colors cmap = list(itertools.islice(itertools.cycle(cmap), n_colors)) cmap = ListedColormap(cmap) pal = cmap(colors_i) elif isinstance(cmap, str): # we have some sort of named palette try: # is this a matplotlib cmap? cmap = plt.get_cmap(cmap) pal = cmap(colors_i) except ValueError: # ValueError happens when mpl doesn't like a colormap, try seaborn try: from seaborn import color_palette pal = color_palette(cmap, n_colors=n_colors) except (ValueError, ImportError): # or maybe we just got a single color as a string cmap = ListedColormap([cmap] * n_colors) pal = cmap(colors_i) else: # cmap better be a LinearSegmentedColormap (e.g. viridis) pal = cmap(colors_i) return pal # _determine_cmap_params is adapted from Seaborn: # https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158 # Used under the terms of Seaborn's license, see licenses/SEABORN_LICENSE. def _determine_cmap_params( plot_data, vmin=None, vmax=None, cmap=None, center=None, robust=False, extend=None, levels=None, filled=True, norm=None, _is_facetgrid=False, ): """ Use some heuristics to set good defaults for colorbar and range. 
Parameters ---------- plot_data : Numpy array Doesn't handle xarray objects Returns ------- cmap_params : dict Use depends on the type of the plotting function """ if TYPE_CHECKING: import matplotlib as mpl else: mpl = attempt_import("matplotlib") if isinstance(levels, Iterable): levels = sorted(levels) calc_data = np.ravel(plot_data[np.isfinite(plot_data)]) # Handle all-NaN input data gracefully if calc_data.size == 0: # Arbitrary default for when all values are NaN calc_data = np.array(0.0) # Setting center=False prevents a divergent cmap possibly_divergent = center is not False # Set center to 0 so math below makes sense but remember its state center_is_none = False if center is None: center = 0 center_is_none = True # Setting both vmin and vmax prevents a divergent cmap if (vmin is not None) and (vmax is not None): possibly_divergent = False # Setting vmin or vmax implies linspaced levels user_minmax = (vmin is not None) or (vmax is not None) # vlim might be computed below vlim = None # save state; needed later vmin_was_none = vmin is None vmax_was_none = vmax is None if vmin is None: if robust: vmin = np.percentile(calc_data, ROBUST_PERCENTILE) else: vmin = calc_data.min() elif possibly_divergent: vlim = abs(vmin - center) if vmax is None: if robust: vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE) else: vmax = calc_data.max() elif possibly_divergent: vlim = abs(vmax - center) if possibly_divergent: levels_are_divergent = ( isinstance(levels, Iterable) and levels[0] * levels[-1] < 0 ) # kwargs not specific about divergent or not: infer defaults from data divergent = (vmin < 0 < vmax) or not center_is_none or levels_are_divergent else: divergent = False # A divergent map should be symmetric around the center value if divergent: if vlim is None: vlim = max(abs(vmin - center), abs(vmax - center)) vmin, vmax = -vlim, vlim # Now add in the centering value and set the limits vmin += center vmax += center # now check norm and harmonize with vmin, vmax if norm is not None: if norm.vmin is None: norm.vmin = vmin else: if not vmin_was_none and vmin != norm.vmin: raise ValueError("Cannot supply vmin and a norm with a different vmin.") vmin = norm.vmin if norm.vmax is None: norm.vmax = vmax else: if not vmax_was_none and vmax != norm.vmax: raise ValueError("Cannot supply vmax and a norm with a different vmax.") vmax = norm.vmax # if BoundaryNorm, then set levels if isinstance(norm, mpl.colors.BoundaryNorm): levels = norm.boundaries # Choose default colormaps if not provided if cmap is None: if divergent: cmap = OPTIONS["cmap_divergent"] else: cmap = OPTIONS["cmap_sequential"] # Handle discrete levels if levels is not None: if is_scalar(levels): if user_minmax: levels = np.linspace(vmin, vmax, levels) elif levels == 1: levels = np.asarray([(vmin + vmax) / 2]) else: # N in MaxNLocator refers to bins, not ticks ticker = mpl.ticker.MaxNLocator(levels - 1) levels = ticker.tick_values(vmin, vmax) vmin, vmax = levels[0], levels[-1] # GH3734 if vmin == vmax: vmin, vmax = mpl.ticker.LinearLocator(2).tick_values(vmin, vmax) if extend is None: extend = _determine_extend(calc_data, vmin, vmax) if (levels is not None) and (not isinstance(norm, mpl.colors.BoundaryNorm)): cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled) norm = newnorm if norm is None else norm # vmin & vmax needs to be None if norm is passed # TODO: always return a norm with vmin and vmax if norm is not None: vmin = None vmax = None return dict( vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm ) 
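# Illustrative behaviour of the heuristics above (a sketch, not a doctest):
#
#     p = _determine_cmap_params(np.linspace(-1, 1, 10))
#     # data spans zero -> inferred as diverging: p["cmap"] is
#     # OPTIONS["cmap_divergent"] (default "RdBu_r") and p["vmin"] == -p["vmax"],
#     # i.e. the limits are symmetric about center=0.
#
#     p = _determine_cmap_params(np.linspace(0, 1, 10))
#     # all-positive data -> sequential default (OPTIONS["cmap_sequential"],
#     # "viridis" unless changed via xarray.set_options).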
def _infer_xy_labels_3d( darray: DataArray | Dataset, x: Hashable | None, y: Hashable | None, rgb: Hashable | None, ) -> tuple[Hashable, Hashable]: """ Determine x and y labels for showing RGB images. Attempts to infer which dimension is RGB/RGBA by size and order of dims. """ assert rgb is None or rgb != x assert rgb is None or rgb != y # Start by detecting and reporting invalid combinations of arguments assert darray.ndim == 3 not_none = [a for a in (x, y, rgb) if a is not None] if len(set(not_none)) < len(not_none): raise ValueError( "Dimension names must be None or unique strings, but imshow was " f"passed x={x!r}, y={y!r}, and rgb={rgb!r}." ) for label in not_none: if label not in darray.dims: raise ValueError(f"{label!r} is not a dimension") # Then calculate rgb dimension if certain and check validity could_be_color = [ label for label in darray.dims if darray[label].size in (3, 4) and label not in (x, y) ] if rgb is None and not could_be_color: raise ValueError( "A 3-dimensional array was passed to imshow(), but there is no " "dimension that could be color. At least one dimension must be " "of size 3 (RGB) or 4 (RGBA), and not given as x or y." ) if rgb is None and len(could_be_color) == 1: rgb = could_be_color[0] if rgb is not None and darray[rgb].size not in (3, 4): raise ValueError( f"Cannot interpret dim {rgb!r} of size {darray[rgb].size} as RGB or RGBA." ) # If rgb dimension is still unknown, there must be two or three dimensions # in could_be_color. We therefore warn, and use a heuristic to break ties. if rgb is None: assert len(could_be_color) in (2, 3) rgb = could_be_color[-1] warnings.warn( "Several dimensions of this array could be colors. Xarray " f"will use the last possible dimension ({rgb!r}) to match " "matplotlib.pyplot.imshow. You can pass names of x, y, " "and/or rgb dimensions to override this guess.", stacklevel=2, ) assert rgb is not None # Finally, we pick out the red slice and delegate to the 2D version: return _infer_xy_labels(darray.isel({rgb: 0}), x, y) def _infer_xy_labels( darray: DataArray | Dataset, x: Hashable | None, y: Hashable | None, imshow: bool = False, rgb: Hashable | None = None, ) -> tuple[Hashable, Hashable]: """ Determine x and y labels. For use in _plot2d darray must be a 2 dimensional data array, or 3d for imshow only. """ if (x is not None) and (x == y): raise ValueError("x and y cannot be equal.") if imshow and darray.ndim == 3: return _infer_xy_labels_3d(darray, x, y, rgb) if x is None and y is None: if darray.ndim != 2: raise ValueError("DataArray must be 2d") y, x = darray.dims elif x is None: _assert_valid_xy(darray, y, "y") x = darray.dims[0] if y == darray.dims[1] else darray.dims[1] elif y is None: _assert_valid_xy(darray, x, "x") y = darray.dims[0] if x == darray.dims[1] else darray.dims[1] else: _assert_valid_xy(darray, x, "x") _assert_valid_xy(darray, y, "y") if darray._indexes.get(x, 1) is darray._indexes.get(y, 2) and isinstance( darray._indexes[x], PandasMultiIndex ): raise ValueError("x and y cannot be levels of the same MultiIndex") return x, y # TODO: Can by used to more than x or y, rename? 
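# Illustrative check (a sketch; names are hypothetical): for a DataArray `da`
# with dims ("time", "lat") and no MultiIndex, _assert_valid_xy(da, "lat", "x")
# passes silently, while _assert_valid_xy(da, "depth", "x") raises a ValueError
# listing the valid names.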
def _assert_valid_xy( darray: DataArray | Dataset, xy: Hashable | None, name: str ) -> None: """ make sure x and y passed to plotting functions are valid """ # MultiIndex cannot be plotted; no point in allowing them here multiindex_dims = { idx.dim for idx in darray.xindexes.get_unique() if isinstance(idx, PandasMultiIndex) } valid_xy = (set(darray.dims) | set(darray.coords)) - multiindex_dims if (xy is not None) and (xy not in valid_xy): valid_xy_str = "', '".join(sorted(str(v) for v in valid_xy)) raise ValueError( f"{name} must be one of None, '{valid_xy_str}'. Received '{xy}' instead." ) def get_axis( figsize: Iterable[float] | None = None, size: float | None = None, aspect: AspectOptions = None, ax: Axes | None = None, **subplot_kws: Any, ) -> Axes: if TYPE_CHECKING: import matplotlib as mpl import matplotlib.pyplot as plt else: mpl = attempt_import("matplotlib") plt = attempt_import("matplotlib.pyplot") if figsize is not None: if ax is not None: raise ValueError("cannot provide both `figsize` and `ax` arguments") if size is not None: raise ValueError("cannot provide both `figsize` and `size` arguments") _, ax = plt.subplots(figsize=figsize, subplot_kw=subplot_kws) return ax if size is not None: if ax is not None: raise ValueError("cannot provide both `size` and `ax` arguments") if aspect is None or aspect == "auto": width, height = mpl.rcParams["figure.figsize"] faspect = width / height elif aspect == "equal": faspect = 1 else: faspect = aspect figsize = (size * faspect, size) _, ax = plt.subplots(figsize=figsize, subplot_kw=subplot_kws) return ax if aspect is not None: raise ValueError("cannot provide `aspect` argument without `size`") if subplot_kws and ax is not None: raise ValueError("cannot use subplot_kws with existing ax") if ax is None: ax = _maybe_gca(**subplot_kws) return ax def _maybe_gca(**subplot_kws: Any) -> Axes: import matplotlib.pyplot as plt # can call gcf unconditionally: either it exists or would be created by plt.axes f = plt.gcf() # only call gca if an active axes exists if f.axes: # can not pass kwargs to active axes return plt.gca() return plt.axes(**subplot_kws) def _get_units_from_attrs(da: DataArray) -> str: """Extracts and formats the unit/units from the attributes.""" pint_array_type = DuckArrayModule("pint").type units = " [{}]" if isinstance(da.data, pint_array_type): return units.format(str(da.data.units)) if "units" in da.attrs: return units.format(da.attrs["units"]) if "unit" in da.attrs: return units.format(da.attrs["unit"]) return "" def label_from_attrs(da: DataArray | None, extra: str = "") -> str: """Makes informative labels if variable metadata (attrs) follows CF conventions.""" if da is None: return "" name: str = "{}" if "long_name" in da.attrs: name = name.format(da.attrs["long_name"]) elif "standard_name" in da.attrs: name = name.format(da.attrs["standard_name"]) elif da.name is not None: name = name.format(da.name) else: name = "" units = _get_units_from_attrs(da) # Treat `name` differently if it's a latex sequence if name.startswith("$") and (name.count("$") % 2 == 0): return "$\n$".join( textwrap.wrap(name + extra + units, 60, break_long_words=False) ) else: return "\n".join(textwrap.wrap(name + extra + units, 30)) def _interval_to_mid_points(array: Iterable[pd.Interval]) -> np.ndarray: """ Helper function which returns an array with the Intervals' mid points.
""" return np.array([x.mid for x in array]) def _interval_to_bound_points(array: Sequence[pd.Interval]) -> np.ndarray: """ Helper function which returns an array with the Intervals' boundaries. """ array_boundaries = np.array([x.left for x in array]) array_boundaries = np.concatenate((array_boundaries, np.array([array[-1].right]))) return array_boundaries def _interval_to_double_bound_points( xarray: Iterable[pd.Interval], yarray: Iterable ) -> tuple[np.ndarray, np.ndarray]: """ Helper function to deal with a xarray consisting of pd.Intervals. Each interval is replaced with both boundaries. I.e. the length of xarray doubles. yarray is modified so it matches the new shape of xarray. """ xarray1 = np.array([x.left for x in xarray]) xarray2 = np.array([x.right for x in xarray]) xarray_out = np.array( list(itertools.chain.from_iterable(zip(xarray1, xarray2, strict=True))) ) yarray_out = np.array( list(itertools.chain.from_iterable(zip(yarray, yarray, strict=True))) ) return xarray_out, yarray_out def _resolve_intervals_1dplot( xval: np.ndarray, yval: np.ndarray, kwargs: dict ) -> tuple[np.ndarray, np.ndarray, str, str, dict]: """ Helper function to replace the values of x and/or y coordinate arrays containing pd.Interval with their mid-points or - for step plots - double points which double the length. """ x_suffix = "" y_suffix = "" # Is it a step plot? (see matplotlib.Axes.step) if kwargs.get("drawstyle", "").startswith("steps-"): remove_drawstyle = False # Convert intervals to double points x_is_interval = _valid_other_type(xval, pd.Interval) y_is_interval = _valid_other_type(yval, pd.Interval) if x_is_interval and y_is_interval: raise TypeError("Can't step plot intervals against intervals.") elif x_is_interval: xval, yval = _interval_to_double_bound_points(xval, yval) remove_drawstyle = True elif y_is_interval: yval, xval = _interval_to_double_bound_points(yval, xval) remove_drawstyle = True # Remove steps-* to be sure that matplotlib is not confused if remove_drawstyle: del kwargs["drawstyle"] # Is it another kind of plot? else: # Convert intervals to mid points and adjust labels if _valid_other_type(xval, pd.Interval): xval = _interval_to_mid_points(xval) x_suffix = "_center" if _valid_other_type(yval, pd.Interval): yval = _interval_to_mid_points(yval) y_suffix = "_center" # return converted arguments return xval, yval, x_suffix, y_suffix, kwargs def _resolve_intervals_2dplot(val, func_name): """ Helper function to replace the values of a coordinate array containing pd.Interval with their mid-points or - for pcolormesh - boundaries which increases length by 1. """ label_extra = "" if _valid_other_type(val, pd.Interval): if func_name == "pcolormesh": val = _interval_to_bound_points(val) else: val = _interval_to_mid_points(val) label_extra = "_center" return val, label_extra def _valid_other_type( x: ArrayLike, types: type[object] | tuple[type[object], ...] ) -> bool: """ Do all elements of x have a type from types? """ return all(isinstance(el, types) for el in np.ravel(x)) def _valid_numpy_subdtype(x, numpy_types): """ Is any dtype from numpy_types superior to the dtype of x? """ # If any of the types given in numpy_types is understood as numpy.generic, # all possible x will be considered valid. This is probably unwanted. for t in numpy_types: assert not np.issubdtype(np.generic, t) return any(np.issubdtype(x.dtype, t) for t in numpy_types) def _ensure_plottable(*args) -> None: """ Raise exception if there is anything in args that can't be plotted on an axis by matplotlib. 
""" numpy_types: tuple[type[object], ...] = ( np.floating, np.integer, np.timedelta64, np.datetime64, np.bool_, np.str_, ) other_types: tuple[type[object], ...] = (datetime, date) cftime_datetime_types: tuple[type[object], ...] = ( () if cftime is None else (cftime.datetime,) ) other_types += cftime_datetime_types for x in args: if not ( _valid_numpy_subdtype(np.asarray(x), numpy_types) or _valid_other_type(np.asarray(x), other_types) ): raise TypeError( "Plotting requires coordinates to be numeric, boolean, " "or dates of type numpy.datetime64, " "datetime.datetime, cftime.datetime or " f"pandas.Interval. Received data of type {np.asarray(x).dtype} instead." ) if _valid_other_type(np.asarray(x), cftime_datetime_types): if nc_time_axis_available: # Register cftime datetypes to matplotlib.units.registry, # otherwise matplotlib will raise an error: import nc_time_axis # noqa: F401 else: raise ImportError( "Plotting of arrays of cftime.datetime " "objects or arrays indexed by " "cftime.datetime objects requires the " "optional `nc-time-axis` (v1.2.0 or later) " "package." ) def _is_numeric(arr): numpy_types = [np.floating, np.integer] return _valid_numpy_subdtype(arr, numpy_types) def _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params): cbar_kwargs.setdefault("extend", cmap_params["extend"]) if cbar_ax is None: cbar_kwargs.setdefault("ax", ax) else: cbar_kwargs.setdefault("cax", cbar_ax) # dont pass extend as kwarg if it is in the mappable if hasattr(primitive, "extend"): cbar_kwargs.pop("extend") fig = ax.get_figure() cbar = fig.colorbar(primitive, **cbar_kwargs) return cbar def _rescale_imshow_rgb(darray, vmin, vmax, robust): assert robust or vmin is not None or vmax is not None # Calculate vmin and vmax automatically for `robust=True` if robust: if vmax is None: vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE) if vmin is None: vmin = np.nanpercentile(darray, ROBUST_PERCENTILE) # If not robust and one bound is None, calculate the default other bound # and check that an interval between them exists. elif vmax is None: vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1 if vmax < vmin: raise ValueError( f"vmin={vmin!r} is less than the default vmax ({vmax!r}) - you must supply " "a vmax > vmin in this case." ) elif vmin is None: vmin = 0 if vmin > vmax: raise ValueError( f"vmax={vmax!r} is less than the default vmin (0) - you must supply " "a vmin < vmax in this case." ) # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float # to avoid precision loss, integer over/underflow, etc with extreme inputs. # After scaling, downcast to 32-bit float. This substantially reduces # memory usage after we hand `darray` off to matplotlib. darray = ((darray.astype("f8") - vmin) / (vmax - vmin)).astype("f4") return np.minimum(np.maximum(darray, 0), 1) def _update_axes( ax: Axes, xincrease: bool | None, yincrease: bool | None, xscale: ScaleOptions = None, yscale: ScaleOptions = None, xticks: ArrayLike | None = None, yticks: ArrayLike | None = None, xlim: tuple[float, float] | None = None, ylim: tuple[float, float] | None = None, ) -> None: """ Update axes with provided parameters """ if xincrease is None: pass elif (xincrease and ax.xaxis_inverted()) or ( not xincrease and not ax.xaxis_inverted() ): ax.invert_xaxis() if yincrease is None: pass elif (yincrease and ax.yaxis_inverted()) or ( not yincrease and not ax.yaxis_inverted() ): ax.invert_yaxis() # The default xscale, yscale needs to be None. 
# If we set a scale it resets the axes formatters, # This means that set_xscale('linear') on a datetime axis # will remove the date labels. So only set the scale when explicitly # asked to. https://github.com/matplotlib/matplotlib/issues/8740 if xscale is not None: ax.set_xscale(xscale) if yscale is not None: ax.set_yscale(yscale) if xticks is not None: ax.set_xticks(xticks) if yticks is not None: ax.set_yticks(yticks) if xlim is not None: ax.set_xlim(xlim) if ylim is not None: ax.set_ylim(ylim) def _is_monotonic(coord, axis=0): """ >>> _is_monotonic(np.array([0, 1, 2])) np.True_ >>> _is_monotonic(np.array([2, 1, 0])) np.True_ >>> _is_monotonic(np.array([0, 2, 1])) np.False_ """ if coord.shape[axis] < 3: return True else: n = coord.shape[axis] delta_pos = coord.take(np.arange(1, n), axis=axis) >= coord.take( np.arange(0, n - 1), axis=axis ) delta_neg = coord.take(np.arange(1, n), axis=axis) <= coord.take( np.arange(0, n - 1), axis=axis ) return np.all(delta_pos) or np.all(delta_neg) def _infer_interval_breaks(coord, axis=0, scale=None, check_monotonic=False): """ >>> _infer_interval_breaks(np.arange(5)) array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5]) >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1) array([[-0.5, 0.5, 1.5], [ 2.5, 3.5, 4.5]]) >>> _infer_interval_breaks(np.logspace(-2, 2, 5), scale="log") array([3.16227766e-03, 3.16227766e-02, 3.16227766e-01, 3.16227766e+00, 3.16227766e+01, 3.16227766e+02]) """ coord = np.asarray(coord) if check_monotonic and not _is_monotonic(coord, axis=axis): raise ValueError( "The input coordinate is not sorted in increasing " f"order along axis {axis}. This can lead to unexpected " "results. Consider calling the `sortby` method on " "the input DataArray. To plot data with categorical " "axes, consider using the `heatmap` function from " "the `seaborn` statistical plotting library." ) # If logscale, compute the intervals in the logarithmic space if scale == "log": if (coord <= 0).any(): raise ValueError( "Found negative or zero value in coordinates. " "Coordinates must be positive on logscale plots." 
) coord = np.log10(coord) deltas = 0.5 * np.diff(coord, axis=axis) if deltas.size == 0: deltas = np.array(0.0) first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis) last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis) trim_last = tuple( slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim) ) interval_breaks = np.concatenate( [first, coord[trim_last] + deltas, last], axis=axis ) if scale == "log": # Convert the intervals back into the linear space return np.power(10, interval_breaks) return interval_breaks def _process_cmap_cbar_kwargs( func, data, cmap=None, colors=None, cbar_kwargs: Iterable[tuple[str, Any]] | Mapping[str, Any] | None = None, levels=None, _is_facetgrid=False, **kwargs, ) -> tuple[dict[str, Any], dict[str, Any]]: """ Parameters ---------- func : plotting function data : ndarray, Data values Returns ------- cmap_params : dict cbar_kwargs : dict """ if func.__name__ == "surface": # Leave user to specify cmap settings for surface plots kwargs["cmap"] = cmap return { k: kwargs.get(k) for k in ["vmin", "vmax", "cmap", "extend", "levels", "norm"] }, {} cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs) # colors is mutually exclusive with cmap if cmap and colors: raise ValueError("Can't specify both cmap and colors.") # colors is only valid when levels is supplied or the plot is of type # contour or contourf if colors and (("contour" not in func.__name__) and (levels is None)): raise ValueError("Can only specify colors with contour or levels") # we should not be getting a list of colors in cmap anymore # is there a better way to do this test? if isinstance(cmap, list | tuple): raise ValueError( "Specifying a list of colors in cmap is deprecated. " "Use colors keyword instead." ) cmap_kwargs = { "plot_data": data, "levels": levels, "cmap": colors or cmap, "filled": func.__name__ != "contour", } cmap_args = getfullargspec(_determine_cmap_params).args cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs) if not _is_facetgrid: cmap_params = _determine_cmap_params(**cmap_kwargs) else: cmap_params = { k: cmap_kwargs[k] for k in ["vmin", "vmax", "cmap", "extend", "levels", "norm"] } return cmap_params, cbar_kwargs def _get_nice_quiver_magnitude(u, v): import matplotlib as mpl ticker = mpl.ticker.MaxNLocator(3) mean = np.mean(np.hypot(u.to_numpy(), v.to_numpy())) magnitude = ticker.tick_values(0, mean)[-2] return magnitude # Copied from matplotlib, tweaked so func can return strings. # https://github.com/matplotlib/matplotlib/issues/19555 def legend_elements( self, prop="colors", num="auto", fmt=None, func=lambda x: x, **kwargs ): """ Create legend handles and labels for a PathCollection. Each legend handle is a `.Line2D` representing the Path that was drawn, and each label is a string describing what each Path represents. This is useful for obtaining a legend for a `~.Axes.scatter` plot; e.g.:: scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3]) plt.legend(*scatter.legend_elements()) creates three legend elements, one for each color with the numerical values passed to *c* as the labels. Also see the :ref:`automatedlegendcreation` example. Parameters ---------- prop : {"colors", "sizes"}, default: "colors" If "colors", the legend handles will show the different colors of the collection. If "sizes", the legend will show the different sizes. To set both, use *kwargs* to directly edit the `.Line2D` properties. num : int, None, "auto" (default), array-like, or `~.ticker.Locator` Target number of elements to create.
If None, use all unique elements of the mappable array. If an integer, target to use *num* elements in the normed range. If *"auto"*, try to determine which option better suits the nature of the data. The number of created elements may slightly deviate from *num* due to a `~.ticker.Locator` being used to find useful locations. If a list or array, use exactly those elements for the legend. Finally, a `~.ticker.Locator` can be provided. fmt : str, `~matplotlib.ticker.Formatter`, or None (default) The format or formatter to use for the labels. If a string must be a valid input for a `~.StrMethodFormatter`. If None (the default), use a `~.ScalarFormatter`. func : function, default: ``lambda x: x`` Function to calculate the labels. Often the size (or color) argument to `~.Axes.scatter` will have been pre-processed by the user using a function ``s = f(x)`` to make the markers visible; e.g. ``size = np.log10(x)``. Providing the inverse of this function here allows that pre-processing to be inverted, so that the legend labels have the correct values; e.g. ``func = lambda x: 10**x``. **kwargs Allowed keyword arguments are *color* and *size*. E.g. it may be useful to set the color of the markers if *prop="sizes"* is used; similarly to set the size of the markers if *prop="colors"* is used. Any further parameters are passed onto the `.Line2D` instance. This may be useful to e.g. specify a different *markeredgecolor* or *alpha* for the legend handles. Returns ------- handles : list of `.Line2D` Visual representation of each element of the legend. labels : list of str The string labels for elements of the legend. """ import matplotlib as mpl mlines = mpl.lines handles = [] labels = [] if prop == "colors": arr = self.get_array() if arr is None: warnings.warn( "Collection without array used. Make sure to " "specify the values to be colormapped via the " "`c` argument.", stacklevel=2, ) return handles, labels _size = kwargs.pop("size", mpl.rcParams["lines.markersize"]) def _get_color_and_size(value): return self.cmap(self.norm(value)), _size elif prop == "sizes": if isinstance(self, mpl.collections.LineCollection): arr = self.get_linewidths() else: arr = self.get_sizes() _color = kwargs.pop("color", "k") def _get_color_and_size(value): return _color, np.sqrt(value) else: raise ValueError( "Valid values for `prop` are 'colors' or " f"'sizes'. You supplied '{prop}' instead." 
) # Get the unique values and their labels: values = np.unique(arr) label_values = np.asarray(func(values)) label_values_are_numeric = np.issubdtype(label_values.dtype, np.number) # Handle the label format: if fmt is None and label_values_are_numeric: fmt = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True) elif fmt is None and not label_values_are_numeric: fmt = mpl.ticker.StrMethodFormatter("{x}") elif isinstance(fmt, str): fmt = mpl.ticker.StrMethodFormatter(fmt) fmt.create_dummy_axis() if num == "auto": num = 9 if len(values) <= num: num = None if label_values_are_numeric: label_values_min = label_values.min() label_values_max = label_values.max() fmt.axis.set_view_interval(label_values_min, label_values_max) fmt.axis.set_data_interval(label_values_min, label_values_max) if num is not None: # Labels are numerical but larger than the target # number of elements, reduce to target using matplotlibs # ticker classes: if isinstance(num, mpl.ticker.Locator): loc = num elif np.iterable(num): loc = mpl.ticker.FixedLocator(num) else: num = int(num) loc = mpl.ticker.MaxNLocator( nbins=num, min_n_ticks=num - 1, steps=[1, 2, 2.5, 3, 5, 6, 8, 10] ) # Get nicely spaced label_values: label_values = loc.tick_values(label_values_min, label_values_max) # Remove extrapolated label_values: cond = (label_values >= label_values_min) & ( label_values <= label_values_max ) label_values = label_values[cond] # Get the corresponding values by creating a linear interpolant # with small step size: values_interp = np.linspace(values.min(), values.max(), 256) label_values_interp = func(values_interp) ix = np.argsort(label_values_interp) values = np.interp(label_values, label_values_interp[ix], values_interp[ix]) elif num is not None and not label_values_are_numeric: # Labels are not numerical so modifying label_values is not # possible, instead filter the array with nicely distributed # indexes: if type(num) is int: loc = mpl.ticker.LinearLocator(num) else: raise ValueError("`num` only supports integers for non-numeric labels.") ind = loc.tick_values(0, len(label_values) - 1).astype(int) label_values = label_values[ind] values = values[ind] # Some formatters requires set_locs: if hasattr(fmt, "set_locs"): fmt.set_locs(label_values) # Default settings for handles, add or override with kwargs: kw = dict(markeredgewidth=self.get_linewidths()[0], alpha=self.get_alpha()) kw.update(kwargs) for val, lab in zip(values, label_values, strict=True): color, size = _get_color_and_size(val) if isinstance(self, mpl.collections.PathCollection): kw.update(linestyle="", marker=self.get_paths()[0], markersize=size) elif isinstance(self, mpl.collections.LineCollection): kw.update(linestyle=self.get_linestyle()[0], linewidth=size) h = mlines.Line2D([0], [0], color=color, **kw) handles.append(h) labels.append(fmt(lab)) return handles, labels def _legend_add_subtitle(handles, labels, text): """Add a subtitle to legend handles.""" import matplotlib.pyplot as plt if text and len(handles) > 1: # Create a blank handle that's not visible, the # invisibility will be used to discern which are subtitles # or not: blank_handle = plt.Line2D([], [], label=text) blank_handle.set_visible(False) # Subtitles are shown first: handles = [blank_handle] + handles labels = [text] + labels return handles, labels def _adjust_legend_subtitles(legend): """Make invisible-handle "subtitles" entries look more like titles.""" import matplotlib.pyplot as plt # Legend title not in rcParams until 3.0 font_size = plt.rcParams.get("legend.title_fontsize", 
None) hpackers = legend.findobj(plt.matplotlib.offsetbox.VPacker)[0].get_children() hpackers = [v for v in hpackers if isinstance(v, plt.matplotlib.offsetbox.HPacker)] for hpack in hpackers: areas = hpack.get_children() if len(areas) < 2: continue draw_area, text_area = areas handles = draw_area.get_children() # Assume that all artists that are not visible are # subtitles: if not all(artist.get_visible() for artist in handles): # Remove the dummy marker which will bring the text # more to the center: draw_area.set_width(0) for text in text_area.get_children(): if font_size is not None: # The subtitles should have the same font size # as normal legend titles: text.set_size(font_size) def _infer_meta_data(ds, x, y, hue, hue_style, add_guide, funcname): dvars = set(ds.variables.keys()) error_msg = f" must be one of ({', '.join(sorted(str(v) for v in dvars))})" if x not in dvars: raise ValueError(f"Expected 'x' {error_msg}. Received {x} instead.") if y not in dvars: raise ValueError(f"Expected 'y' {error_msg}. Received {y} instead.") if hue is not None and hue not in dvars: raise ValueError(f"Expected 'hue' {error_msg}. Received {hue} instead.") if hue: hue_is_numeric = _is_numeric(ds[hue].values) if hue_style is None: hue_style = "continuous" if hue_is_numeric else "discrete" if not hue_is_numeric and (hue_style == "continuous"): raise ValueError( f"Cannot create a colorbar for a non numeric coordinate: {hue}" ) if add_guide is None or add_guide is True: add_colorbar = hue_style == "continuous" add_legend = hue_style == "discrete" else: add_colorbar = False add_legend = False else: if add_guide is True and funcname not in ("quiver", "streamplot"): raise ValueError("Cannot set add_guide when hue is None.") add_legend = False add_colorbar = False if (add_guide or add_guide is None) and funcname == "quiver": add_quiverkey = True if hue: add_colorbar = True if not hue_style: hue_style = "continuous" elif hue_style != "continuous": raise ValueError( "hue_style must be 'continuous' or None for .plot.quiver or " ".plot.streamplot" ) else: add_quiverkey = False if (add_guide or add_guide is None) and funcname == "streamplot" and hue: add_colorbar = True if not hue_style: hue_style = "continuous" elif hue_style != "continuous": raise ValueError( "hue_style must be 'continuous' or None for .plot.quiver or " ".plot.streamplot" ) if hue_style is not None and hue_style not in ["discrete", "continuous"]: raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.") if hue: hue_label = label_from_attrs(ds[hue]) hue = ds[hue] else: hue_label = None hue = None return { "add_colorbar": add_colorbar, "add_legend": add_legend, "add_quiverkey": add_quiverkey, "hue_label": hue_label, "hue_style": hue_style, "xlabel": label_from_attrs(ds[x]), "ylabel": label_from_attrs(ds[y]), "hue": hue, } @overload def _parse_size( data: None, norm: tuple[float | None, float | None, bool] | Normalize | None, ) -> None: ... @overload def _parse_size( data: DataArray, norm: tuple[float | None, float | None, bool] | Normalize | None, ) -> pd.Series: ...
# copied from seaborn def _parse_size( data: DataArray | None, norm: tuple[float | None, float | None, bool] | Normalize | None, ) -> pd.Series | None: import matplotlib as mpl if data is None: return None flatdata = data.values.flatten() if not _is_numeric(flatdata): levels = np.unique(flatdata) numbers = np.arange(1, 1 + len(levels))[::-1] else: levels = numbers = np.sort(np.unique(flatdata)) min_width, default_width, max_width = _MARKERSIZE_RANGE # width_range = min_width, max_width if norm is None: norm = mpl.colors.Normalize() elif isinstance(norm, tuple): norm = mpl.colors.Normalize(*norm) elif not isinstance(norm, mpl.colors.Normalize): err = "``size_norm`` must be None, tuple, or Normalize object." raise ValueError(err) assert isinstance(norm, mpl.colors.Normalize) norm.clip = True if not norm.scaled(): norm(np.asarray(numbers)) # limits = norm.vmin, norm.vmax scl = norm(numbers) widths = np.asarray(min_width + scl * (max_width - min_width)) if scl.mask.any(): widths[scl.mask] = 0 sizes = dict(zip(levels, widths, strict=True)) return pd.Series(sizes) class _Normalize(Sequence): """ Normalize numerical or categorical values to numerical values. The class includes helper methods that simplifies transforming to and from normalized values. Parameters ---------- data : DataArray DataArray to normalize. width : Sequence of three numbers, optional Normalize the data to these (min, default, max) values. The default is None. """ _data: DataArray | None _data_unique: np.ndarray _data_unique_index: np.ndarray _data_unique_inverse: np.ndarray _data_is_numeric: bool _width: tuple[float, float, float] | None __slots__ = ( "_data", "_data_is_numeric", "_data_unique", "_data_unique_index", "_data_unique_inverse", "_width", ) def __init__( self, data: DataArray | None, width: tuple[float, float, float] | None = None, _is_facetgrid: bool = False, ) -> None: self._data = data self._width = width if not _is_facetgrid else None pint_array_type = DuckArrayModule("pint").type to_unique = ( data.to_numpy() # type: ignore[union-attr] if isinstance(data if data is None else data.data, pint_array_type) else data ) data_unique, data_unique_inverse = np.unique(to_unique, return_inverse=True) # type: ignore[call-overload] self._data_unique = data_unique self._data_unique_index = np.arange(0, data_unique.size) self._data_unique_inverse = data_unique_inverse self._data_is_numeric = False if data is None else _is_numeric(data) def __repr__(self) -> str: with np.printoptions(precision=4, suppress=True, threshold=5): return ( f"<_Normalize(data, width={self._width})>\n" f"{self._data_unique} -> {self._values_unique}" ) def __len__(self) -> int: return len(self._data_unique) def __getitem__(self, key): return self._data_unique[key] @property def data(self) -> DataArray | None: return self._data @property def data_is_numeric(self) -> bool: """ Check if data is numeric. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).data_is_numeric False >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a).data_is_numeric True >>> # TODO: Datetime should be numeric right? >>> a = xr.DataArray(pd.date_range("2000-1-1", periods=4)) >>> _Normalize(a).data_is_numeric False # TODO: Timedelta should be numeric right? >>> a = xr.DataArray(pd.timedelta_range("-1D", periods=4, freq="D")) >>> _Normalize(a).data_is_numeric True """ return self._data_is_numeric @overload def _calc_widths(self, y: np.ndarray) -> np.ndarray: ... @overload def _calc_widths(self, y: DataArray) -> DataArray: ... 
def _calc_widths(self, y: np.ndarray | DataArray) -> np.ndarray | DataArray: """ Normalize the values so they're in between self._width. """ if self._width is None: return y xmin, xdefault, xmax = self._width diff_maxy_miny = np.max(y) - np.min(y) if diff_maxy_miny == 0: # Use the default width if y is constant: widths = xdefault + 0 * y else: # Normalize in between xmin and xmax: k = (y - np.min(y)) / diff_maxy_miny widths = xmin + k * (xmax - xmin) return widths @overload def _indexes_centered(self, x: np.ndarray) -> np.ndarray: ... @overload def _indexes_centered(self, x: DataArray) -> DataArray: ... def _indexes_centered(self, x: np.ndarray | DataArray) -> np.ndarray | DataArray: """ Offset indexes so that they fall in the center of self.levels. ["a", "b", "c"] -> [1, 3, 5] """ return x * 2 + 1 @property def values(self) -> DataArray | None: """ Return a normalized number array for the unique levels. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).values <xarray.DataArray (dim_0: 5)> Size: 40B array([3, 1, 1, 3, 5]) Dimensions without coordinates: dim_0 >>> _Normalize(a, width=(18, 36, 72)).values <xarray.DataArray (dim_0: 5)> Size: 40B array([45., 18., 18., 45., 72.]) Dimensions without coordinates: dim_0 >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a).values <xarray.DataArray (dim_0: 6)> Size: 48B array([0.5, 0. , 0. , 0.5, 2. , 3. ]) Dimensions without coordinates: dim_0 >>> _Normalize(a, width=(18, 36, 72)).values <xarray.DataArray (dim_0: 6)> Size: 48B array([27., 18., 18., 27., 54., 72.]) Dimensions without coordinates: dim_0 >>> _Normalize(a * 0, width=(18, 36, 72)).values <xarray.DataArray (dim_0: 6)> Size: 48B array([36., 36., 36., 36., 36., 36.]) Dimensions without coordinates: dim_0 """ if self.data is None: return None val: DataArray if self.data_is_numeric: val = self.data else: arr = self._indexes_centered(self._data_unique_inverse) val = self.data.copy(data=arr.reshape(self.data.shape)) return self._calc_widths(val) @property def _values_unique(self) -> np.ndarray | None: """ Return unique values. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a)._values_unique array([1, 3, 5]) >>> _Normalize(a, width=(18, 36, 72))._values_unique array([18., 45., 72.]) >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> _Normalize(a)._values_unique array([0. , 0.5, 2. , 3. ]) >>> _Normalize(a, width=(18, 36, 72))._values_unique array([18., 27., 54., 72.]) """ if self.data is None: return None val: np.ndarray if self.data_is_numeric: val = self._data_unique else: val = self._indexes_centered(self._data_unique_index) return self._calc_widths(val) @property def ticks(self) -> np.ndarray | None: """ Return ticks for plt.colorbar if the data is not numeric. Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).ticks array([1, 3, 5]) """ val: np.ndarray | None if self.data_is_numeric: val = None else: val = self._indexes_centered(self._data_unique_index) return val @property def levels(self) -> np.ndarray: """ Return discrete levels that will evenly bound self.values. ["a", "b", "c"] -> [0, 2, 4, 6] Examples -------- >>> a = xr.DataArray(["b", "a", "a", "b", "c"]) >>> _Normalize(a).levels array([0, 2, 4, 6]) """ return ( np.append(self._data_unique_index, np.max(self._data_unique_index) + 1) * 2 ) @property def _lookup(self) -> pd.Series: if self._values_unique is None: raise ValueError("self.data can't be None.") return pd.Series(dict(zip(self._values_unique, self._data_unique, strict=True))) def _lookup_arr(self, x) -> np.ndarray: # Use reindex to be less sensitive to float errors. reindex only # works with sorted index.
# Return as numpy array since legend_elements # seems to require that: return self._lookup.sort_index().reindex(x, method="nearest").to_numpy() @property def format(self) -> FuncFormatter: """ Return a FuncFormatter that maps self.values elements back to the original value as a string. Useful with plt.colorbar. Examples -------- >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> aa = _Normalize(a, width=(0, 0.5, 1)) >>> aa._lookup 0.000000 0.0 0.166667 0.5 0.666667 2.0 1.000000 3.0 dtype: float64 >>> aa.format(1) '3.0' """ import matplotlib.pyplot as plt def _func(x: Any, pos: Any | None = None): return f"{self._lookup_arr([x])[0]}" return plt.FuncFormatter(_func) @property def func(self) -> Callable[[Any, Any | None], Any]: """ Return a lambda function that maps self.values elements back to the original value as a numpy array. Useful with ax.legend_elements. Examples -------- >>> a = xr.DataArray([0.5, 0, 0, 0.5, 2, 3]) >>> aa = _Normalize(a, width=(0, 0.5, 1)) >>> aa._lookup 0.000000 0.0 0.166667 0.5 0.666667 2.0 1.000000 3.0 dtype: float64 >>> aa.func([0.16, 1]) array([0.5, 3. ]) """ def _func(x: Any, pos: Any | None = None): return self._lookup_arr(x) return _func def _determine_guide( hueplt_norm: _Normalize, sizeplt_norm: _Normalize, add_colorbar: bool | None = None, add_legend: bool | None = None, plotfunc_name: str | None = None, ) -> tuple[bool, bool]: if plotfunc_name == "hist": return False, False if (add_colorbar) and hueplt_norm.data is None: raise KeyError("Cannot create a colorbar when hue is None.") if add_colorbar is None: if hueplt_norm.data is not None: add_colorbar = True else: add_colorbar = False if add_legend and hueplt_norm.data is None and sizeplt_norm.data is None: raise KeyError("Cannot create a legend when hue and markersize is None.") if add_legend is None: if ( not add_colorbar and (hueplt_norm.data is not None and hueplt_norm.data_is_numeric is False) ) or sizeplt_norm.data is not None: add_legend = True else: add_legend = False return add_colorbar, add_legend def _add_legend( hueplt_norm: _Normalize, sizeplt_norm: _Normalize, primitive, legend_ax, plotfunc: str, ): primitive = primitive if isinstance(primitive, list) else [primitive] handles, labels = [], [] for huesizeplt, prop in [ (hueplt_norm, "colors"), (sizeplt_norm, "sizes"), ]: if huesizeplt.data is not None: # Get legend handles and labels that displays the # values correctly. Order might be different because # legend_elements uses np.unique instead of pd.unique, # FacetGrid.add_legend might have troubles with this: hdl, lbl = [], [] for p in primitive: hdl_, lbl_ = legend_elements(p, prop, num="auto", func=huesizeplt.func) hdl += hdl_ lbl += lbl_ # Only save unique values: u, ind = np.unique(lbl, return_index=True) ind = np.argsort(ind) lbl = cast(list, u[ind].tolist()) hdl = cast(list, np.array(hdl)[ind].tolist()) # Add a subtitle: hdl, lbl = _legend_add_subtitle(hdl, lbl, label_from_attrs(huesizeplt.data)) handles += hdl labels += lbl legend = legend_ax.legend(handles, labels, framealpha=0.5) _adjust_legend_subtitles(legend) return legend def _guess_coords_to_plot( darray: DataArray, coords_to_plot: MutableMapping[str, Hashable | None], kwargs: dict, default_guess: tuple[str, ...] = ("x",), # TODO: Can this be normalized, plt.cbook.normalize_kwargs? ignore_guess_kwargs: tuple[tuple[str, ...], ...] 
= ((),), ) -> MutableMapping[str, Hashable]: """ Guess what coords to plot if some of the values in coords_to_plot are None, which happens when the user has not defined all available ways of visualizing the data. Parameters ---------- darray : DataArray The DataArray to check for available coords. coords_to_plot : MutableMapping[str, Hashable] Coords defined by the user to plot. kwargs : dict Extra kwargs that will be sent to matplotlib. default_guess : tuple[str, ...], optional Default values and order used to fill in missing entries of coords_to_plot, default: ("x",). ignore_guess_kwargs : tuple[tuple[str, ...], ...] Matplotlib arguments to ignore. Examples -------- >>> ds = xr.tutorial.scatter_example_dataset(seed=42) >>> # Only guess x by default: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": None}, ... kwargs={}, ... ) {'x': 'x', 'z': None, 'hue': None, 'size': None} >>> # Guess all plot dims with other default values: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": None}, ... kwargs={}, ... default_guess=("x", "hue", "size"), ... ignore_guess_kwargs=((), ("c", "color"), ("s",)), ... ) {'x': 'x', 'z': None, 'hue': 'y', 'size': 'z'} >>> # Don't guess ´size´, since the matplotlib kwarg ´s´ has been defined: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": None}, ... kwargs={"s": 5}, ... default_guess=("x", "hue", "size"), ... ignore_guess_kwargs=((), ("c", "color"), ("s",)), ... ) {'x': 'x', 'z': None, 'hue': 'y', 'size': None} >>> # Prioritize ´size´ over ´s´: >>> xr.plot.utils._guess_coords_to_plot( ... ds.A, ... coords_to_plot={"x": None, "z": None, "hue": None, "size": "x"}, ... kwargs={"s": 5}, ... default_guess=("x", "hue", "size"), ... ignore_guess_kwargs=((), ("c", "color"), ("s",)), ... ) {'x': 'y', 'z': None, 'hue': 'z', 'size': 'x'} """ coords_to_plot_exist = {k: v for k, v in coords_to_plot.items() if v is not None} available_coords = tuple( k for k in darray.coords.keys() if k not in coords_to_plot_exist.values() ) # If coords_to_plot[k] isn't defined then fill with one of the available dims, unless # one of related mpl kwargs has been used. This should have similar behaviour as # * plt.plot(x, y) -> Multiple lines with different colors if y is 2d. # * plt.plot(x, y, color="red") -> Multiple red lines if y is 2d. for k, dim, ign_kws in zip( default_guess, available_coords, ignore_guess_kwargs, strict=False ): if coords_to_plot.get(k, None) is None and all( kwargs.get(ign_kw) is None for ign_kw in ign_kws ): coords_to_plot[k] = dim for k, dim in coords_to_plot.items(): _assert_valid_xy(darray, dim, k) return coords_to_plot def _set_concise_date(ax: Axes, axis: Literal["x", "y", "z"] = "x") -> None: """ Use ConciseDateFormatter which is meant to improve the strings chosen for the ticklabels, and to minimize the strings used in those tick labels as much as possible. https://matplotlib.org/stable/gallery/ticks/date_concise_formatter.html Parameters ---------- ax : Axes Figure axes. axis : Literal["x", "y", "z"], optional Which axis to make concise. The default is "x".
""" import matplotlib.dates as mdates locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) _axis = getattr(ax, f"{axis}axis") _axis.set_major_locator(locator) _axis.set_major_formatter(formatter) xarray-2025.09.0/xarray/py.typed000066400000000000000000000000001505620616400163370ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/000077500000000000000000000000001505620616400161415ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/__init__.py000066400000000000000000000000001505620616400202400ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/css/000077500000000000000000000000001505620616400167315ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/css/__init__.py000066400000000000000000000000001505620616400210300ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/css/style.css000066400000000000000000000175561505620616400206210ustar00rootroot00000000000000/* CSS stylesheet for displaying xarray objects in jupyterlab. * */ :root { --xr-font-color0: var( --jp-content-font-color0, var(--pst-color-text-base rgba(0, 0, 0, 1)) ); --xr-font-color2: var( --jp-content-font-color2, var(--pst-color-text-base, rgba(0, 0, 0, 0.54)) ); --xr-font-color3: var( --jp-content-font-color3, var(--pst-color-text-base, rgba(0, 0, 0, 0.38)) ); --xr-border-color: var( --jp-border-color2, hsl(from var(--pst-color-on-background, white) h s calc(l - 10)) ); --xr-disabled-color: var( --jp-layout-color3, hsl(from var(--pst-color-on-background, white) h s calc(l - 40)) ); --xr-background-color: var( --jp-layout-color0, var(--pst-color-on-background, white) ); --xr-background-color-row-even: var( --jp-layout-color1, hsl(from var(--pst-color-on-background, white) h s calc(l - 5)) ); --xr-background-color-row-odd: var( --jp-layout-color2, hsl(from var(--pst-color-on-background, white) h s calc(l - 15)) ); } html[theme="dark"], html[data-theme="dark"], body[data-theme="dark"], body.vscode-dark { --xr-font-color0: var( --jp-content-font-color0, var(--pst-color-text-base, rgba(255, 255, 255, 1)) ); --xr-font-color2: var( --jp-content-font-color2, var(--pst-color-text-base, rgba(255, 255, 255, 0.54)) ); --xr-font-color3: var( --jp-content-font-color3, var(--pst-color-text-base, rgba(255, 255, 255, 0.38)) ); --xr-border-color: var( --jp-border-color2, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 10)) ); --xr-disabled-color: var( --jp-layout-color3, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 40)) ); --xr-background-color: var( --jp-layout-color0, var(--pst-color-on-background, #111111) ); --xr-background-color-row-even: var( --jp-layout-color1, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 5)) ); --xr-background-color-row-odd: var( --jp-layout-color2, hsl(from var(--pst-color-on-background, #111111) h s calc(l + 15)) ); } .xr-wrap { display: block !important; min-width: 300px; max-width: 700px; } .xr-text-repr-fallback { /* fallback to plain text repr when CSS is not injected (untrusted notebook) */ display: none; } .xr-header { padding-top: 6px; padding-bottom: 6px; margin-bottom: 4px; border-bottom: solid 1px var(--xr-border-color); } .xr-header > div, .xr-header > ul { display: inline; margin-top: 0; margin-bottom: 0; } .xr-obj-type, .xr-array-name { margin-left: 2px; margin-right: 10px; } .xr-obj-type { color: var(--xr-font-color2); } .xr-sections { padding-left: 0 !important; display: grid; grid-template-columns: 150px auto auto 1fr 0 20px 0 20px; } .xr-section-item { display: contents; } .xr-section-item 
input { display: inline-block; opacity: 0; height: 0; } .xr-section-item input + label { color: var(--xr-disabled-color); border: 2px solid transparent !important; } .xr-section-item input:enabled + label { cursor: pointer; color: var(--xr-font-color2); } .xr-section-item input:focus + label { border: 2px solid var(--xr-font-color0) !important; } .xr-section-item input:enabled + label:hover { color: var(--xr-font-color0); } .xr-section-summary { grid-column: 1; color: var(--xr-font-color2); font-weight: 500; } .xr-section-summary > span { display: inline-block; padding-left: 0.5em; } .xr-section-summary-in:disabled + label { color: var(--xr-font-color2); } .xr-section-summary-in + label:before { display: inline-block; content: "β–Ί"; font-size: 11px; width: 15px; text-align: center; } .xr-section-summary-in:disabled + label:before { color: var(--xr-disabled-color); } .xr-section-summary-in:checked + label:before { content: "β–Ό"; } .xr-section-summary-in:checked + label > span { display: none; } .xr-section-summary, .xr-section-inline-details { padding-top: 4px; padding-bottom: 4px; } .xr-section-inline-details { grid-column: 2 / -1; } .xr-section-details { display: none; grid-column: 1 / -1; margin-bottom: 5px; } .xr-section-summary-in:checked ~ .xr-section-details { display: contents; } .xr-array-wrap { grid-column: 1 / -1; display: grid; grid-template-columns: 20px auto; } .xr-array-wrap > label { grid-column: 1; vertical-align: top; } .xr-preview { color: var(--xr-font-color3); } .xr-array-preview, .xr-array-data { padding: 0 5px !important; grid-column: 2; } .xr-array-data, .xr-array-in:checked ~ .xr-array-preview { display: none; } .xr-array-in:checked ~ .xr-array-data, .xr-array-preview { display: inline-block; } .xr-dim-list { display: inline-block !important; list-style: none; padding: 0 !important; margin: 0; } .xr-dim-list li { display: inline-block; padding: 0; margin: 0; } .xr-dim-list:before { content: "("; } .xr-dim-list:after { content: ")"; } .xr-dim-list li:not(:last-child):after { content: ","; padding-right: 5px; } .xr-has-index { font-weight: bold; } .xr-var-list, .xr-var-item { display: contents; } .xr-var-item > div, .xr-var-item label, .xr-var-item > .xr-var-name span { background-color: var(--xr-background-color-row-even); border-color: var(--xr-background-color-row-odd); margin-bottom: 0; padding-top: 2px; } .xr-var-item > .xr-var-name:hover span { padding-right: 5px; } .xr-var-list > li:nth-child(odd) > div, .xr-var-list > li:nth-child(odd) > label, .xr-var-list > li:nth-child(odd) > .xr-var-name span { background-color: var(--xr-background-color-row-odd); border-color: var(--xr-background-color-row-even); } .xr-var-name { grid-column: 1; } .xr-var-dims { grid-column: 2; } .xr-var-dtype { grid-column: 3; text-align: right; color: var(--xr-font-color2); } .xr-var-preview { grid-column: 4; } .xr-index-preview { grid-column: 2 / 5; color: var(--xr-font-color2); } .xr-var-name, .xr-var-dims, .xr-var-dtype, .xr-preview, .xr-attrs dt { white-space: nowrap; overflow: hidden; text-overflow: ellipsis; padding-right: 10px; } .xr-var-name:hover, .xr-var-dims:hover, .xr-var-dtype:hover, .xr-attrs dt:hover { overflow: visible; width: auto; z-index: 1; } .xr-var-attrs, .xr-var-data, .xr-index-data { display: none; border-top: 2px dotted var(--xr-background-color); padding-bottom: 20px !important; padding-top: 10px !important; } .xr-var-attrs-in + label, .xr-var-data-in + label, .xr-index-data-in + label { padding: 0 1px; } .xr-var-attrs-in:checked ~ .xr-var-attrs, 
.xr-var-data-in:checked ~ .xr-var-data, .xr-index-data-in:checked ~ .xr-index-data { display: block; } .xr-var-data > table { float: right; } .xr-var-data > pre, .xr-index-data > pre, .xr-var-data > table > tbody > tr { background-color: transparent !important; } .xr-var-name span, .xr-var-data, .xr-index-name div, .xr-index-data, .xr-attrs { padding-left: 25px !important; } .xr-attrs, .xr-var-attrs, .xr-var-data, .xr-index-data { grid-column: 1 / -1; } dl.xr-attrs { padding: 0; margin: 0; display: grid; grid-template-columns: 125px auto; } .xr-attrs dt, .xr-attrs dd { padding: 0; margin: 0; float: left; padding-right: 10px; width: auto; } .xr-attrs dt { font-weight: normal; grid-column: 1; } .xr-attrs dt:hover span { display: inline-block; background: var(--xr-background-color); padding-right: 10px; } .xr-attrs dd { grid-column: 2; white-space: pre-wrap; word-break: break-all; } .xr-icon-database, .xr-icon-file-text2, .xr-no-icon { display: inline-block; vertical-align: middle; width: 1em; height: 1.5em !important; stroke-width: 0; stroke: currentColor; fill: currentColor; } .xr-var-attrs-in:checked + label > .xr-icon-file-text2, .xr-var-data-in:checked + label > .xr-icon-database, .xr-index-data-in:checked + label > .xr-icon-database { color: var(--xr-font-color0); filter: drop-shadow(1px 1px 5px var(--xr-font-color2)); stroke-width: 0.8px; } xarray-2025.09.0/xarray/static/html/000077500000000000000000000000001505620616400171055ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/html/__init__.py000066400000000000000000000000001505620616400212040ustar00rootroot00000000000000xarray-2025.09.0/xarray/static/html/icons-svg-inline.html000066400000000000000000000024771505620616400231710ustar00rootroot00000000000000 xarray-2025.09.0/xarray/structure/000077500000000000000000000000001505620616400167125ustar00rootroot00000000000000xarray-2025.09.0/xarray/structure/__init__.py000066400000000000000000000000001505620616400210110ustar00rootroot00000000000000xarray-2025.09.0/xarray/structure/alignment.py000066400000000000000000001303511505620616400212450ustar00rootroot00000000000000from __future__ import annotations import functools import operator from collections import defaultdict from collections.abc import Callable, Hashable, Iterable, Mapping from contextlib import suppress from itertools import starmap from typing import TYPE_CHECKING, Any, Final, Generic, TypeVar, get_args, overload import numpy as np import pandas as pd from xarray.core import dtypes from xarray.core.indexes import ( Index, Indexes, PandasIndex, PandasMultiIndex, indexes_all_equal, safe_cast_to_index, ) from xarray.core.types import JoinOptions, T_Alignable from xarray.core.utils import emit_user_level_warning, is_dict_like, is_full_slice from xarray.core.variable import Variable, as_compatible_data, calculate_dimensions from xarray.util.deprecation_helpers import CombineKwargDefault if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( Alignable, JoinOptions, T_DataArray, T_Dataset, T_DuckArray, ) class AlignmentError(ValueError): """Error class for alignment failures due to incompatible arguments.""" def reindex_variables( variables: Mapping[Any, Variable], dim_pos_indexers: Mapping[Any, Any], copy: bool = True, fill_value: Any = dtypes.NA, sparse: bool = False, ) -> dict[Hashable, Variable]: """Conform a dictionary of variables onto a new set of variables reindexed with dimension positional indexers and possibly filled with missing 
values. Not public API. """ new_variables = {} dim_sizes = calculate_dimensions(variables) masked_dims = set() unchanged_dims = set() for dim, indxr in dim_pos_indexers.items(): # Negative values in dim_pos_indexers mean values missing in the new index # See ``Index.reindex_like``. if (indxr < 0).any(): masked_dims.add(dim) elif np.array_equal(indxr, np.arange(dim_sizes.get(dim, 0))): unchanged_dims.add(dim) for name, var in variables.items(): if isinstance(fill_value, dict): fill_value_ = fill_value.get(name, dtypes.NA) else: fill_value_ = fill_value if sparse: var = var._as_sparse(fill_value=fill_value_) indxr = tuple( slice(None) if d in unchanged_dims else dim_pos_indexers.get(d, slice(None)) for d in var.dims ) needs_masking = any(d in masked_dims for d in var.dims) if needs_masking: new_var = var._getitem_with_mask(indxr, fill_value=fill_value_) elif all(is_full_slice(k) for k in indxr): # no reindexing necessary # here we need to manually deal with copying data, since # we neither created a new ndarray nor used fancy indexing new_var = var.copy(deep=copy) else: new_var = var[indxr] new_variables[name] = new_var return new_variables def _normalize_indexes( indexes: Mapping[Any, Any | T_DuckArray], ) -> Indexes: """Normalize the indexes/indexers given for re-indexing or alignment. Wrap any arbitrary array or `pandas.Index` as an Xarray `PandasIndex` associated with its corresponding dimension coordinate variable. """ xr_indexes: dict[Hashable, Index] = {} xr_variables: dict[Hashable, Variable] if isinstance(indexes, Indexes): xr_variables = dict(indexes.variables) else: xr_variables = {} for k, idx in indexes.items(): if not isinstance(idx, Index): if getattr(idx, "dims", (k,)) != (k,): raise AlignmentError( f"Indexer has dimensions {idx.dims} that are different " f"from that to be indexed along '{k}'" ) data: T_DuckArray = as_compatible_data(idx) pd_idx = safe_cast_to_index(data) if pd_idx.name != k: pd_idx = pd_idx.copy() pd_idx.name = k if isinstance(pd_idx, pd.MultiIndex): idx = PandasMultiIndex(pd_idx, k) else: idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype) xr_variables.update(idx.create_variables()) xr_indexes[k] = idx return Indexes(xr_indexes, xr_variables) CoordNamesAndDims = tuple[tuple[Hashable, tuple[Hashable, ...]], ...] MatchingIndexKey = tuple[CoordNamesAndDims, type[Index]] IndexesToAlign = dict[MatchingIndexKey, Index] IndexVarsToAlign = dict[MatchingIndexKey, dict[Hashable, Variable]] class Aligner(Generic[T_Alignable]): """Implements all the complex logic for the re-indexing and alignment of Xarray objects. For internal use only, not public API. Usage: aligner = Aligner(*objects, **kwargs) aligner.align() aligned_objects = aligner.results """ objects: tuple[T_Alignable, ...] results: tuple[T_Alignable, ...] objects_matching_index_vars: tuple[ dict[MatchingIndexKey, dict[Hashable, Variable]], ... 
] join: JoinOptions | CombineKwargDefault exclude_dims: frozenset[Hashable] exclude_vars: frozenset[Hashable] copy: bool fill_value: Any sparse: bool indexes: dict[MatchingIndexKey, Index] index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] all_indexes: dict[MatchingIndexKey, list[Index]] all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]] aligned_indexes: dict[MatchingIndexKey, Index] aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] reindex: dict[MatchingIndexKey, bool] keep_original_indexes: set[MatchingIndexKey] reindex_kwargs: dict[str, Any] unindexed_dim_sizes: dict[Hashable, set] new_indexes: Indexes[Index] def __init__( self, objects: Iterable[T_Alignable], join: JoinOptions | CombineKwargDefault = "inner", indexes: Mapping[Any, Any] | None = None, exclude_dims: str | Iterable[Hashable] = frozenset(), exclude_vars: Iterable[Hashable] = frozenset(), method: str | None = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = dtypes.NA, sparse: bool = False, ): self.objects = tuple(objects) self.objects_matching_indexes: tuple[Any, ...] = () self.objects_matching_index_vars = () if not isinstance(join, CombineKwargDefault) and join not in get_args( JoinOptions ): raise ValueError(f"invalid value for join: {join}") self.join = join self.copy = copy self.fill_value = fill_value self.sparse = sparse if method is None and tolerance is None: self.reindex_kwargs = {} else: self.reindex_kwargs = {"method": method, "tolerance": tolerance} if isinstance(exclude_dims, str): exclude_dims = [exclude_dims] self.exclude_dims = frozenset(exclude_dims) self.exclude_vars = frozenset(exclude_vars) if indexes is None: indexes = {} self.indexes, self.index_vars = self._collect_indexes( _normalize_indexes(indexes) ) self.all_indexes = {} self.all_index_vars = {} self.unindexed_dim_sizes = {} self.aligned_indexes = {} self.aligned_index_vars = {} self.reindex = {} self.keep_original_indexes = set() self.results = tuple() def _collect_indexes( self, indexes: Indexes ) -> tuple[IndexesToAlign, IndexVarsToAlign]: """Collect input and/or object indexes for alignment. Return new dictionaries of xarray Index objects and coordinate variables, whose keys are used to later retrieve all the indexes to compare with each other (based on the name and dimensions of their associated coordinate variables as well as the Index type). """ collected_indexes = {} collected_index_vars = {} for idx, idx_vars in indexes.group_by_index(): idx_coord_names_and_dims = [] idx_all_dims: set[Hashable] = set() for name, var in idx_vars.items(): dims = var.dims idx_coord_names_and_dims.append((name, dims)) idx_all_dims.update(dims) key: MatchingIndexKey = (tuple(idx_coord_names_and_dims), type(idx)) if idx_all_dims: exclude_dims = idx_all_dims & self.exclude_dims if exclude_dims == idx_all_dims: # Do not collect an index if all the dimensions it uses are # also excluded from the alignment continue elif exclude_dims: # If the dimensions used by index partially overlap with the dimensions # excluded from alignment, it is possible to check index equality along # non-excluded dimensions only. However, in this case each of the aligned # objects must retain (a copy of) their original index. Re-indexing and # overriding the index are not supported. 
if self.join == "override": excl_dims_str = ", ".join(str(d) for d in exclude_dims) incl_dims_str = ", ".join( str(d) for d in idx_all_dims - exclude_dims ) raise AlignmentError( f"cannot exclude dimension(s) {excl_dims_str} from alignment " "with `join='override` because these are used by an index " f"together with non-excluded dimensions {incl_dims_str}" "(cannot safely override the index)." ) else: self.keep_original_indexes.add(key) collected_indexes[key] = idx collected_index_vars[key] = idx_vars return collected_indexes, collected_index_vars def find_matching_indexes(self) -> None: all_indexes: dict[MatchingIndexKey, list[Index]] all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]] all_indexes_dim_sizes: dict[MatchingIndexKey, dict[Hashable, set]] objects_matching_indexes: list[dict[MatchingIndexKey, Index]] objects_matching_index_vars: list[ dict[MatchingIndexKey, dict[Hashable, Variable]] ] all_indexes = defaultdict(list) all_index_vars = defaultdict(list) all_indexes_dim_sizes = defaultdict(lambda: defaultdict(set)) objects_matching_indexes = [] objects_matching_index_vars = [] for obj in self.objects: obj_indexes, obj_index_vars = self._collect_indexes(obj.xindexes) objects_matching_indexes.append(obj_indexes) objects_matching_index_vars.append(obj_index_vars) for key, idx in obj_indexes.items(): all_indexes[key].append(idx) for key, index_vars in obj_index_vars.items(): all_index_vars[key].append(index_vars) for dim, size in calculate_dimensions(index_vars).items(): all_indexes_dim_sizes[key][dim].add(size) self.objects_matching_indexes = tuple(objects_matching_indexes) self.objects_matching_index_vars = tuple(objects_matching_index_vars) self.all_indexes = all_indexes self.all_index_vars = all_index_vars if self.join == "override": for dim_sizes in all_indexes_dim_sizes.values(): for dim, sizes in dim_sizes.items(): if len(sizes) > 1: raise AlignmentError( "cannot align objects with join='override' with matching indexes " f"along dimension {dim!r} that don't have the same size" ) def find_matching_unindexed_dims(self) -> None: unindexed_dim_sizes = defaultdict(set) for obj in self.objects: for dim in obj.dims: if dim not in self.exclude_dims and dim not in obj.xindexes.dims: unindexed_dim_sizes[dim].add(obj.sizes[dim]) self.unindexed_dim_sizes = unindexed_dim_sizes def _need_reindex(self, dim, cmp_indexes) -> bool: """Whether or not we need to reindex variables for a set of matching indexes. We don't reindex when all matching indexes are equal for two reasons: - It's faster for the usual case (already aligned objects). - It ensures it's possible to do operations that don't require alignment on indexes with duplicate values (which cannot be reindexed with pandas). This is useful, e.g., for overwriting such duplicate indexes. 
""" if not indexes_all_equal(cmp_indexes, self.exclude_dims): # always reindex when matching indexes are not equal return True unindexed_dims_sizes = {} for d in dim: if d in self.unindexed_dim_sizes: sizes = self.unindexed_dim_sizes[d] if len(sizes) > 1: # reindex if different sizes are found for unindexed dims return True else: unindexed_dims_sizes[d] = next(iter(sizes)) if unindexed_dims_sizes: indexed_dims_sizes = {} for cmp in cmp_indexes: index_vars = cmp[1] for var in index_vars.values(): indexed_dims_sizes.update(var.sizes) for d, size in unindexed_dims_sizes.items(): if indexed_dims_sizes.get(d, -1) != size: # reindex if unindexed dimension size doesn't match return True return False def _get_index_joiner(self, index_cls) -> Callable: if self.join in ["outer", "inner"]: return functools.partial( functools.reduce, functools.partial(index_cls.join, how=self.join), ) elif self.join == "left": return operator.itemgetter(0) elif self.join == "right": return operator.itemgetter(-1) elif self.join == "override": # We rewrite all indexes and then use join='left' return operator.itemgetter(0) else: # join='exact' return dummy lambda (error is raised) return lambda _: None def align_indexes(self) -> None: """Compute all aligned indexes and their corresponding coordinate variables.""" aligned_indexes: dict[MatchingIndexKey, Index] = {} aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] = {} reindex: dict[MatchingIndexKey, bool] = {} new_indexes: dict[Hashable, Index] = {} new_index_vars: dict[Hashable, Variable] = {} def update_dicts( key: MatchingIndexKey, idx: Index, idx_vars: dict[Hashable, Variable], need_reindex: bool, ): reindex[key] = need_reindex aligned_indexes[key] = idx aligned_index_vars[key] = idx_vars for name, var in idx_vars.items(): if name in new_indexes: other_idx = new_indexes[name] other_var = new_index_vars[name] raise AlignmentError( f"cannot align objects on coordinate {name!r} because of conflicting indexes\n" f"first index: {idx!r}\nsecond index: {other_idx!r}\n" f"first variable: {var!r}\nsecond variable: {other_var!r}\n" ) new_indexes[name] = idx new_index_vars[name] = var for key, matching_indexes in self.all_indexes.items(): matching_index_vars = self.all_index_vars[key] dims = {d for coord in matching_index_vars[0].values() for d in coord.dims} index_cls = key[1] if self.join == "override": joined_index = matching_indexes[0] joined_index_vars = matching_index_vars[0] need_reindex = False elif key in self.indexes: joined_index = self.indexes[key] joined_index_vars = self.index_vars[key] cmp_indexes = list( zip( [joined_index] + matching_indexes, [joined_index_vars] + matching_index_vars, strict=True, ) ) need_reindex = self._need_reindex(dims, cmp_indexes) else: if len(matching_indexes) > 1: need_reindex = self._need_reindex( dims, list(zip(matching_indexes, matching_index_vars, strict=True)), ) else: need_reindex = False if need_reindex: if ( isinstance(self.join, CombineKwargDefault) and self.join != "exact" ): emit_user_level_warning( self.join.warning_message( "This change will result in the following ValueError: " "cannot be aligned with join='exact' because " "index/labels/sizes are not equal along " "these coordinates (dimensions): " + ", ".join( f"{name!r} {dims!r}" for name, dims in key[0] ), recommend_set_options=False, ), FutureWarning, ) if self.join == "exact": raise AlignmentError( "cannot align objects with join='exact' where " "index/labels/sizes are not equal along " "these coordinates (dimensions): " + ", ".join(f"{name!r} 
{dims!r}" for name, dims in key[0]) + ( self.join.error_message() if isinstance(self.join, CombineKwargDefault) else "" ) ) joiner = self._get_index_joiner(index_cls) joined_index = joiner(matching_indexes) if self.join == "left": joined_index_vars = matching_index_vars[0] elif self.join == "right": joined_index_vars = matching_index_vars[-1] else: joined_index_vars = joined_index.create_variables() else: joined_index = matching_indexes[0] joined_index_vars = matching_index_vars[0] update_dicts(key, joined_index, joined_index_vars, need_reindex) # Explicitly provided indexes that are not found in objects to align # may relate to unindexed dimensions so we add them too for key, idx in self.indexes.items(): if key not in aligned_indexes: index_vars = self.index_vars[key] update_dicts(key, idx, index_vars, False) self.aligned_indexes = aligned_indexes self.aligned_index_vars = aligned_index_vars self.reindex = reindex self.new_indexes = Indexes(new_indexes, new_index_vars) def assert_unindexed_dim_sizes_equal(self) -> None: for dim, sizes in self.unindexed_dim_sizes.items(): index_size = self.new_indexes.dims.get(dim) if index_size is not None: sizes.add(index_size) add_err_msg = ( f" (note: an index is found along that dimension " f"with size={index_size!r})" ) else: add_err_msg = "" if len(sizes) > 1: raise AlignmentError( f"cannot reindex or align along dimension {dim!r} " f"because of conflicting dimension sizes: {sizes!r}" + add_err_msg ) def override_indexes(self) -> None: objects = list(self.objects) for i, obj in enumerate(objects[1:]): new_indexes = {} new_variables = {} matching_indexes = self.objects_matching_indexes[i + 1] for key, aligned_idx in self.aligned_indexes.items(): obj_idx = matching_indexes.get(key) if obj_idx is not None: for name, var in self.aligned_index_vars[key].items(): new_indexes[name] = aligned_idx new_variables[name] = var.copy(deep=self.copy) objects[i + 1] = obj._overwrite_indexes(new_indexes, new_variables) self.results = tuple(objects) def _get_dim_pos_indexers( self, matching_indexes: dict[MatchingIndexKey, Index], ) -> dict[Hashable, Any]: dim_pos_indexers: dict[Hashable, Any] = {} dim_index: dict[Hashable, Index] = {} for key, aligned_idx in self.aligned_indexes.items(): obj_idx = matching_indexes.get(key) if obj_idx is not None and self.reindex[key]: indexers = obj_idx.reindex_like(aligned_idx, **self.reindex_kwargs) for dim, idxer in indexers.items(): if dim in self.exclude_dims: raise AlignmentError( f"cannot reindex or align along dimension {dim!r} because " "it is explicitly excluded from alignment. 
This is likely caused by " "wrong results returned by the `reindex_like` method of this index:\n" f"{obj_idx!r}" ) if dim in dim_pos_indexers and not np.array_equal( idxer, dim_pos_indexers[dim] ): raise AlignmentError( f"cannot reindex or align along dimension {dim!r} because " "of conflicting re-indexers returned by multiple indexes\n" f"first index: {obj_idx!r}\nsecond index: {dim_index[dim]!r}\n" ) dim_pos_indexers[dim] = idxer dim_index[dim] = obj_idx return dim_pos_indexers def _get_indexes_and_vars( self, obj: T_Alignable, matching_indexes: dict[MatchingIndexKey, Index], matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]], ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]: new_indexes = {} new_variables = {} for key, aligned_idx in self.aligned_indexes.items(): aligned_idx_vars = self.aligned_index_vars[key] obj_idx = matching_indexes.get(key) obj_idx_vars = matching_index_vars.get(key) if obj_idx is None: # add the aligned index if it relates to unindexed dimensions in obj dims = {d for var in aligned_idx_vars.values() for d in var.dims} if dims <= set(obj.dims): obj_idx = aligned_idx if obj_idx is not None: # TODO: always copy object's index when no re-indexing is required? # (instead of assigning the aligned index) # (need performance assessment) if key in self.keep_original_indexes: assert self.reindex[key] is False new_idx = obj_idx.copy(deep=self.copy) new_idx_vars = new_idx.create_variables(obj_idx_vars) else: new_idx = aligned_idx new_idx_vars = { k: v.copy(deep=self.copy) for k, v in aligned_idx_vars.items() } new_indexes.update(dict.fromkeys(new_idx_vars, new_idx)) new_variables.update(new_idx_vars) return new_indexes, new_variables def _reindex_one( self, obj: T_Alignable, matching_indexes: dict[MatchingIndexKey, Index], matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]], ) -> T_Alignable: new_indexes, new_variables = self._get_indexes_and_vars( obj, matching_indexes, matching_index_vars ) dim_pos_indexers = self._get_dim_pos_indexers(matching_indexes) return obj._reindex_callback( self, dim_pos_indexers, new_variables, new_indexes, self.fill_value, self.exclude_dims, self.exclude_vars, ) def reindex_all(self) -> None: self.results = tuple( starmap( self._reindex_one, zip( self.objects, self.objects_matching_indexes, self.objects_matching_index_vars, strict=True, ), ) ) def align(self) -> None: if not self.indexes and len(self.objects) == 1: # fast path for the trivial case (obj,) = self.objects self.results = (obj.copy(deep=self.copy),) return self.find_matching_indexes() self.find_matching_unindexed_dims() self.align_indexes() self.assert_unindexed_dim_sizes_equal() if self.join == "override": self.override_indexes() elif self.join == "exact" and not self.copy: self.results = self.objects else: self.reindex_all() T_Obj1 = TypeVar("T_Obj1", bound="Alignable") T_Obj2 = TypeVar("T_Obj2", bound="Alignable") T_Obj3 = TypeVar("T_Obj3", bound="Alignable") T_Obj4 = TypeVar("T_Obj4", bound="Alignable") T_Obj5 = TypeVar("T_Obj5", bound="Alignable") @overload def align( obj1: T_Obj1, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1]: ... @overload def align( obj1: T_Obj1, obj2: T_Obj2, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2]: ... 
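# A minimal usage sketch of the alignment machinery above (illustrative only;
# the arrays and coordinate values are hypothetical, not taken from this module).
# The public entry point is the ``align`` function defined below, which builds an
# ``Aligner`` and calls its ``align()`` method:
#
#     import xarray as xr
#     a = xr.DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
#     b = xr.DataArray([10, 20], dims="x", coords={"x": [1, 2]})
#     a2, b2 = xr.align(a, b, join="inner")
#     # a2 and b2 now share the intersected index x = [1, 2]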
@overload def align( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2, T_Obj3]: ... @overload def align( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4]: ... @overload def align( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, obj5: T_Obj5, /, *, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4, T_Obj5]: ... @overload def align( *objects: T_Alignable, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Alignable, ...]: ... def align( *objects: T_Alignable, join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), fill_value=dtypes.NA, ) -> tuple[T_Alignable, ...]: """ Given any number of Dataset and/or DataArray objects, returns new objects with aligned indexes and dimension sizes. Arrays from the aligned objects are suitable as input to mathematical operators, because along each dimension they have the same index and size. Missing values (if ``join != 'inner'``) are filled with ``fill_value``. The default fill value is NaN. Parameters ---------- *objects : Dataset or DataArray Objects to align. join : {"outer", "inner", "left", "right", "exact", "override"}, optional Method for joining the indexes of the passed objects along each dimension: - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. copy : bool, default: True If ``copy=True``, data in the return values is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, new xarray objects are always returned. indexes : dict-like, optional Any indexes explicitly provided with the `indexes` argument should be used in preference to the aligned indexes. exclude : str, iterable of hashable or None, optional Dimensions that must be excluded from alignment fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. Returns ------- aligned : tuple of DataArray or Dataset Tuple of objects with the same type as `*objects` with aligned coordinates. Raises ------ AlignmentError If any dimensions without labels on the arguments have different sizes, or a different size than the size of the aligned dimension labels. Examples -------- >>> x = xr.DataArray( ... [[25, 35], [10, 24]], ... dims=("lat", "lon"), ...
coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]}, ... ) >>> y = xr.DataArray( ... [[20, 5], [7, 13]], ... dims=("lat", "lon"), ... coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]}, ... ) >>> x Size: 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> y Size: 32B array([[20, 5], [ 7, 13]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y) >>> a Size: 16B array([[25, 35]]) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 16B array([[20, 5]]) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="outer") >>> a Size: 48B array([[25., 35.], [10., 24.], [nan, nan]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 48B array([[20., 5.], [nan, nan], [ 7., 13.]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="outer", fill_value=-999) >>> a Size: 48B array([[ 25, 35], [ 10, 24], [-999, -999]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 48B array([[ 20, 5], [-999, -999], [ 7, 13]]) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="left") >>> a Size: 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 32B array([[20., 5.], [nan, nan]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="right") >>> a Size: 32B array([[25., 35.], [nan, nan]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 32B array([[20, 5], [ 7, 13]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 120.0 >>> a, b = xr.align(x, y, join="exact") Traceback (most recent call last): ... xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' ... >>> a, b = xr.align(x, y, join="override") >>> a Size: 32B array([[25, 35], [10, 24]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> b Size: 32B array([[20, 5], [ 7, 13]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 """ aligner = Aligner( objects, join=join, copy=copy, indexes=indexes, exclude_dims=exclude, fill_value=fill_value, ) aligner.align() return aligner.results def deep_align( objects: Iterable[Any], join: JoinOptions | CombineKwargDefault = "inner", copy: bool = True, indexes=None, exclude: str | Iterable[Hashable] = frozenset(), raise_on_invalid: bool = True, fill_value=dtypes.NA, ) -> list[Any]: """Align objects for merging, recursing into dictionary values. This function is not public API. 
""" from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if indexes is None: indexes = {} def is_alignable(obj): return isinstance(obj, Coordinates | DataArray | Dataset) positions: list[int] = [] keys: list[type[object] | Hashable] = [] out: list[Any] = [] targets: list[Alignable] = [] no_key: Final = object() not_replaced: Final = object() for position, variables in enumerate(objects): if is_alignable(variables): positions.append(position) keys.append(no_key) targets.append(variables) out.append(not_replaced) elif is_dict_like(variables): current_out = {} for k, v in variables.items(): if is_alignable(v) and k not in indexes: # Skip variables in indexes for alignment, because these # should to be overwritten instead: # https://github.com/pydata/xarray/issues/725 # https://github.com/pydata/xarray/issues/3377 # TODO(shoyer): doing this here feels super-hacky -- can we # move it explicitly into merge instead? positions.append(position) keys.append(k) targets.append(v) current_out[k] = not_replaced else: current_out[k] = v out.append(current_out) elif raise_on_invalid: raise ValueError( "object to align is neither an xarray.Dataset, " f"an xarray.DataArray nor a dictionary: {variables!r}" ) else: out.append(variables) aligned = align( *targets, join=join, copy=copy, indexes=indexes, exclude=exclude, fill_value=fill_value, ) for position, key, aligned_obj in zip(positions, keys, aligned, strict=True): if key is no_key: out[position] = aligned_obj else: out[position][key] = aligned_obj return out def reindex( obj: T_Alignable, indexers: Mapping[Any, Any], method: str | None = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = dtypes.NA, sparse: bool = False, exclude_vars: Iterable[Hashable] = frozenset(), ) -> T_Alignable: """Re-index either a Dataset or a DataArray. Not public API. """ # TODO: (benbovy - explicit indexes): uncomment? # --> from reindex docstrings: "any mismatched dimension is simply ignored" # bad_keys = [k for k in indexers if k not in obj._indexes and k not in obj.dims] # if bad_keys: # raise ValueError( # f"indexer keys {bad_keys} do not correspond to any indexed coordinate " # "or unindexed dimension in the object to reindex" # ) aligner = Aligner( (obj,), indexes=indexers, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, sparse=sparse, exclude_vars=exclude_vars, ) aligner.align() return aligner.results[0] def reindex_like( obj: T_Alignable, other: Dataset | DataArray, method: str | None = None, tolerance: float | Iterable[float] | str | None = None, copy: bool = True, fill_value: Any = dtypes.NA, ) -> T_Alignable: """Re-index either a Dataset or a DataArray like another Dataset/DataArray. Not public API. """ if not other._indexes: # This check is not performed in Aligner. 
for dim in other.dims: if dim in obj.dims: other_size = other.sizes[dim] obj_size = obj.sizes[dim] if other_size != obj_size: raise ValueError( "different size for unlabeled " f"dimension on argument {dim!r}: {other_size!r} vs {obj_size!r}" ) return reindex( obj, indexers=other.xindexes, method=method, tolerance=tolerance, copy=copy, fill_value=fill_value, ) def _get_broadcast_dims_map_common_coords(args, exclude): common_coords = {} dims_map = {} for arg in args: for dim in arg.dims: if dim not in common_coords and dim not in exclude: dims_map[dim] = arg.sizes[dim] if dim in arg._indexes: common_coords.update(arg.xindexes.get_all_coords(dim)) return dims_map, common_coords def _broadcast_helper( arg: T_Alignable, exclude, dims_map, common_coords ) -> T_Alignable: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset def _set_dims(var): # Add excluded dims to a copy of dims_map var_dims_map = dims_map.copy() for dim in exclude: with suppress(ValueError): # ignore dim not in var.dims var_dims_map[dim] = var.shape[var.dims.index(dim)] return var.set_dims(var_dims_map) def _broadcast_array(array: T_DataArray) -> T_DataArray: data = _set_dims(array.variable) coords = dict(array.coords) coords.update(common_coords) return array.__class__( data, coords, data.dims, name=array.name, attrs=array.attrs ) def _broadcast_dataset(ds: T_Dataset) -> T_Dataset: data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars} coords = dict(ds.coords) coords.update(common_coords) return ds.__class__(data_vars, coords, ds.attrs) # remove casts once https://github.com/python/mypy/issues/12800 is resolved if isinstance(arg, DataArray): return _broadcast_array(arg) # type: ignore[return-value,unused-ignore] elif isinstance(arg, Dataset): return _broadcast_dataset(arg) # type: ignore[return-value,unused-ignore] else: raise ValueError("all input must be Dataset or DataArray objects") @overload def broadcast( obj1: T_Obj1, /, *, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Obj1]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, /, *, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Obj1, T_Obj2]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, /, *, exclude: str | Iterable[Hashable] | None = None, ) -> tuple[T_Obj1, T_Obj2, T_Obj3]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, /, *, exclude: str | Iterable[Hashable] | None = None, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4]: ... @overload def broadcast( obj1: T_Obj1, obj2: T_Obj2, obj3: T_Obj3, obj4: T_Obj4, obj5: T_Obj5, /, *, exclude: str | Iterable[Hashable] | None = None, ) -> tuple[T_Obj1, T_Obj2, T_Obj3, T_Obj4, T_Obj5]: ... @overload def broadcast( *args: T_Alignable, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Alignable, ...]: ... def broadcast( *args: T_Alignable, exclude: str | Iterable[Hashable] | None = None ) -> tuple[T_Alignable, ...]: """Explicitly broadcast any number of DataArray or Dataset objects against one another. xarray objects automatically broadcast against each other in arithmetic operations, so this function should not be necessary for normal use. If no change is needed, the input data is returned to the output without being copied. Parameters ---------- *args : DataArray or Dataset Arrays to broadcast against each other. 
exclude : str, iterable of hashable or None, optional Dimensions that must not be broadcasted Returns ------- broadcast : tuple of DataArray or tuple of Dataset The same data as the input arrays, but with additional dimensions inserted so that all data arrays have the same dimensions and shape. Examples -------- Broadcast two data arrays against one another to fill out their dimensions: >>> a = xr.DataArray([1, 2, 3], dims="x") >>> b = xr.DataArray([5, 6], dims="y") >>> a Size: 24B array([1, 2, 3]) Dimensions without coordinates: x >>> b Size: 16B array([5, 6]) Dimensions without coordinates: y >>> a2, b2 = xr.broadcast(a, b) >>> a2 Size: 48B array([[1, 1], [2, 2], [3, 3]]) Dimensions without coordinates: x, y >>> b2 Size: 48B array([[5, 6], [5, 6], [5, 6]]) Dimensions without coordinates: x, y Fill out the dimensions of all data variables in a dataset: >>> ds = xr.Dataset({"a": a, "b": b}) >>> (ds2,) = xr.broadcast(ds) # use tuple unpacking to extract one dataset >>> ds2 Size: 96B Dimensions: (x: 3, y: 2) Dimensions without coordinates: x, y Data variables: a (x, y) int64 48B 1 1 2 2 3 3 b (x, y) int64 48B 5 6 5 6 5 6 """ if exclude is None: exclude = set() args = align(*args, join="outer", copy=False, exclude=exclude) dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude) result = [_broadcast_helper(arg, exclude, dims_map, common_coords) for arg in args] return tuple(result) xarray-2025.09.0/xarray/structure/chunks.py000066400000000000000000000177751505620616400206000ustar00rootroot00000000000000""" Functions for handling chunked arrays. """ from __future__ import annotations import itertools from collections.abc import Hashable, Mapping from functools import lru_cache from numbers import Number from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, overload from xarray.core import utils from xarray.core.utils import emit_user_level_warning from xarray.core.variable import IndexVariable, Variable from xarray.namedarray.parallelcompat import ( ChunkManagerEntrypoint, get_chunked_array_type, guess_chunkmanager, ) if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import T_ChunkDim MissingCoreDimOptions = Literal["raise", "copy", "drop"] @lru_cache(maxsize=512) def _get_breaks_cached( *, size: int, chunk_sizes: tuple[int, ...], preferred_chunk_sizes: int | tuple[int, ...], ) -> int | None: if isinstance(preferred_chunk_sizes, int) and preferred_chunk_sizes == 1: # short-circuit for the trivial case return None # Determine the stop indices of the preferred chunks, but omit the last stop # (equal to the dim size). In particular, assume that when a sequence # expresses the preferred chunks, the sequence sums to the size. preferred_stops = ( range(preferred_chunk_sizes, size, preferred_chunk_sizes) if isinstance(preferred_chunk_sizes, int) else set(itertools.accumulate(preferred_chunk_sizes[:-1])) ) # Gather any stop indices of the specified chunks that are not a stop index # of a preferred chunk. Again, omit the last stop, assuming that it equals # the dim size. 
actual_stops = itertools.accumulate(chunk_sizes[:-1]) # This copy is required for parallel iteration actual_stops_2 = itertools.accumulate(chunk_sizes[:-1]) disagrees = itertools.compress( actual_stops_2, (a not in preferred_stops for a in actual_stops) ) try: return next(disagrees) except StopIteration: return None def _get_chunk(var: Variable, chunks, chunkmanager: ChunkManagerEntrypoint): """ Return map from each dim to chunk sizes, accounting for backend's preferred chunks. """ if isinstance(var, IndexVariable): return {} dims = var.dims shape = var.shape # Determine the explicit requested chunks. preferred_chunks = var.encoding.get("preferred_chunks", {}) preferred_chunk_shape = tuple( itertools.starmap(preferred_chunks.get, zip(dims, shape, strict=True)) ) if isinstance(chunks, Number) or (chunks == "auto"): chunks = dict.fromkeys(dims, chunks) chunk_shape = tuple( chunks.get(dim, None) or preferred_chunk_sizes for dim, preferred_chunk_sizes in zip(dims, preferred_chunk_shape, strict=True) ) chunk_shape = chunkmanager.normalize_chunks( chunk_shape, shape=shape, dtype=var.dtype, previous_chunks=preferred_chunk_shape ) # Warn where requested chunks break preferred chunks, provided that the variable # contains data. if var.size: for dim, size, chunk_sizes in zip(dims, shape, chunk_shape, strict=True): try: preferred_chunk_sizes = preferred_chunks[dim] except KeyError: continue disagreement = _get_breaks_cached( size=size, chunk_sizes=chunk_sizes, preferred_chunk_sizes=preferred_chunk_sizes, ) if disagreement: emit_user_level_warning( "The specified chunks separate the stored chunks along " f'dimension "{dim}" starting at index {disagreement}. This could ' "degrade performance. Instead, consider rechunking after loading.", ) return dict(zip(dims, chunk_shape, strict=True)) def _maybe_chunk( name: Hashable, var: Variable, chunks: Mapping[Any, T_ChunkDim] | None, token=None, lock=None, name_prefix: str = "xarray-", overwrite_encoded_chunks: bool = False, inline_array: bool = False, chunked_array_type: str | ChunkManagerEntrypoint | None = None, from_array_kwargs=None, ) -> Variable: from xarray.namedarray.daskmanager import DaskManager if chunks is not None: chunks = {dim: chunks[dim] for dim in var.dims if dim in chunks} if var.ndim: chunked_array_type = guess_chunkmanager( chunked_array_type ) # coerce string to ChunkManagerEntrypoint type if isinstance(chunked_array_type, DaskManager): from dask.base import tokenize # when rechunking by different amounts, make sure dask names change # by providing chunks as an input to tokenize. # subtle bugs result otherwise. see GH3350 # we use str() for speed, and use the name for the final array name on the next line token2 = tokenize(token or var._data, str(chunks)) name2 = f"{name_prefix}{name}-{token2}" from_array_kwargs = utils.consolidate_dask_from_array_kwargs( from_array_kwargs, name=name2, lock=lock, inline_array=inline_array, ) var = var.chunk( chunks, chunked_array_type=chunked_array_type, from_array_kwargs=from_array_kwargs, ) if overwrite_encoded_chunks and var.chunks is not None: var.encoding["chunks"] = tuple(x[0] for x in var.chunks) return var else: return var _T = TypeVar("_T", bound=Union["Dataset", "DataArray"]) _U = TypeVar("_U", bound=Union["Dataset", "DataArray"]) _V = TypeVar("_V", bound=Union["Dataset", "DataArray"]) @overload def unify_chunks(obj: _T, /) -> tuple[_T]: ... @overload def unify_chunks(obj1: _T, obj2: _U, /) -> tuple[_T, _U]: ... 
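# A minimal usage sketch for ``unify_chunks`` (illustrative only; assumes dask is
# installed, and the common chunking is whatever dask's unify_chunks selects):
#
#     import xarray as xr
#     a = xr.DataArray(range(6), dims="x").chunk({"x": 2})
#     b = xr.DataArray(range(6), dims="x").chunk({"x": 3})
#     a2, b2 = xr.unify_chunks(a, b)
#     assert a2.chunks == b2.chunks  # both now share one chunking along "x"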
@overload def unify_chunks(obj1: _T, obj2: _U, obj3: _V, /) -> tuple[_T, _U, _V]: ... @overload def unify_chunks(*objects: Dataset | DataArray) -> tuple[Dataset | DataArray, ...]: ... def unify_chunks(*objects: Dataset | DataArray) -> tuple[Dataset | DataArray, ...]: """ Given any number of Dataset and/or DataArray objects, returns new objects with unified chunk size along all chunked dimensions. Returns ------- unified (DataArray or Dataset) – Tuple of objects with the same type as *objects with consistent chunk sizes for all dask-array variables See Also -------- dask.array.core.unify_chunks """ from xarray.core.dataarray import DataArray # Convert all objects to datasets datasets = [ obj._to_temp_dataset() if isinstance(obj, DataArray) else obj.copy() for obj in objects ] # Get arguments to pass into dask.array.core.unify_chunks unify_chunks_args = [] sizes: dict[Hashable, int] = {} for ds in datasets: for v in ds._variables.values(): if v.chunks is not None: # Check that sizes match across different datasets for dim, size in v.sizes.items(): try: if sizes[dim] != size: raise ValueError( f"Dimension {dim!r} size mismatch: {sizes[dim]} != {size}" ) except KeyError: sizes[dim] = size unify_chunks_args += [v._data, v._dims] # No dask arrays: Return inputs if not unify_chunks_args: return objects chunkmanager = get_chunked_array_type(*list(unify_chunks_args)) _, chunked_data = chunkmanager.unify_chunks(*unify_chunks_args) chunked_data_iter = iter(chunked_data) out: list[Dataset | DataArray] = [] for obj, ds in zip(objects, datasets, strict=True): for k, v in ds._variables.items(): if v.chunks is not None: ds._variables[k] = v.copy(data=next(chunked_data_iter)) out.append(obj._from_temp_dataset(ds) if isinstance(obj, DataArray) else ds) return tuple(out) xarray-2025.09.0/xarray/structure/combine.py000066400000000000000000001170041505620616400207030ustar00rootroot00000000000000from __future__ import annotations from collections import Counter, defaultdict from collections.abc import Callable, Hashable, Iterable, Iterator, Sequence from typing import TYPE_CHECKING, Literal, TypeVar, Union, cast import pandas as pd from xarray.core import dtypes from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.utils import iterate_nested from xarray.structure.alignment import AlignmentError from xarray.structure.concat import concat from xarray.structure.merge import merge from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _COORDS_DEFAULT, _DATA_VARS_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: from xarray.core.types import ( CombineAttrsOptions, CompatOptions, JoinOptions, NestedSequence, ) T = TypeVar("T") def _infer_concat_order_from_positions( datasets: NestedSequence[T], ) -> dict[tuple[int, ...], T]: return dict(_infer_tile_ids_from_nested_list(datasets, ())) def _infer_tile_ids_from_nested_list( entry: NestedSequence[T], current_pos: tuple[int, ...] ) -> Iterator[tuple[tuple[int, ...], T]]: """ Given a list of lists (of lists...) of objects, returns a iterator which returns a tuple containing the index of each object in the nested list structure as the key, and the object. This can then be called by the dict constructor to create a dictionary of the objects organised by their position in the original nested list. Recursively traverses the given structure, while keeping track of the current position. Should work for any type of object which isn't a list. Parameters ---------- entry : list[list[obj, obj, ...], ...] 
List of lists of arbitrary depth, containing objects in the order they are to be concatenated. Returns ------- combined_tile_ids : dict[tuple(int, ...), obj] """ if not isinstance(entry, str) and isinstance(entry, Sequence): for i, item in enumerate(entry): yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,)) else: yield current_pos, cast(T, entry) def _ensure_same_types(series, dim): if series.dtype == object: types = set(series.map(type)) if len(types) > 1: try: import cftime cftimes = any(issubclass(t, cftime.datetime) for t in types) except ImportError: cftimes = False types = ", ".join(t.__name__ for t in types) error_msg = ( f"Cannot combine along dimension '{dim}' with mixed types." f" Found: {types}." ) if cftimes: error_msg = ( f"{error_msg} If importing data directly from a file then " f"setting `use_cftime=True` may fix this issue." ) raise TypeError(error_msg) def _infer_concat_order_from_coords(datasets): concat_dims = [] tile_ids = [() for ds in datasets] # All datasets have same variables because they've been grouped as such ds0 = datasets[0] for dim in ds0.dims: # Check if dim is a coordinate dimension if dim in ds0: # Need to read coordinate values to do ordering indexes = [ds._indexes.get(dim) for ds in datasets] if any(index is None for index in indexes): error_msg = ( f"Every dimension requires a corresponding 1D coordinate " f"and index for inferring concatenation order but the " f"coordinate '{dim}' has no corresponding index" ) raise ValueError(error_msg) # TODO (benbovy, flexible indexes): support flexible indexes? indexes = [index.to_pandas_index() for index in indexes] # If dimension coordinate values are same on every dataset then # should be leaving this dimension alone (it's just a "bystander") if not all(index.equals(indexes[0]) for index in indexes[1:]): # Infer order datasets should be arranged in along this dim concat_dims.append(dim) if all(index.is_monotonic_increasing for index in indexes): ascending = True elif all(index.is_monotonic_decreasing for index in indexes): ascending = False else: raise ValueError( f"Coordinate variable {dim} is neither " "monotonically increasing nor " "monotonically decreasing on all datasets" ) # Assume that any two datasets whose coord along dim starts # with the same value have the same coord values throughout. if any(index.size == 0 for index in indexes): raise ValueError("Cannot handle size zero dimensions") first_items = pd.Index([index[0] for index in indexes]) series = first_items.to_series() # ensure series does not contain mixed types, e.g. cftime calendars _ensure_same_types(series, dim) # Sort datasets along dim # We want rank but with identical elements given identical # position indices - they should be concatenated along another # dimension, not along this one rank = series.rank( method="dense", ascending=ascending, numeric_only=False ) order = rank.astype(int).values - 1 # Append positions along extra dimension to structure which # encodes the multi-dimensional concatenation order tile_ids = [ tile_id + (position,) for tile_id, position in zip(tile_ids, order, strict=True) ] if len(datasets) > 1 and not concat_dims: raise ValueError( "Could not find any dimension coordinates to use to " "order the datasets for concatenation" ) combined_ids = dict(zip(tile_ids, datasets, strict=True)) return combined_ids, concat_dims def _check_dimension_depth_tile_ids(combined_tile_ids): """ Check all tuples are the same length, i.e. check that all lists are nested to the same depth. 
""" tile_ids = combined_tile_ids.keys() nesting_depths = [len(tile_id) for tile_id in tile_ids] if not nesting_depths: nesting_depths = [0] if set(nesting_depths) != {nesting_depths[0]}: raise ValueError( "The supplied objects do not form a hypercube because" " sub-lists do not have consistent depths" ) # return these just to be reused in _check_shape_tile_ids return tile_ids, nesting_depths def _check_shape_tile_ids(combined_tile_ids): """Check all lists along one dimension are same length.""" tile_ids, nesting_depths = _check_dimension_depth_tile_ids(combined_tile_ids) for dim in range(nesting_depths[0]): indices_along_dim = [tile_id[dim] for tile_id in tile_ids] occurrences = Counter(indices_along_dim) if len(set(occurrences.values())) != 1: raise ValueError( "The supplied objects do not form a hypercube " "because sub-lists do not have consistent " f"lengths along dimension {dim}" ) def _combine_nd( combined_ids, concat_dims, data_vars, coords, compat: CompatOptions | CombineKwargDefault, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): """ Combines an N-dimensional structure of datasets into one by applying a series of either concat and merge operations along each dimension. No checks are performed on the consistency of the datasets, concat_dims or tile_IDs, because it is assumed that this has already been done. Parameters ---------- combined_ids : Dict[Tuple[int, ...]], xarray.Dataset] Structure containing all datasets to be concatenated with "tile_IDs" as keys, which specify position within the desired final combined result. concat_dims : sequence of str The dimensions along which the datasets should be concatenated. Must be in order, and the length must match the length of the tuples used as keys in combined_ids. If the string is a dimension name then concat along that dimension, if it is None then merge. Returns ------- combined_ds : xarray.Dataset """ example_tile_id = next(iter(combined_ids.keys())) n_dims = len(example_tile_id) if len(concat_dims) != n_dims: raise ValueError( f"concat_dims has length {len(concat_dims)} but the datasets " f"passed are nested in a {n_dims}-dimensional structure" ) # Each iteration of this loop reduces the length of the tile_ids tuples # by one. 
It always combines along the first dimension, removing the first # element of the tuple for concat_dim in concat_dims: combined_ids = _combine_all_along_first_dim( combined_ids, dim=concat_dim, data_vars=data_vars, coords=coords, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) (combined_ds,) = combined_ids.values() return combined_ds def _combine_all_along_first_dim( combined_ids, dim, data_vars, coords, compat: CompatOptions | CombineKwargDefault, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): # Group into lines of datasets which must be combined along dim grouped = groupby_defaultdict(list(combined_ids.items()), key=_new_tile_id) # Combine all of these datasets along dim new_combined_ids = {} for new_id, group in grouped: combined_ids = dict(sorted(group)) datasets = combined_ids.values() new_combined_ids[new_id] = _combine_1d( datasets, concat_dim=dim, compat=compat, data_vars=data_vars, coords=coords, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) return new_combined_ids def _combine_1d( datasets, concat_dim, compat: CompatOptions | CombineKwargDefault, data_vars, coords, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): """ Applies either concat or merge to 1D list of datasets depending on value of concat_dim """ if concat_dim is not None: try: combined = concat( datasets, dim=concat_dim, data_vars=data_vars, coords=coords, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) except ValueError as err: if "encountered unexpected variable" in str(err): raise ValueError( "These objects cannot be combined using only " "xarray.combine_nested, instead either use " "xarray.combine_by_coords, or do it manually " "with xarray.concat, xarray.merge and " "xarray.align" ) from err else: raise else: try: combined = merge( datasets, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) except AlignmentError as e: e.add_note( "If you are intending to concatenate datasets, please specify the concatenation dimension explicitly. " "Using merge to concatenate is quite inefficient." ) raise e return combined def _new_tile_id(single_id_ds_pair): tile_id, ds = single_id_ds_pair return tile_id[1:] def _nested_combine( datasets, concat_dims, compat, data_vars, coords, ids, fill_value, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): if len(datasets) == 0: return Dataset() # Arrange datasets for concatenation # Use information from the shape of the user input if not ids: # Determine tile_IDs by structure of input in N-D # (i.e. 
ordering in list-of-lists) combined_ids = _infer_concat_order_from_positions(datasets) else: # Already sorted so just use the ids already passed combined_ids = dict(zip(ids, datasets, strict=True)) # Check that the inferred shape is combinable _check_shape_tile_ids(combined_ids) # Apply series of concatenate or merge operations along each dimension combined = _combine_nd( combined_ids, concat_dims=concat_dims, compat=compat, data_vars=data_vars, coords=coords, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) return combined # Define type for arbitrarily-nested list of lists recursively: DATASET_HYPERCUBE = Union[Dataset, Iterable["DATASET_HYPERCUBE"]] def combine_nested( datasets: DATASET_HYPERCUBE, concat_dim: str | DataArray | Sequence[str | DataArray | pd.Index | None], compat: str | CombineKwargDefault = _COMPAT_DEFAULT, data_vars: str | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: str | CombineKwargDefault = _COORDS_DEFAULT, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "drop", ) -> Dataset: """ Explicitly combine an N-dimensional grid of datasets into one by using a succession of concat and merge operations along each dimension of the grid. Does not sort the supplied datasets under any circumstances, so the datasets must be passed in the order you wish them to be concatenated. It does align coordinates, but different variables on datasets can cause it to fail under some scenarios. In complex cases, you may need to clean up your data and use concat/merge explicitly. To concatenate along multiple dimensions the datasets must be passed as a nested list-of-lists, with a depth equal to the length of ``concat_dims``. ``combine_nested`` will concatenate along the top-level list first. Useful for combining datasets from a set of nested directories, or for collecting the output of a simulation parallelized along multiple dimensions. Parameters ---------- datasets : list or nested list of Dataset Dataset objects to combine. If concatenation or merging along more than one dimension is desired, then datasets must be supplied in a nested list-of-lists. concat_dim : str, or list of str, DataArray, Index or None Dimensions along which to concatenate variables, as used by :py:func:`xarray.concat`. Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation and merge instead along a particular dimension. The position of ``None`` in the list specifies the dimension of the nested-list input along which to merge. Must be the same length as the depth of the list passed to ``datasets``. compat : {"identical", "equals", "broadcast_equals", \ "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential merge conflicts: - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset data_vars : {"minimal", "different", "all" or list of str}, optional These data variables will be concatenated together: * "minimal": Only data variables in which the dimension already appears are included. 
* "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * "all": All data variables will be concatenated. * None: Means ``"all"`` if ``dim`` is not present in any of the ``objs``, and ``"minimal"`` if ``dim`` is present in any of ``objs``. * list of dims: The listed data variables will be concatenated, in addition to the "minimal" data variables. coords : {"minimal", "different", "all" or list of str}, optional These coordinate variables will be concatenated together: * "minimal": Only coordinates in which the dimension already appears are included. If concatenating over a dimension _not_ present in any of the objects, then all data variables will be concatenated along that new dimension. * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of Hashable: The listed coordinate variables will be concatenated, in addition to the "minimal" coordinates. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "drop" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- combined : xarray.Dataset Examples -------- A common task is collecting data from a parallelized simulation in which each process wrote out to a separate file. A domain which was decomposed into 4 parts, 2 each along both the x and y axes, requires organising the datasets into a doubly-nested list, e.g: >>> x1y1 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... 
) >>> x1y1 Size: 64B Dimensions: (x: 2, y: 2) Dimensions without coordinates: x, y Data variables: temperature (x, y) float64 32B 1.764 0.4002 0.9787 2.241 precipitation (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514 >>> x1y2 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> x2y1 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> x2y2 = xr.Dataset( ... { ... "temperature": (("x", "y"), np.random.randn(2, 2)), ... "precipitation": (("x", "y"), np.random.randn(2, 2)), ... } ... ) >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]] >>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"]) >>> combined Size: 256B Dimensions: (x: 4, y: 4) Dimensions without coordinates: x, y Data variables: temperature (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872 precipitation (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782 ``combine_nested`` can also be used to explicitly merge datasets with different variables. For example if we have 4 datasets, which are divided along two times, and contain two different variables, we can pass ``None`` to ``concat_dim`` to specify the dimension of the nested list over which we wish to use ``merge`` instead of ``concat``: >>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))}) >>> t1temp Size: 40B Dimensions: (t: 5) Dimensions without coordinates: t Data variables: temperature (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23 >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) >>> t1precip Size: 40B Dimensions: (t: 5) Dimensions without coordinates: t Data variables: precipitation (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42 >>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))}) >>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))}) >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]] >>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None]) >>> combined Size: 160B Dimensions: (t: 10) Dimensions without coordinates: t Data variables: temperature (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253 precipitation (t) float64 80B 1.202 -0.3873 -0.3023 ... 
-0.8955 0.3869 See also -------- concat merge """ mixed_datasets_and_arrays = any( isinstance(obj, Dataset) for obj in iterate_nested(datasets) ) and any( isinstance(obj, DataArray) and obj.name is None for obj in iterate_nested(datasets) ) if mixed_datasets_and_arrays: raise ValueError("Can't combine datasets with unnamed arrays.") if isinstance(concat_dim, str | DataArray) or concat_dim is None: concat_dim = [concat_dim] # The IDs argument tells _nested_combine that datasets aren't yet sorted return _nested_combine( datasets, concat_dims=concat_dim, compat=compat, data_vars=data_vars, coords=coords, ids=False, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) def vars_as_keys(ds): return tuple(sorted(ds)) K = TypeVar("K", bound=Hashable) def groupby_defaultdict( iter: list[T], key: Callable[[T], K], ) -> Iterator[tuple[K, Iterator[T]]]: """replacement for itertools.groupby""" idx = defaultdict(list) for i, obj in enumerate(iter): idx[key(obj)].append(i) for k, ix in idx.items(): yield k, (iter[i] for i in ix) def _combine_single_variable_hypercube( datasets, fill_value, data_vars, coords, compat: CompatOptions | CombineKwargDefault, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, ): """ Attempt to combine a list of Datasets into a hypercube using their coordinates. All provided Datasets must belong to a single variable, ie. must be assigned the same variable name. This precondition is not checked by this function, so the caller is assumed to know what it's doing. This function is NOT part of the public API. """ if len(datasets) == 0: raise ValueError( "At least one Dataset is required to resolve variable names " "for combined hypercube." ) combined_ids, concat_dims = _infer_concat_order_from_coords(list(datasets)) if fill_value is None: # check that datasets form complete hypercube _check_shape_tile_ids(combined_ids) else: # check only that all datasets have same dimension depth for these # vars _check_dimension_depth_tile_ids(combined_ids) # Concatenate along all of concat_dims one by one to create single ds concatenated = _combine_nd( combined_ids, concat_dims=concat_dims, data_vars=data_vars, coords=coords, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) # Check the overall coordinates are monotonically increasing for dim in concat_dims: indexes = concatenated.indexes.get(dim) if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing): raise ValueError( "Resulting object does not have monotonic" f" global indexes along dimension {dim}" ) return concatenated def combine_by_coords( data_objects: Iterable[Dataset | DataArray] = [], compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, data_vars: Literal["all", "minimal", "different"] | None | list[str] | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: str | CombineKwargDefault = _COORDS_DEFAULT, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "no_conflicts", ) -> Dataset | DataArray: """ Attempt to auto-magically combine the given datasets (or data arrays) into one by using dimension coordinates. This function attempts to combine a group of datasets along any number of dimensions into a single entity by inspecting coords and metadata and using a combination of concat and merge. Will attempt to order the datasets such that the values in their dimension coordinates are monotonic along all dimensions. 
If it cannot determine the order in which to concatenate the datasets, it will raise a ValueError. Non-coordinate dimensions will be ignored, as will any coordinate dimensions which do not vary between each dataset. Aligns coordinates, but different variables on datasets can cause it to fail under some scenarios. In complex cases, you may need to clean up your data and use concat/merge explicitly (also see `combine_nested`). Works well if, for example, you have N years of data and M data variables, and each combination of a distinct time period and set of data variables is saved as its own dataset. Also useful for if you have a simulation which is parallelized in multiple dimensions, but has global coordinates saved in each file specifying the positions of points within the global domain. Parameters ---------- data_objects : Iterable of Datasets or DataArrays Data objects to combine. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts: - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset data_vars : {"minimal", "different", "all" or list of str}, optional These data variables will be concatenated together: - "minimal": Only data variables in which the dimension already appears are included. - "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. - "all": All data variables will be concatenated. - list of str: The listed data variables will be concatenated, in addition to the "minimal" data variables. If objects are DataArrays, `data_vars` must be "all". coords : {"minimal", "different", "all"} or list of str, optional As per the "data_vars" kwarg, but for coordinate variables. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. If None, raises a ValueError if the passed Datasets do not create a complete hypercube. join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "no_conflicts" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. 
- "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- combined : xarray.Dataset or xarray.DataArray Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a DataArray will be returned. See also -------- concat merge combine_nested Examples -------- Combining two datasets using their common dimension coordinates. Notice they are concatenated based on the values in their dimension coordinates, not on their position in the list passed to `combine_by_coords`. >>> x1 = xr.Dataset( ... { ... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)), ... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)), ... }, ... coords={"y": [0, 1], "x": [10, 20, 30]}, ... ) >>> x2 = xr.Dataset( ... { ... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)), ... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)), ... }, ... coords={"y": [2, 3], "x": [10, 20, 30]}, ... ) >>> x3 = xr.Dataset( ... { ... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)), ... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)), ... }, ... coords={"y": [2, 3], "x": [40, 50, 60]}, ... ) >>> x1 Size: 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 0 1 * x (x) int64 24B 10 20 30 Data variables: temperature (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92 precipitation (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289 >>> x2 Size: 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 2 3 * x (x) int64 24B 10 20 30 Data variables: temperature (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65 precipitation (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805 >>> x3 Size: 136B Dimensions: (y: 2, x: 3) Coordinates: * y (y) int64 16B 2 3 * x (x) int64 24B 40 50 60 Data variables: temperature (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293 precipitation (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176 >>> xr.combine_by_coords([x2, x1]) Size: 248B Dimensions: (y: 4, x: 3) Coordinates: * y (y) int64 32B 0 1 2 3 * x (x) int64 24B 10 20 30 Data variables: temperature (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65 precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805 >>> xr.combine_by_coords([x3, x1], join="outer") Size: 464B Dimensions: (y: 4, x: 6) Coordinates: * y (y) int64 32B 0 1 2 3 * x (x) int64 48B 10 20 30 40 50 60 Data variables: temperature (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293 precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x3, x1], join="override") Size: 256B Dimensions: (y: 2, x: 6) Coordinates: * y (y) int64 16B 0 1 * x (x) int64 48B 10 20 30 40 50 60 Data variables: temperature (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293 precipitation (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 >>> xr.combine_by_coords([x1, x2, x3], join="outer") Size: 464B Dimensions: (y: 4, x: 6) Coordinates: * y (y) int64 32B 0 1 2 3 * x (x) int64 48B 10 20 30 40 50 60 Data variables: temperature (y, x) float64 192B 10.98 14.3 12.06 ... 
18.89 10.44 8.293 precipitation (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176 You can also combine DataArray objects, but the behaviour will differ depending on whether or not the DataArrays are named. If all DataArrays are named then they will be promoted to Datasets before combining, and then the resultant Dataset will be returned, e.g. >>> named_da1 = xr.DataArray( ... name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x" ... ) >>> named_da1 Size: 16B array([1., 2.]) Coordinates: * x (x) int64 16B 0 1 >>> named_da2 = xr.DataArray( ... name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x" ... ) >>> named_da2 Size: 16B array([3., 4.]) Coordinates: * x (x) int64 16B 2 3 >>> xr.combine_by_coords([named_da1, named_da2]) Size: 64B Dimensions: (x: 4) Coordinates: * x (x) int64 32B 0 1 2 3 Data variables: a (x) float64 32B 1.0 2.0 3.0 4.0 If all the DataArrays are unnamed, a single DataArray will be returned, e.g. >>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") >>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") >>> xr.combine_by_coords([unnamed_da1, unnamed_da2]) Size: 32B array([1., 2., 3., 4.]) Coordinates: * x (x) int64 32B 0 1 2 3 Finally, if you attempt to combine a mix of unnamed DataArrays with either named DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation). """ if not data_objects: return Dataset() objs_are_unnamed_dataarrays = [ isinstance(data_object, DataArray) and data_object.name is None for data_object in data_objects ] if any(objs_are_unnamed_dataarrays): if all(objs_are_unnamed_dataarrays): # Combine into a single larger DataArray temp_datasets = [ unnamed_dataarray._to_temp_dataset() for unnamed_dataarray in data_objects ] combined_temp_dataset = _combine_single_variable_hypercube( temp_datasets, fill_value=fill_value, data_vars=data_vars, coords=coords, compat=compat, join=join, combine_attrs=combine_attrs, ) return DataArray()._from_temp_dataset(combined_temp_dataset) else: # Must be a mix of unnamed dataarrays with either named dataarrays or with datasets # Can't combine these as we wouldn't know whether to merge or concatenate the arrays raise ValueError( "Can't automatically combine unnamed DataArrays with either named DataArrays or Datasets." 
) else: # Promote any named DataArrays to single-variable Datasets to simplify combining data_objects = [ obj.to_dataset() if isinstance(obj, DataArray) else obj for obj in data_objects ] # Group by data vars grouped_by_vars = groupby_defaultdict(data_objects, key=vars_as_keys) # Perform the multidimensional combine on each group of data variables # before merging back together concatenated_grouped_by_data_vars = tuple( _combine_single_variable_hypercube( tuple(datasets_with_same_vars), fill_value=fill_value, data_vars=data_vars, coords=coords, compat=compat, join=join, combine_attrs=combine_attrs, ) for vars, datasets_with_same_vars in grouped_by_vars ) return merge( concatenated_grouped_by_data_vars, compat=compat, fill_value=fill_value, join=join, combine_attrs=combine_attrs, ) xarray-2025.09.0/xarray/structure/concat.py000066400000000000000000001050231505620616400205340ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Hashable, Iterable from typing import TYPE_CHECKING, Any, Literal, Union, overload import numpy as np import pandas as pd from xarray.core import dtypes, utils from xarray.core.coordinates import Coordinates from xarray.core.duck_array_ops import lazy_array_equiv from xarray.core.indexes import Index, PandasIndex from xarray.core.types import T_DataArray, T_Dataset, T_Variable from xarray.core.utils import emit_user_level_warning from xarray.core.variable import Variable from xarray.core.variable import concat as concat_vars from xarray.structure.alignment import align, reindex_variables from xarray.structure.merge import ( _VALID_COMPAT, collect_variables_and_indexes, merge_attrs, merge_collected, ) from xarray.util.deprecation_helpers import ( _COMPAT_CONCAT_DEFAULT, _COORDS_DEFAULT, _DATA_VARS_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: from xarray.core.types import ( CombineAttrsOptions, CompatOptions, ConcatOptions, JoinOptions, ) T_DataVars = Union[ConcatOptions, Iterable[Hashable], None] # TODO: replace dim: Any by 1D array_likes @overload def concat( objs: Iterable[T_Dataset], dim: Hashable | T_Variable | T_DataArray | pd.Index | Any, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions: Iterable[Iterable[int]] | None = None, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ) -> T_Dataset: ... @overload def concat( objs: Iterable[T_DataArray], dim: Hashable | T_Variable | T_DataArray | pd.Index | Any, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions: Iterable[Iterable[int]] | None = None, fill_value: object = dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ) -> T_DataArray: ... 
def concat( objs, dim, data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT, compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT, positions=None, fill_value=dtypes.NA, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, combine_attrs: CombineAttrsOptions = "override", create_index_for_new_dim: bool = True, ): """Concatenate xarray objects along a new or existing dimension. Parameters ---------- objs : sequence of Dataset and DataArray xarray objects to concatenate together. Each object is expected to consist of variables and coordinates with matching shapes except for along the concatenated dimension. dim : Hashable or Variable or DataArray or pandas.Index Name of the dimension to concatenate along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. If dimension is provided as a Variable, DataArray or Index, its name is used as the dimension to concatenate along and the values are added as a coordinate. data_vars : {"minimal", "different", "all", None} or list of Hashable, optional These data variables will be concatenated together: * "minimal": Only data variables in which the dimension already appears are included. * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * "all": All data variables will be concatenated. * None: Means ``"all"`` if ``dim`` is not present in any of the ``objs``, and ``"minimal"`` if ``dim`` is present in any of ``objs``. * list of dims: The listed data variables will be concatenated, in addition to the "minimal" data variables. If objects are DataArrays, data_vars must be "all". coords : {"minimal", "different", "all"} or list of Hashable, optional These coordinate variables will be concatenated together: * "minimal": Only coordinates in which the dimension already appears are included. If concatenating over a dimension _not_ present in any of the objects, then all data variables will be concatenated along that new dimension. * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of Hashable: The listed coordinate variables will be concatenated, in addition to the "minimal" coordinates. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare non-concatenated variables of the same name for potential conflicts. This is passed down to merge. - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. 
- "override": skip comparing and pick variable from first dataset positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding dim) in objects - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. create_index_for_new_dim : bool, default: True Whether to create a new ``PandasIndex`` object when the objects being concatenated contain scalar variables named ``dim``. Returns ------- concatenated : type of objs See also -------- merge Examples -------- >>> da = xr.DataArray( ... np.arange(6).reshape(2, 3), [("x", ["a", "b"]), ("y", [10, 20, 30])] ... ) >>> da Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y") Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> xr.concat([da.isel(x=0), da.isel(x=1)], "x", coords="minimal") Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: * x (x) >> xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim", coords="all") Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: x (new_dim) >> xr.concat( ... [da.isel(x=0), da.isel(x=1)], ... pd.Index([-90, -100], name="new_dim"), ... coords="all", ... 
) Size: 48B array([[0, 1, 2], [3, 4, 5]]) Coordinates: x (new_dim) >> ds = xr.Dataset(coords={"x": 0}) >>> xr.concat([ds, ds], dim="x") Size: 16B Dimensions: (x: 2) Coordinates: * x (x) int64 16B 0 0 Data variables: *empty* >>> xr.concat([ds, ds], dim="x").indexes Indexes: x Index([0, 0], dtype='int64', name='x') >>> xr.concat([ds, ds], dim="x", create_index_for_new_dim=False).indexes Indexes: *empty* """ # TODO: add ignore_index arguments copied from pandas.concat # TODO: support concatenating scalar coordinates even if the concatenated # dimension already exists from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset try: first_obj, objs = utils.peek_at(objs) except StopIteration as err: raise ValueError("must supply at least one object to concatenate") from err if not isinstance(compat, CombineKwargDefault) and compat not in set( _VALID_COMPAT ) - {"minimal"}: raise ValueError( f"compat={compat!r} invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'" ) if isinstance(first_obj, DataArray): return _dataarray_concat( objs, dim=dim, data_vars=data_vars, coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) elif isinstance(first_obj, Dataset): return _dataset_concat( objs, dim=dim, data_vars=data_vars, coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) else: raise TypeError( "can only concatenate xarray Dataset and DataArray " f"objects, got {type(first_obj)}" ) def _calc_concat_dim_index( dim_or_data: Hashable | Any, ) -> tuple[Hashable, PandasIndex | None]: """Infer the dimension name and 1d index / coordinate variable (if appropriate) for concatenating along the new dimension. """ from xarray.core.dataarray import DataArray dim: Hashable | None if utils.hashable(dim_or_data): dim = dim_or_data index = None else: if not isinstance(dim_or_data, DataArray | Variable): dim = getattr(dim_or_data, "name", None) if dim is None: dim = "concat_dim" else: (dim,) = dim_or_data.dims coord_dtype = getattr(dim_or_data, "dtype", None) index = PandasIndex(dim_or_data, dim, coord_dtype=coord_dtype) return dim, index def _calc_concat_over( datasets: list[T_Dataset], dim: Hashable, all_dims: set[Hashable], data_vars: T_DataVars | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, ) -> tuple[set[Hashable], dict[Hashable, bool], list[int], set[Hashable]]: """ Determine which dataset variables need to be concatenated in the result, """ # variables to be concatenated concat_over = set() # variables checked for equality equals: dict[Hashable, bool] = {} # skip merging these variables. # if concatenating over a dimension 'x' that is associated with an index over 2 variables, # 'x' and 'y', then we assert join="equals" on `y` and don't need to merge it. # that assertion happens in the align step prior to this function being called skip_merge: set[Hashable] = set() if dim in all_dims: concat_over_existing_dim = True concat_over.add(dim) else: concat_over_existing_dim = False if data_vars == "minimal" and coords == "minimal" and not concat_over_existing_dim: raise ValueError( "Cannot specify both data_vars='minimal' and coords='minimal' when " "concatenating over a new dimension." 
) if data_vars is None or ( isinstance(data_vars, CombineKwargDefault) and data_vars._value is None ): data_vars = "minimal" if concat_over_existing_dim else "all" concat_dim_lengths = [] for ds in datasets: if concat_over_existing_dim and dim not in ds.dims and dim in ds: ds = ds.set_coords(dim) concat_over.update(k for k, v in ds.variables.items() if dim in v.dims) for _, idx_vars in ds.xindexes.group_by_index(): if any(dim in v.dims for v in idx_vars.values()): skip_merge.update(idx_vars.keys()) concat_dim_lengths.append(ds.sizes.get(dim, 1)) def process_subset_opt( opt: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, subset: Literal["coords", "data_vars"], ) -> None: original = set(concat_over) compat_str = ( compat._value if isinstance(compat, CombineKwargDefault) else compat ) assert compat_str is not None if isinstance(opt, str | CombineKwargDefault): if opt == "different": if isinstance(compat, CombineKwargDefault) and compat != "override": if not isinstance(opt, CombineKwargDefault): emit_user_level_warning( compat.warning_message( "This change will result in the following ValueError: " f"Cannot specify both {subset}='different' and compat='override'.", recommend_set_options=False, ), FutureWarning, ) if compat == "override": raise ValueError( f"Cannot specify both {subset}='different' and compat='override'." + ( compat.error_message() if isinstance(compat, CombineKwargDefault) else "" ) ) # all nonindexes that are not the same in each dataset for k in getattr(datasets[0], subset): if k not in concat_over: equal = None variables = [ ds.variables[k] for ds in datasets if k in ds.variables ] if len(variables) == 1: # coords="different" doesn't make sense when only one object # contains a particular variable. break elif len(variables) != len(datasets) and opt == "different": raise ValueError( f"{k!r} not present in all datasets and coords='different'. " f"Either add {k!r} to datasets where it is missing or " "specify coords='minimal'." ) # first check without comparing values i.e. no computes for var in variables[1:]: equal = getattr(variables[0], compat_str)( var, equiv=lazy_array_equiv ) if equal is not True: # exit early if we know these are not equal or that # equality cannot be determined i.e. one or all of # the variables wraps a numpy array break if equal is False: concat_over.add(k) elif equal is None: # Compare the variable of all datasets vs. the one # of the first dataset. Perform the minimum amount of # loads in order to avoid multiple loads from disk # while keeping the RAM footprint low. v_lhs = datasets[0].variables[k].load() # We'll need to know later on if variables are equal. 
computed = [] for ds_rhs in datasets[1:]: v_rhs = ds_rhs.variables[k].compute() computed.append(v_rhs) if not getattr(v_lhs, compat_str)(v_rhs): concat_over.add(k) equals[k] = False # computed variables are not to be re-computed # again in the future for ds, v in zip( datasets[1:], computed, strict=False ): ds.variables[k].data = v.data break else: equal = True if TYPE_CHECKING: assert equal is not None equals[k] = equal elif opt == "all": concat_over.update( set().union( *[set(getattr(d, subset)) - set(d.dims) for d in datasets] ) ) elif opt == "minimal": pass else: raise ValueError(f"unexpected value for {subset}: {opt}") if ( isinstance(opt, CombineKwargDefault) and opt._value is not None and original != concat_over and concat_over_existing_dim ): warnings.append( opt.warning_message( "This is likely to lead to different results when multiple datasets " "have matching variables with overlapping values.", ) ) else: valid_vars = tuple(getattr(datasets[0], subset)) invalid_vars = [k for k in opt if k not in valid_vars] if invalid_vars: if subset == "coords": raise ValueError( f"the variables {invalid_vars} in coords are not " f"found in the coordinates of the first dataset {valid_vars}" ) else: # note: data_vars are not listed in the error message here, # because there may be lots of them raise ValueError( f"the variables {invalid_vars} in data_vars are not " f"found in the data variables of the first dataset" ) concat_over.update(opt) warnings: list[str] = [] process_subset_opt(data_vars, "data_vars") process_subset_opt(coords, "coords") for warning in warnings: emit_user_level_warning(warning, FutureWarning) return concat_over, equals, concat_dim_lengths, skip_merge # determine dimensional coordinate names and a dict mapping name to DataArray def _parse_datasets( datasets: list[T_Dataset], ) -> tuple[ set[Hashable], dict[Hashable, Variable], dict[Hashable, int], set[Hashable], set[Hashable], list[Hashable], ]: dims: set[Hashable] = set() all_coord_names: set[Hashable] = set() data_vars: set[Hashable] = set() # list of data_vars dim_coords: dict[Hashable, Variable] = {} # maps dim name to variable dims_sizes: dict[Hashable, int] = {} # shared dimension sizes to expand variables variables_order: dict[Hashable, Variable] = {} # variables in order of appearance for ds in datasets: dims_sizes.update(ds.sizes) all_coord_names.update(ds.coords) data_vars.update(ds.data_vars) variables_order.update(ds.variables) # preserves ordering of dimensions for dim in ds.dims: if dim in dims: continue if dim in ds.coords and dim not in dim_coords: dim_coords[dim] = ds.coords[dim].variable dims = dims | set(ds.dims) return ( dims, dim_coords, dims_sizes, all_coord_names, data_vars, list(variables_order), ) def _dataset_concat( datasets: Iterable[T_Dataset], dim: str | T_Variable | T_DataArray | pd.Index, data_vars: T_DataVars | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, positions: Iterable[Iterable[int]] | None, fill_value: Any, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, create_index_for_new_dim: bool, ) -> T_Dataset: """ Concatenate a sequence of datasets along a new or existing dimension """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset datasets = list(datasets) if not all(isinstance(dataset, Dataset) for dataset in datasets): raise TypeError( "The elements in the input list need to be either all 'Dataset's or all 'DataArray's" ) dim_var: Variable 
| None if isinstance(dim, DataArray): dim_var = dim.variable elif isinstance(dim, Variable): dim_var = dim else: dim_var = None dim_name, index = _calc_concat_dim_index(dim) # Make sure we're working on a copy (we'll be loading variables) datasets = [ds.copy() for ds in datasets] datasets = list( align( *datasets, join=join, copy=False, exclude=[dim_name], fill_value=fill_value ) ) all_dims, dim_coords, dims_sizes, coord_names, data_names, vars_order = ( _parse_datasets(datasets) ) indexed_dim_names = set(dim_coords) both_data_and_coords = coord_names & data_names if both_data_and_coords: raise ValueError( f"{both_data_and_coords!r} is a coordinate in some datasets but not others." ) # we don't want the concat dimension in the result dataset yet dim_coords.pop(dim_name, None) dims_sizes.pop(dim_name, None) # case where concat dimension is a coordinate or data_var but not a dimension if ( dim_name in coord_names or dim_name in data_names ) and dim_name not in indexed_dim_names: datasets = [ ds.expand_dims(dim_name, create_index_for_new_dim=create_index_for_new_dim) for ds in datasets ] all_dims.add(dim_name) # This isn't being used any more, but keeping it up to date # just in case we decide to use it later. indexed_dim_names.add(dim_name) # determine which variables to concatenate concat_over, equals, concat_dim_lengths, skip_merge = _calc_concat_over( datasets, dim_name, all_dims, data_vars, coords, compat ) # determine which variables to merge, and then merge them according to compat variables_to_merge = (coord_names | data_names) - concat_over - skip_merge result_vars = {} result_indexes = {} if variables_to_merge: grouped = { k: v for k, v in collect_variables_and_indexes(datasets).items() if k in variables_to_merge } merged_vars, merged_indexes = merge_collected( grouped, compat=compat, equals=equals ) result_vars.update(merged_vars) result_indexes.update(merged_indexes) result_vars.update(dim_coords) # assign attrs and encoding from first dataset result_attrs = merge_attrs([ds.attrs for ds in datasets], combine_attrs) result_encoding = datasets[0].encoding # check that global attributes are fixed across all datasets if necessary if compat == "identical": for ds in datasets[1:]: if not utils.dict_equiv(ds.attrs, result_attrs): raise ValueError("Dataset global attributes not equal.") # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables def ensure_common_dims(vars, concat_dim_lengths): # ensure each variable with the given name shares the same # dimensions and the same shape for all of them except along the # concat dimension common_dims = tuple(utils.OrderedSet(d for v in vars for d in v.dims)) if dim_name not in common_dims: common_dims = (dim_name,) + common_dims for var, dim_len in zip(vars, concat_dim_lengths, strict=True): if var.dims != common_dims: common_shape = tuple(dims_sizes.get(d, dim_len) for d in common_dims) var = var.set_dims(common_dims, common_shape) yield var # get the indexes to concatenate together, create a PandasIndex # for any scalar coordinate variable found with ``name`` matching ``dim``. # TODO: depreciate concat a mix of scalar and dimensional indexed coordinates? # TODO: (benbovy - explicit indexes): check index types and/or coordinates # of all datasets? 
def get_indexes(name): for ds in datasets: if name in ds._indexes: yield ds._indexes[name] elif name == dim_name: var = ds._variables[name] if not var.dims: data = var.set_dims(dim_name).values if create_index_for_new_dim: yield PandasIndex(data, dim_name, coord_dtype=var.dtype) # create concatenation index, needed for later reindexing file_start_indexes = np.append(0, np.cumsum(concat_dim_lengths)) concat_index = np.arange(file_start_indexes[-1]) concat_index_size = concat_index.size variable_index_mask = np.ones(concat_index_size, dtype=bool) # stack up each variable and/or index to fill-out the dataset (in order) # n.b. this loop preserves variable order, needed for groupby. ndatasets = len(datasets) for name in vars_order: if name in concat_over and name not in result_indexes: variables = [] # Initialize the mask to all True then set False if any name is missing in # the datasets: variable_index_mask.fill(True) var_concat_dim_length = [] for i, ds in enumerate(datasets): if name in ds.variables: variables.append(ds[name].variable) var_concat_dim_length.append(concat_dim_lengths[i]) else: # raise if coordinate not in all datasets if name in coord_names: raise ValueError( f"coordinate {name!r} not present in all datasets." ) # Mask out the indexes without the name: start = file_start_indexes[i] end = file_start_indexes[i + 1] variable_index_mask[slice(start, end)] = False variable_index = concat_index[variable_index_mask] vars = ensure_common_dims(variables, var_concat_dim_length) # Try to concatenate the indexes, concatenate the variables when no index # is found on all datasets. indexes: list[Index] = list(get_indexes(name)) if indexes: if len(indexes) < ndatasets: raise ValueError( f"{name!r} must have either an index or no index in all datasets, " f"found {len(indexes)}/{len(datasets)} datasets with an index." ) combined_idx = indexes[0].concat(indexes, dim_name, positions) if name in datasets[0]._indexes: idx_vars = datasets[0].xindexes.get_all_coords(name) else: # index created from a scalar coordinate idx_vars = {name: datasets[0][name].variable} result_indexes.update(dict.fromkeys(idx_vars, combined_idx)) combined_idx_vars = combined_idx.create_variables(idx_vars) for k, v in combined_idx_vars.items(): v.attrs = merge_attrs( [ds.variables[k].attrs for ds in datasets], combine_attrs=combine_attrs, ) result_vars[k] = v else: combined_var = concat_vars( vars, dim_name, positions, combine_attrs=combine_attrs ) # reindex if variable is not present in all datasets if len(variable_index) < concat_index_size: combined_var = reindex_variables( variables={name: combined_var}, dim_pos_indexers={ dim_name: pd.Index(variable_index).get_indexer(concat_index) }, fill_value=fill_value, )[name] result_vars[name] = combined_var elif name in result_vars: # preserves original variable order result_vars[name] = result_vars.pop(name) absent_coord_names = coord_names - set(result_vars) if absent_coord_names: raise ValueError( f"Variables {absent_coord_names!r} are coordinates in some datasets but not others." 
) result_data_vars = {} coord_vars = {} for name, result_var in result_vars.items(): if name in coord_names: coord_vars[name] = result_var else: result_data_vars[name] = result_var if index is not None: if dim_var is not None: index_vars = index.create_variables({dim_name: dim_var}) else: index_vars = index.create_variables() coord_vars[dim_name] = index_vars[dim_name] result_indexes[dim_name] = index coords_obj = Coordinates(coord_vars, indexes=result_indexes) result = type(datasets[0])(result_data_vars, coords=coords_obj, attrs=result_attrs) result.encoding = result_encoding return result def _dataarray_concat( arrays: Iterable[T_DataArray], dim: str | T_Variable | T_DataArray | pd.Index, data_vars: T_DataVars | CombineKwargDefault, coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault, compat: CompatOptions | CombineKwargDefault, positions: Iterable[Iterable[int]] | None, fill_value: object, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions, create_index_for_new_dim: bool, ) -> T_DataArray: from xarray.core.dataarray import DataArray arrays = list(arrays) if not all(isinstance(array, DataArray) for array in arrays): raise TypeError( "The elements in the input list need to be either all 'Dataset's or all 'DataArray's" ) # Allow passing `all` or `None` even though we always use `data_vars='all'` # when passing off to `_dataset_concat`. if not isinstance(data_vars, CombineKwargDefault) and data_vars not in [ "all", None, ]: raise ValueError( "data_vars is not a valid argument when concatenating DataArray objects" ) datasets = [] for n, arr in enumerate(arrays): if n == 0: name = arr.name elif name != arr.name: if compat == "identical": raise ValueError("array names not identical") else: arr = arr.rename(name) datasets.append(arr._to_temp_dataset()) ds = _dataset_concat( datasets, dim=dim, data_vars="all", coords=coords, compat=compat, positions=positions, fill_value=fill_value, join=join, combine_attrs=combine_attrs, create_index_for_new_dim=create_index_for_new_dim, ) merged_attrs = merge_attrs([da.attrs for da in arrays], combine_attrs) result = arrays[0]._from_temp_dataset(ds, name) result.attrs = merged_attrs return result xarray-2025.09.0/xarray/structure/merge.py000066400000000000000000001211061505620616400203640ustar00rootroot00000000000000from __future__ import annotations from collections import defaultdict from collections.abc import Hashable, Iterable, Mapping, Sequence from collections.abc import Set as AbstractSet from typing import TYPE_CHECKING, Any, NamedTuple, Union import pandas as pd from xarray.core import dtypes from xarray.core.duck_array_ops import lazy_array_equiv from xarray.core.indexes import ( Index, create_default_index_implicit, filter_indexes_from_coords, indexes_equal, ) from xarray.core.utils import ( Frozen, compat_dict_union, dict_equiv, emit_user_level_warning, equivalent, ) from xarray.core.variable import Variable, as_variable, calculate_dimensions from xarray.structure.alignment import deep_align from xarray.util.deprecation_helpers import ( _COMPAT_DEFAULT, _JOIN_DEFAULT, CombineKwargDefault, ) if TYPE_CHECKING: from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.types import ( CombineAttrsOptions, CompatOptions, DataVars, JoinOptions, ) DimsLike = Union[Hashable, Sequence[Hashable]] ArrayLike = Any VariableLike = Union[ ArrayLike, tuple[DimsLike, ArrayLike], tuple[DimsLike, ArrayLike, Mapping], tuple[DimsLike, 
ArrayLike, Mapping, Mapping], ] XarrayValue = Union[DataArray, Variable, VariableLike] DatasetLike = Union[Dataset, Coordinates, Mapping[Any, XarrayValue]] CoercibleValue = Union[XarrayValue, pd.Series, pd.DataFrame] CoercibleMapping = Union[Dataset, Mapping[Any, CoercibleValue]] PANDAS_TYPES = (pd.Series, pd.DataFrame) _VALID_COMPAT = Frozen( { "identical": 0, "equals": 1, "broadcast_equals": 2, "minimal": 3, "no_conflicts": 4, "override": 5, } ) class Context: """object carrying the information of a call""" def __init__(self, func): self.func = func def broadcast_dimension_size(variables: list[Variable]) -> dict[Hashable, int]: """Extract dimension sizes from a dictionary of variables. Raises ValueError if any dimensions have different sizes. """ dims: dict[Hashable, int] = {} for var in variables: for dim, size in zip(var.dims, var.shape, strict=True): if dim in dims and size != dims[dim]: raise ValueError(f"index {dim!r} not aligned") dims[dim] = size return dims class MergeError(ValueError): """Error class for merge failures due to incompatible arguments.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def unique_variable( name: Hashable, variables: list[Variable], compat: CompatOptions | CombineKwargDefault = "broadcast_equals", equals: bool | None = None, ) -> tuple[bool | None, Variable]: """Return the unique variable from a list of variables or raise MergeError. Parameters ---------- name : hashable Name for this variable. variables : list of Variable List of Variable objects, all of which go by the same name in different inputs. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Type of equality check to use. equals : None or bool, optional corresponding to result of compat test Returns ------- Variable to use in the result. Raises ------ MergeError: if any of the variables are not equal. """ out = variables[0] if len(variables) == 1 or compat == "override": return equals, out combine_method = None if compat == "minimal": compat = "broadcast_equals" if compat == "broadcast_equals": dim_lengths = broadcast_dimension_size(variables) out = out.set_dims(dim_lengths) if compat == "no_conflicts": combine_method = "fillna" # we return the lazy equals, so we can warn about behaviour changes lazy_equals = equals if equals is None: compat_str = ( compat._value if isinstance(compat, CombineKwargDefault) else compat ) assert compat_str is not None # first check without comparing values i.e. no computes for var in variables[1:]: equals = getattr(out, compat_str)(var, equiv=lazy_array_equiv) if equals is not True: break lazy_equals = equals if equals is None: # now compare values with minimum number of computes out = out.compute() for var in variables[1:]: equals = getattr(out, compat_str)(var) if not equals: break if not equals: raise MergeError( f"conflicting values for variable {name!r} on objects to be combined. " "You can skip this check by specifying compat='override'." 
) if combine_method: for var in variables[1:]: out = getattr(out, combine_method)(var) return lazy_equals, out def _assert_compat_valid(compat): if not isinstance(compat, CombineKwargDefault) and compat not in _VALID_COMPAT: raise ValueError(f"compat={compat!r} invalid: must be {set(_VALID_COMPAT)}") MergeElement = tuple[Variable, Index | None] def _assert_prioritized_valid( grouped: dict[Hashable, list[MergeElement]], prioritized: Mapping[Any, MergeElement], ) -> None: """Make sure that elements given in prioritized will not corrupt any index given in grouped. """ prioritized_names = set(prioritized) grouped_by_index: dict[int, list[Hashable]] = defaultdict(list) indexes: dict[int, Index] = {} for name, elements_list in grouped.items(): for _, index in elements_list: if index is not None: grouped_by_index[id(index)].append(name) indexes[id(index)] = index # An index may be corrupted when the set of its corresponding coordinate name(s) # partially overlaps the set of names given in prioritized for index_id, index_coord_names in grouped_by_index.items(): index_names = set(index_coord_names) common_names = index_names & prioritized_names if common_names and len(common_names) != len(index_names): common_names_str = ", ".join(f"{k!r}" for k in common_names) index_names_str = ", ".join(f"{k!r}" for k in index_coord_names) raise ValueError( f"cannot set or update variable(s) {common_names_str}, which would corrupt " f"the following index built from coordinates {index_names_str}:\n" f"{indexes[index_id]!r}" ) def merge_collected( grouped: dict[Any, list[MergeElement]], prioritized: Mapping[Any, MergeElement] | None = None, compat: CompatOptions | CombineKwargDefault = "minimal", combine_attrs: CombineAttrsOptions = "override", equals: dict[Any, bool] | None = None, ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]: """Merge dicts of variables, while resolving conflicts appropriately. Parameters ---------- grouped : mapping prioritized : mapping compat : str Type of equality check to use when checking for conflicts. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. equals : mapping, optional corresponding to result of compat test Returns ------- Dict with keys taken by the union of keys on list_of_mappings, and Variable values corresponding to those that should be found on the merged result. 
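Notes
-----
Illustrative sketch only (hypothetical input, not a doctest): ``grouped``
maps each name to the list of candidate ``(Variable, Index | None)`` pairs
collected from the inputs, e.g.::

    v = Variable(("x",), [0, 1])
    merged_vars, merged_indexes = merge_collected(
        {"a": [(v, None), (v.copy(), None)]}, compat="equals"
    )
    # merged_vars holds a single "a" variable equal to ``v``;
    # merged_indexes stays empty because no element carried an index.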
""" if prioritized is None: prioritized = {} if equals is None: equals = {} _assert_compat_valid(compat) _assert_prioritized_valid(grouped, prioritized) merged_vars: dict[Hashable, Variable] = {} merged_indexes: dict[Hashable, Index] = {} index_cmp_cache: dict[tuple[int, int], bool | None] = {} for name, elements_list in grouped.items(): if name in prioritized: variable, index = prioritized[name] merged_vars[name] = variable if index is not None: merged_indexes[name] = index else: attrs: dict[Any, Any] = {} indexed_elements = [ (variable, index) for variable, index in elements_list if index is not None ] if indexed_elements: # TODO(shoyer): consider adjusting this logic. Are we really # OK throwing away variable without an index in favor of # indexed variables, without even checking if values match? variable, index = indexed_elements[0] for other_var, other_index in indexed_elements[1:]: if not indexes_equal( index, other_index, variable, other_var, index_cmp_cache ): raise MergeError( f"conflicting values/indexes on objects to be combined for coordinate {name!r}\n" f"first index: {index!r}\nsecond index: {other_index!r}\n" f"first variable: {variable!r}\nsecond variable: {other_var!r}\n" ) if compat == "identical": for other_variable, _ in indexed_elements[1:]: if not dict_equiv(variable.attrs, other_variable.attrs): raise MergeError( "conflicting attribute values on combined " f"variable {name!r}:\nfirst value: {variable.attrs!r}\nsecond value: {other_variable.attrs!r}" ) attrs = merge_attrs( [var.attrs for var, _ in indexed_elements], combine_attrs=combine_attrs, ) merged_vars[name] = variable merged_indexes[name] = index else: variables = [variable for variable, _ in elements_list] try: equals_this_var, merged_vars[name] = unique_variable( name, variables, compat, equals.get(name) ) # This is very likely to result in false positives, but there is no way # to tell if the output will change without computing. if ( isinstance(compat, CombineKwargDefault) and compat == "no_conflicts" and len(variables) > 1 and not equals_this_var ): emit_user_level_warning( compat.warning_message( "This is likely to lead to different results when " "combining overlapping variables with the same name.", ), FutureWarning, ) except MergeError: if compat != "minimal": # we need more than "minimal" compatibility (for which # we drop conflicting coordinates) raise if name in merged_vars: attrs = merge_attrs( [var.attrs for var in variables], combine_attrs=combine_attrs ) if name in merged_vars and (merged_vars[name].attrs or attrs): # Ensure that assigning attrs does not affect the original input variable. merged_vars[name] = merged_vars[name].copy(deep=False) merged_vars[name].attrs = attrs return merged_vars, merged_indexes def collect_variables_and_indexes( list_of_mappings: Iterable[DatasetLike], indexes: Mapping[Any, Any] | None = None, ) -> dict[Hashable, list[MergeElement]]: """Collect variables and indexes from list of mappings of xarray objects. Mappings can be Dataset or Coordinates objects, in which case both variables and indexes are extracted from it. It can also have values of one of the following types: - an xarray.Variable - a tuple `(dims, data[, attrs[, encoding]])` that can be converted in an xarray.Variable - or an xarray.DataArray If a mapping of indexes is given, those indexes are assigned to all variables with a matching key/name. For dimension variables with no matching index, a default (pandas) index is assigned. DataArray indexes that don't match mapping keys are also extracted. 
""" from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if indexes is None: indexes = {} grouped: dict[Hashable, list[MergeElement]] = defaultdict(list) def append(name, variable, index): grouped[name].append((variable, index)) def append_all(variables, indexes): for name, variable in variables.items(): append(name, variable, indexes.get(name)) for mapping in list_of_mappings: if isinstance(mapping, Coordinates | Dataset): append_all(mapping.variables, mapping.xindexes) continue for name, variable in mapping.items(): if isinstance(variable, DataArray): coords_ = variable._coords.copy() # use private API for speed indexes_ = dict(variable._indexes) # explicitly overwritten variables should take precedence coords_.pop(name, None) indexes_.pop(name, None) append_all(coords_, indexes_) variable = as_variable(variable, name=name, auto_convert=False) if name in indexes: append(name, variable, indexes[name]) elif variable.dims == (name,): idx, idx_vars = create_default_index_implicit(variable) append_all(idx_vars, dict.fromkeys(idx_vars, idx)) else: append(name, variable, None) return grouped def collect_from_coordinates( list_of_coords: list[Coordinates], ) -> dict[Hashable, list[MergeElement]]: """Collect variables and indexes to be merged from Coordinate objects.""" grouped: dict[Hashable, list[MergeElement]] = defaultdict(list) for coords in list_of_coords: variables = coords.variables indexes = coords.xindexes for name, variable in variables.items(): grouped[name].append((variable, indexes.get(name))) return grouped def merge_coordinates_without_align( objects: list[Coordinates], prioritized: Mapping[Any, MergeElement] | None = None, exclude_dims: AbstractSet = frozenset(), combine_attrs: CombineAttrsOptions = "override", ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]: """Merge variables/indexes from coordinates without automatic alignments. This function is used for merging coordinate from pre-existing xarray objects. """ collected = collect_from_coordinates(objects) if exclude_dims: filtered: dict[Hashable, list[MergeElement]] = {} for name, elements in collected.items(): new_elements = [ (variable, index) for variable, index in elements if exclude_dims.isdisjoint(variable.dims) ] if new_elements: filtered[name] = new_elements else: filtered = collected # TODO: indexes should probably be filtered in collected elements # before merging them merged_coords, merged_indexes = merge_collected( filtered, prioritized, combine_attrs=combine_attrs ) merged_indexes = filter_indexes_from_coords(merged_indexes, set(merged_coords)) return merged_coords, merged_indexes def determine_coords( list_of_mappings: Iterable[DatasetLike], ) -> tuple[set[Hashable], set[Hashable]]: """Given a list of dicts with xarray object values, identify coordinates. Parameters ---------- list_of_mappings : list of dict or list of Dataset Of the same form as the arguments to expand_variable_dicts. Returns ------- coord_names : set of variable names noncoord_names : set of variable names All variable found in the input should appear in either the set of coordinate or non-coordinate names. 
""" from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset coord_names: set[Hashable] = set() noncoord_names: set[Hashable] = set() for mapping in list_of_mappings: if isinstance(mapping, Dataset): coord_names.update(mapping.coords) noncoord_names.update(mapping.data_vars) else: for name, var in mapping.items(): if isinstance(var, DataArray): coords = set(var._coords) # use private API for speed # explicitly overwritten variables should take precedence coords.discard(name) coord_names.update(coords) return coord_names, noncoord_names def coerce_pandas_values(objects: Iterable[CoercibleMapping]) -> list[DatasetLike]: """Convert pandas values found in a list of labeled objects. Parameters ---------- objects : list of Dataset or mapping The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. Returns ------- List of Dataset or dictionary objects. Any inputs or values in the inputs that were pandas objects have been converted into native xarray objects. """ from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset out: list[DatasetLike] = [] for obj in objects: variables: DatasetLike if isinstance(obj, Dataset | Coordinates): variables = obj else: variables = {} if isinstance(obj, PANDAS_TYPES): obj = dict(obj.items()) for k, v in obj.items(): if isinstance(v, PANDAS_TYPES): v = DataArray(v) variables[k] = v out.append(variables) return out def _get_priority_vars_and_indexes( objects: Sequence[DatasetLike], priority_arg: int | None, compat: CompatOptions | CombineKwargDefault = "equals", ) -> dict[Hashable, MergeElement]: """Extract the priority variable from a list of mappings. We need this method because in some cases the priority argument itself might have conflicting values (e.g., if it is a dict with two DataArray values with conflicting coordinate values). Parameters ---------- objects : sequence of dict-like of Variable Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare non-concatenated variables of the same name for potential conflicts. This is passed down to merge. - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "equals": all values and dimensions must be the same. - "identical": all values, dimensions and attributes must be the same. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset Returns ------- A dictionary of variables and associated indexes (if any) to prioritize. 
""" if priority_arg is None: return {} collected = collect_variables_and_indexes([objects[priority_arg]]) variables, indexes = merge_collected(collected, compat=compat) grouped: dict[Hashable, MergeElement] = {} for name, variable in variables.items(): grouped[name] = (variable, indexes.get(name)) return grouped def merge_coords( objects: Iterable[CoercibleMapping], compat: CompatOptions = "minimal", join: JoinOptions = "outer", priority_arg: int | None = None, indexes: Mapping[Any, Index] | None = None, fill_value: object = dtypes.NA, ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]: """Merge coordinate variables. See merge_core below for argument descriptions. This works similarly to merge_core, except everything we don't worry about whether variables are coordinates or not. """ _assert_compat_valid(compat) coerced = coerce_pandas_values(objects) aligned = deep_align( coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value ) collected = collect_variables_and_indexes(aligned, indexes=indexes) prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat) variables, out_indexes = merge_collected(collected, prioritized, compat=compat) return variables, out_indexes def merge_attrs(variable_attrs, combine_attrs, context=None): """Combine attributes from different variables according to combine_attrs""" if not variable_attrs: # no attributes to merge return None if callable(combine_attrs): return combine_attrs(variable_attrs, context=context) elif combine_attrs == "drop": return {} elif combine_attrs == "override": return dict(variable_attrs[0]) elif combine_attrs == "no_conflicts": result = dict(variable_attrs[0]) for attrs in variable_attrs[1:]: try: result = compat_dict_union(result, attrs) except ValueError as e: raise MergeError( "combine_attrs='no_conflicts', but some values are not " f"the same. Merging {result} with {attrs}" ) from e return result elif combine_attrs == "drop_conflicts": result = {} dropped_keys = set() for attrs in variable_attrs: result.update( { key: value for key, value in attrs.items() if key not in result and key not in dropped_keys } ) result = { key: value for key, value in result.items() if key not in attrs or equivalent(attrs[key], value) } dropped_keys |= {key for key in attrs if key not in result} return result elif combine_attrs == "identical": result = dict(variable_attrs[0]) for attrs in variable_attrs[1:]: if not dict_equiv(result, attrs): raise MergeError( f"combine_attrs='identical', but attrs differ. First is {result} " f", other is {attrs}." ) return result else: raise ValueError(f"Unrecognised value for combine_attrs={combine_attrs}") class _MergeResult(NamedTuple): variables: dict[Hashable, Variable] coord_names: set[Hashable] dims: dict[Hashable, int] indexes: dict[Hashable, Index] attrs: dict[Hashable, Any] def merge_core( objects: Iterable[CoercibleMapping], compat: CompatOptions | CombineKwargDefault, join: JoinOptions | CombineKwargDefault, combine_attrs: CombineAttrsOptions = "override", priority_arg: int | None = None, explicit_coords: Iterable[Hashable] | None = None, indexes: Mapping[Any, Any] | None = None, fill_value: object = dtypes.NA, skip_align_args: list[int] | None = None, ) -> _MergeResult: """Core logic for merging labeled objects. This is not public API. Parameters ---------- objects : list of mapping All values must be convertible to labeled arrays. 
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Compatibility checks to use when merging variables. join : {"outer", "inner", "left", "right"}, optional How to combine objects with different indexes. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" How to combine attributes of objects priority_arg : int, optional Optional argument in `objects` that takes precedence over the others. explicit_coords : set, optional An explicit list of variables from `objects` that are coordinates. indexes : dict, optional Dictionary with values given by xarray.Index objects or anything that may be cast to pandas.Index objects. fill_value : scalar, optional Value to use for newly missing values skip_align_args : list of int, optional Optional arguments in `objects` that are not included in alignment. Returns ------- variables : dict Dictionary of Variable objects. coord_names : set Set of coordinate names. dims : dict Dictionary mapping from dimension names to sizes. attrs : dict Dictionary of attributes """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset _assert_compat_valid(compat) objects = list(objects) if skip_align_args is None: skip_align_args = [] skip_align_objs = [(pos, objects.pop(pos)) for pos in skip_align_args] coerced = coerce_pandas_values(objects) aligned = deep_align( coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value, ) for pos, obj in skip_align_objs: aligned.insert(pos, obj) collected = collect_variables_and_indexes(aligned, indexes=indexes) prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat) variables, out_indexes = merge_collected( collected, prioritized, compat=compat, combine_attrs=combine_attrs, ) dims = calculate_dimensions(variables) coord_names, noncoord_names = determine_coords(coerced) if compat == "minimal": # coordinates may be dropped in merged results coord_names.intersection_update(variables) if explicit_coords is not None: coord_names.update(explicit_coords) for dim in dims: if dim in variables: coord_names.add(dim) ambiguous_coords = coord_names.intersection(noncoord_names) if ambiguous_coords: raise MergeError( "unable to determine if these variables should be " f"coordinates or not in the merged result: {ambiguous_coords}" ) attrs = merge_attrs( [var.attrs for var in coerced if isinstance(var, Dataset | DataArray)], combine_attrs, ) return _MergeResult(variables, coord_names, dims, out_indexes, attrs) def merge( objects: Iterable[DataArray | CoercibleMapping], compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT, join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT, fill_value: object = dtypes.NA, combine_attrs: CombineAttrsOptions = "override", ) -> Dataset: """Merge any number of xarray objects into a single Dataset as variables. Parameters ---------- objects : iterable of Dataset or iterable of DataArray or iterable of dict-like Merge together all variables from these objects. If any of them are DataArray objects, they must have a name. compat : {"identical", "equals", "broadcast_equals", "no_conflicts", \ "override", "minimal"}, default: "no_conflicts" String indicating how to compare variables of the same name for potential conflicts: - "identical": all values, dimensions and attributes must be the same. - "equals": all values and dimensions must be the same. 
- "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - "override": skip comparing and pick variable from first dataset - "minimal": drop conflicting coordinates join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer" String indicating how to combine differing indexes in objects. - "outer": use the union of object indexes - "inner": use the intersection of object indexes - "left": use indexes from the first object with each dimension - "right": use indexes from the last object with each dimension - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. fill_value : scalar or dict-like, optional Value to use for newly missing values. If a dict-like, maps variable names to fill values. Use a data array's name to refer to its values. combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \ "override"} or callable, default: "override" A callable or a string indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. - "identical": all attrs must be the same on every object. - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - "drop_conflicts": attrs from all objects are combined, any that have the same name but different values are dropped. - "override": skip comparing and copy attrs from the first dataset to the result. If a callable, it must expect a sequence of ``attrs`` dicts and a context object as its only parameters. Returns ------- Dataset Dataset with combined variables from each object. Examples -------- >>> x = xr.DataArray( ... [[1.0, 2.0], [3.0, 5.0]], ... dims=("lat", "lon"), ... coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]}, ... name="var1", ... ) >>> y = xr.DataArray( ... [[5.0, 6.0], [7.0, 8.0]], ... dims=("lat", "lon"), ... coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]}, ... name="var2", ... ) >>> z = xr.DataArray( ... [[0.0, 3.0], [4.0, 9.0]], ... dims=("time", "lon"), ... coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]}, ... name="var3", ... 
) >>> x Size: 32B array([[1., 2.], [3., 5.]]) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 >>> y Size: 32B array([[5., 6.], [7., 8.]]) Coordinates: * lat (lat) float64 16B 35.0 42.0 * lon (lon) float64 16B 100.0 150.0 >>> z Size: 32B array([[0., 3.], [4., 9.]]) Coordinates: * time (time) float64 16B 30.0 60.0 * lon (lon) float64 16B 100.0 150.0 >>> xr.merge([x, y, z], join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="identical", join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals", join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], compat="equals", join="outer", fill_value=-999.0) Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0 var2 (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 
7.0 -999.0 8.0 var3 (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0 >>> xr.merge([x, y, z], join="override") Size: 144B Dimensions: (lat: 2, lon: 2, time: 2) Coordinates: * lat (lat) float64 16B 35.0 40.0 * lon (lon) float64 16B 100.0 120.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 32B 1.0 2.0 3.0 5.0 var2 (lat, lon) float64 32B 5.0 6.0 7.0 8.0 var3 (time, lon) float64 32B 0.0 3.0 4.0 9.0 >>> xr.merge([x, y, z], join="inner") Size: 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 8B 100.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 8B 1.0 var2 (lat, lon) float64 8B 5.0 var3 (time, lon) float64 16B 0.0 4.0 >>> xr.merge([x, y, z], compat="identical", join="inner") Size: 64B Dimensions: (lat: 1, lon: 1, time: 2) Coordinates: * lat (lat) float64 8B 35.0 * lon (lon) float64 8B 100.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 8B 1.0 var2 (lat, lon) float64 8B 5.0 var3 (time, lon) float64 16B 0.0 4.0 >>> xr.merge([x, y, z], compat="broadcast_equals", join="outer") Size: 256B Dimensions: (lat: 3, lon: 3, time: 2) Coordinates: * lat (lat) float64 24B 35.0 40.0 42.0 * lon (lon) float64 24B 100.0 120.0 150.0 * time (time) float64 16B 30.0 60.0 Data variables: var1 (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan var2 (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0 var3 (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0 >>> xr.merge([x, y, z], join="exact") Traceback (most recent call last): ... xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' where ... Raises ------ xarray.MergeError If any variables with the same name have conflicting values. See also -------- concat combine_nested combine_by_coords """ from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset dict_like_objects = [] for obj in objects: if not isinstance(obj, DataArray | Dataset | Coordinates | dict): raise TypeError( "objects must be an iterable containing only " "Dataset(s), DataArray(s), and dictionaries." ) if isinstance(obj, DataArray): obj = obj.to_dataset(promote_attrs=True) elif isinstance(obj, Coordinates): obj = obj.to_dataset() dict_like_objects.append(obj) merge_result = merge_core( dict_like_objects, compat=compat, join=join, combine_attrs=combine_attrs, fill_value=fill_value, ) return Dataset._construct_direct(**merge_result._asdict()) def dataset_merge_method( dataset: Dataset, other: CoercibleMapping, overwrite_vars: Hashable | Iterable[Hashable], compat: CompatOptions | CombineKwargDefault, join: JoinOptions | CombineKwargDefault, fill_value: Any, combine_attrs: CombineAttrsOptions, ) -> _MergeResult: """Guts of the Dataset.merge method.""" # we are locked into supporting overwrite_vars for the Dataset.merge # method due for backwards compatibility # TODO: consider deprecating it? 
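    # Illustrative sketch of the overwrite_vars semantics (not executed here):
    # ``ds.merge(other, overwrite_vars="x")`` takes "x" from ``other`` without
    # any compatibility check, while every remaining variable still goes
    # through the usual ``compat`` comparison performed by merge_core below.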
if not isinstance(overwrite_vars, str) and isinstance(overwrite_vars, Iterable): overwrite_vars = set(overwrite_vars) else: overwrite_vars = {overwrite_vars} if not overwrite_vars: objs = [dataset, other] priority_arg = None elif overwrite_vars == set(other): objs = [dataset, other] priority_arg = 1 else: other_overwrite: dict[Hashable, CoercibleValue] = {} other_no_overwrite: dict[Hashable, CoercibleValue] = {} for k, v in other.items(): if k in overwrite_vars: other_overwrite[k] = v else: other_no_overwrite[k] = v objs = [dataset, other_no_overwrite, other_overwrite] priority_arg = 2 return merge_core( objs, compat=compat, join=join, priority_arg=priority_arg, fill_value=fill_value, combine_attrs=combine_attrs, ) def dataset_update_method(dataset: Dataset, other: CoercibleMapping) -> _MergeResult: """Guts of the Dataset.update method. This drops a duplicated coordinates from `other` if `other` is not an `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068, GH2180). """ from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset if not isinstance(other, Dataset): other = dict(other) for key, value in other.items(): if isinstance(value, DataArray): # drop conflicting coordinates coord_names = [ c for c in value.coords if c not in value.dims and c in dataset.coords ] if coord_names: other[key] = value.drop_vars(coord_names) return merge_core( [dataset, other], compat="broadcast_equals", join="outer", priority_arg=1, indexes=dataset.xindexes, combine_attrs="override", ) def merge_data_and_coords(data_vars: DataVars, coords) -> _MergeResult: """Used in Dataset.__init__.""" from xarray.core.coordinates import Coordinates, create_coords_with_default_indexes if isinstance(coords, Coordinates): coords = coords.copy() else: coords = create_coords_with_default_indexes(coords, data_vars) # exclude coords from alignment (all variables in a Coordinates object should # already be aligned together) and use coordinates' indexes to align data_vars return merge_core( [data_vars, coords], compat="broadcast_equals", join="outer", combine_attrs="override", explicit_coords=tuple(coords), indexes=coords.xindexes, priority_arg=1, skip_align_args=[1], ) xarray-2025.09.0/xarray/testing/000077500000000000000000000000001505620616400163275ustar00rootroot00000000000000xarray-2025.09.0/xarray/testing/__init__.py000066400000000000000000000011551505620616400204420ustar00rootroot00000000000000from xarray.testing.assertions import ( # noqa: F401 _assert_dataarray_invariants, _assert_dataset_invariants, _assert_indexes_invariants_checks, _assert_internal_invariants, _assert_variable_invariants, _data_allclose_or_equiv, assert_allclose, assert_chunks_equal, assert_duckarray_allclose, assert_duckarray_equal, assert_equal, assert_identical, assert_isomorphic, ) __all__ = [ "assert_allclose", "assert_chunks_equal", "assert_duckarray_allclose", "assert_duckarray_equal", "assert_equal", "assert_identical", "assert_isomorphic", ] xarray-2025.09.0/xarray/testing/assertions.py000066400000000000000000000432051505620616400210770ustar00rootroot00000000000000"""Testing functions exposed to the user API""" import functools import warnings from collections.abc import Hashable import numpy as np import pandas as pd from xarray.core import duck_array_ops, formatting, utils from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.datatree_mapping import map_over_datasets 
from xarray.core.formatting import diff_datatree_repr from xarray.core.indexes import Index, PandasIndex, PandasMultiIndex, default_indexes from xarray.core.variable import IndexVariable, Variable def ensure_warnings(func): # sometimes tests elevate warnings to errors # -> make sure that does not happen in the assert_* functions @functools.wraps(func) def wrapper(*args, **kwargs): __tracebackhide__ = True with warnings.catch_warnings(): # only remove filters that would "error" warnings.filters = [f for f in warnings.filters if f[0] != "error"] return func(*args, **kwargs) return wrapper def _decode_string_data(data): if data.dtype.kind == "S": return np.char.decode(data, "utf-8", "replace") return data def _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08, decode_bytes=True): if any(arr.dtype.kind == "S" for arr in [arr1, arr2]) and decode_bytes: arr1 = _decode_string_data(arr1) arr2 = _decode_string_data(arr2) exact_dtypes = ["M", "m", "O", "S", "U"] if any(arr.dtype.kind in exact_dtypes for arr in [arr1, arr2]): return duck_array_ops.array_equiv(arr1, arr2) else: return duck_array_ops.allclose_or_equiv(arr1, arr2, rtol=rtol, atol=atol) @ensure_warnings def assert_isomorphic(a: DataTree, b: DataTree): """ Two DataTrees are considered isomorphic if the set of paths to their descendent nodes are the same. Nothing about the data or attrs in each node is checked. Isomorphism is a necessary condition for two trees to be used in a nodewise binary operation, such as tree1 + tree2. Parameters ---------- a : DataTree The first object to compare. b : DataTree The second object to compare. See Also -------- DataTree.isomorphic assert_equal assert_identical """ __tracebackhide__ = True assert isinstance(a, type(b)) if isinstance(a, DataTree): assert a.isomorphic(b), diff_datatree_repr(a, b, "isomorphic") else: raise TypeError(f"{type(a)} not of type DataTree") def maybe_transpose_dims(a, b, check_dim_order: bool): """Helper for assert_equal/allclose/identical""" __tracebackhide__ = True def _maybe_transpose_dims(a, b): if not isinstance(a, Variable | DataArray | Dataset): return b if set(a.dims) == set(b.dims): # Ensure transpose won't fail if a dimension is missing # If this is the case, the difference will be caught by the caller return b.transpose(*a.dims) return b if check_dim_order: return b if isinstance(a, DataTree): return map_over_datasets(_maybe_transpose_dims, a, b) return _maybe_transpose_dims(a, b) @ensure_warnings def assert_equal(a, b, check_dim_order: bool = True): """Like :py:func:`numpy.testing.assert_array_equal`, but for xarray objects. Raises an AssertionError if two objects are not equal. This will match data values, dimensions and coordinates, but not names or attributes (except for Dataset objects for which the variable names must match). Arrays with NaN in the same location are considered equal. For DataTree objects, assert_equal is mapped over all Datasets on each node, with the DataTrees being equal if both are isomorphic and the corresponding Datasets at each node are themselves equal. Parameters ---------- a : xarray.Dataset, xarray.DataArray, xarray.Variable, xarray.Coordinates or xarray.core.datatree.DataTree. The first object to compare. b : xarray.Dataset, xarray.DataArray, xarray.Variable, xarray.Coordinates or xarray.core.datatree.DataTree. The second object to compare. check_dim_order : bool, optional, default is True Whether dimensions must be in the same order. 
See Also -------- assert_identical, assert_allclose, Dataset.equals, DataArray.equals numpy.testing.assert_array_equal """ __tracebackhide__ = True assert type(a) is type(b) or ( isinstance(a, Coordinates) and isinstance(b, Coordinates) ) b = maybe_transpose_dims(a, b, check_dim_order) if isinstance(a, Variable | DataArray): assert a.equals(b), formatting.diff_array_repr(a, b, "equals") elif isinstance(a, Dataset): assert a.equals(b), formatting.diff_dataset_repr(a, b, "equals") elif isinstance(a, Coordinates): assert a.equals(b), formatting.diff_coords_repr(a, b, "equals") elif isinstance(a, DataTree): assert a.equals(b), diff_datatree_repr(a, b, "equals") else: raise TypeError(f"{type(a)} not supported by assertion comparison") @ensure_warnings def assert_identical(a, b): """Like :py:func:`xarray.testing.assert_equal`, but also matches the objects' names and attributes. Raises an AssertionError if two objects are not identical. For DataTree objects, assert_identical is mapped over all Datasets on each node, with the DataTrees being identical if both are isomorphic and the corresponding Datasets at each node are themselves identical. Parameters ---------- a : xarray.Dataset, xarray.DataArray, xarray.Variable or xarray.Coordinates The first object to compare. b : xarray.Dataset, xarray.DataArray, xarray.Variable or xarray.Coordinates The second object to compare. See Also -------- assert_equal, assert_allclose, Dataset.equals, DataArray.equals """ __tracebackhide__ = True assert type(a) is type(b) or ( isinstance(a, Coordinates) and isinstance(b, Coordinates) ) if isinstance(a, Variable): assert a.identical(b), formatting.diff_array_repr(a, b, "identical") elif isinstance(a, DataArray): assert a.name == b.name, ( f"DataArray names are different. L: {a.name}, R: {b.name}" ) assert a.identical(b), formatting.diff_array_repr(a, b, "identical") elif isinstance(a, Dataset | Variable): assert a.identical(b), formatting.diff_dataset_repr(a, b, "identical") elif isinstance(a, Coordinates): assert a.identical(b), formatting.diff_coords_repr(a, b, "identical") elif isinstance(a, DataTree): assert a.identical(b), diff_datatree_repr(a, b, "identical") else: raise TypeError(f"{type(a)} not supported by assertion comparison") @ensure_warnings def assert_allclose( a, b, rtol=1e-05, atol=1e-08, decode_bytes=True, check_dim_order: bool = True ): """Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects. Raises an AssertionError if two objects are not equal up to desired tolerance. Parameters ---------- a : xarray.Dataset, xarray.DataArray or xarray.Variable The first object to compare. b : xarray.Dataset, xarray.DataArray or xarray.Variable The second object to compare. rtol : float, optional Relative tolerance. atol : float, optional Absolute tolerance. decode_bytes : bool, optional Whether byte dtypes should be decoded to strings as UTF-8 or not. This is useful for testing serialization methods on Python 3 that return saved strings as bytes. check_dim_order : bool, optional, default is True Whether dimensions must be in the same order. 
See Also -------- assert_identical, assert_equal, numpy.testing.assert_allclose """ __tracebackhide__ = True assert type(a) is type(b) b = maybe_transpose_dims(a, b, check_dim_order) equiv = functools.partial( _data_allclose_or_equiv, rtol=rtol, atol=atol, decode_bytes=decode_bytes ) equiv.__name__ = "allclose" # type: ignore[attr-defined] def compat_variable(a, b): a = getattr(a, "variable", a) b = getattr(b, "variable", b) return a.dims == b.dims and (a._data is b._data or equiv(a.data, b.data)) if isinstance(a, Variable): allclose = compat_variable(a, b) assert allclose, formatting.diff_array_repr(a, b, compat=equiv) elif isinstance(a, DataArray): allclose = utils.dict_equiv( a.coords, b.coords, compat=compat_variable ) and compat_variable(a.variable, b.variable) assert allclose, formatting.diff_array_repr(a, b, compat=equiv) elif isinstance(a, Dataset): allclose = a._coord_names == b._coord_names and utils.dict_equiv( a.variables, b.variables, compat=compat_variable ) assert allclose, formatting.diff_dataset_repr(a, b, compat=equiv) elif isinstance(a, Coordinates): allclose = utils.dict_equiv(a.variables, b.variables, compat=compat_variable) assert allclose, formatting.diff_coords_repr(a, b, compat=equiv) else: raise TypeError(f"{type(a)} not supported by assertion comparison") def _format_message(x, y, err_msg, verbose): diff = x - y abs_diff = max(abs(diff)) rel_diff = "not implemented" n_diff = np.count_nonzero(diff) n_total = diff.size fraction = f"{n_diff} / {n_total}" percentage = float(n_diff / n_total * 100) parts = [ "Arrays are not equal", err_msg, f"Mismatched elements: {fraction} ({percentage:.0f}%)", f"Max absolute difference: {abs_diff}", f"Max relative difference: {rel_diff}", ] if verbose: parts += [ f" x: {x!r}", f" y: {y!r}", ] return "\n".join(parts) @ensure_warnings def assert_duckarray_allclose( actual, desired, rtol=1e-07, atol=0, err_msg="", verbose=True ): """Like `np.testing.assert_allclose`, but for duckarrays.""" __tracebackhide__ = True allclose = duck_array_ops.allclose_or_equiv(actual, desired, rtol=rtol, atol=atol) assert allclose, _format_message(actual, desired, err_msg=err_msg, verbose=verbose) @ensure_warnings def assert_duckarray_equal(x, y, err_msg="", verbose=True): """Like `np.testing.assert_array_equal`, but for duckarrays""" __tracebackhide__ = True if not utils.is_duck_array(x) and not utils.is_scalar(x): x = np.asarray(x) if not utils.is_duck_array(y) and not utils.is_scalar(y): y = np.asarray(y) if (utils.is_duck_array(x) and utils.is_scalar(y)) or ( utils.is_scalar(x) and utils.is_duck_array(y) ): equiv = duck_array_ops.array_all(x == y) else: equiv = duck_array_ops.array_equiv(x, y) assert equiv, _format_message(x, y, err_msg=err_msg, verbose=verbose) def assert_chunks_equal(a, b): """ Assert that chunksizes along chunked dimensions are equal. Parameters ---------- a : xarray.Dataset or xarray.DataArray The first object to compare. b : xarray.Dataset or xarray.DataArray The second object to compare. 
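    Examples
    --------
    A minimal sketch, assuming dask is installed (data and chunk sizes are
    arbitrary):

    >>> a = xr.Dataset({"x": ("t", np.arange(8))}).chunk({"t": 4})  # doctest: +SKIP
    >>> b = xr.Dataset({"x": ("t", np.arange(8))}).chunk({"t": 4})  # doctest: +SKIP
    >>> xr.testing.assert_chunks_equal(a, b)  # doctest: +SKIP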
""" if isinstance(a, DataArray) != isinstance(b, DataArray): raise TypeError("a and b have mismatched types") left = a.unify_chunks() right = b.unify_chunks() assert left.chunks == right.chunks def _assert_indexes_invariants_checks( indexes, possible_coord_variables, dims, check_default=True ): assert isinstance(indexes, dict), indexes assert all(isinstance(v, Index) for v in indexes.values()), { k: type(v) for k, v in indexes.items() } if check_default: index_vars = { k for k, v in possible_coord_variables.items() if isinstance(v, IndexVariable) } assert indexes.keys() <= index_vars, (set(indexes), index_vars) # check pandas index wrappers vs. coordinate data adapters for k, index in indexes.items(): if isinstance(index, PandasIndex): pd_index = index.index var = possible_coord_variables[k] assert (index.dim,) == var.dims, (pd_index, var) if k == index.dim: # skip multi-index levels here (checked below) assert index.coord_dtype == var.dtype, (index.coord_dtype, var.dtype) assert isinstance(var._data.array, pd.Index), var._data.array # TODO: check identity instead of equality? assert pd_index.equals(var._data.array), (pd_index, var) if isinstance(index, PandasMultiIndex): pd_index = index.index for name in index.index.names: assert name in possible_coord_variables, (pd_index, index_vars) var = possible_coord_variables[name] assert (index.dim,) == var.dims, (pd_index, var) assert index.level_coords_dtype[name] == var.dtype, ( index.level_coords_dtype[name], var.dtype, ) assert isinstance(var._data.array, pd.MultiIndex), var._data.array assert pd_index.equals(var._data.array), (pd_index, var) # check all all levels are in `indexes` assert name in indexes, (name, set(indexes)) # index identity is used to find unique indexes in `indexes` assert index is indexes[name], (pd_index, indexes[name].index) if check_default: defaults = default_indexes(possible_coord_variables, dims) assert indexes.keys() == defaults.keys(), (set(indexes), set(defaults)) assert all(v.equals(defaults[k]) for k, v in indexes.items()), ( indexes, defaults, ) def _assert_variable_invariants(var: Variable, name: Hashable = None): if name is None: name_or_empty: tuple = () else: name_or_empty = (name,) assert isinstance(var._dims, tuple), name_or_empty + (var._dims,) assert len(var._dims) == len(var._data.shape), name_or_empty + ( var._dims, var._data.shape, ) assert isinstance(var._encoding, type(None) | dict), name_or_empty + ( var._encoding, ) assert isinstance(var._attrs, type(None) | dict), name_or_empty + (var._attrs,) def _assert_dataarray_invariants(da: DataArray, check_default_indexes: bool): assert isinstance(da._variable, Variable), da._variable _assert_variable_invariants(da._variable) assert isinstance(da._coords, dict), da._coords assert all(isinstance(v, Variable) for v in da._coords.values()), da._coords if check_default_indexes: assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), ( da.dims, {k: v.dims for k, v in da._coords.items()}, ) assert all( isinstance(v, IndexVariable) for (k, v) in da._coords.items() if v.dims == (k,) ), {k: type(v) for k, v in da._coords.items()} for k, v in da._coords.items(): _assert_variable_invariants(v, k) if da._indexes is not None: _assert_indexes_invariants_checks( da._indexes, da._coords, da.dims, check_default=check_default_indexes ) def _assert_dataset_invariants(ds: Dataset, check_default_indexes: bool): assert isinstance(ds._variables, dict), type(ds._variables) assert all(isinstance(v, Variable) for v in ds._variables.values()), ds._variables for k, v 
in ds._variables.items(): _assert_variable_invariants(v, k) assert isinstance(ds._coord_names, set), ds._coord_names assert ds._coord_names <= ds._variables.keys(), ( ds._coord_names, set(ds._variables), ) assert type(ds._dims) is dict, ds._dims assert all(isinstance(v, int) for v in ds._dims.values()), ds._dims var_dims: set[Hashable] = set() for v in ds._variables.values(): var_dims.update(v.dims) assert ds._dims.keys() == var_dims, (set(ds._dims), var_dims) assert all( ds._dims[k] == v.sizes[k] for v in ds._variables.values() for k in v.sizes ), (ds._dims, {k: v.sizes for k, v in ds._variables.items()}) if check_default_indexes: assert all( isinstance(v, IndexVariable) for (k, v) in ds._variables.items() if v.dims == (k,) ), {k: type(v) for k, v in ds._variables.items() if v.dims == (k,)} if ds._indexes is not None: _assert_indexes_invariants_checks( ds._indexes, ds._variables, ds._dims, check_default=check_default_indexes ) assert isinstance(ds._encoding, type(None) | dict) assert isinstance(ds._attrs, type(None) | dict) def _assert_internal_invariants( xarray_obj: DataArray | Dataset | Variable, check_default_indexes: bool ): """Validate that an xarray object satisfies its own internal invariants. This exists for the benefit of xarray's own test suite, but may be useful in external projects if they (ill-advisedly) create objects using xarray's private APIs. """ if isinstance(xarray_obj, Variable): _assert_variable_invariants(xarray_obj) elif isinstance(xarray_obj, DataArray): _assert_dataarray_invariants( xarray_obj, check_default_indexes=check_default_indexes ) elif isinstance(xarray_obj, Dataset): _assert_dataset_invariants( xarray_obj, check_default_indexes=check_default_indexes ) elif isinstance(xarray_obj, Coordinates): _assert_dataset_invariants( xarray_obj.to_dataset(), check_default_indexes=check_default_indexes ) else: raise TypeError( f"{type(xarray_obj)} is not a supported type for xarray invariant checks" ) xarray-2025.09.0/xarray/testing/strategies.py000066400000000000000000000422701505620616400210600ustar00rootroot00000000000000import datetime import warnings from collections.abc import Hashable, Iterable, Mapping, Sequence from typing import TYPE_CHECKING, Any, Protocol, overload import hypothesis.extra.numpy as npst import numpy as np from hypothesis.errors import InvalidArgument import xarray as xr from xarray.core.types import T_DuckArray from xarray.core.utils import attempt_import if TYPE_CHECKING: from xarray.core.types import _DTypeLikeNested, _ShapeLike if TYPE_CHECKING: import hypothesis.strategies as st else: st = attempt_import("hypothesis.strategies") __all__ = [ "attrs", "dimension_names", "dimension_sizes", "names", "pandas_index_dtypes", "supported_dtypes", "unique_subset_of", "variables", ] class ArrayStrategyFn(Protocol[T_DuckArray]): def __call__( self, *, shape: "_ShapeLike", dtype: "_DTypeLikeNested", ) -> st.SearchStrategy[T_DuckArray]: ... def supported_dtypes() -> st.SearchStrategy[np.dtype]: """ Generates only those numpy dtypes which xarray can handle. Use instead of hypothesis.extra.numpy.scalar_dtypes in order to exclude weirder dtypes such as unicode, byte_string, array, or nested dtypes. Also excludes datetimes, which dodges bugs with pandas non-nanosecond datetime overflows. Checks only native endianness. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ # TODO should this be exposed publicly? # We should at least decide what the set of numpy dtypes that xarray officially supports is. 
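    # Usage sketch (hypothetical call site): passing this strategy explicitly,
    # e.g. ``variables(dtype=supported_dtypes())``, restricts generated
    # Variables to these dtypes; it is also the default when ``dtype`` is None.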
return ( npst.integer_dtypes(endianness="=") | npst.unsigned_integer_dtypes(endianness="=") | npst.floating_dtypes(endianness="=") | npst.complex_number_dtypes(endianness="=") # | npst.datetime64_dtypes() # | npst.timedelta64_dtypes() # | npst.unicode_string_dtypes() ) def pandas_index_dtypes() -> st.SearchStrategy[np.dtype]: """ Dtypes supported by pandas indexes. Restrict datetime64 and timedelta64 to ns frequency till Xarray relaxes that. """ return ( npst.integer_dtypes(endianness="=", sizes=(32, 64)) | npst.unsigned_integer_dtypes(endianness="=", sizes=(32, 64)) | npst.floating_dtypes(endianness="=", sizes=(32, 64)) # TODO: unset max_period | npst.datetime64_dtypes(endianness="=", max_period="ns") # TODO: set max_period="D" | npst.timedelta64_dtypes(endianness="=", max_period="ns") | npst.unicode_string_dtypes(endianness="=") ) # TODO Generalize to all valid unicode characters once formatting bugs in xarray's reprs are fixed + docs can handle it. _readable_characters = st.characters( categories=["L", "N"], max_codepoint=0x017F ) # only use characters within the "Latin Extended-A" subset of unicode def names() -> st.SearchStrategy[str]: """ Generates arbitrary string names for dimensions / variables. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ return st.text( _readable_characters, min_size=1, max_size=5, ) def dimension_names( *, name_strategy=None, min_dims: int = 0, max_dims: int = 3, ) -> st.SearchStrategy[list[Hashable]]: """ Generates an arbitrary list of valid dimension names. Requires the hypothesis package to be installed. Parameters ---------- name_strategy Strategy for making names. Useful if we need to share this. min_dims Minimum number of dimensions in generated list. max_dims Maximum number of dimensions in generated list. """ if name_strategy is None: name_strategy = names() return st.lists( elements=name_strategy, min_size=min_dims, max_size=max_dims, unique=True, ) def dimension_sizes( *, dim_names: st.SearchStrategy[Hashable] = names(), # noqa: B008 min_dims: int = 0, max_dims: int = 3, min_side: int = 1, max_side: int | None = None, ) -> st.SearchStrategy[Mapping[Hashable, int]]: """ Generates an arbitrary mapping from dimension names to lengths. Requires the hypothesis package to be installed. Parameters ---------- dim_names: strategy generating strings, optional Strategy for generating dimension names. Defaults to the `names` strategy. min_dims: int, optional Minimum number of dimensions in generated list. Default is 1. max_dims: int, optional Maximum number of dimensions in generated list. Default is 3. min_side: int, optional Minimum size of a dimension. Default is 1. max_side: int, optional Minimum size of a dimension. Default is `min_length` + 5. See Also -------- :ref:`testing.hypothesis`_ """ if max_side is None: max_side = min_side + 3 return st.dictionaries( keys=dim_names, values=st.integers(min_value=min_side, max_value=max_side), min_size=min_dims, max_size=max_dims, ) _readable_strings = st.text( _readable_characters, max_size=5, ) _attr_keys = _readable_strings _small_arrays = npst.arrays( shape=npst.array_shapes( max_side=2, max_dims=2, ), dtype=npst.scalar_dtypes() | npst.byte_string_dtypes() | npst.unicode_string_dtypes(), ) _attr_values = st.none() | st.booleans() | _readable_strings | _small_arrays simple_attrs = st.dictionaries(_attr_keys, _attr_values) def attrs() -> st.SearchStrategy[Mapping[Hashable, Any]]: """ Generates arbitrary valid attributes dictionaries for xarray objects. 
The generated dictionaries can potentially be recursive. Requires the hypothesis package to be installed. See Also -------- :ref:`testing.hypothesis`_ """ return st.recursive( st.dictionaries(_attr_keys, _attr_values), lambda children: st.dictionaries(_attr_keys, children), max_leaves=3, ) ATTRS = attrs() @st.composite def variables( draw: st.DrawFn, *, array_strategy_fn: ArrayStrategyFn | None = None, dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None, dtype: st.SearchStrategy[np.dtype] | None = None, attrs: st.SearchStrategy[Mapping] = ATTRS, ) -> xr.Variable: """ Generates arbitrary xarray.Variable objects. Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array up front. Passing nothing will generate a completely arbitrary Variable (containing a numpy array). Requires the hypothesis package to be installed. Parameters ---------- array_strategy_fn: Callable which returns a strategy generating array-likes, optional Callable must only accept shape and dtype kwargs, and must generate results consistent with its input. If not passed the default is to generate a small numpy array with one of the supported_dtypes. dims: Strategy for generating the dimensions, optional Can either be a strategy for generating a sequence of string dimension names, or a strategy for generating a mapping of string dimension names to integer lengths along each dimension. If provided as a mapping the array shape will be passed to array_strategy_fn. Default is to generate arbitrary dimension names for each axis in data. dtype: Strategy which generates np.dtype objects, optional Will be passed in to array_strategy_fn. Default is to generate any scalar dtype using supported_dtypes. Be aware that this default set of dtypes includes some not strictly allowed by the array API standard. attrs: Strategy which generates dicts, optional Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones, and numpy arrays. Returns ------- variable_strategy Strategy for generating xarray.Variable objects. Raises ------ ValueError If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape & dtype input passed to it. 
Examples -------- Generate completely arbitrary Variable objects backed by a numpy array: >>> variables().example() # doctest: +SKIP array([43506, -16, -151], dtype=int32) >>> variables().example() # doctest: +SKIP array([[[-10000000., -10000000.], [-10000000., -10000000.]], [[-10000000., -10000000.], [ 0., -10000000.]], [[ 0., -10000000.], [-10000000., inf]], [[ -0., -10000000.], [-10000000., -0.]]], dtype=float32) Attributes: Ε›Ε™Δ΄: {'Δ‰': {'iΔ₯f': array([-30117, -1740], dtype=int16)}} Generate only Variable objects with certain dimension names: >>> variables(dims=st.just(["a", "b"])).example() # doctest: +SKIP array([[ 248, 4294967295, 4294967295], [2412855555, 3514117556, 4294967295], [ 111, 4294967295, 4294967295], [4294967295, 1084434988, 51688], [ 47714, 252, 11207]], dtype=uint32) Generate only Variable objects with certain dimension names and lengths: >>> variables(dims=st.just({"a": 2, "b": 1})).example() # doctest: +SKIP array([[-1.00000000e+007+3.40282347e+038j], [-2.75034266e-225+2.22507386e-311j]]) See Also -------- :ref:`testing.hypothesis`_ """ if dtype is None: dtype = supported_dtypes() if not isinstance(dims, st.SearchStrategy) and dims is not None: raise InvalidArgument( f"dims must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dims)}. " "To specify fixed contents, use hypothesis.strategies.just()." ) if not isinstance(dtype, st.SearchStrategy) and dtype is not None: raise InvalidArgument( f"dtype must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dtype)}. " "To specify fixed contents, use hypothesis.strategies.just()." ) if not isinstance(attrs, st.SearchStrategy) and attrs is not None: raise InvalidArgument( f"attrs must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(attrs)}. " "To specify fixed contents, use hypothesis.strategies.just()." ) _array_strategy_fn: ArrayStrategyFn if array_strategy_fn is None: # For some reason if I move the default value to the function signature definition mypy incorrectly says the ignore is no longer necessary, making it impossible to satisfy mypy _array_strategy_fn = npst.arrays # type: ignore[assignment] # npst.arrays has extra kwargs that we aren't using later elif not callable(array_strategy_fn): raise InvalidArgument( "array_strategy_fn must be a Callable that accepts the kwargs dtype and shape and returns a hypothesis " "strategy which generates corresponding array-like objects." 
) else: _array_strategy_fn = ( array_strategy_fn # satisfy mypy that this new variable cannot be None ) _dtype = draw(dtype) if dims is not None: # generate dims first then draw data to match _dims = draw(dims) if isinstance(_dims, Sequence): dim_names = list(_dims) valid_shapes = npst.array_shapes(min_dims=len(_dims), max_dims=len(_dims)) _shape = draw(valid_shapes) array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype) elif isinstance(_dims, Mapping | dict): # should be a mapping of form {dim_names: lengths} dim_names, _shape = list(_dims.keys()), tuple(_dims.values()) array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype) else: raise InvalidArgument( f"Invalid type returned by dims strategy - drew an object of type {type(dims)}" ) else: # nothing provided, so generate everything consistently # We still generate the shape first here just so that we always pass shape to array_strategy_fn _shape = draw(npst.array_shapes()) array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype) dim_names = draw(dimension_names(min_dims=len(_shape), max_dims=len(_shape))) _data = draw(array_strategy) if _data.shape != _shape: raise ValueError( "array_strategy_fn returned an array object with a different shape than it was passed." f"Passed {_shape}, but returned {_data.shape}." "Please either specify a consistent shape via the dims kwarg or ensure the array_strategy_fn callable " "obeys the shape argument passed to it." ) if _data.dtype != _dtype: raise ValueError( "array_strategy_fn returned an array object with a different dtype than it was passed." f"Passed {_dtype}, but returned {_data.dtype}" "Please either specify a consistent dtype via the dtype kwarg or ensure the array_strategy_fn callable " "obeys the dtype argument passed to it." ) return xr.Variable(dims=dim_names, data=_data, attrs=draw(attrs)) @overload def unique_subset_of( objs: Sequence[Hashable], *, min_size: int = 0, max_size: int | None = None, ) -> st.SearchStrategy[Sequence[Hashable]]: ... @overload def unique_subset_of( objs: Mapping[Hashable, Any], *, min_size: int = 0, max_size: int | None = None, ) -> st.SearchStrategy[Mapping[Hashable, Any]]: ... @st.composite def unique_subset_of( draw: st.DrawFn, objs: Sequence[Hashable] | Mapping[Hashable, Any], *, min_size: int = 0, max_size: int | None = None, ) -> Sequence[Hashable] | Mapping[Hashable, Any]: """ Return a strategy which generates a unique subset of the given objects. Each entry in the output subset will be unique (if input was a sequence) or have a unique key (if it was a mapping). Requires the hypothesis package to be installed. Parameters ---------- objs: Union[Sequence[Hashable], Mapping[Hashable, Any]] Objects from which to sample to produce the subset. min_size: int, optional Minimum size of the returned subset. Default is 0. max_size: int, optional Maximum size of the returned subset. Default is the full length of the input. If set to 0 the result will be an empty mapping. Returns ------- unique_subset_strategy Strategy generating subset of the input. 
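    Raises
    ------
    TypeError
        If ``objs`` is neither an Iterable nor a Mapping.
    ValueError
        If ``objs`` has length zero, since there is nothing to sample from.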
Examples -------- >>> unique_subset_of({"x": 2, "y": 3}).example() # doctest: +SKIP {'y': 3} >>> unique_subset_of(["x", "y"]).example() # doctest: +SKIP ['x'] See Also -------- :ref:`testing.hypothesis`_ """ if not isinstance(objs, Iterable): raise TypeError( f"Object to sample from must be an Iterable or a Mapping, but received type {type(objs)}" ) if len(objs) == 0: raise ValueError("Can't sample from a length-zero object.") keys = list(objs.keys()) if isinstance(objs, Mapping) else objs subset_keys = draw( st.lists( st.sampled_from(keys), unique=True, min_size=min_size, max_size=max_size, ) ) return ( {k: objs[k] for k in subset_keys} if isinstance(objs, Mapping) else subset_keys ) class CFTimeStrategy(st.SearchStrategy): def __init__(self, min_value, max_value): super().__init__() self.min_value = min_value self.max_value = max_value def do_draw(self, data): unit_microsecond = datetime.timedelta(microseconds=1) timespan_microseconds = (self.max_value - self.min_value) // unit_microsecond result = data.draw_integer(0, timespan_microseconds) with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=".*date/calendar/year zero.*") return self.min_value + datetime.timedelta(microseconds=result) class CFTimeStrategyISO8601(st.SearchStrategy): def __init__(self): from xarray.tests.test_coding_times import _all_cftime_date_types super().__init__() self.date_types = _all_cftime_date_types() self.calendars = list(self.date_types) def do_draw(self, data): calendar = data.draw(st.sampled_from(self.calendars)) date_type = self.date_types[calendar] with warnings.catch_warnings(): warnings.filterwarnings("ignore", message=".*date/calendar/year zero.*") daysinmonth = date_type(99999, 12, 1).daysinmonth min_value = date_type(-99999, 1, 1) max_value = date_type(99999, 12, daysinmonth, 23, 59, 59, 999999) strategy = CFTimeStrategy(min_value, max_value) return strategy.do_draw(data) xarray-2025.09.0/xarray/tests/000077500000000000000000000000001505620616400160145ustar00rootroot00000000000000xarray-2025.09.0/xarray/tests/__init__.py000066400000000000000000000341041505620616400201270ustar00rootroot00000000000000from __future__ import annotations import importlib import platform import string import warnings from contextlib import contextmanager, nullcontext from unittest import mock # noqa: F401 import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal # noqa: F401 from packaging.version import Version from pandas.testing import assert_frame_equal # noqa: F401 import xarray.testing from xarray import Dataset from xarray.coding.times import _STANDARD_CALENDARS as _STANDARD_CALENDARS_UNSORTED from xarray.core.duck_array_ops import allclose_or_equiv # noqa: F401 from xarray.core.extension_array import PandasExtensionArray from xarray.core.options import set_options from xarray.core.variable import IndexVariable from xarray.testing import ( # noqa: F401 assert_chunks_equal, assert_duckarray_allclose, assert_duckarray_equal, ) from xarray.tests.arrays import ( # noqa: F401 ConcatenatableArray, DuckArrayWrapper, FirstElementAccessibleArray, InaccessibleArray, UnexpectedDataAccess, ) # import mpl and change the backend before other mpl imports try: import matplotlib as mpl # Order of imports is important here. 
# Using a different backend makes Travis CI work mpl.use("Agg") except ImportError: pass # https://github.com/pydata/xarray/issues/7322 warnings.filterwarnings("ignore", "'urllib3.contrib.pyopenssl' module is deprecated") warnings.filterwarnings("ignore", "Deprecated call to `pkg_resources.declare_namespace") warnings.filterwarnings("ignore", "pkg_resources is deprecated as an API") warnings.filterwarnings("ignore", message="numpy.ndarray size changed") arm_xfail = pytest.mark.xfail( platform.machine() == "aarch64" or "arm" in platform.machine(), reason="expected failure on ARM", ) def assert_writeable(ds): readonly = [ name for name, var in ds.variables.items() if not isinstance(var, IndexVariable) and not isinstance( var.data, PandasExtensionArray | pd.api.extensions.ExtensionArray ) and not var.data.flags.writeable ] assert not readonly, readonly def _importorskip( modname: str, minversion: str | None = None ) -> tuple[bool, pytest.MarkDecorator]: try: mod = importlib.import_module(modname) has = True if minversion is not None: v = getattr(mod, "__version__", "999") if Version(v) < Version(minversion): raise ImportError("Minimum version not satisfied") except ImportError: has = False reason = f"requires {modname}" if minversion is not None: reason += f">={minversion}" func = pytest.mark.skipif(not has, reason=reason) return has, func has_matplotlib, requires_matplotlib = _importorskip("matplotlib") has_scipy, requires_scipy = _importorskip("scipy") has_scipy_ge_1_13, requires_scipy_ge_1_13 = _importorskip("scipy", "1.13") with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="'cgi' is deprecated and slated for removal in Python 3.13", category=DeprecationWarning, ) has_pydap, requires_pydap = _importorskip("pydap.client") has_netCDF4, requires_netCDF4 = _importorskip("netCDF4") with warnings.catch_warnings(): # see https://github.com/pydata/xarray/issues/8537 warnings.filterwarnings( "ignore", message="h5py is running against HDF5 1.14.3", category=UserWarning, ) has_h5netcdf, requires_h5netcdf = _importorskip("h5netcdf") has_cftime, requires_cftime = _importorskip("cftime") has_dask, requires_dask = _importorskip("dask") has_dask_ge_2024_08_1, requires_dask_ge_2024_08_1 = _importorskip( "dask", minversion="2024.08.1" ) has_dask_ge_2024_11_0, requires_dask_ge_2024_11_0 = _importorskip("dask", "2024.11.0") has_dask_ge_2025_1_0, requires_dask_ge_2025_1_0 = _importorskip("dask", "2025.1.0") if has_dask_ge_2025_1_0: has_dask_expr = True requires_dask_expr = pytest.mark.skipif(not has_dask_expr, reason="should not skip") else: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="The current Dask DataFrame implementation is deprecated.", category=DeprecationWarning, ) has_dask_expr, requires_dask_expr = _importorskip("dask_expr") has_bottleneck, requires_bottleneck = _importorskip("bottleneck") has_rasterio, requires_rasterio = _importorskip("rasterio") has_zarr, requires_zarr = _importorskip("zarr") has_zarr_v3, requires_zarr_v3 = _importorskip("zarr", "3.0.0") has_zarr_v3_dtypes, requires_zarr_v3_dtypes = _importorskip("zarr", "3.1.0") has_zarr_v3_async_oindex, requires_zarr_v3_async_oindex = _importorskip("zarr", "3.1.2") if has_zarr_v3: import zarr # manual update by checking attrs for now # TODO: use version specifier # installing from git main is giving me a lower version than the # most recently released zarr has_zarr_v3_dtypes = hasattr(zarr.core, "dtype") has_zarr_v3_async_oindex = hasattr(zarr.AsyncArray, "oindex") requires_zarr_v3_dtypes 
= pytest.mark.skipif( not has_zarr_v3_dtypes, reason="requires zarr>3.1.0" ) requires_zarr_v3_async_oindex = pytest.mark.skipif( not has_zarr_v3_async_oindex, reason="requires zarr>3.1.1" ) has_fsspec, requires_fsspec = _importorskip("fsspec") has_iris, requires_iris = _importorskip("iris") has_numbagg, requires_numbagg = _importorskip("numbagg") has_pyarrow, requires_pyarrow = _importorskip("pyarrow") with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="is_categorical_dtype is deprecated and will be removed in a future version.", category=DeprecationWarning, ) # seaborn uses the deprecated `pandas.is_categorical_dtype` has_seaborn, requires_seaborn = _importorskip("seaborn") has_sparse, requires_sparse = _importorskip("sparse") has_cupy, requires_cupy = _importorskip("cupy") has_cartopy, requires_cartopy = _importorskip("cartopy") has_pint, requires_pint = _importorskip("pint") has_numexpr, requires_numexpr = _importorskip("numexpr") has_flox, requires_flox = _importorskip("flox") has_netcdf, requires_netcdf = _importorskip("netcdf") has_pandas_ge_2_2, requires_pandas_ge_2_2 = _importorskip("pandas", "2.2") has_pandas_3, requires_pandas_3 = _importorskip("pandas", "3.0.0.dev0") # some special cases has_scipy_or_netCDF4 = has_scipy or has_netCDF4 requires_scipy_or_netCDF4 = pytest.mark.skipif( not has_scipy_or_netCDF4, reason="requires scipy or netCDF4" ) has_numbagg_or_bottleneck = has_numbagg or has_bottleneck requires_numbagg_or_bottleneck = pytest.mark.skipif( not has_numbagg_or_bottleneck, reason="requires numbagg or bottleneck" ) has_numpy_2, requires_numpy_2 = _importorskip("numpy", "2.0.0") has_flox_0_9_12, requires_flox_0_9_12 = _importorskip("flox", "0.9.12") has_array_api_strict, requires_array_api_strict = _importorskip("array_api_strict") parametrize_zarr_format = pytest.mark.parametrize( "zarr_format", [ pytest.param(2, id="zarr_format=2"), pytest.param( 3, marks=pytest.mark.skipif( not has_zarr_v3, reason="zarr-python v2 cannot understand the zarr v3 format", ), id="zarr_format=3", ), ], ) def _importorskip_h5netcdf_ros3(has_h5netcdf: bool): if not has_h5netcdf: return has_h5netcdf, pytest.mark.skipif( not has_h5netcdf, reason="requires h5netcdf" ) import h5py h5py_with_ros3 = h5py.get_config().ros3 return h5py_with_ros3, pytest.mark.skipif( not h5py_with_ros3, reason="requires h5netcdf>=1.3.0 and h5py with ros3 support", ) has_h5netcdf_ros3, requires_h5netcdf_ros3 = _importorskip_h5netcdf_ros3(has_h5netcdf) has_netCDF4_1_6_2_or_above, requires_netCDF4_1_6_2_or_above = _importorskip( "netCDF4", "1.6.2" ) has_h5netcdf_1_4_0_or_above, requires_h5netcdf_1_4_0_or_above = _importorskip( "h5netcdf", "1.4.0.dev" ) has_netCDF4_1_7_0_or_above, requires_netCDF4_1_7_0_or_above = _importorskip( "netCDF4", "1.7.0" ) # change some global options for tests set_options(warn_for_unclosed_files=True) if has_dask: import dask class CountingScheduler: """Simple dask scheduler counting the number of computes. Reference: https://stackoverflow.com/questions/53289286/""" def __init__(self, max_computes=0): self.total_computes = 0 self.max_computes = max_computes def __call__(self, dsk, keys, **kwargs): self.total_computes += 1 if self.total_computes > self.max_computes: raise RuntimeError( f"Too many computes. Total: {self.total_computes} > max: {self.max_computes}." 
) return dask.get(dsk, keys, **kwargs) def raise_if_dask_computes(max_computes=0): # return a dummy context manager so that this can be used for non-dask objects if not has_dask: return nullcontext() scheduler = CountingScheduler(max_computes) return dask.config.set(scheduler=scheduler) flaky = pytest.mark.flaky network = pytest.mark.network class ReturnItem: def __getitem__(self, key): return key class IndexerMaker: def __init__(self, indexer_cls): self._indexer_cls = indexer_cls def __getitem__(self, key): if not isinstance(key, tuple): key = (key,) return self._indexer_cls(key) def source_ndarray(array): """Given an ndarray, return the base object which holds its memory, or the object itself. """ with warnings.catch_warnings(): warnings.filterwarnings("ignore", "DatetimeIndex.base") warnings.filterwarnings("ignore", "TimedeltaIndex.base") base = getattr(array, "base", np.asarray(array).base) if base is None: base = array return base def format_record(record) -> str: """Format warning record like `FutureWarning('Function will be deprecated...')`""" return f"{str(record.category)[8:-2]}('{record.message}'))" @contextmanager def assert_no_warnings(): with warnings.catch_warnings(record=True) as record: yield record assert len(record) == 0, ( f"Got {len(record)} unexpected warning(s): {[format_record(r) for r in record]}" ) # Internal versions of xarray's test functions that validate additional # invariants def assert_equal(a, b, check_default_indexes=True): __tracebackhide__ = True xarray.testing.assert_equal(a, b) xarray.testing._assert_internal_invariants(a, check_default_indexes) xarray.testing._assert_internal_invariants(b, check_default_indexes) def assert_identical(a, b, check_default_indexes=True): __tracebackhide__ = True xarray.testing.assert_identical(a, b) xarray.testing._assert_internal_invariants(a, check_default_indexes) xarray.testing._assert_internal_invariants(b, check_default_indexes) def assert_allclose(a, b, check_default_indexes=True, **kwargs): __tracebackhide__ = True xarray.testing.assert_allclose(a, b, **kwargs) xarray.testing._assert_internal_invariants(a, check_default_indexes) xarray.testing._assert_internal_invariants(b, check_default_indexes) _DEFAULT_TEST_DIM_SIZES = (8, 9, 10) def create_test_data( seed: int = 12345, add_attrs: bool = True, dim_sizes: tuple[int, int, int] = _DEFAULT_TEST_DIM_SIZES, use_extension_array: bool = False, ) -> Dataset: rs = np.random.default_rng(seed) _vars = { "var1": ["dim1", "dim2"], "var2": ["dim1", "dim2"], "var3": ["dim3", "dim1"], } _dims = {"dim1": dim_sizes[0], "dim2": dim_sizes[1], "dim3": dim_sizes[2]} obj = Dataset() obj["dim2"] = ("dim2", 0.5 * np.arange(_dims["dim2"])) if _dims["dim3"] > 26: raise RuntimeError( f"Not enough letters for filling this dimension size ({_dims['dim3']})" ) obj["dim3"] = ("dim3", list(string.ascii_lowercase[0 : _dims["dim3"]])) obj["time"] = ( "time", pd.date_range( "2000-01-01", periods=20, unit="ns", ), ) for v, dims in sorted(_vars.items()): data = rs.normal(size=tuple(_dims[d] for d in dims)) obj[v] = (dims, data) if add_attrs: obj[v].attrs = {"foo": "variable"} if use_extension_array: obj["var4"] = ( "dim1", pd.Categorical( rs.choice( list(string.ascii_lowercase[: rs.integers(1, 5)]), size=dim_sizes[0], ) ), ) if has_pyarrow: obj["var5"] = ( "dim1", pd.array( rs.integers(1, 10, size=dim_sizes[0]).tolist(), dtype="int64[pyarrow]", ), ) if dim_sizes == _DEFAULT_TEST_DIM_SIZES: numbers_values = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype="int64") else: numbers_values = rs.integers(0, 
3, _dims["dim3"], dtype="int64") obj.coords["numbers"] = ("dim3", numbers_values) obj.encoding = {"foo": "bar"} assert_writeable(obj) return obj _STANDARD_CALENDAR_NAMES = sorted(_STANDARD_CALENDARS_UNSORTED) _NON_STANDARD_CALENDAR_NAMES = { "noleap", "365_day", "360_day", "julian", "all_leap", "366_day", } _NON_STANDARD_CALENDARS = [ pytest.param(cal, marks=requires_cftime) for cal in sorted(_NON_STANDARD_CALENDAR_NAMES) ] _STANDARD_CALENDARS = [ pytest.param(cal, marks=requires_cftime if cal != "standard" else ()) for cal in _STANDARD_CALENDAR_NAMES ] _ALL_CALENDARS = sorted(_STANDARD_CALENDARS + _NON_STANDARD_CALENDARS) _CFTIME_CALENDARS = [ pytest.param(*p.values, marks=requires_cftime) for p in _ALL_CALENDARS ] def _all_cftime_date_types(): import cftime return { "noleap": cftime.DatetimeNoLeap, "365_day": cftime.DatetimeNoLeap, "360_day": cftime.Datetime360Day, "julian": cftime.DatetimeJulian, "all_leap": cftime.DatetimeAllLeap, "366_day": cftime.DatetimeAllLeap, "gregorian": cftime.DatetimeGregorian, "proleptic_gregorian": cftime.DatetimeProlepticGregorian, } xarray-2025.09.0/xarray/tests/arrays.py000066400000000000000000000157471505620616400177050ustar00rootroot00000000000000""" This module contains various lazy array classes which can be wrapped and manipulated by xarray objects but will raise on data access. """ from collections.abc import Callable, Iterable from typing import Any, Self import numpy as np from xarray.core import utils from xarray.core.indexing import ExplicitlyIndexed class UnexpectedDataAccess(Exception): pass class InaccessibleArray(utils.NDArrayMixin, ExplicitlyIndexed): """Disallows any loading.""" def __init__(self, array): self.array = array def get_duck_array(self): raise UnexpectedDataAccess("Tried accessing data") def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: raise UnexpectedDataAccess("Tried accessing data") def __getitem__(self, key): raise UnexpectedDataAccess("Tried accessing data.") class FirstElementAccessibleArray(InaccessibleArray): def __getitem__(self, key): tuple_idxr = key.tuple if len(tuple_idxr) > 1: raise UnexpectedDataAccess("Tried accessing more than one element.") return self.array[tuple_idxr] class DuckArrayWrapper(utils.NDArrayMixin): """Array-like that prevents casting to array. 
Modeled after cupy.""" def __init__(self, array: np.ndarray): self.array = array def __getitem__(self, key): return type(self)(self.array[key]) def to_numpy(self) -> np.ndarray: """Allow explicit conversions to numpy in `to_numpy`, but disallow np.asarray etc.""" return self.array def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: raise UnexpectedDataAccess("Tried accessing data") def __array_namespace__(self): """Present to satisfy is_duck_array test.""" from xarray.tests import namespace return namespace CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS: dict[str, Callable] = {} def implements(numpy_function): """Register an __array_function__ implementation for ConcatenatableArray objects.""" def decorator(func): CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS[numpy_function] = func return func return decorator @implements(np.concatenate) def concatenate( arrays: Iterable["ConcatenatableArray"], /, *, axis=0 ) -> "ConcatenatableArray": if any(not isinstance(arr, ConcatenatableArray) for arr in arrays): raise TypeError result = np.concatenate([arr._array for arr in arrays], axis=axis) return ConcatenatableArray(result) @implements(np.stack) def stack( arrays: Iterable["ConcatenatableArray"], /, *, axis=0 ) -> "ConcatenatableArray": if any(not isinstance(arr, ConcatenatableArray) for arr in arrays): raise TypeError result = np.stack([arr._array for arr in arrays], axis=axis) return ConcatenatableArray(result) @implements(np.result_type) def result_type(*arrays_and_dtypes) -> np.dtype: """Called by xarray to ensure all arguments to concat have the same dtype.""" first_dtype, *other_dtypes = (np.dtype(obj) for obj in arrays_and_dtypes) for other_dtype in other_dtypes: if other_dtype != first_dtype: raise ValueError("dtypes not all consistent") return first_dtype @implements(np.broadcast_to) def broadcast_to( x: "ConcatenatableArray", /, shape: tuple[int, ...] ) -> "ConcatenatableArray": """ Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries. """ if not isinstance(x, ConcatenatableArray): raise TypeError result = np.broadcast_to(x._array, shape=shape) return ConcatenatableArray(result) @implements(np.full_like) def full_like( x: "ConcatenatableArray", /, fill_value, **kwargs ) -> "ConcatenatableArray": """ Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries. """ if not isinstance(x, ConcatenatableArray): raise TypeError return ConcatenatableArray(np.full(x.shape, fill_value=fill_value, **kwargs)) @implements(np.all) def numpy_all(x: "ConcatenatableArray", **kwargs) -> "ConcatenatableArray": return type(x)(np.all(x._array, **kwargs)) class ConcatenatableArray: """Disallows loading or coercing to an index but does support concatenation / stacking.""" def __init__(self, array): # use ._array instead of .array because we don't want this to be accessible even to xarray's internals (e.g. 
create_default_index_implicit) self._array = array @property def dtype(self: Any) -> np.dtype: return self._array.dtype @property def shape(self: Any) -> tuple[int, ...]: return self._array.shape @property def ndim(self: Any) -> int: return self._array.ndim def __repr__(self: Any) -> str: return f"{type(self).__name__}(array={self._array!r})" def get_duck_array(self): raise UnexpectedDataAccess("Tried accessing data") def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: raise UnexpectedDataAccess("Tried accessing data") def __getitem__(self, key) -> Self: """Some cases of concat require supporting expanding dims by dimensions of size 1""" # see https://data-apis.org/array-api/2022.12/API_specification/indexing.html#multi-axis-indexing arr = self._array for axis, indexer_1d in enumerate(key): if indexer_1d is None: arr = np.expand_dims(arr, axis) elif indexer_1d is Ellipsis: pass else: raise UnexpectedDataAccess("Tried accessing data.") return type(self)(arr) def __eq__(self, other: Self) -> Self: # type: ignore[override] return type(self)(self._array == other._array) def __array_function__(self, func, types, args, kwargs) -> Any: if func not in CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS: return NotImplemented # Note: this allows subclasses that don't override # __array_function__ to handle ManifestArray objects if not all(issubclass(t, ConcatenatableArray) for t in types): return NotImplemented return CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS[func](*args, **kwargs) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs) -> Any: """We have to define this in order to convince xarray that this class is a duckarray, even though we will never support ufuncs.""" return NotImplemented def astype(self, dtype: np.dtype, /, *, copy: bool = True) -> Self: """Needed because xarray will call this even when it's a no-op""" if dtype != self.dtype: raise NotImplementedError() else: return self def __and__(self, other: Self) -> Self: return type(self)(self._array & other._array) def __or__(self, other: Self) -> Self: return type(self)(self._array | other._array) xarray-2025.09.0/xarray/tests/conftest.py000066400000000000000000000163531505620616400202230ustar00rootroot00000000000000from __future__ import annotations import warnings import numpy as np import pandas as pd import pytest import xarray as xr from xarray import DataArray, Dataset, DataTree from xarray.tests import create_test_data, has_cftime, requires_dask @pytest.fixture(autouse=True) def handle_numpy_1_warnings(): """Handle NumPy 1.x DeprecationWarnings for out-of-bound integer conversions. NumPy 1.x raises DeprecationWarning when converting out-of-bounds values (e.g., 255 to int8), while NumPy 2.x raises OverflowError. This fixture suppresses the warning in NumPy 1.x environments to allow tests to pass. 
""" # Only apply for NumPy < 2.0 if np.__version__.startswith("1."): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "NumPy will stop allowing conversion of out-of-bound Python integers", DeprecationWarning, ) yield else: yield @pytest.fixture(params=["numpy", pytest.param("dask", marks=requires_dask)]) def backend(request): return request.param @pytest.fixture(params=["numbagg", "bottleneck", None]) def compute_backend(request): if request.param is None: options = dict(use_bottleneck=False, use_numbagg=False) elif request.param == "bottleneck": options = dict(use_bottleneck=True, use_numbagg=False) elif request.param == "numbagg": options = dict(use_bottleneck=False, use_numbagg=True) else: raise ValueError with xr.set_options(**options): yield request.param @pytest.fixture(params=[1]) def ds(request, backend): if request.param == 1: ds = Dataset( dict( z1=(["y", "x"], np.random.randn(2, 8)), z2=(["time", "y"], np.random.randn(10, 2)), ), dict( x=("x", np.linspace(0, 1.0, 8)), time=("time", np.linspace(0, 1.0, 10)), c=("y", ["a", "b"]), y=range(2), ), ) elif request.param == 2: ds = Dataset( dict( z1=(["time", "y"], np.random.randn(10, 2)), z2=(["time"], np.random.randn(10)), z3=(["x", "time"], np.random.randn(8, 10)), ), dict( x=("x", np.linspace(0, 1.0, 8)), time=("time", np.linspace(0, 1.0, 10)), c=("y", ["a", "b"]), y=range(2), ), ) elif request.param == 3: ds = create_test_data() else: raise ValueError if backend == "dask": return ds.chunk() return ds @pytest.fixture(params=[1]) def da(request, backend): if request.param == 1: times = pd.date_range("2000-01-01", freq="1D", periods=21) da = DataArray( np.random.random((3, 21, 4)), dims=("a", "time", "x"), coords=dict(time=times), ) if request.param == 2: da = DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time") if request.param == "repeating_ints": da = DataArray( np.tile(np.arange(12), 5).reshape(5, 4, 3), coords={"x": list("abc"), "y": list("defg")}, dims=list("zyx"), ) if backend == "dask": return da.chunk() elif backend == "numpy": return da else: raise ValueError @pytest.fixture( params=[ False, pytest.param( True, marks=pytest.mark.skipif(not has_cftime, reason="no cftime") ), ] ) def use_cftime(request): return request.param @pytest.fixture(params=[Dataset, DataArray]) def type(request): return request.param @pytest.fixture(params=[1]) def d(request, backend, type) -> DataArray | Dataset: """ For tests which can test either a DataArray or a Dataset. 
""" result: DataArray | Dataset if request.param == 1: ds = Dataset( dict( a=(["x", "z"], np.arange(24).reshape(2, 12)), b=(["y", "z"], np.arange(100, 136).reshape(3, 12).astype(np.float64)), ), dict( x=("x", np.linspace(0, 1.0, 2)), y=range(3), z=("z", pd.date_range("2000-01-01", periods=12)), w=("x", ["a", "b"]), ), ) if type == DataArray: result = ds["a"].assign_coords(w=ds.coords["w"]) elif type == Dataset: result = ds else: raise ValueError else: raise ValueError if backend == "dask": return result.chunk() elif backend == "numpy": return result else: raise ValueError @pytest.fixture def byte_attrs_dataset(): """For testing issue #9407""" null_byte = b"\x00" other_bytes = bytes(range(1, 256)) ds = Dataset({"x": 1}, coords={"x_coord": [1]}) ds["x"].attrs["null_byte"] = null_byte ds["x"].attrs["other_bytes"] = other_bytes expected = ds.copy() expected["x"].attrs["null_byte"] = "" expected["x"].attrs["other_bytes"] = other_bytes.decode(errors="replace") return { "input": ds, "expected": expected, "h5netcdf_error": r"Invalid value provided for attribute .*: .*\. Null characters .*", } @pytest.fixture(scope="module") def create_test_datatree(): """ Create a test datatree with this structure: Group: / β”‚ Dimensions: (y: 3, x: 2) β”‚ Dimensions without coordinates: y, x β”‚ Data variables: β”‚ a (y) int64 24B 6 7 8 β”‚ set0 (x) int64 16B 9 10 β”œβ”€β”€ Group: /set1 β”‚ β”‚ Dimensions: () β”‚ β”‚ Data variables: β”‚ β”‚ a int64 8B 0 β”‚ β”‚ b int64 8B 1 β”‚ β”œβ”€β”€ Group: /set1/set1 β”‚ └── Group: /set1/set2 β”œβ”€β”€ Group: /set2 β”‚ β”‚ Dimensions: (x: 2) β”‚ β”‚ Dimensions without coordinates: x β”‚ β”‚ Data variables: β”‚ β”‚ a (x) int64 16B 2 3 β”‚ β”‚ b (x) float64 16B 0.1 0.2 β”‚ └── Group: /set2/set1 └── Group: /set3 The structure has deliberately repeated names of tags, variables, and dimensions in order to better check for bugs caused by name conflicts. """ def _create_test_datatree(modify=lambda ds: ds): set1_data = modify(xr.Dataset({"a": 0, "b": 1})) set2_data = modify(xr.Dataset({"a": ("x", [2, 3]), "b": ("x", [0.1, 0.2])})) root_data = modify(xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])})) root = DataTree.from_dict( { "/": root_data, "/set1": set1_data, "/set1/set1": None, "/set1/set2": None, "/set2": set2_data, "/set2/set1": None, "/set3": None, } ) return root return _create_test_datatree @pytest.fixture(scope="module") def simple_datatree(create_test_datatree): """ Invoke create_test_datatree fixture (callback). Returns a DataTree. """ return create_test_datatree() @pytest.fixture(params=["s", "ms", "us", "ns"]) def time_unit(request): return request.param xarray-2025.09.0/xarray/tests/data/000077500000000000000000000000001505620616400167255ustar00rootroot00000000000000xarray-2025.09.0/xarray/tests/data/bears.nc000066400000000000000000000022401505620616400203410ustar00rootroot00000000000000CDF ij bears_lenl historyξThis is an example of a multi-line global\012attribute. It could be used for representing the\012processing history of the data, for example. 
2017-12-12 15:55:12 GMT Hyrax-1.14.0 http://test.opendap.org/opendap/hyrax/data/nc/bears.nc.nc?DODS_EXTRA.Unlimited_Dimension [remainder of bears.nc binary NetCDF payload omitted]
xarray-2025.09.0/xarray/tests/data/example.grib [binary GRIB payload omitted]
xarray-2025.09.0/xarray/tests/data/example.ict000066400000000000000000000014171505620616400210640ustar00rootroot0000000000000029, 1001 Henderson, Barron U.S. EPA Example file with artificial data JUST_A_TEST 1, 1 2018, 04, 27 2018, 04, 27 0 Start_UTC 5 1, 1, 1, 1, 1 -9999, -9999, -9999, -9999, -9999 lat, degrees_north lon, degrees_east elev, meters TEST_ppbv, ppbv TESTM_ppbv, ppbv 0 9 INDEPENDENT_VARIABLE_DEFINITION: Start_UTC INDEPENDENT_VARIABLE_UNITS: Start_UTC ULOD_FLAG: -7777 ULOD_VALUE: N/A LLOD_FLAG: -8888 LLOD_VALUE: N/A, N/A, N/A, N/A, 0.025 OTHER_COMMENTS: www-air.larc.nasa.gov/missions/etc/IcarttDataFormat.htm REVISION: R0 R0: No comments for this revision. Start_UTC, lat, lon, elev, TEST_ppbv, TESTM_ppbv 43200, 41.00000, -71.00000, 5, 1.2345, 2.220 46800, 42.00000, -72.00000, 15, 2.3456, -9999 50400, 42.00000, -73.00000, 20, 3.4567, -7777 50400, 42.00000, -74.00000, 25, 4.5678, -8888 
xarray-2025.09.0/xarray/tests/data/example.uamiv [binary CAMx uamiv payload omitted; readable record header: "A V E R A G E C A M x 5 . 4 0 T e s t P r o b l e m - - M e c h 6 C F C B 0 5 v 5 . 4 0 . m i d w e s t . 3 6 . 1 2 ."]
xarray-2025.09.0/xarray/tests/data/example_1.nc [binary NetCDF payload omitted; readable header metadata: source "Fictional Model Output"; variables temp (long_name "temperature", units "celsius") and rh (long_name "relative humidity"); coordinates lat (degrees_north), lon (degrees_east), level (millibars), time (hours since 1996-1-1)]
xarray-2025.09.0/xarray/tests/data/example_1.nc.gz [gzip-compressed binary NetCDF payload omitted]
xarray-2025.09.0/xarray/tests/indexes.py000066400000000000000000000045511505620616400200320ustar00rootroot00000000000000from collections.abc import Hashable, Iterable, Mapping, Sequence from typing import Any import numpy as np from xarray import Variable from xarray.core.indexes import Index, PandasIndex from xarray.core.types import Self class ScalarIndex(Index): def __init__(self, value: int): self.value = value @classmethod def from_variables(cls, variables, *, options) -> Self: var = next(iter(variables.values())) return cls(int(var.values)) def equals(self, other, *, exclude=None) -> bool: return isinstance(other, ScalarIndex) and other.value == self.value class XYIndex(Index): def __init__(self, x: PandasIndex, y: PandasIndex): self.x: PandasIndex = x self.y: PandasIndex = y @classmethod def from_variables(cls, variables, *, options): return cls( x=PandasIndex.from_variables({"x": variables["x"]}, options=options), y=PandasIndex.from_variables({"y": variables["y"]}, options=options), ) def create_variables( self, variables: Mapping[Any, Variable] | None = None ) -> dict[Any, Variable]: return self.x.create_variables() | self.y.create_variables() def equals(self, other, exclude=None): if exclude is None: exclude = frozenset() x_eq = True if self.x.dim in exclude else self.x.equals(other.x) y_eq = True if self.y.dim in exclude else self.y.equals(other.y) return x_eq and y_eq @classmethod def concat( cls, indexes: Sequence[Self], dim: Hashable, positions: Iterable[Iterable[int]] | None = None, ) -> Self: first = next(iter(indexes)) if dim == "x": newx = PandasIndex.concat( tuple(i.x for i in indexes), dim=dim, positions=positions ) newy = first.y elif dim == "y": newx = first.x newy = PandasIndex.concat( tuple(i.y for i in indexes), dim=dim, positions=positions ) return cls(x=newx, y=newy) def isel(self, indexers: Mapping[Any, int | slice | np.ndarray | Variable]) -> Self: newx = self.x.isel({"x": indexers.get("x", slice(None))}) newy =
self.y.isel({"y": indexers.get("y", slice(None))}) assert newx is not None assert newy is not None return type(self)(newx, newy) xarray-2025.09.0/xarray/tests/namespace.py000066400000000000000000000002411505620616400203170ustar00rootroot00000000000000from xarray.core import duck_array_ops def reshape(array, shape, **kwargs): return type(array)(duck_array_ops.reshape(array.array, shape=shape, **kwargs)) xarray-2025.09.0/xarray/tests/test_accessor_dt.py000066400000000000000000000546561505620616400217360ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest import xarray as xr from xarray.tests import ( _CFTIME_CALENDARS, _all_cftime_date_types, assert_allclose, assert_array_equal, assert_chunks_equal, assert_equal, assert_identical, raise_if_dask_computes, requires_cftime, requires_dask, ) class TestDatetimeAccessor: @pytest.fixture(autouse=True) def setup(self): nt = 100 data = np.random.rand(10, 10, nt) lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) self.times = pd.date_range(start="2000/01/01", freq="h", periods=nt) self.data = xr.DataArray( data, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) self.times_arr = np.random.choice(self.times, size=(10, 10, nt)) self.times_data = xr.DataArray( self.times_arr, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) @pytest.mark.parametrize( "field", [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "week", "weekofyear", "dayofweek", "weekday", "dayofyear", "quarter", "date", "time", "daysinmonth", "days_in_month", "is_month_start", "is_month_end", "is_quarter_start", "is_quarter_end", "is_year_start", "is_year_end", "is_leap_year", ], ) def test_field_access(self, field) -> None: if field in ["week", "weekofyear"]: data = self.times.isocalendar()["week"] else: data = getattr(self.times, field) if data.dtype.kind != "b" and field not in ("date", "time"): # pandas 2.0 returns int32 for integer fields now data = data.astype("int64") translations = { "weekday": "dayofweek", "daysinmonth": "days_in_month", "weekofyear": "week", } name = translations.get(field, field) expected = xr.DataArray(data, name=name, coords=[self.times], dims=["time"]) if field in ["week", "weekofyear"]: with pytest.warns( FutureWarning, match="dt.weekofyear and dt.week have been deprecated" ): actual = getattr(self.data.time.dt, field) else: actual = getattr(self.data.time.dt, field) assert not isinstance(actual.variable, xr.IndexVariable) assert expected.dtype == actual.dtype assert_identical(expected, actual) def test_total_seconds(self) -> None: # Subtract a value in the middle of the range to ensure that some values # are negative delta = self.data.time - np.datetime64("2000-01-03") actual = delta.dt.total_seconds() expected = xr.DataArray( np.arange(-48, 52, dtype=np.float64) * 3600, name="total_seconds", coords=[self.data.time], ) # This works with assert_identical when pandas is >=1.5.0. 
assert_allclose(expected, actual) @pytest.mark.parametrize( "field, pandas_field", [ ("year", "year"), ("week", "week"), ("weekday", "day"), ], ) def test_isocalendar(self, field, pandas_field) -> None: # pandas isocalendar has dtypy UInt32Dtype, convert to Int64 expected = pd.Index(getattr(self.times.isocalendar(), pandas_field).astype(int)) expected = xr.DataArray( expected, name=field, coords=[self.times], dims=["time"] ) actual = self.data.time.dt.isocalendar()[field] assert_equal(expected, actual) def test_calendar(self) -> None: cal = self.data.time.dt.calendar assert cal == "proleptic_gregorian" def test_strftime(self) -> None: assert ( "2000-01-01 01:00:00" == self.data.time.dt.strftime("%Y-%m-%d %H:%M:%S")[1] ) @requires_cftime @pytest.mark.parametrize( "calendar,expected", [("standard", 366), ("noleap", 365), ("360_day", 360), ("all_leap", 366)], ) def test_days_in_year(self, calendar, expected) -> None: assert ( self.data.convert_calendar(calendar, align_on="year").time.dt.days_in_year == expected ).all() def test_not_datetime_type(self) -> None: nontime_data = self.data.copy() int_data = np.arange(len(self.data.time)).astype("int8") nontime_data = nontime_data.assign_coords(time=int_data) with pytest.raises(AttributeError, match=r"dt"): _ = nontime_data.time.dt @pytest.mark.filterwarnings("ignore:dt.weekofyear and dt.week have been deprecated") @requires_dask @pytest.mark.parametrize( "field", [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "week", "weekofyear", "dayofweek", "weekday", "dayofyear", "quarter", "date", "time", "is_month_start", "is_month_end", "is_quarter_start", "is_quarter_end", "is_year_start", "is_year_end", "is_leap_year", "days_in_year", ], ) def test_dask_field_access(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt, field) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, field) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) @requires_dask @pytest.mark.parametrize( "field", [ "year", "week", "weekday", ], ) def test_isocalendar_dask(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt.isocalendar(), field) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = dask_times_2d.dt.isocalendar()[field] assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) @requires_dask @pytest.mark.parametrize( "method, parameters", [ ("floor", "D"), ("ceil", "D"), ("round", "D"), ("strftime", "%Y-%m-%d %H:%M:%S"), ], ) def test_dask_accessor_method(self, method, parameters) -> None: import dask.array as da expected = getattr(self.times_data.dt, method)(parameters) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, method)(parameters) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) def test_seasons(self) 
-> None: dates = xr.date_range( start="2000/01/01", freq="ME", periods=12, use_cftime=False ) dates = dates.append(pd.Index([np.datetime64("NaT")])) dates = xr.DataArray(dates) seasons = xr.DataArray( [ "DJF", "DJF", "MAM", "MAM", "MAM", "JJA", "JJA", "JJA", "SON", "SON", "SON", "DJF", "nan", ] ) assert_array_equal(seasons.values, dates.dt.season.values) @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) def test_accessor_method(self, method, parameters) -> None: dates = pd.date_range("2014-01-01", "2014-05-01", freq="h") xdates = xr.DataArray(dates, dims=["time"]) expected = getattr(dates, method)(parameters) actual = getattr(xdates.dt, method)(parameters) assert_array_equal(expected, actual) class TestTimedeltaAccessor: @pytest.fixture(autouse=True) def setup(self): nt = 100 data = np.random.rand(10, 10, nt) lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) self.times = pd.timedelta_range(start="1 day", freq="6h", periods=nt) self.data = xr.DataArray( data, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) self.times_arr = np.random.choice(self.times, size=(10, 10, nt)) self.times_data = xr.DataArray( self.times_arr, coords=[lons, lats, self.times], dims=["lon", "lat", "time"], name="data", ) def test_not_datetime_type(self) -> None: nontime_data = self.data.copy() int_data = np.arange(len(self.data.time)).astype("int8") nontime_data = nontime_data.assign_coords(time=int_data) with pytest.raises(AttributeError, match=r"dt"): _ = nontime_data.time.dt @pytest.mark.parametrize( "field", ["days", "seconds", "microseconds", "nanoseconds"] ) def test_field_access(self, field) -> None: expected = xr.DataArray( getattr(self.times, field), name=field, coords=[self.times], dims=["time"] ) actual = getattr(self.data.time.dt, field) assert_equal(expected, actual) @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) def test_accessor_methods(self, method, parameters) -> None: dates = pd.timedelta_range(start="1 day", end="30 days", freq="6h") xdates = xr.DataArray(dates, dims=["time"]) expected = getattr(dates, method)(parameters) actual = getattr(xdates.dt, method)(parameters) assert_array_equal(expected, actual) @requires_dask @pytest.mark.parametrize( "field", ["days", "seconds", "microseconds", "nanoseconds"] ) def test_dask_field_access(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt, field) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, field) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual, expected) @requires_dask @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) def test_dask_accessor_method(self, method, parameters) -> None: import dask.array as da expected = getattr(self.times_data.dt, method)(parameters) dask_times_arr = da.from_array(self.times_arr, chunks=(5, 5, 50)) dask_times_2d = xr.DataArray( dask_times_arr, coords=self.data.coords, dims=self.data.dims, name="data" ) with raise_if_dask_computes(): actual = getattr(dask_times_2d.dt, method)(parameters) assert isinstance(actual.data, da.Array) assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) _NT = 100 
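# A minimal illustrative sketch of the ``.dt`` accessor exercised by the classes
# above; it is not itself a test. ``_example_times`` and ``_example_da`` are
# placeholder names used only for this illustration.
_example_times = pd.date_range("2000-01-01", freq="h", periods=4)
_example_da = xr.DataArray(np.arange(4), coords=[_example_times], dims=["time"])
assert list(_example_da.time.dt.hour.values) == [0, 1, 2, 3]
assert _example_da.time.dt.strftime("%Y-%m-%d")[0] == "2000-01-01"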
@pytest.fixture(params=_CFTIME_CALENDARS) def calendar(request): return request.param @pytest.fixture def cftime_date_type(calendar): if calendar == "standard": calendar = "proleptic_gregorian" return _all_cftime_date_types()[calendar] @pytest.fixture def times(calendar): import cftime return cftime.num2date( np.arange(_NT), units="hours since 2000-01-01", calendar=calendar, only_use_cftime_datetimes=True, ) @pytest.fixture def data(times): data = np.random.rand(10, 10, _NT) lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) return xr.DataArray( data, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @pytest.fixture def times_3d(times): lons = np.linspace(0, 11, 10) lats = np.linspace(0, 20, 10) times_arr = np.random.choice(times, size=(10, 10, _NT)) return xr.DataArray( times_arr, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @requires_cftime @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) def test_field_access(data, field) -> None: result = getattr(data.time.dt, field) expected = xr.DataArray( getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field), name=field, coords=data.time.coords, dims=data.time.dims, ) assert_equal(result, expected) @requires_cftime def test_calendar_cftime(data) -> None: expected = data.time.values[0].calendar assert data.time.dt.calendar == expected def test_calendar_datetime64_2d() -> None: data = xr.DataArray(np.zeros((4, 5), dtype="datetime64[ns]"), dims=("x", "y")) assert data.dt.calendar == "proleptic_gregorian" @requires_dask def test_calendar_datetime64_3d_dask() -> None: import dask.array as da data = xr.DataArray( da.zeros((4, 5, 6), dtype="datetime64[ns]"), dims=("x", "y", "z") ) with raise_if_dask_computes(): assert data.dt.calendar == "proleptic_gregorian" @requires_dask @requires_cftime def test_calendar_dask_cftime() -> None: from cftime import num2date # 3D lazy dask data = xr.DataArray( num2date( np.random.randint(1, 1000000, size=(4, 5, 6)), "hours since 1970-01-01T00:00", calendar="noleap", ), dims=("x", "y", "z"), ).chunk() with raise_if_dask_computes(max_computes=2): assert data.dt.calendar == "noleap" @requires_cftime def test_isocalendar_cftime(data) -> None: with pytest.raises( AttributeError, match=r"'CFTimeIndex' object has no attribute 'isocalendar'" ): data.time.dt.isocalendar() @requires_cftime def test_date_cftime(data) -> None: with pytest.raises( AttributeError, match=r"'CFTimeIndex' object has no attribute `date`. 
Consider using the floor method instead, for instance: `.time.dt.floor\('D'\)`.", ): data.time.dt.date() @requires_cftime @pytest.mark.filterwarnings("ignore::RuntimeWarning") def test_cftime_strftime_access(data) -> None: """compare cftime formatting against datetime formatting""" date_format = "%Y%m%d%H" result = data.time.dt.strftime(date_format) datetime_array = xr.DataArray( xr.coding.cftimeindex.CFTimeIndex(data.time.values).to_datetimeindex( time_unit="ns" ), name="stftime", coords=data.time.coords, dims=data.time.dims, ) expected = datetime_array.dt.strftime(date_format) assert_equal(result, expected) @requires_cftime @requires_dask @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) def test_dask_field_access_1d(data, field) -> None: import dask.array as da expected = xr.DataArray( getattr(xr.coding.cftimeindex.CFTimeIndex(data.time.values), field), name=field, dims=["time"], ) times = xr.DataArray(data.time.values, dims=["time"]).chunk({"time": 50}) result = getattr(times.dt, field) assert isinstance(result.data, da.Array) assert result.chunks == times.chunks assert_equal(result.compute(), expected) @requires_cftime @requires_dask @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) def test_dask_field_access(times_3d, data, field) -> None: import dask.array as da expected = xr.DataArray( getattr( xr.coding.cftimeindex.CFTimeIndex(times_3d.values.ravel()), field ).reshape(times_3d.shape), name=field, coords=times_3d.coords, dims=times_3d.dims, ) times_3d = times_3d.chunk({"lon": 5, "lat": 5, "time": 50}) result = getattr(times_3d.dt, field) assert isinstance(result.data, da.Array) assert result.chunks == times_3d.chunks assert_equal(result.compute(), expected) @requires_cftime def test_seasons(cftime_date_type) -> None: dates = xr.DataArray( np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)]) ) seasons = xr.DataArray( [ "DJF", "DJF", "MAM", "MAM", "MAM", "JJA", "JJA", "JJA", "SON", "SON", "SON", "DJF", ] ) assert_array_equal(seasons.values, dates.dt.season.values) @pytest.fixture def cftime_rounding_dataarray(cftime_date_type): return xr.DataArray( [ [cftime_date_type(1, 1, 1, 1), cftime_date_type(1, 1, 1, 15)], [cftime_date_type(1, 1, 1, 23), cftime_date_type(1, 1, 2, 1)], ] ) @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) def test_cftime_floor_accessor( cftime_rounding_dataarray, cftime_date_type, use_dask ) -> None: import dask.array as da freq = "D" expected = xr.DataArray( [ [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 1, 0)], [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)], ], name="floor", ) if use_dask: chunks = {"dim_0": 1} # Currently a compute is done to inspect a single value of the array # if it is of object dtype to check if it is a cftime.datetime (if not # we raise an error when using the dt accessor). 
with raise_if_dask_computes(max_computes=1): result = cftime_rounding_dataarray.chunk(chunks).dt.floor(freq) expected = expected.chunk(chunks) assert isinstance(result.data, da.Array) assert result.chunks == expected.chunks else: result = cftime_rounding_dataarray.dt.floor(freq) assert_identical(result, expected) @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) def test_cftime_ceil_accessor( cftime_rounding_dataarray, cftime_date_type, use_dask ) -> None: import dask.array as da freq = "D" expected = xr.DataArray( [ [cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)], [cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 3, 0)], ], name="ceil", ) if use_dask: chunks = {"dim_0": 1} # Currently a compute is done to inspect a single value of the array # if it is of object dtype to check if it is a cftime.datetime (if not # we raise an error when using the dt accessor). with raise_if_dask_computes(max_computes=1): result = cftime_rounding_dataarray.chunk(chunks).dt.ceil(freq) expected = expected.chunk(chunks) assert isinstance(result.data, da.Array) assert result.chunks == expected.chunks else: result = cftime_rounding_dataarray.dt.ceil(freq) assert_identical(result, expected) @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) def test_cftime_round_accessor( cftime_rounding_dataarray, cftime_date_type, use_dask ) -> None: import dask.array as da freq = "D" expected = xr.DataArray( [ [cftime_date_type(1, 1, 1, 0), cftime_date_type(1, 1, 2, 0)], [cftime_date_type(1, 1, 2, 0), cftime_date_type(1, 1, 2, 0)], ], name="round", ) if use_dask: chunks = {"dim_0": 1} # Currently a compute is done to inspect a single value of the array # if it is of object dtype to check if it is a cftime.datetime (if not # we raise an error when using the dt accessor). 
with raise_if_dask_computes(max_computes=1): result = cftime_rounding_dataarray.chunk(chunks).dt.round(freq) expected = expected.chunk(chunks) assert isinstance(result.data, da.Array) assert result.chunks == expected.chunks else: result = cftime_rounding_dataarray.dt.round(freq) assert_identical(result, expected) @pytest.mark.parametrize( "use_cftime", [False, pytest.param(True, marks=requires_cftime)], ids=lambda x: f"use_cftime={x}", ) @pytest.mark.parametrize( "use_dask", [False, pytest.param(True, marks=requires_dask)], ids=lambda x: f"use_dask={x}", ) def test_decimal_year(use_cftime, use_dask) -> None: year = 2000 periods = 10 freq = "h" shape = (2, 5) dims = ["x", "y"] hours_in_year = 24 * 366 times = xr.date_range(f"{year}", periods=periods, freq=freq, use_cftime=use_cftime) da = xr.DataArray(times.values.reshape(shape), dims=dims) if use_dask: da = da.chunk({"y": 2}) # Computing the decimal year for a cftime datetime array requires a # number of small computes (6): # - 4x one compute per .dt accessor call (requires inspecting one # object-dtype array element to see if it is time-like) # - 2x one compute per calendar inference (requires inspecting one # array element to read off the calendar) max_computes = 6 * use_cftime with raise_if_dask_computes(max_computes=max_computes): result = da.dt.decimal_year else: result = da.dt.decimal_year expected = xr.DataArray( year + np.arange(periods).reshape(shape) / hours_in_year, dims=dims ) xr.testing.assert_equal(result, expected) xarray-2025.09.0/xarray/tests/test_accessor_str.py000066400000000000000000003557451505620616400221420ustar00rootroot00000000000000# Tests for the `str` accessor are derived from the original # pandas string accessor tests. # For reference, here is a copy of the pandas copyright notice: # (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2008-2011 AQR Capital Management, LLC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import annotations import re from collections.abc import Callable import numpy as np import pytest import xarray as xr from xarray.tests import assert_equal, assert_identical, requires_dask @pytest.fixture( params=[pytest.param(np.str_, id="str"), pytest.param(np.bytes_, id="bytes")] ) def dtype(request): return request.param @requires_dask def test_dask() -> None: import dask.array as da arr = da.from_array(["a", "b", "c"], chunks=-1) xarr = xr.DataArray(arr) result = xarr.str.len().compute() expected = xr.DataArray([1, 1, 1]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_count(dtype) -> None: values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype) pat_str = dtype(r"f[o]+") pat_re = re.compile(pat_str) result_str = values.str.count(pat_str) result_re = values.str.count(pat_re) expected = xr.DataArray([1, 2, 4]) assert result_str.dtype == expected.dtype assert result_re.dtype == expected.dtype assert_equal(result_str, expected) assert_equal(result_re, expected) def test_count_broadcast(dtype) -> None: values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype) pat_str = np.array([r"f[o]+", r"o", r"m"]).astype(dtype) pat_re = np.array([re.compile(x) for x in pat_str]) result_str = values.str.count(pat_str) result_re = values.str.count(pat_re) expected = xr.DataArray([1, 4, 3]) assert result_str.dtype == expected.dtype assert result_re.dtype == expected.dtype assert_equal(result_str, expected) assert_equal(result_re, expected) def test_contains(dtype) -> None: values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"]).astype(dtype) # case insensitive using regex pat = values.dtype.type("FOO|mmm") result = values.str.contains(pat, case=False) expected = xr.DataArray([True, False, True, True]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.contains(re.compile(pat, flags=re.IGNORECASE)) assert result.dtype == expected.dtype assert_equal(result, expected) # case sensitive using regex pat = values.dtype.type("Foo|mMm") result = values.str.contains(pat) expected = xr.DataArray([True, False, True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.contains(re.compile(pat)) assert result.dtype == expected.dtype assert_equal(result, expected) # case insensitive without regex result = values.str.contains("foo", regex=False, case=False) expected = xr.DataArray([True, False, True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) # case sensitive without regex result = values.str.contains("fO", regex=False, case=True) expected = xr.DataArray([False, False, True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) # regex regex=False pat_re = re.compile("(/w+)") with pytest.raises( ValueError, match="Must use regular expression matching for regular expression object.", ): values.str.contains(pat_re, regex=False) def test_contains_broadcast(dtype) -> None: values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dims="X").astype( dtype ) pat_str = xr.DataArray(["FOO|mmm", "Foo", "MMM"], dims="Y").astype(dtype) pat_re = xr.DataArray([re.compile(x) for x in pat_str.data], dims="Y") # case insensitive using regex result = values.str.contains(pat_str, case=False) expected = xr.DataArray( [ [True, True, False], [False, False, False], [True, True, True], [True, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) # case sensitive 
using regex result = values.str.contains(pat_str) expected = xr.DataArray( [ [False, True, False], [False, False, False], [False, False, False], [False, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.contains(pat_re) assert result.dtype == expected.dtype assert_equal(result, expected) # case insensitive without regex result = values.str.contains(pat_str, regex=False, case=False) expected = xr.DataArray( [ [False, True, False], [False, False, False], [False, True, True], [False, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) # case insensitive with regex result = values.str.contains(pat_str, regex=False, case=True) expected = xr.DataArray( [ [False, True, False], [False, False, False], [False, False, False], [False, False, True], ], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) def test_starts_ends_with(dtype) -> None: values = xr.DataArray(["om", "foo_nom", "nom", "bar_foo", "foo"]).astype(dtype) result = values.str.startswith("foo") expected = xr.DataArray([False, True, False, False, True]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.endswith("foo") expected = xr.DataArray([False, False, False, True, True]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_starts_ends_with_broadcast(dtype) -> None: values = xr.DataArray( ["om", "foo_nom", "nom", "bar_foo", "foo_bar"], dims="X" ).astype(dtype) pat = xr.DataArray(["foo", "bar"], dims="Y").astype(dtype) result = values.str.startswith(pat) expected = xr.DataArray( [[False, False], [True, False], [False, False], [False, True], [True, False]], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.endswith(pat) expected = xr.DataArray( [[False, False], [False, False], [False, False], [True, False], [False, True]], dims=["X", "Y"], ) assert result.dtype == expected.dtype assert_equal(result, expected) def test_case_bytes() -> None: value = xr.DataArray(["SOme wOrd"]).astype(np.bytes_) exp_capitalized = xr.DataArray(["Some word"]).astype(np.bytes_) exp_lowered = xr.DataArray(["some word"]).astype(np.bytes_) exp_swapped = xr.DataArray(["soME WoRD"]).astype(np.bytes_) exp_titled = xr.DataArray(["Some Word"]).astype(np.bytes_) exp_uppered = xr.DataArray(["SOME WORD"]).astype(np.bytes_) res_capitalized = value.str.capitalize() res_lowered = value.str.lower() res_swapped = value.str.swapcase() res_titled = value.str.title() res_uppered = value.str.upper() assert res_capitalized.dtype == exp_capitalized.dtype assert res_lowered.dtype == exp_lowered.dtype assert res_swapped.dtype == exp_swapped.dtype assert res_titled.dtype == exp_titled.dtype assert res_uppered.dtype == exp_uppered.dtype assert_equal(res_capitalized, exp_capitalized) assert_equal(res_lowered, exp_lowered) assert_equal(res_swapped, exp_swapped) assert_equal(res_titled, exp_titled) assert_equal(res_uppered, exp_uppered) def test_case_str() -> None: # This string includes some unicode characters # that are common case management corner cases value = xr.DataArray(["SOme wOrd Η„ ß αΎ› ΣΣ ffi⁡Å Γ‡ β… "]).astype(np.str_) exp_capitalized = xr.DataArray(["Some word Η† ß αΎ“ σς ffi⁡Γ₯ Γ§ β…°"]).astype(np.str_) exp_lowered = xr.DataArray(["some word Η† ß αΎ“ σς ffi⁡Γ₯ Γ§ β…°"]).astype(np.str_) exp_swapped = xr.DataArray(["soME WoRD Η† SS αΎ› σς FFI⁡Γ₯ Γ§ β…°"]).astype(np.str_) exp_titled = 
xr.DataArray(["Some Word Η… Ss αΎ› Σς Ffi⁡Å Γ‡ β… "]).astype(np.str_) exp_uppered = xr.DataArray(["SOME WORD Η„ SS αΌ«Ξ™ ΣΣ FFI⁡Å Γ‡ β… "]).astype(np.str_) exp_casefolded = xr.DataArray(["some word Η† ss αΌ£ΞΉ σσ ffi⁡Γ₯ Γ§ β…°"]).astype(np.str_) exp_norm_nfc = xr.DataArray(["SOme wOrd Η„ ß αΎ› ΣΣ ffi⁡Å Γ‡ β… "]).astype(np.str_) exp_norm_nfkc = xr.DataArray(["SOme wOrd DΕ½ ß αΎ› ΣΣ ffi5Γ… Γ‡ I"]).astype(np.str_) exp_norm_nfd = xr.DataArray(["SOme wOrd Η„ ß Ξ—Μ”Μ€Ν… ΣΣ ffi⁡Å CΜ§ β… "]).astype(np.str_) exp_norm_nfkd = xr.DataArray(["SOme wOrd DŽ ß Ξ—Μ”Μ€Ν… ΣΣ ffi5Å CΜ§ I"]).astype(np.str_) res_capitalized = value.str.capitalize() res_casefolded = value.str.casefold() res_lowered = value.str.lower() res_swapped = value.str.swapcase() res_titled = value.str.title() res_uppered = value.str.upper() res_norm_nfc = value.str.normalize("NFC") res_norm_nfd = value.str.normalize("NFD") res_norm_nfkc = value.str.normalize("NFKC") res_norm_nfkd = value.str.normalize("NFKD") assert res_capitalized.dtype == exp_capitalized.dtype assert res_casefolded.dtype == exp_casefolded.dtype assert res_lowered.dtype == exp_lowered.dtype assert res_swapped.dtype == exp_swapped.dtype assert res_titled.dtype == exp_titled.dtype assert res_uppered.dtype == exp_uppered.dtype assert res_norm_nfc.dtype == exp_norm_nfc.dtype assert res_norm_nfd.dtype == exp_norm_nfd.dtype assert res_norm_nfkc.dtype == exp_norm_nfkc.dtype assert res_norm_nfkd.dtype == exp_norm_nfkd.dtype assert_equal(res_capitalized, exp_capitalized) assert_equal(res_casefolded, exp_casefolded) assert_equal(res_lowered, exp_lowered) assert_equal(res_swapped, exp_swapped) assert_equal(res_titled, exp_titled) assert_equal(res_uppered, exp_uppered) assert_equal(res_norm_nfc, exp_norm_nfc) assert_equal(res_norm_nfd, exp_norm_nfd) assert_equal(res_norm_nfkc, exp_norm_nfkc) assert_equal(res_norm_nfkd, exp_norm_nfkd) def test_replace(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD"], dims=["x"]).astype(dtype) result = values.str.replace("BAD[_]*", "") expected = xr.DataArray(["foobar"], dims=["x"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace("BAD[_]*", "", n=1) expected = xr.DataArray(["foobarBAD"], dims=["x"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) pat = xr.DataArray(["BAD[_]*", "AD[_]*"], dims=["y"]).astype(dtype) result = values.str.replace(pat, "") expected = xr.DataArray([["foobar", "fooBbarB"]], dims=["x", "y"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) repl = xr.DataArray(["", "spam"], dims=["y"]).astype(dtype) result = values.str.replace(pat, repl, n=1) expected = xr.DataArray([["foobarBAD", "fooBspambarBAD"]], dims=["x", "y"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected) values = xr.DataArray( ["A", "B", "C", "Aaba", "Baca", "", "CABA", "dog", "cat"] ).astype(dtype) expected = xr.DataArray( ["YYY", "B", "C", "YYYaba", "Baca", "", "CYYYBYYY", "dog", "cat"] ).astype(dtype) result = values.str.replace("A", "YYY") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace("A", "YYY", regex=False) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace("A", "YYY", case=False) expected = xr.DataArray( ["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", "", "CYYYBYYY", "dog", "cYYYt"] ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = 
values.str.replace("^.a|dog", "XX-XX ", case=False) expected = xr.DataArray( ["A", "B", "C", "XX-XX ba", "XX-XX ca", "", "XX-XX BA", "XX-XX ", "XX-XX t"] ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_replace_callable() -> None: values = xr.DataArray(["fooBAD__barBAD"]) # test with callable repl = lambda m: m.group(0).swapcase() result = values.str.replace("[a-z][A-Z]{2}", repl, n=2) exp = xr.DataArray(["foObaD__baRbaD"]) assert result.dtype == exp.dtype assert_equal(result, exp) # test regex named groups values = xr.DataArray(["Foo Bar Baz"]) pat = r"(?P\w+) (?P\w+) (?P\w+)" repl = lambda m: m.group("middle").swapcase() result = values.str.replace(pat, repl) exp = xr.DataArray(["bAR"]) assert result.dtype == exp.dtype assert_equal(result, exp) # test broadcast values = xr.DataArray(["Foo Bar Baz"], dims=["x"]) pat = r"(?P\w+) (?P\w+) (?P\w+)" repl2 = xr.DataArray( [ lambda m: m.group("first").swapcase(), lambda m: m.group("middle").swapcase(), lambda m: m.group("last").swapcase(), ], dims=["Y"], ) result = values.str.replace(pat, repl2) exp = xr.DataArray([["fOO", "bAR", "bAZ"]], dims=["x", "Y"]) assert result.dtype == exp.dtype assert_equal(result, exp) def test_replace_unicode() -> None: # flags + unicode values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")]) expected = xr.DataArray([b"abcd, \xc3\xa0".decode("utf-8")]) pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE) result = values.str.replace(pat, ", ") assert result.dtype == expected.dtype assert_equal(result, expected) # broadcast version values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")], dims=["X"]) expected = xr.DataArray( [[b"abcd, \xc3\xa0".decode("utf-8"), b"BAcd,\xc3\xa0".decode("utf-8")]], dims=["X", "Y"], ) pat2 = xr.DataArray( [re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE), r"ab"], dims=["Y"] ) repl = xr.DataArray([", ", "BA"], dims=["Y"]) result = values.str.replace(pat2, repl) assert result.dtype == expected.dtype assert_equal(result, expected) def test_replace_compiled_regex(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD"], dims=["x"]).astype(dtype) # test with compiled regex pat = re.compile(dtype("BAD[_]*")) result = values.str.replace(pat, "") expected = xr.DataArray(["foobar"], dims=["x"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.replace(pat, "", n=1) expected = xr.DataArray(["foobarBAD"], dims=["x"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) # broadcast pat2 = xr.DataArray( [re.compile(dtype("BAD[_]*")), re.compile(dtype("AD[_]*"))], dims=["y"] ) result = values.str.replace(pat2, "") expected = xr.DataArray([["foobar", "fooBbarB"]], dims=["x", "y"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) repl = xr.DataArray(["", "spam"], dims=["y"]).astype(dtype) result = values.str.replace(pat2, repl, n=1) expected = xr.DataArray([["foobarBAD", "fooBspambarBAD"]], dims=["x", "y"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected) # case and flags provided to str.replace will have no effect # and will produce warnings values = xr.DataArray(["fooBAD__barBAD__bad"]).astype(dtype) pat3 = re.compile(dtype("BAD[_]*")) with pytest.raises( ValueError, match="Flags cannot be set when pat is a compiled regex." ): result = values.str.replace(pat3, "", flags=re.IGNORECASE) with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." 
): result = values.str.replace(pat3, "", case=False) with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): result = values.str.replace(pat3, "", case=True) # test with callable values = xr.DataArray(["fooBAD__barBAD"]).astype(dtype) repl2 = lambda m: m.group(0).swapcase() pat4 = re.compile(dtype("[a-z][A-Z]{2}")) result = values.str.replace(pat4, repl2, n=2) expected = xr.DataArray(["foObaD__baRbaD"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_replace_literal(dtype) -> None: # GH16808 literal replace (regex=False vs regex=True) values = xr.DataArray(["f.o", "foo"], dims=["X"]).astype(dtype) expected = xr.DataArray(["bao", "bao"], dims=["X"]).astype(dtype) result = values.str.replace("f.", "ba") assert result.dtype == expected.dtype assert_equal(result, expected) expected = xr.DataArray(["bao", "foo"], dims=["X"]).astype(dtype) result = values.str.replace("f.", "ba", regex=False) assert result.dtype == expected.dtype assert_equal(result, expected) # Broadcast pat = xr.DataArray(["f.", ".o"], dims=["yy"]).astype(dtype) expected = xr.DataArray([["bao", "fba"], ["bao", "bao"]], dims=["X", "yy"]).astype( dtype ) result = values.str.replace(pat, "ba") assert result.dtype == expected.dtype assert_equal(result, expected) expected = xr.DataArray([["bao", "fba"], ["foo", "foo"]], dims=["X", "yy"]).astype( dtype ) result = values.str.replace(pat, "ba", regex=False) assert result.dtype == expected.dtype assert_equal(result, expected) # Cannot do a literal replace if given a callable repl or compiled # pattern callable_repl = lambda m: m.group(0).swapcase() compiled_pat = re.compile("[a-z][A-Z]{2}") msg = "Cannot use a callable replacement when regex=False" with pytest.raises(ValueError, match=msg): values.str.replace("abc", callable_repl, regex=False) msg = "Cannot use a compiled regex as replacement pattern with regex=False" with pytest.raises(ValueError, match=msg): values.str.replace(compiled_pat, "", regex=False) def test_extract_extractall_findall_empty_raises(dtype) -> None: pat_str = dtype(r".*") pat_re = re.compile(pat_str) value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype) with pytest.raises(ValueError, match="No capture groups found in pattern."): value.str.extract(pat=pat_str, dim="ZZ") with pytest.raises(ValueError, match="No capture groups found in pattern."): value.str.extract(pat=pat_re, dim="ZZ") with pytest.raises(ValueError, match="No capture groups found in pattern."): value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") with pytest.raises(ValueError, match="No capture groups found in pattern."): value.str.extractall(pat=pat_re, group_dim="XX", match_dim="YY") with pytest.raises(ValueError, match="No capture groups found in pattern."): value.str.findall(pat=pat_str) with pytest.raises(ValueError, match="No capture groups found in pattern."): value.str.findall(pat=pat_re) def test_extract_multi_None_raises(dtype) -> None: pat_str = r"(\w+)_(\d+)" pat_re = re.compile(pat_str) value = xr.DataArray([["a_b"]], dims=["X", "Y"]).astype(dtype) with pytest.raises( ValueError, match="Dimension must be specified if more than one capture group is given.", ): value.str.extract(pat=pat_str, dim=None) with pytest.raises( ValueError, match="Dimension must be specified if more than one capture group is given.", ): value.str.extract(pat=pat_re, dim=None) def test_extract_extractall_findall_case_re_raises(dtype) -> None: pat_str = r".*" pat_re = re.compile(pat_str) value = 
xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype) with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): value.str.extract(pat=pat_re, case=True, dim="ZZ") with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): value.str.extract(pat=pat_re, case=False, dim="ZZ") with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): value.str.extractall(pat=pat_re, case=True, group_dim="XX", match_dim="YY") with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): value.str.extractall(pat=pat_re, case=False, group_dim="XX", match_dim="YY") with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): value.str.findall(pat=pat_re, case=True) with pytest.raises( ValueError, match="Case cannot be set when pat is a compiled regex." ): value.str.findall(pat=pat_re, case=False) def test_extract_extractall_name_collision_raises(dtype) -> None: pat_str = r"(\w+)" pat_re = re.compile(pat_str) value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype) with pytest.raises(KeyError, match="Dimension 'X' already present in DataArray."): value.str.extract(pat=pat_str, dim="X") with pytest.raises(KeyError, match="Dimension 'X' already present in DataArray."): value.str.extract(pat=pat_re, dim="X") with pytest.raises( KeyError, match="Group dimension 'X' already present in DataArray." ): value.str.extractall(pat=pat_str, group_dim="X", match_dim="ZZ") with pytest.raises( KeyError, match="Group dimension 'X' already present in DataArray." ): value.str.extractall(pat=pat_re, group_dim="X", match_dim="YY") with pytest.raises( KeyError, match="Match dimension 'Y' already present in DataArray." ): value.str.extractall(pat=pat_str, group_dim="XX", match_dim="Y") with pytest.raises( KeyError, match="Match dimension 'Y' already present in DataArray." ): value.str.extractall(pat=pat_re, group_dim="XX", match_dim="Y") with pytest.raises( KeyError, match="Group dimension 'ZZ' is the same as match dimension 'ZZ'." ): value.str.extractall(pat=pat_str, group_dim="ZZ", match_dim="ZZ") with pytest.raises( KeyError, match="Group dimension 'ZZ' is the same as match dimension 'ZZ'." 
): value.str.extractall(pat=pat_re, group_dim="ZZ", match_dim="ZZ") def test_extract_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) targ_none = xr.DataArray( [["a", "bab", "abc"], ["abcd", "", "abcdef"]], dims=["X", "Y"] ).astype(dtype) targ_dim = xr.DataArray( [[["a"], ["bab"], ["abc"]], [["abcd"], [""], ["abcdef"]]], dims=["X", "Y", "XX"] ).astype(dtype) res_str_none = value.str.extract(pat=pat_str, dim=None) res_str_dim = value.str.extract(pat=pat_str, dim="XX") res_str_none_case = value.str.extract(pat=pat_str, dim=None, case=True) res_str_dim_case = value.str.extract(pat=pat_str, dim="XX", case=True) res_re_none = value.str.extract(pat=pat_compiled, dim=None) res_re_dim = value.str.extract(pat=pat_compiled, dim="XX") assert res_str_none.dtype == targ_none.dtype assert res_str_dim.dtype == targ_dim.dtype assert res_str_none_case.dtype == targ_none.dtype assert res_str_dim_case.dtype == targ_dim.dtype assert res_re_none.dtype == targ_none.dtype assert res_re_dim.dtype == targ_dim.dtype assert_equal(res_str_none, targ_none) assert_equal(res_str_dim, targ_dim) assert_equal(res_str_none_case, targ_none) assert_equal(res_str_dim_case, targ_dim) assert_equal(res_re_none, targ_none) assert_equal(res_re_dim, targ_dim) def test_extract_single_nocase(dtype) -> None: pat_str = r"(\w+)?_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "_Xy_1", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) targ_none = xr.DataArray( [["a", "ab", "abc"], ["abcd", "", "abcdef"]], dims=["X", "Y"] ).astype(dtype) targ_dim = xr.DataArray( [[["a"], ["ab"], ["abc"]], [["abcd"], [""], ["abcdef"]]], dims=["X", "Y", "XX"] ).astype(dtype) res_str_none = value.str.extract(pat=pat_str, dim=None, case=False) res_str_dim = value.str.extract(pat=pat_str, dim="XX", case=False) res_re_none = value.str.extract(pat=pat_compiled, dim=None) res_re_dim = value.str.extract(pat=pat_compiled, dim="XX") assert res_re_dim.dtype == targ_none.dtype assert res_str_dim.dtype == targ_dim.dtype assert res_re_none.dtype == targ_none.dtype assert res_re_dim.dtype == targ_dim.dtype assert_equal(res_str_none, targ_none) assert_equal(res_str_dim, targ_dim) assert_equal(res_re_none, targ_none) assert_equal(res_re_dim, targ_dim) def test_extract_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [["a", "0"], ["bab", "110"], ["abc", "01"]], [["abcd", ""], ["", ""], ["abcdef", "101"]], ], dims=["X", "Y", "XX"], ).astype(dtype) res_str = value.str.extract(pat=pat_str, dim="XX") res_re = value.str.extract(pat=pat_compiled, dim="XX") res_str_case = value.str.extract(pat=pat_str, dim="XX", 
case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extract_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [["a", "0"], ["ab", "10"], ["abc", "01"]], [["abcd", ""], ["", ""], ["abcdef", "101"]], ], dims=["X", "Y", "XX"], ).astype(dtype) res_str = value.str.extract(pat=pat_str, dim="XX", case=False) res_re = value.str.extract(pat=pat_compiled, dim="XX") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extract_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], ).astype(dtype) pat_str = xr.DataArray( [r"(\w+)_Xy_(\d*)", r"(\w+)_xY_(\d*)"], dims=["Y"], ).astype(dtype) pat_compiled = value.str._re_compile(pat=pat_str) expected_list = [ [["a", "0"], ["", ""]], [["", ""], ["ab", "10"]], [["abc", "01"], ["", ""]], ] expected = xr.DataArray(expected_list, dims=["X", "Y", "Zz"]).astype(dtype) res_str = value.str.extract(pat=pat_str, dim="Zz") res_re = value.str.extract(pat=pat_compiled, dim="Zz") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_single_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [[[["a"]], [[""]], [["abc"]]], [[["abcd"]], [[""]], [["abcdef"]]]], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_single_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [[[["a"]], [["ab"]], [["abc"]]], [[["abcd"]], [[""]], [["abcdef"]]]], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def 
test_extractall_single_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [[["a"], [""], [""]], [["bab"], ["baab"], [""]], [["abc"], ["cbc"], [""]]], [ [["abcd"], ["dcd"], ["dccd"]], [[""], [""], [""]], [["abcdef"], ["fef"], [""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_single_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [ [["a"], [""], [""]], [["ab"], ["bab"], ["baab"]], [["abc"], ["cbc"], [""]], ], [ [["abcd"], ["dcd"], ["dccd"]], [[""], [""], [""]], [["abcdef"], ["fef"], [""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_multi_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [[["a", "0"]], [["", ""]], [["abc", "01"]]], [[["abcd", ""]], [["", ""]], [["abcdef", "101"]]], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_multi_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [[["a", "0"]], [["ab", "10"]], [["abc", "01"]]], [[["abcd", ""]], [["", 
""]], [["abcdef", "101"]]], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_multi_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [ [["a", "0"], ["", ""], ["", ""]], [["bab", "110"], ["baab", "1100"], ["", ""]], [["abc", "01"], ["cbc", "2210"], ["", ""]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [["", ""], ["", ""], ["", ""]], [["abcdef", "101"], ["fef", "5543210"], ["", ""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY") res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") res_str_case = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=True ) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_extractall_multi_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re: str | bytes = ( pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8") ) pat_compiled = re.compile(pat_re, flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected = xr.DataArray( [ [ [["a", "0"], ["", ""], ["", ""]], [["ab", "10"], ["bab", "110"], ["baab", "1100"]], [["abc", "01"], ["cbc", "2210"], ["", ""]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [["", ""], ["", ""], ["", ""]], [["abcdef", "101"], ["fef", "5543210"], ["", ""]], ], ], dims=["X", "Y", "XX", "YY"], ).astype(dtype) res_str = value.str.extractall( pat=pat_str, group_dim="XX", match_dim="YY", case=False ) res_re = value.str.extractall(pat=pat_compiled, group_dim="XX", match_dim="YY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_extractall_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], ).astype(dtype) pat_str = xr.DataArray( [r"(\w+)_Xy_(\d*)", r"(\w+)_xY_(\d*)"], dims=["Y"], ).astype(dtype) pat_re = value.str._re_compile(pat=pat_str) expected_list = [ [[["a", "0"]], [["", ""]]], [[["", ""]], [["ab", "10"]]], [[["abc", "01"]], [["", ""]]], ] expected = xr.DataArray(expected_list, dims=["X", "Y", "ZX", "ZY"]).astype(dtype) res_str = value.str.extractall(pat=pat_str, group_dim="ZX", match_dim="ZY") res_re = value.str.extractall(pat=pat_re, group_dim="ZX", match_dim="ZY") assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_single_single_case(dtype) -> None: pat_str = 
r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [[["a"], [], ["abc"]], [["abcd"], [], ["abcdef"]]] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_single_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [ [["a"], ["ab"], ["abc"]], [["abcd"], [], ["abcdef"]], ] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_single_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [ [["a"], ["bab", "baab"], ["abc", "cbc"]], [ ["abcd", "dcd", "dccd"], [], ["abcdef", "fef"], ], ] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_single_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list]] = [ [ ["a"], ["ab", "bab", "baab"], ["abc", "cbc"], ], [ ["abcd", "dcd", "dccd"], [], ["abcdef", "fef"], ], ] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_multi_single_case(dtype) -> 
None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [[["a", "0"]], [], [["abc", "01"]]], [[["abcd", ""]], [], [["abcdef", "101"]]], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_multi_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [[["a", "0"]], [["ab", "10"]], [["abc", "01"]]], [[["abcd", ""]], [], [["abcdef", "101"]]], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_multi_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str)) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [ [["a", "0"]], [["bab", "110"], ["baab", "1100"]], [["abc", "01"], ["cbc", "2210"]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [], [["abcdef", "101"], ["fef", "5543210"]], ], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) res_str_case = value.str.findall(pat=pat_str, case=True) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert res_str_case.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) assert_equal(res_str_case, expected) def test_findall_multi_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE) value = xr.DataArray( [ ["a_Xy_0", "ab_xY_10-bab_Xy_110-baab_Xy_1100", "abc_Xy_01-cbc_Xy_2210"], [ "abcd_Xy_-dcd_Xy_33210-dccd_Xy_332210", "", "abcdef_Xy_101-fef_Xy_5543210", ], ], dims=["X", "Y"], ).astype(dtype) expected_list: list[list[list[list]]] = [ [ [["a", "0"]], [["ab", "10"], ["bab", "110"], ["baab", "1100"]], [["abc", "01"], ["cbc", "2210"]], ], [ [["abcd", ""], ["dcd", "33210"], ["dccd", "332210"]], [], [["abcdef", "101"], ["fef", "5543210"]], ], ] expected_dtype = [ [[tuple(dtype(x) for x in y) for y in z] for z in w] for w in 
expected_list ] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str, case=False) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_findall_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], ).astype(dtype) pat_str = xr.DataArray( [r"(\w+)_Xy_\d*", r"\w+_Xy_(\d*)"], dims=["Y"], ).astype(dtype) pat_re = value.str._re_compile(pat=pat_str) expected_list: list[list[list]] = [[["a"], ["0"]], [[], []], [["abc"], ["01"]]] expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected_list] expected_np = np.array(expected_dtype, dtype=np.object_) expected = xr.DataArray(expected_np, dims=["X", "Y"]) res_str = value.str.findall(pat=pat_str) res_re = value.str.findall(pat=pat_re) assert res_str.dtype == expected.dtype assert res_re.dtype == expected.dtype assert_equal(res_str, expected) assert_equal(res_re, expected) def test_repeat(dtype) -> None: values = xr.DataArray(["a", "b", "c", "d"]).astype(dtype) result = values.str.repeat(3) result_mul = values.str * 3 expected = xr.DataArray(["aaa", "bbb", "ccc", "ddd"]).astype(dtype) assert result.dtype == expected.dtype assert result_mul.dtype == expected.dtype assert_equal(result_mul, expected) assert_equal(result, expected) def test_repeat_broadcast(dtype) -> None: values = xr.DataArray(["a", "b", "c", "d"], dims=["X"]).astype(dtype) reps = xr.DataArray([3, 4], dims=["Y"]) result = values.str.repeat(reps) result_mul = values.str * reps expected = xr.DataArray( [["aaa", "aaaa"], ["bbb", "bbbb"], ["ccc", "cccc"], ["ddd", "dddd"]], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert result_mul.dtype == expected.dtype assert_equal(result_mul, expected) assert_equal(result, expected) def test_match(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype) # New match behavior introduced in 0.13 pat = values.dtype.type(".*(BAD[_]+).*(BAD)") result = values.str.match(pat) expected = xr.DataArray([True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.match(re.compile(pat)) assert result.dtype == expected.dtype assert_equal(result, expected) # Case-sensitive pat = values.dtype.type(".*BAD[_]+.*BAD") result = values.str.match(pat) expected = xr.DataArray([True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.match(re.compile(pat)) assert result.dtype == expected.dtype assert_equal(result, expected) # Case-insensitive pat = values.dtype.type(".*bAd[_]+.*bad") result = values.str.match(pat, case=False) expected = xr.DataArray([True, False]) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.match(re.compile(pat, flags=re.IGNORECASE)) assert result.dtype == expected.dtype assert_equal(result, expected) def test_empty_str_methods() -> None: empty = xr.DataArray(np.empty(shape=(0,), dtype="U")) empty_str = empty empty_int = xr.DataArray(np.empty(shape=(0,), dtype=int)) empty_bool = xr.DataArray(np.empty(shape=(0,), dtype=bool)) empty_bytes = xr.DataArray(np.empty(shape=(0,), dtype="S")) # TODO: Determine why U and S dtype sizes don't match and figure # out a reliable way to predict what they should be assert empty_bool.dtype == empty.str.contains("a").dtype assert empty_bool.dtype == 
empty.str.endswith("a").dtype assert empty_bool.dtype == empty.str.match("^a").dtype assert empty_bool.dtype == empty.str.startswith("a").dtype assert empty_bool.dtype == empty.str.isalnum().dtype assert empty_bool.dtype == empty.str.isalpha().dtype assert empty_bool.dtype == empty.str.isdecimal().dtype assert empty_bool.dtype == empty.str.isdigit().dtype assert empty_bool.dtype == empty.str.islower().dtype assert empty_bool.dtype == empty.str.isnumeric().dtype assert empty_bool.dtype == empty.str.isspace().dtype assert empty_bool.dtype == empty.str.istitle().dtype assert empty_bool.dtype == empty.str.isupper().dtype assert empty_bytes.dtype.kind == empty.str.encode("ascii").dtype.kind assert empty_int.dtype.kind == empty.str.count("a").dtype.kind assert empty_int.dtype.kind == empty.str.find("a").dtype.kind assert empty_int.dtype.kind == empty.str.len().dtype.kind assert empty_int.dtype.kind == empty.str.rfind("a").dtype.kind assert empty_str.dtype.kind == empty.str.capitalize().dtype.kind assert empty_str.dtype.kind == empty.str.center(42).dtype.kind assert empty_str.dtype.kind == empty.str.get(0).dtype.kind assert empty_str.dtype.kind == empty.str.lower().dtype.kind assert empty_str.dtype.kind == empty.str.lstrip().dtype.kind assert empty_str.dtype.kind == empty.str.pad(42).dtype.kind assert empty_str.dtype.kind == empty.str.repeat(3).dtype.kind assert empty_str.dtype.kind == empty.str.rstrip().dtype.kind assert empty_str.dtype.kind == empty.str.slice(step=1).dtype.kind assert empty_str.dtype.kind == empty.str.slice(stop=1).dtype.kind assert empty_str.dtype.kind == empty.str.strip().dtype.kind assert empty_str.dtype.kind == empty.str.swapcase().dtype.kind assert empty_str.dtype.kind == empty.str.title().dtype.kind assert empty_str.dtype.kind == empty.str.upper().dtype.kind assert empty_str.dtype.kind == empty.str.wrap(42).dtype.kind assert empty_str.dtype.kind == empty_bytes.str.decode("ascii").dtype.kind assert_equal(empty_bool, empty.str.contains("a")) assert_equal(empty_bool, empty.str.endswith("a")) assert_equal(empty_bool, empty.str.match("^a")) assert_equal(empty_bool, empty.str.startswith("a")) assert_equal(empty_bool, empty.str.isalnum()) assert_equal(empty_bool, empty.str.isalpha()) assert_equal(empty_bool, empty.str.isdecimal()) assert_equal(empty_bool, empty.str.isdigit()) assert_equal(empty_bool, empty.str.islower()) assert_equal(empty_bool, empty.str.isnumeric()) assert_equal(empty_bool, empty.str.isspace()) assert_equal(empty_bool, empty.str.istitle()) assert_equal(empty_bool, empty.str.isupper()) assert_equal(empty_bytes, empty.str.encode("ascii")) assert_equal(empty_int, empty.str.count("a")) assert_equal(empty_int, empty.str.find("a")) assert_equal(empty_int, empty.str.len()) assert_equal(empty_int, empty.str.rfind("a")) assert_equal(empty_str, empty.str.capitalize()) assert_equal(empty_str, empty.str.center(42)) assert_equal(empty_str, empty.str.get(0)) assert_equal(empty_str, empty.str.lower()) assert_equal(empty_str, empty.str.lstrip()) assert_equal(empty_str, empty.str.pad(42)) assert_equal(empty_str, empty.str.repeat(3)) assert_equal(empty_str, empty.str.replace("a", "b")) assert_equal(empty_str, empty.str.rstrip()) assert_equal(empty_str, empty.str.slice(step=1)) assert_equal(empty_str, empty.str.slice(stop=1)) assert_equal(empty_str, empty.str.strip()) assert_equal(empty_str, empty.str.swapcase()) assert_equal(empty_str, empty.str.title()) assert_equal(empty_str, empty.str.upper()) assert_equal(empty_str, empty.str.wrap(42)) assert_equal(empty_str, 
empty_bytes.str.decode("ascii")) table = str.maketrans("a", "b") assert empty_str.dtype.kind == empty.str.translate(table).dtype.kind assert_equal(empty_str, empty.str.translate(table)) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.isalnum(), [True, True, True, True, True, False, True, True, False, False], id="isalnum", ), pytest.param( lambda x: x.str.isalpha(), [True, True, True, False, False, False, True, False, False, False], id="isalpha", ), pytest.param( lambda x: x.str.isdigit(), [False, False, False, True, False, False, False, True, False, False], id="isdigit", ), pytest.param( lambda x: x.str.islower(), [False, True, False, False, False, False, False, False, False, False], id="islower", ), pytest.param( lambda x: x.str.isspace(), [False, False, False, False, False, False, False, False, False, True], id="isspace", ), pytest.param( lambda x: x.str.istitle(), [True, False, True, False, True, False, False, False, False, False], id="istitle", ), pytest.param( lambda x: x.str.isupper(), [True, False, False, False, True, False, True, False, False, False], id="isupper", ), ], ) def test_ismethods( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: list[bool] ) -> None: values = xr.DataArray( ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "] ).astype(dtype) expected_da = xr.DataArray(expected) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) def test_isnumeric() -> None: # 0x00bc: ΒΌ VULGAR FRACTION ONE QUARTER # 0x2605: β˜… not number # 0x1378: ፸ ETHIOPIC NUMBER SEVENTY # 0xFF13: οΌ“ Em 3 values = xr.DataArray(["A", "3", "ΒΌ", "β˜…", "፸", "οΌ“", "four"]) exp_numeric = xr.DataArray([False, True, True, False, True, True, False]) exp_decimal = xr.DataArray([False, True, False, False, False, True, False]) res_numeric = values.str.isnumeric() res_decimal = values.str.isdecimal() assert res_numeric.dtype == exp_numeric.dtype assert res_decimal.dtype == exp_decimal.dtype assert_equal(res_numeric, exp_numeric) assert_equal(res_decimal, exp_decimal) def test_len(dtype) -> None: values = ["foo", "fooo", "fooooo", "fooooooo"] result = xr.DataArray(values).astype(dtype).str.len() expected = xr.DataArray([len(x) for x in values]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_find(dtype) -> None: values = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"]) values = values.astype(dtype) result_0 = values.str.find("EF") result_1 = values.str.find("EF", side="left") expected_0 = xr.DataArray([4, 3, 1, 0, -1]) expected_1 = xr.DataArray([v.find(dtype("EF")) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.rfind("EF") result_1 = values.str.find("EF", side="right") expected_0 = xr.DataArray([4, 5, 7, 4, -1]) expected_1 = xr.DataArray([v.rfind(dtype("EF")) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.find("EF", 3) result_1 = values.str.find("EF", 3, side="left") 
expected_0 = xr.DataArray([4, 3, 7, 4, -1]) expected_1 = xr.DataArray([v.find(dtype("EF"), 3) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.rfind("EF", 3) result_1 = values.str.find("EF", 3, side="right") expected_0 = xr.DataArray([4, 5, 7, 4, -1]) expected_1 = xr.DataArray([v.rfind(dtype("EF"), 3) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.find("EF", 3, 6) result_1 = values.str.find("EF", 3, 6, side="left") expected_0 = xr.DataArray([4, 3, -1, 4, -1]) expected_1 = xr.DataArray([v.find(dtype("EF"), 3, 6) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) result_0 = values.str.rfind("EF", 3, 6) result_1 = values.str.find("EF", 3, 6, side="right") expected_0 = xr.DataArray([4, 3, -1, 4, -1]) expected_1 = xr.DataArray([v.rfind(dtype("EF"), 3, 6) for v in values.values]) assert result_0.dtype == expected_0.dtype assert result_0.dtype == expected_1.dtype assert result_1.dtype == expected_0.dtype assert result_1.dtype == expected_1.dtype assert_equal(result_0, expected_0) assert_equal(result_0, expected_1) assert_equal(result_1, expected_0) assert_equal(result_1, expected_1) def test_find_broadcast(dtype) -> None: values = xr.DataArray( ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"], dims=["X"] ) values = values.astype(dtype) sub = xr.DataArray(["EF", "BC", "XX"], dims=["Y"]).astype(dtype) start = xr.DataArray([0, 7], dims=["Z"]) end = xr.DataArray([6, 9], dims=["Z"]) result_0 = values.str.find(sub, start, end) result_1 = values.str.find(sub, start, end, side="left") expected = xr.DataArray( [ [[4, -1], [1, -1], [-1, -1]], [[3, -1], [0, -1], [-1, -1]], [[1, 7], [-1, -1], [-1, -1]], [[0, -1], [-1, -1], [-1, -1]], [[-1, -1], [-1, -1], [0, -1]], ], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = values.str.rfind(sub, start, end) result_1 = values.str.find(sub, start, end, side="right") expected = xr.DataArray( [ [[4, -1], [1, -1], [-1, -1]], [[3, -1], [0, -1], [-1, -1]], [[1, 7], [-1, -1], [-1, -1]], [[4, -1], [-1, -1], [-1, -1]], [[-1, -1], [-1, -1], [1, -1]], ], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) def test_index(dtype) -> None: s = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"]).astype(dtype) result_0 = s.str.index("EF") result_1 = s.str.index("EF", side="left") expected = xr.DataArray([4, 3, 1, 0]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) 
assert_equal(result_1, expected) result_0 = s.str.rindex("EF") result_1 = s.str.index("EF", side="right") expected = xr.DataArray([4, 5, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.index("EF", 3) result_1 = s.str.index("EF", 3, side="left") expected = xr.DataArray([4, 3, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.rindex("EF", 3) result_1 = s.str.index("EF", 3, side="right") expected = xr.DataArray([4, 5, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.index("E", 4, 8) result_1 = s.str.index("E", 4, 8, side="left") expected = xr.DataArray([4, 5, 7, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = s.str.rindex("E", 0, 5) result_1 = s.str.index("E", 0, 5, side="right") expected = xr.DataArray([4, 3, 1, 4]) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) matchtype = "subsection" if dtype == np.bytes_ else "substring" with pytest.raises(ValueError, match=f"{matchtype} not found"): s.str.index("DE") def test_index_broadcast(dtype) -> None: values = xr.DataArray( ["ABCDEFGEFDBCA", "BCDEFEFEFDBC", "DEFBCGHIEFBC", "EFGHBCEFBCBCBCEF"], dims=["X"], ) values = values.astype(dtype) sub = xr.DataArray(["EF", "BC"], dims=["Y"]).astype(dtype) start = xr.DataArray([0, 6], dims=["Z"]) end = xr.DataArray([6, 12], dims=["Z"]) result_0 = values.str.index(sub, start, end) result_1 = values.str.index(sub, start, end, side="left") expected = xr.DataArray( [[[4, 7], [1, 10]], [[3, 7], [0, 10]], [[1, 8], [3, 10]], [[0, 6], [4, 8]]], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) result_0 = values.str.rindex(sub, start, end) result_1 = values.str.index(sub, start, end, side="right") expected = xr.DataArray( [[[4, 7], [1, 10]], [[3, 7], [0, 10]], [[1, 8], [3, 10]], [[0, 6], [4, 10]]], dims=["X", "Y", "Z"], ) assert result_0.dtype == expected.dtype assert result_1.dtype == expected.dtype assert_equal(result_0, expected) assert_equal(result_1, expected) def test_translate() -> None: values = xr.DataArray(["abcdefg", "abcc", "cdddfg", "cdefggg"]) table = str.maketrans("abc", "cde") result = values.str.translate(table) expected = xr.DataArray(["cdedefg", "cdee", "edddfg", "edefggg"]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_pad_center_ljust_rjust(dtype) -> None: values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype) result = values.str.center(5) expected = xr.DataArray([" a ", " b ", " c ", "eeeee"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="both") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.ljust(5) expected = xr.DataArray(["a ", "b ", "c ", "eeeee"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="right") assert result.dtype == expected.dtype assert_equal(result, expected) result = 
values.str.rjust(5) expected = xr.DataArray([" a", " b", " c", "eeeee"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="left") assert result.dtype == expected.dtype assert_equal(result, expected) def test_pad_center_ljust_rjust_fillchar(dtype) -> None: values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"]).astype(dtype) result = values.str.center(5, fillchar="X") expected = xr.DataArray(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(5, side="both", fillchar="X") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.ljust(5, fillchar="X") expected = xr.DataArray(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(5, side="right", fillchar="X") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rjust(5, fillchar="X") expected = xr.DataArray(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"]).astype( dtype ) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(5, side="left", fillchar="X") assert result.dtype == expected.dtype assert_equal(result, expected) # If fillchar is not a character, normal str raises TypeError # 'aaa'.ljust(5, 'XY') # TypeError: must be char, not str template = "fillchar must be a character, not {dtype}" with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.center(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.ljust(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.rjust(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): values.str.pad(5, fillchar="XY") def test_pad_center_ljust_rjust_broadcast(dtype) -> None: values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"], dims="X").astype( dtype ) width = xr.DataArray([5, 4], dims="Y") fillchar = xr.DataArray(["X", "#"], dims="Y").astype(dtype) result = values.str.center(width, fillchar=fillchar) expected = xr.DataArray( [ ["XXaXX", "#a##"], ["XXbbX", "#bb#"], ["Xcccc", "cccc"], ["ddddd", "ddddd"], ["eeeeee", "eeeeee"], ], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.pad(width, side="both", fillchar=fillchar) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.ljust(width, fillchar=fillchar) expected = xr.DataArray( [ ["aXXXX", "a###"], ["bbXXX", "bb##"], ["ccccX", "cccc"], ["ddddd", "ddddd"], ["eeeeee", "eeeeee"], ], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(width, side="right", fillchar=fillchar) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rjust(width, fillchar=fillchar) expected = xr.DataArray( [ ["XXXXa", "###a"], ["XXXbb", "##bb"], ["Xcccc", "cccc"], ["ddddd", "ddddd"], ["eeeeee", "eeeeee"], ], dims=["X", "Y"], ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected.astype(dtype)) result = values.str.pad(width, side="left", fillchar=fillchar) assert result.dtype == expected.dtype assert_equal(result, expected) def test_zfill(dtype) -> None: values =
xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype) result = values.str.zfill(5) expected = xr.DataArray(["00001", "00022", "00aaa", "00333", "45678"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.zfill(3) expected = xr.DataArray(["001", "022", "aaa", "333", "45678"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_zfill_broadcast(dtype) -> None: values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype) width = np.array([4, 5, 0, 3, 8]) result = values.str.zfill(width) expected = xr.DataArray(["0001", "00022", "aaa", "333", "00045678"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_slice(dtype) -> None: arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype) result = arr.str.slice(2, 5) exp = xr.DataArray(["foo", "bar", "baz"]).astype(dtype) assert result.dtype == exp.dtype assert_equal(result, exp) for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2), (3, 0, -1)]: try: result = arr.str[start:stop:step] expected = xr.DataArray([s[start:stop:step] for s in arr.values]) assert_equal(result, expected.astype(dtype)) except IndexError: print(f"failed on {start}:{stop}:{step}") raise def test_slice_broadcast(dtype) -> None: arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype) start = xr.DataArray([1, 2, 3]) stop = 5 result = arr.str.slice(start=start, stop=stop) exp = xr.DataArray(["afoo", "bar", "az"]).astype(dtype) assert result.dtype == exp.dtype assert_equal(result, exp) def test_slice_replace(dtype) -> None: da = lambda x: xr.DataArray(x).astype(dtype) values = da(["short", "a bit longer", "evenlongerthanthat", ""]) expected = da(["shrt", "a it longer", "evnlongerthanthat", ""]) result = values.str.slice_replace(2, 3) assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shzrt", "a zit longer", "evznlongerthanthat", "z"]) result = values.str.slice_replace(2, 3, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"]) result = values.str.slice_replace(2, 2, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shzort", "a zbit longer", "evzenlongerthanthat", "z"]) result = values.str.slice_replace(2, 1, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shorz", "a bit longez", "evenlongerthanthaz", "z"]) result = values.str.slice_replace(-1, None, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["zrt", "zer", "zat", "z"]) result = values.str.slice_replace(None, -2, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["shortz", "a bit znger", "evenlozerthanthat", "z"]) result = values.str.slice_replace(6, 8, "z") assert result.dtype == expected.dtype assert_equal(result, expected) expected = da(["zrt", "a zit longer", "evenlongzerthanthat", "z"]) result = values.str.slice_replace(-10, 3, "z") assert result.dtype == expected.dtype assert_equal(result, expected) def test_slice_replace_broadcast(dtype) -> None: values = xr.DataArray(["short", "a bit longer", "evenlongerthanthat", ""]).astype( dtype ) start = 2 stop = np.array([4, 5, None, 7]) repl = "test" expected = xr.DataArray(["shtestt", "a test longer", "evtest", "test"]).astype( dtype ) result = values.str.slice_replace(start, stop, repl) assert result.dtype 
== expected.dtype assert_equal(result, expected) def test_strip_lstrip_rstrip(dtype) -> None: values = xr.DataArray([" aa ", " bb \n", "cc "]).astype(dtype) result = values.str.strip() expected = xr.DataArray(["aa", "bb", "cc"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.lstrip() expected = xr.DataArray(["aa ", "bb \n", "cc "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rstrip() expected = xr.DataArray([" aa", " bb", "cc"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_strip_lstrip_rstrip_args(dtype) -> None: values = xr.DataArray(["xxABCxx", "xx BNSD", "LDFJH xx"]).astype(dtype) result = values.str.strip("x") expected = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.lstrip("x") expected = xr.DataArray(["ABCxx", " BNSD", "LDFJH xx"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rstrip("x") expected = xr.DataArray(["xxABC", "xx BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_strip_lstrip_rstrip_broadcast(dtype) -> None: values = xr.DataArray(["xxABCxx", "yy BNSD", "LDFJH zz"]).astype(dtype) to_strip = xr.DataArray(["x", "y", "z"]).astype(dtype) result = values.str.strip(to_strip) expected = xr.DataArray(["ABC", " BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.lstrip(to_strip) expected = xr.DataArray(["ABCxx", " BNSD", "LDFJH zz"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.rstrip(to_strip) expected = xr.DataArray(["xxABC", "yy BNSD", "LDFJH "]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_wrap() -> None: # test values are: two words less than width, two words equal to width, # two words greater than width, one word less than width, one word # equal to width, one word greater than width, multiple tokens with # trailing whitespace equal to width values = xr.DataArray( [ "hello world", "hello world!", "hello world!!", "abcdefabcde", "abcdefabcdef", "abcdefabcdefa", "ab ab ab ab ", "ab ab ab ab a", "\t", ] ) # expected values expected = xr.DataArray( [ "hello world", "hello world!", "hello\nworld!!", "abcdefabcde", "abcdefabcdef", "abcdefabcdef\na", "ab ab ab ab", "ab ab ab ab\na", "", ] ) result = values.str.wrap(12, break_long_words=True) assert result.dtype == expected.dtype assert_equal(result, expected) # test with pre and post whitespace (non-unicode), NaN, and non-ascii # Unicode values = xr.DataArray([" pre ", "\xac\u20ac\U00008000 abadcafe"]) expected = xr.DataArray([" pre", "\xac\u20ac\U00008000 ab\nadcafe"]) result = values.str.wrap(6) assert result.dtype == expected.dtype assert_equal(result, expected) def test_wrap_kwargs_passed() -> None: # GH4334 values = xr.DataArray(" hello world ") result = values.str.wrap(7) expected = xr.DataArray(" hello\nworld") assert result.dtype == expected.dtype assert_equal(result, expected) result = values.str.wrap(7, drop_whitespace=False) expected = xr.DataArray(" hello\n world\n ") assert result.dtype == expected.dtype assert_equal(result, expected) def test_get(dtype) -> None: values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"]).astype(dtype) result = values.str[2] expected = xr.DataArray(["b", 
"d", "g"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) # bounds testing values = xr.DataArray(["1_2_3_4_5", "6_7_8_9_10", "11_12"]).astype(dtype) # positive index result = values.str[5] expected = xr.DataArray(["_", "_", ""]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) # negative index result = values.str[-6] expected = xr.DataArray(["_", "8", ""]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_get_default(dtype) -> None: # GH4334 values = xr.DataArray(["a_b", "c", ""]).astype(dtype) result = values.str.get(2, "default") expected = xr.DataArray(["b", "default", "default"]).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_get_broadcast(dtype) -> None: values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"], dims=["X"]).astype(dtype) inds = xr.DataArray([0, 2], dims=["Y"]) result = values.str.get(inds) expected = xr.DataArray( [["a", "b"], ["c", "d"], ["f", "g"]], dims=["X", "Y"] ).astype(dtype) assert result.dtype == expected.dtype assert_equal(result, expected) def test_encode_decode() -> None: data = xr.DataArray(["a", "b", "a\xe4"]) encoded = data.str.encode("utf-8") decoded = encoded.str.decode("utf-8") assert data.dtype == decoded.dtype assert_equal(data, decoded) def test_encode_decode_errors() -> None: encodeBase = xr.DataArray(["a", "b", "a\x9d"]) msg = ( r"'charmap' codec can't encode character '\\x9d' in position 1:" " character maps to " ) with pytest.raises(UnicodeEncodeError, match=msg): encodeBase.str.encode("cp1252") f = lambda x: x.encode("cp1252", "ignore") result = encodeBase.str.encode("cp1252", "ignore") expected = xr.DataArray([f(x) for x in encodeBase.values.tolist()]) assert result.dtype == expected.dtype assert_equal(result, expected) decodeBase = xr.DataArray([b"a", b"b", b"a\x9d"]) msg = ( "'charmap' codec can't decode byte 0x9d in position 1:" " character maps to " ) with pytest.raises(UnicodeDecodeError, match=msg): decodeBase.str.decode("cp1252") f = lambda x: x.decode("cp1252", "ignore") result = decodeBase.str.decode("cp1252", "ignore") expected = xr.DataArray([f(x) for x in decodeBase.values.tolist()]) assert result.dtype == expected.dtype assert_equal(result, expected) def test_partition_whitespace(dtype) -> None: values = xr.DataArray( [ ["abc def", "spam eggs swallow", "red_blue"], ["test0 test1 test2 test3", "", "abra ka da bra"], ], dims=["X", "Y"], ).astype(dtype) exp_part_dim_list = [ [ ["abc", " ", "def"], ["spam", " ", "eggs swallow"], ["red_blue", "", ""], ], [ ["test0", " ", "test1 test2 test3"], ["", "", ""], ["abra", " ", "ka da bra"], ], ] exp_rpart_dim_list = [ [ ["abc", " ", "def"], ["spam eggs", " ", "swallow"], ["", "", "red_blue"], ], [ ["test0 test1 test2", " ", "test3"], ["", "", ""], ["abra ka da", " ", "bra"], ], ] exp_part_dim = xr.DataArray(exp_part_dim_list, dims=["X", "Y", "ZZ"]).astype(dtype) exp_rpart_dim = xr.DataArray(exp_rpart_dim_list, dims=["X", "Y", "ZZ"]).astype( dtype ) res_part_dim = values.str.partition(dim="ZZ") res_rpart_dim = values.str.rpartition(dim="ZZ") assert res_part_dim.dtype == exp_part_dim.dtype assert res_rpart_dim.dtype == exp_rpart_dim.dtype assert_equal(res_part_dim, exp_part_dim) assert_equal(res_rpart_dim, exp_rpart_dim) def test_partition_comma(dtype) -> None: values = xr.DataArray( [ ["abc, def", "spam, eggs, swallow", "red_blue"], ["test0, test1, test2, test3", "", "abra, ka, da, bra"], ], dims=["X", "Y"], ).astype(dtype) exp_part_dim_list = [ 
[ ["abc", ", ", "def"], ["spam", ", ", "eggs, swallow"], ["red_blue", "", ""], ], [ ["test0", ", ", "test1, test2, test3"], ["", "", ""], ["abra", ", ", "ka, da, bra"], ], ] exp_rpart_dim_list = [ [ ["abc", ", ", "def"], ["spam, eggs", ", ", "swallow"], ["", "", "red_blue"], ], [ ["test0, test1, test2", ", ", "test3"], ["", "", ""], ["abra, ka, da", ", ", "bra"], ], ] exp_part_dim = xr.DataArray(exp_part_dim_list, dims=["X", "Y", "ZZ"]).astype(dtype) exp_rpart_dim = xr.DataArray(exp_rpart_dim_list, dims=["X", "Y", "ZZ"]).astype( dtype ) res_part_dim = values.str.partition(sep=", ", dim="ZZ") res_rpart_dim = values.str.rpartition(sep=", ", dim="ZZ") assert res_part_dim.dtype == exp_part_dim.dtype assert res_rpart_dim.dtype == exp_rpart_dim.dtype assert_equal(res_part_dim, exp_part_dim) assert_equal(res_rpart_dim, exp_rpart_dim) def test_partition_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) res = values.str.partition(sep=", ", dim="ZZ") assert res.dtype == expected.dtype assert_equal(res, expected) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(dim=None), [ [["abc", "def"], ["spam", "eggs", "swallow"], ["red_blue"]], [["test0", "test1", "test2", "test3"], [], ["abra", "ka", "da", "bra"]], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(dim=None), [ [["abc", "def"], ["spam", "eggs", "swallow"], ["red_blue"]], [["test0", "test1", "test2", "test3"], [], ["abra", "ka", "da", "bra"]], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(dim=None, maxsplit=1), [ [["abc", "def"], ["spam", "eggs\tswallow"], ["red_blue"]], [["test0", "test1\ntest2\n\ntest3"], [], ["abra", "ka\nda\tbra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(dim=None, maxsplit=1), [ [["abc", "def"], ["spam\t\teggs", "swallow"], ["red_blue"]], [["test0\ntest1\ntest2", "test3"], [], ["abra ka\nda", "bra"]], ], id="rsplit_1", ), ], ) def test_split_whitespace_nodim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc def", "spam\t\teggs\tswallow", "red_blue"], ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y"]) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(dim="ZZ"), [ [ ["abc", "def", "", ""], ["spam", "eggs", "swallow", ""], ["red_blue", "", "", ""], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(dim="ZZ"), [ [ ["", "", "abc", "def"], ["", "spam", "eggs", "swallow"], ["", "", "", "red_blue"], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam", "eggs\tswallow"], ["red_blue", ""]], [["test0", "test1\ntest2\n\ntest3"], ["", ""], ["abra", "ka\nda\tbra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam\t\teggs", "swallow"], ["", "red_blue"]], [["test0\ntest1\ntest2", "test3"], ["", ""], ["abra ka\nda", "bra"]], ], 
id="rsplit_1", ), ], ) def test_split_whitespace_dim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc def", "spam\t\teggs\tswallow", "red_blue"], ["test0\ntest1\ntest2\n\ntest3", "", "abra ka\nda\tbra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]).astype(dtype) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(sep=",", dim=None), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim=None), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(sep=",", dim=None, maxsplit=1), [ [["abc", "def"], ["spam", ",eggs,swallow"], ["red_blue"]], [["test0", "test1,test2,test3"], [""], ["abra", "ka,da,bra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim=None, maxsplit=1), [ [["abc", "def"], ["spam,,eggs", "swallow"], ["red_blue"]], [["test0,test1,test2", "test3"], [""], ["abra,ka,da", "bra"]], ], id="rsplit_1", ), pytest.param( lambda x: x.str.split(sep=",", dim=None, maxsplit=10), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="split_10", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim=None, maxsplit=10), [ [["abc", "def"], ["spam", "", "eggs", "swallow"], ["red_blue"]], [ ["test0", "test1", "test2", "test3"], [""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_10", ), ], ) def test_split_comma_nodim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc,def", "spam,,eggs,swallow", "red_blue"], ["test0,test1,test2,test3", "", "abra,ka,da,bra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y"]) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) @pytest.mark.parametrize( ["func", "expected"], [ pytest.param( lambda x: x.str.split(sep=",", dim="ZZ"), [ [ ["abc", "def", "", ""], ["spam", "", "eggs", "swallow"], ["red_blue", "", "", ""], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="split_full", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim="ZZ"), [ [ ["", "", "abc", "def"], ["spam", "", "eggs", "swallow"], ["", "", "", "red_blue"], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_full", ), pytest.param( lambda x: x.str.split(sep=",", dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam", ",eggs,swallow"], ["red_blue", ""]], [["test0", "test1,test2,test3"], ["", ""], ["abra", "ka,da,bra"]], ], id="split_1", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim="ZZ", maxsplit=1), [ [["abc", "def"], ["spam,,eggs", "swallow"], ["", "red_blue"]], [["test0,test1,test2", "test3"], ["", ""], ["abra,ka,da", 
"bra"]], ], id="rsplit_1", ), pytest.param( lambda x: x.str.split(sep=",", dim="ZZ", maxsplit=10), [ [ ["abc", "def", "", ""], ["spam", "", "eggs", "swallow"], ["red_blue", "", "", ""], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="split_10", ), pytest.param( lambda x: x.str.rsplit(sep=",", dim="ZZ", maxsplit=10), [ [ ["", "", "abc", "def"], ["spam", "", "eggs", "swallow"], ["", "", "", "red_blue"], ], [ ["test0", "test1", "test2", "test3"], ["", "", "", ""], ["abra", "ka", "da", "bra"], ], ], id="rsplit_10", ), ], ) def test_split_comma_dim( dtype, func: Callable[[xr.DataArray], xr.DataArray], expected: xr.DataArray ) -> None: values = xr.DataArray( [ ["abc,def", "spam,,eggs,swallow", "red_blue"], ["test0,test1,test2,test3", "", "abra,ka,da,bra"], ], dims=["X", "Y"], ).astype(dtype) expected_dtype = [[[dtype(x) for x in y] for y in z] for z in expected] expected_np = np.array(expected_dtype, dtype=np.object_) expected_da = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]).astype(dtype) actual = func(values) assert actual.dtype == expected_da.dtype assert_equal(actual, expected_da) def test_splitters_broadcast(dtype) -> None: values = xr.DataArray( ["ab cd,de fg", "spam, ,eggs swallow", "red_blue"], dims=["X"], ).astype(dtype) sep = xr.DataArray( [" ", ","], dims=["Y"], ).astype(dtype) expected_left = xr.DataArray( [ [["ab", "cd,de fg"], ["ab cd", "de fg"]], [["spam,", ",eggs swallow"], ["spam", " ,eggs swallow"]], [["red_blue", ""], ["red_blue", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) expected_right = xr.DataArray( [ [["ab cd,de", "fg"], ["ab cd", "de fg"]], [["spam, ,eggs", "swallow"], ["spam, ", "eggs swallow"]], [["", "red_blue"], ["", "red_blue"]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) res_left = values.str.split(dim="ZZ", sep=sep, maxsplit=1) res_right = values.str.rsplit(dim="ZZ", sep=sep, maxsplit=1) # assert res_left.dtype == expected_left.dtype # assert res_right.dtype == expected_right.dtype assert_equal(res_left, expected_left) assert_equal(res_right, expected_right) expected_left = xr.DataArray( [ [["ab", " ", "cd,de fg"], ["ab cd", ",", "de fg"]], [["spam,", " ", ",eggs swallow"], ["spam", ",", " ,eggs swallow"]], [["red_blue", "", ""], ["red_blue", "", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) expected_right = xr.DataArray( [ [["ab", " ", "cd,de fg"], ["ab cd", ",", "de fg"]], [["spam,", " ", ",eggs swallow"], ["spam", ",", " ,eggs swallow"]], [["red_blue", "", ""], ["red_blue", "", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) res_left = values.str.partition(dim="ZZ", sep=sep) res_right = values.str.partition(dim="ZZ", sep=sep) # assert res_left.dtype == expected_left.dtype # assert res_right.dtype == expected_right.dtype assert_equal(res_left, expected_left) assert_equal(res_right, expected_right) def test_split_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) res = values.str.split(sep=", ", dim="ZZ") assert res.dtype == expected.dtype assert_equal(res, expected) def test_get_dummies(dtype) -> None: values_line = xr.DataArray( [["a|ab~abc|abc", "ab", "a||abc|abcd"], ["abcd|ab|a", "abc|ab~abc", "|a"]], dims=["X", "Y"], ).astype(dtype) values_comma = xr.DataArray( [["a~ab|abc~~abc", "ab", "a~abc~abcd"], ["abcd~ab~a", "abc~ab|abc", "~a"]], dims=["X", "Y"], ).astype(dtype) vals_line = np.array(["a", "ab", "abc", "abcd", "ab~abc"]).astype(dtype) vals_comma = np.array(["a", "ab", "abc", "abcd", 
"ab|abc"]).astype(dtype) expected_list = [ [ [True, False, True, False, True], [False, True, False, False, False], [True, False, True, True, False], ], [ [True, True, False, True, False], [False, False, True, False, True], [True, False, False, False, False], ], ] expected_np = np.array(expected_list) expected = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]) targ_line = expected.copy() targ_comma = expected.copy() targ_line.coords["ZZ"] = vals_line targ_comma.coords["ZZ"] = vals_comma res_default = values_line.str.get_dummies(dim="ZZ") res_line = values_line.str.get_dummies(dim="ZZ", sep="|") res_comma = values_comma.str.get_dummies(dim="ZZ", sep="~") assert res_default.dtype == targ_line.dtype assert res_line.dtype == targ_line.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_default, targ_line) assert_equal(res_line, targ_line) assert_equal(res_comma, targ_comma) def test_get_dummies_broadcast(dtype) -> None: values = xr.DataArray( ["x~x|x~x", "x", "x|x~x", "x~x"], dims=["X"], ).astype(dtype) sep = xr.DataArray( ["|", "~"], dims=["Y"], ).astype(dtype) expected_list = [ [[False, False, True], [True, True, False]], [[True, False, False], [True, False, False]], [[True, False, True], [True, True, False]], [[False, False, True], [True, False, False]], ] expected_np = np.array(expected_list) expected = xr.DataArray(expected_np, dims=["X", "Y", "ZZ"]) expected.coords["ZZ"] = np.array(["x", "x|x", "x~x"]).astype(dtype) res = values.str.get_dummies(dim="ZZ", sep=sep) assert res.dtype == expected.dtype assert_equal(res, expected) def test_get_dummies_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) res = values.str.get_dummies(dim="ZZ") assert res.dtype == expected.dtype assert_equal(res, expected) def test_splitters_empty_str(dtype) -> None: values = xr.DataArray( [["", "", ""], ["", "", ""]], dims=["X", "Y"], ).astype(dtype) targ_partition_dim = xr.DataArray( [ [["", "", ""], ["", "", ""], ["", "", ""]], [["", "", ""], ["", "", ""], ["", "", ""]], ], dims=["X", "Y", "ZZ"], ).astype(dtype) targ_partition_none_list = [ [["", "", ""], ["", "", ""], ["", "", ""]], [["", "", ""], ["", "", ""], ["", "", "", ""]], ] targ_partition_none_list = [ [[dtype(x) for x in y] for y in z] for z in targ_partition_none_list ] targ_partition_none_np = np.array(targ_partition_none_list, dtype=np.object_) del targ_partition_none_np[-1, -1][-1] targ_partition_none = xr.DataArray( targ_partition_none_np, dims=["X", "Y"], ) targ_split_dim = xr.DataArray( [[[""], [""], [""]], [[""], [""], [""]]], dims=["X", "Y", "ZZ"], ).astype(dtype) targ_split_none = xr.DataArray( np.array([[[], [], []], [[], [], [""]]], dtype=np.object_), dims=["X", "Y"], ) del targ_split_none.data[-1, -1][-1] res_partition_dim = values.str.partition(dim="ZZ") res_rpartition_dim = values.str.rpartition(dim="ZZ") res_partition_none = values.str.partition(dim=None) res_rpartition_none = values.str.rpartition(dim=None) res_split_dim = values.str.split(dim="ZZ") res_rsplit_dim = values.str.rsplit(dim="ZZ") res_split_none = values.str.split(dim=None) res_rsplit_none = values.str.rsplit(dim=None) res_dummies = values.str.rsplit(dim="ZZ") assert res_partition_dim.dtype == targ_partition_dim.dtype assert res_rpartition_dim.dtype == targ_partition_dim.dtype assert res_partition_none.dtype == targ_partition_none.dtype assert res_rpartition_none.dtype == targ_partition_none.dtype assert res_split_dim.dtype == targ_split_dim.dtype assert 
res_rsplit_dim.dtype == targ_split_dim.dtype assert res_split_none.dtype == targ_split_none.dtype assert res_rsplit_none.dtype == targ_split_none.dtype assert res_dummies.dtype == targ_split_dim.dtype assert_equal(res_partition_dim, targ_partition_dim) assert_equal(res_rpartition_dim, targ_partition_dim) assert_equal(res_partition_none, targ_partition_none) assert_equal(res_rpartition_none, targ_partition_none) assert_equal(res_split_dim, targ_split_dim) assert_equal(res_rsplit_dim, targ_split_dim) assert_equal(res_split_none, targ_split_none) assert_equal(res_rsplit_none, targ_split_none) assert_equal(res_dummies, targ_split_dim) def test_cat_str(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) values_2 = "111" targ_blank = xr.DataArray( [["a111", "bb111", "cccc111"], ["ddddd111", "eeee111", "fff111"]], dims=["X", "Y"], ).astype(dtype) targ_space = xr.DataArray( [["a 111", "bb 111", "cccc 111"], ["ddddd 111", "eeee 111", "fff 111"]], dims=["X", "Y"], ).astype(dtype) targ_bars = xr.DataArray( [["a||111", "bb||111", "cccc||111"], ["ddddd||111", "eeee||111", "fff||111"]], dims=["X", "Y"], ).astype(dtype) targ_comma = xr.DataArray( [["a, 111", "bb, 111", "cccc, 111"], ["ddddd, 111", "eeee, 111", "fff, 111"]], dims=["X", "Y"], ).astype(dtype) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_uniform(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) values_2 = xr.DataArray( [["11111", "222", "33"], ["4", "5555", "66"]], dims=["X", "Y"], ) targ_blank = xr.DataArray( [["a11111", "bb222", "cccc33"], ["ddddd4", "eeee5555", "fff66"]], dims=["X", "Y"], ).astype(dtype) targ_space = xr.DataArray( [["a 11111", "bb 222", "cccc 33"], ["ddddd 4", "eeee 5555", "fff 66"]], dims=["X", "Y"], ).astype(dtype) targ_bars = xr.DataArray( [["a||11111", "bb||222", "cccc||33"], ["ddddd||4", "eeee||5555", "fff||66"]], dims=["X", "Y"], ).astype(dtype) targ_comma = xr.DataArray( [["a, 11111", "bb, 222", "cccc, 33"], ["ddddd, 4", "eeee, 5555", "fff, 66"]], dims=["X", "Y"], ).astype(dtype) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_broadcast_right(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) values_2 = xr.DataArray( ["11111", "222", "33"], dims=["Y"], ) targ_blank = xr.DataArray( 
[["a11111", "bb222", "cccc33"], ["ddddd11111", "eeee222", "fff33"]], dims=["X", "Y"], ).astype(dtype) targ_space = xr.DataArray( [["a 11111", "bb 222", "cccc 33"], ["ddddd 11111", "eeee 222", "fff 33"]], dims=["X", "Y"], ).astype(dtype) targ_bars = xr.DataArray( [["a||11111", "bb||222", "cccc||33"], ["ddddd||11111", "eeee||222", "fff||33"]], dims=["X", "Y"], ).astype(dtype) targ_comma = xr.DataArray( [["a, 11111", "bb, 222", "cccc, 33"], ["ddddd, 11111", "eeee, 222", "fff, 33"]], dims=["X", "Y"], ).astype(dtype) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_broadcast_left(dtype) -> None: values_1 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(dtype) values_2 = xr.DataArray( [["11111", "222", "33"], ["4", "5555", "66"]], dims=["X", "Y"], ) targ_blank = ( xr.DataArray( [["a11111", "bb222", "cccc33"], ["a4", "bb5555", "cccc66"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_space = ( xr.DataArray( [["a 11111", "bb 222", "cccc 33"], ["a 4", "bb 5555", "cccc 66"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_bars = ( xr.DataArray( [["a||11111", "bb||222", "cccc||33"], ["a||4", "bb||5555", "cccc||66"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_comma = ( xr.DataArray( [["a, 11111", "bb, 222", "cccc, 33"], ["a, 4", "bb, 5555", "cccc, 66"]], dims=["X", "Y"], ) .astype(dtype) .T ) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_broadcast_both(dtype) -> None: values_1 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(dtype) values_2 = xr.DataArray( ["11111", "4"], dims=["X"], ) targ_blank = ( xr.DataArray( [["a11111", "bb11111", "cccc11111"], ["a4", "bb4", "cccc4"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_space = ( xr.DataArray( [["a 11111", "bb 11111", "cccc 11111"], ["a 4", "bb 4", "cccc 4"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_bars = ( xr.DataArray( [["a||11111", "bb||11111", "cccc||11111"], ["a||4", "bb||4", "cccc||4"]], dims=["X", "Y"], ) .astype(dtype) .T ) targ_comma = ( xr.DataArray( [["a, 11111", "bb, 11111", "cccc, 11111"], ["a, 4", "bb, 4", "cccc, 4"]], dims=["X", "Y"], ) .astype(dtype) .T ) res_blank = values_1.str.cat(values_2) res_add = values_1.str + values_2 res_space = values_1.str.cat(values_2, sep=" ") res_bars = values_1.str.cat(values_2, sep="||") res_comma = values_1.str.cat(values_2, sep=", ") assert res_blank.dtype == targ_blank.dtype assert res_add.dtype == targ_blank.dtype assert res_space.dtype == targ_space.dtype 
assert res_bars.dtype == targ_bars.dtype assert res_comma.dtype == targ_comma.dtype assert_equal(res_blank, targ_blank) assert_equal(res_add, targ_blank) assert_equal(res_space, targ_space) assert_equal(res_bars, targ_bars) assert_equal(res_comma, targ_comma) def test_cat_multi() -> None: values_1 = xr.DataArray( ["11111", "4"], dims=["X"], ) values_2 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(np.bytes_) values_3 = np.array(3.4) values_4 = "" values_5 = np.array("", dtype=np.str_) sep = xr.DataArray( [" ", ", "], dims=["ZZ"], ).astype(np.str_) expected = xr.DataArray( [ [ ["11111 a 3.4 ", "11111, a, 3.4, , "], ["11111 bb 3.4 ", "11111, bb, 3.4, , "], ["11111 cccc 3.4 ", "11111, cccc, 3.4, , "], ], [ ["4 a 3.4 ", "4, a, 3.4, , "], ["4 bb 3.4 ", "4, bb, 3.4, , "], ["4 cccc 3.4 ", "4, cccc, 3.4, , "], ], ], dims=["X", "Y", "ZZ"], ).astype(np.str_) res = values_1.str.cat(values_2, values_3, values_4, values_5, sep=sep) assert res.dtype == expected.dtype assert_equal(res, expected) def test_join_scalar(dtype) -> None: values = xr.DataArray("aaa").astype(dtype) targ = xr.DataArray("aaa").astype(dtype) res_blank = values.str.join() res_space = values.str.join(sep=" ") assert res_blank.dtype == targ.dtype assert res_space.dtype == targ.dtype assert_identical(res_blank, targ) assert_identical(res_space, targ) def test_join_vector(dtype) -> None: values = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], ).astype(dtype) targ_blank = xr.DataArray("abbcccc").astype(dtype) targ_space = xr.DataArray("a bb cccc").astype(dtype) res_blank_none = values.str.join() res_blank_y = values.str.join(dim="Y") res_space_none = values.str.join(sep=" ") res_space_y = values.str.join(dim="Y", sep=" ") assert res_blank_none.dtype == targ_blank.dtype assert res_blank_y.dtype == targ_blank.dtype assert res_space_none.dtype == targ_space.dtype assert res_space_y.dtype == targ_space.dtype assert_identical(res_blank_none, targ_blank) assert_identical(res_blank_y, targ_blank) assert_identical(res_space_none, targ_space) assert_identical(res_space_y, targ_space) def test_join_2d(dtype) -> None: values = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], ).astype(dtype) targ_blank_x = xr.DataArray( ["addddd", "bbeeee", "ccccfff"], dims=["Y"], ).astype(dtype) targ_space_x = xr.DataArray( ["a ddddd", "bb eeee", "cccc fff"], dims=["Y"], ).astype(dtype) targ_blank_y = xr.DataArray( ["abbcccc", "dddddeeeefff"], dims=["X"], ).astype(dtype) targ_space_y = xr.DataArray( ["a bb cccc", "ddddd eeee fff"], dims=["X"], ).astype(dtype) res_blank_x = values.str.join(dim="X") res_blank_y = values.str.join(dim="Y") res_space_x = values.str.join(dim="X", sep=" ") res_space_y = values.str.join(dim="Y", sep=" ") assert res_blank_x.dtype == targ_blank_x.dtype assert res_blank_y.dtype == targ_blank_y.dtype assert res_space_x.dtype == targ_space_x.dtype assert res_space_y.dtype == targ_space_y.dtype assert_identical(res_blank_x, targ_blank_x) assert_identical(res_blank_y, targ_blank_y) assert_identical(res_space_x, targ_space_x) assert_identical(res_space_y, targ_space_y) with pytest.raises( ValueError, match="Dimension must be specified for multidimensional arrays." 
): values.str.join() def test_join_broadcast(dtype) -> None: values = xr.DataArray( ["a", "bb", "cccc"], dims=["X"], ).astype(dtype) sep = xr.DataArray( [" ", ", "], dims=["ZZ"], ).astype(dtype) expected = xr.DataArray( ["a bb cccc", "a, bb, cccc"], dims=["ZZ"], ).astype(dtype) res = values.str.join(sep=sep) assert res.dtype == expected.dtype assert_identical(res, expected) def test_format_scalar() -> None: values = xr.DataArray( ["{}.{Y}.{ZZ}", "{},{},{X},{X}", "{X}-{Y}-{ZZ}"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = "2.3" X = "'test'" Y = "X" ZZ = None W = "NO!" expected = xr.DataArray( ["1.X.None", "1,1.2,'test','test'", "'test'-X-None"], dims=["X"], ).astype(np.str_) res = values.str.format(pos0, pos1, pos2, X=X, Y=Y, ZZ=ZZ, W=W) assert res.dtype == expected.dtype assert_equal(res, expected) def test_format_broadcast() -> None: values = xr.DataArray( ["{}.{Y}.{ZZ}", "{},{},{X},{X}", "{X}-{Y}-{ZZ}"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = xr.DataArray( ["2.3", "3.44444"], dims=["YY"], ) X = "'test'" Y = "X" ZZ = None W = "NO!" expected = xr.DataArray( [ ["1.X.None", "1.X.None"], ["1,1.2,'test','test'", "1,1.2,'test','test'"], ["'test'-X-None", "'test'-X-None"], ], dims=["X", "YY"], ).astype(np.str_) res = values.str.format(pos0, pos1, pos2, X=X, Y=Y, ZZ=ZZ, W=W) assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_scalar() -> None: values = xr.DataArray( ["%s.%s.%s", "%s,%s,%s", "%s-%s-%s"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = "2.3" expected = xr.DataArray( ["1.1.2.2.3", "1,1.2,2.3", "1-1.2-2.3"], dims=["X"], ).astype(np.str_) res = values.str % (pos0, pos1, pos2) assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_dict() -> None: values = xr.DataArray( ["%(a)s.%(a)s.%(b)s", "%(b)s,%(c)s,%(b)s", "%(c)s-%(b)s-%(a)s"], dims=["X"], ).astype(np.str_) a = 1 b = 1.2 c = "2.3" expected = xr.DataArray( ["1.1.1.2", "1.2,2.3,1.2", "2.3-1.2-1"], dims=["X"], ).astype(np.str_) res = values.str % {"a": a, "b": b, "c": c} assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_broadcast_single() -> None: values = xr.DataArray( ["%s_1", "%s_2", "%s_3"], dims=["X"], ).astype(np.str_) pos = xr.DataArray( ["2.3", "3.44444"], dims=["YY"], ) expected = xr.DataArray( [["2.3_1", "3.44444_1"], ["2.3_2", "3.44444_2"], ["2.3_3", "3.44444_3"]], dims=["X", "YY"], ).astype(np.str_) res = values.str % pos assert res.dtype == expected.dtype assert_equal(res, expected) def test_mod_broadcast_multi() -> None: values = xr.DataArray( ["%s.%s.%s", "%s,%s,%s", "%s-%s-%s"], dims=["X"], ).astype(np.str_) pos0 = 1 pos1 = 1.2 pos2 = xr.DataArray( ["2.3", "3.44444"], dims=["YY"], ) expected = xr.DataArray( [ ["1.1.2.2.3", "1.1.2.3.44444"], ["1,1.2,2.3", "1,1.2,3.44444"], ["1-1.2-2.3", "1-1.2-3.44444"], ], dims=["X", "YY"], ).astype(np.str_) res = values.str % (pos0, pos1, pos2) assert res.dtype == expected.dtype assert_equal(res, expected) xarray-2025.09.0/xarray/tests/test_array_api.py000066400000000000000000000107151505620616400214000ustar00rootroot00000000000000from __future__ import annotations import pytest import xarray as xr from xarray.testing import assert_equal np = pytest.importorskip("numpy", minversion="1.22") xp = pytest.importorskip("array_api_strict") from array_api_strict._array_object import Array # isort:skip # type: ignore[no-redef] @pytest.fixture def arrays() -> tuple[xr.DataArray, xr.DataArray]: np_arr = xr.DataArray( np.array([[1.0, 2.0, 3.0], [4.0, 5.0, np.nan]]), dims=("x", "y"), 
coords={"x": [10, 20]}, ) xp_arr = xr.DataArray( xp.asarray([[1.0, 2.0, 3.0], [4.0, 5.0, np.nan]]), dims=("x", "y"), coords={"x": [10, 20]}, ) assert isinstance(xp_arr.data, Array) return np_arr, xp_arr def test_arithmetic(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr + 7 actual = xp_arr + 7 assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_aggregation(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.sum() actual = xp_arr.sum() assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_aggregation_skipna(arrays) -> None: np_arr, xp_arr = arrays expected = np_arr.sum(skipna=False) actual = xp_arr.sum(skipna=False) assert isinstance(actual.data, Array) assert_equal(actual, expected) # casting nan warns @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") def test_astype(arrays) -> None: np_arr, xp_arr = arrays expected = np_arr.astype(np.int64) actual = xp_arr.astype(xp.int64) assert actual.dtype == xp.int64 assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_broadcast(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays np_arr2 = xr.DataArray(np.array([1.0, 2.0]), dims="x") xp_arr2 = xr.DataArray(xp.asarray([1.0, 2.0]), dims="x") expected = xr.broadcast(np_arr, np_arr2) actual = xr.broadcast(xp_arr, xp_arr2) assert len(actual) == len(expected) for a, e in zip(actual, expected, strict=True): assert isinstance(a.data, Array) assert_equal(a, e) def test_broadcast_during_arithmetic(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays np_arr2 = xr.DataArray(np.array([1.0, 2.0]), dims="x") xp_arr2 = xr.DataArray(xp.asarray([1.0, 2.0]), dims="x") expected = np_arr * np_arr2 actual = xp_arr * xp_arr2 assert isinstance(actual.data, Array) assert_equal(actual, expected) expected = np_arr2 * np_arr actual = xp_arr2 * xp_arr assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_concat(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = xr.concat((np_arr, np_arr), dim="x") actual = xr.concat((xp_arr, xp_arr), dim="x") assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_indexing(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr[:, 0] actual = xp_arr[:, 0] assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_properties(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.data.nbytes assert np_arr.nbytes == expected assert xp_arr.nbytes == expected def test_reorganizing_operation(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.transpose() actual = xp_arr.transpose() assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_stack(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.stack(z=("x", "y")) actual = xp_arr.stack(z=("x", "y")) assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_unstack(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: np_arr, xp_arr = arrays expected = np_arr.stack(z=("x", "y")).unstack() actual = xp_arr.stack(z=("x", "y")).unstack() assert isinstance(actual.data, Array) assert_equal(actual, expected) def test_where() -> None: np_arr = xr.DataArray(np.array([1, 0]), dims="x") xp_arr = xr.DataArray(xp.asarray([1, 0]), dims="x") 
expected = xr.where(np_arr, 1, 0) actual = xr.where(xp_arr, 1, 0) assert isinstance(actual.data, Array) assert_equal(actual, expected) xarray-2025.09.0/xarray/tests/test_assertions.py000066400000000000000000000146501505620616400216250ustar00rootroot00000000000000from __future__ import annotations import warnings import numpy as np import pytest import xarray as xr from xarray.tests import has_dask try: from dask.array import from_array as dask_from_array except ImportError: dask_from_array = lambda x: x # type: ignore[assignment, misc] try: import pint unit_registry = pint.UnitRegistry(force_ndarray_like=True) def quantity(x): return unit_registry.Quantity(x, "m") has_pint = True except ImportError: def quantity(x): return x has_pint = False def test_allclose_regression() -> None: x = xr.DataArray(1.01) y = xr.DataArray(1.02) xr.testing.assert_allclose(x, y, atol=0.01) @pytest.mark.parametrize( "obj1,obj2", ( pytest.param( xr.Variable("x", [1e-17, 2]), xr.Variable("x", [0, 3]), id="Variable" ), pytest.param( xr.DataArray([1e-17, 2], dims="x"), xr.DataArray([0, 3], dims="x"), id="DataArray", ), pytest.param( xr.Dataset({"a": ("x", [1e-17, 2]), "b": ("y", [-2e-18, 2])}), xr.Dataset({"a": ("x", [0, 2]), "b": ("y", [0, 1])}), id="Dataset", ), pytest.param( xr.DataArray(np.array("a", dtype="|S1")), xr.DataArray(np.array("b", dtype="|S1")), id="DataArray_with_character_dtype", ), pytest.param( xr.Coordinates({"x": [1e-17, 2]}), xr.Coordinates({"x": [0, 3]}), id="Coordinates", ), ), ) def test_assert_allclose(obj1, obj2) -> None: with pytest.raises(AssertionError): xr.testing.assert_allclose(obj1, obj2) with pytest.raises(AssertionError): xr.testing.assert_allclose(obj1, obj2, check_dim_order=False) @pytest.mark.parametrize("func", ["assert_equal", "assert_allclose"]) def test_assert_allclose_equal_transpose(func) -> None: """Transposed DataArray raises assertion unless check_dim_order=False.""" obj1 = xr.DataArray([[0, 1, 2], [2, 3, 4]], dims=["a", "b"]) obj2 = xr.DataArray([[0, 2], [1, 3], [2, 4]], dims=["b", "a"]) with pytest.raises(AssertionError): getattr(xr.testing, func)(obj1, obj2) getattr(xr.testing, func)(obj1, obj2, check_dim_order=False) ds1 = obj1.to_dataset(name="varname") ds1["var2"] = obj1 ds2 = obj1.to_dataset(name="varname") ds2["var2"] = obj1.transpose() with pytest.raises(AssertionError): getattr(xr.testing, func)(ds1, ds2) getattr(xr.testing, func)(ds1, ds2, check_dim_order=False) def test_assert_equal_transpose_datatree() -> None: """Ensure `check_dim_order=False` works for transposed DataTree""" ds = xr.Dataset(data_vars={"data": (("x", "y"), [[1, 2]])}) a = xr.DataTree.from_dict({"node": ds}) b = xr.DataTree.from_dict({"node": ds.transpose("y", "x")}) with pytest.raises(AssertionError): xr.testing.assert_equal(a, b) xr.testing.assert_equal(a, b, check_dim_order=False) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize( "duckarray", ( pytest.param(np.array, id="numpy"), pytest.param( dask_from_array, id="dask", marks=pytest.mark.skipif(not has_dask, reason="requires dask"), ), pytest.param( quantity, id="pint", marks=pytest.mark.skipif(not has_pint, reason="requires pint"), ), ), ) @pytest.mark.parametrize( ["obj1", "obj2"], ( pytest.param([1e-10, 2], [0.0, 2.0], id="both arrays"), pytest.param([1e-17, 2], 0.0, id="second scalar"), pytest.param(0.0, [1e-17, 2], id="first scalar"), ), ) def test_assert_duckarray_equal_failing(duckarray, obj1, obj2) -> None: # TODO: actually check the repr a = duckarray(obj1) b = duckarray(obj2) with pytest.raises(AssertionError): 
xr.testing.assert_duckarray_equal(a, b) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize( "duckarray", ( pytest.param( np.array, id="numpy", ), pytest.param( dask_from_array, id="dask", marks=pytest.mark.skipif(not has_dask, reason="requires dask"), ), pytest.param( quantity, id="pint", marks=pytest.mark.skipif(not has_pint, reason="requires pint"), ), ), ) @pytest.mark.parametrize( ["obj1", "obj2"], ( pytest.param([0, 2], [0.0, 2.0], id="both arrays"), pytest.param([0, 0], 0.0, id="second scalar"), pytest.param(0.0, [0, 0], id="first scalar"), ), ) def test_assert_duckarray_equal(duckarray, obj1, obj2) -> None: a = duckarray(obj1) b = duckarray(obj2) xr.testing.assert_duckarray_equal(a, b) @pytest.mark.parametrize( "func", [ "assert_equal", "assert_identical", "assert_allclose", "assert_duckarray_equal", "assert_duckarray_allclose", ], ) def test_ensure_warnings_not_elevated(func) -> None: # make sure warnings are not elevated to errors in the assertion functions # e.g. by @pytest.mark.filterwarnings("error") # see https://github.com/pydata/xarray/pull/4760#issuecomment-774101639 # define a custom Variable class that raises a warning in assert_* class WarningVariable(xr.Variable): @property # type: ignore[misc] def dims(self): warnings.warn("warning in test", stacklevel=2) return super().dims def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: warnings.warn("warning in test", stacklevel=2) return super().__array__(dtype, copy=copy) a = WarningVariable("x", [1]) b = WarningVariable("x", [2]) with warnings.catch_warnings(record=True) as w: # elevate warnings to errors warnings.filterwarnings("error") with pytest.raises(AssertionError): getattr(xr.testing, func)(a, b) assert len(w) > 0 # ensure warnings still raise outside of assert_* with pytest.raises(UserWarning): warnings.warn("test", stacklevel=2) # ensure warnings stay ignored in assert_* with warnings.catch_warnings(record=True) as w: # ignore warnings warnings.filterwarnings("ignore") with pytest.raises(AssertionError): getattr(xr.testing, func)(a, b) assert len(w) == 0 xarray-2025.09.0/xarray/tests/test_backends.py000066400000000000000000011251521505620616400212060ustar00rootroot00000000000000from __future__ import annotations import asyncio import contextlib import gzip import itertools import math import os.path import pickle import platform import re import shutil import sys import tempfile import uuid import warnings from collections import ChainMap from collections.abc import Generator, Iterator, Mapping from contextlib import ExitStack from importlib import import_module from io import BytesIO from pathlib import Path from typing import TYPE_CHECKING, Any, Final, Literal, cast from unittest.mock import patch import numpy as np import pandas as pd import pytest from packaging.version import Version from pandas.errors import OutOfBoundsDatetime import xarray as xr import xarray.testing as xrt from xarray import ( DataArray, Dataset, DataTree, backends, load_dataarray, load_dataset, load_datatree, open_dataarray, open_dataset, open_mfdataset, save_mfdataset, ) from xarray.backends.common import robust_getitem from xarray.backends.h5netcdf_ import H5netcdfBackendEntrypoint from xarray.backends.netcdf3 import _nc3_dtype_coercions from xarray.backends.netCDF4_ import ( NetCDF4BackendEntrypoint, _extract_nc4_variable_encoding, ) from xarray.backends.pydap_ import PydapDataStore from xarray.backends.scipy_ import ScipyBackendEntrypoint from xarray.backends.zarr import 
ZarrStore from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.coding.cftime_offsets import date_range from xarray.coding.strings import check_vlen_dtype, create_vlen_dtype from xarray.coding.variables import SerializationWarning from xarray.conventions import encode_dataset_coordinates from xarray.core import indexing from xarray.core.indexes import PandasIndex from xarray.core.options import set_options from xarray.core.types import PDDatetimeUnitOptions from xarray.core.utils import module_available from xarray.namedarray.pycompat import array_type from xarray.structure.alignment import AlignmentError from xarray.tests import ( assert_allclose, assert_array_equal, assert_equal, assert_identical, assert_no_warnings, has_dask, has_h5netcdf_1_4_0_or_above, has_netCDF4, has_numpy_2, has_scipy, has_zarr, has_zarr_v3, has_zarr_v3_async_oindex, has_zarr_v3_dtypes, mock, network, parametrize_zarr_format, requires_cftime, requires_dask, requires_fsspec, requires_h5netcdf, requires_h5netcdf_1_4_0_or_above, requires_h5netcdf_ros3, requires_iris, requires_netcdf, requires_netCDF4, requires_netCDF4_1_6_2_or_above, requires_netCDF4_1_7_0_or_above, requires_pydap, requires_scipy, requires_scipy_or_netCDF4, requires_zarr, requires_zarr_v3, ) from xarray.tests.test_coding_times import ( _ALL_CALENDARS, _NON_STANDARD_CALENDARS, _STANDARD_CALENDARS, ) from xarray.tests.test_dataset import ( create_append_string_length_mismatch_test_data, create_append_test_data, create_test_data, ) with contextlib.suppress(ImportError): import netCDF4 as nc4 try: import dask import dask.array as da except ImportError: pass if has_zarr: import zarr import zarr.codecs if has_zarr_v3: from zarr.storage import MemoryStore as KVStore from zarr.storage import WrapperStore ZARR_FORMATS = [2, 3] else: ZARR_FORMATS = [2] try: from zarr import ( # type: ignore[attr-defined,no-redef,unused-ignore] KVStoreV3 as KVStore, ) except ImportError: KVStore = None # type: ignore[assignment,misc,unused-ignore] WrapperStore = object # type: ignore[assignment,misc,unused-ignore] else: KVStore = None # type: ignore[assignment,misc,unused-ignore] WrapperStore = object # type: ignore[assignment,misc,unused-ignore] ZARR_FORMATS = [] @pytest.fixture(scope="module", params=ZARR_FORMATS) def default_zarr_format(request) -> Generator[None, None]: if has_zarr_v3: with zarr.config.set(default_zarr_format=request.param): yield else: yield def skip_if_zarr_format_3(reason: str): if has_zarr_v3 and zarr.config["default_zarr_format"] == 3: pytest.skip(reason=f"Unsupported with zarr_format=3: {reason}") def skip_if_zarr_format_2(reason: str): if not has_zarr_v3 or (zarr.config["default_zarr_format"] == 2): pytest.skip(reason=f"Unsupported with zarr_format=2: {reason}") ON_WINDOWS = sys.platform == "win32" default_value = object() def _check_compression_codec_available(codec: str | None) -> bool: """Check if a compression codec is available in the netCDF4 library. Parameters ---------- codec : str or None The compression codec name (e.g., 'zstd', 'blosc_lz', etc.) Returns ------- bool True if the codec is available, False otherwise. 
""" if codec is None or codec in ("zlib", "szip"): # These are standard and should be available return True if not has_netCDF4: return False try: import os import tempfile import netCDF4 # Try to create a file with the compression to test availability with tempfile.NamedTemporaryFile(suffix=".nc", delete=False) as tmp: tmp_path = tmp.name try: nc = netCDF4.Dataset(tmp_path, "w", format="NETCDF4") nc.createDimension("x", 10) # Attempt to create a variable with the compression if codec and codec.startswith("blosc"): nc.createVariable( # type: ignore[call-overload] varname="test", datatype="f4", dimensions=("x",), compression=codec, blosc_shuffle=1, ) else: nc.createVariable( # type: ignore[call-overload] varname="test", datatype="f4", dimensions=("x",), compression=codec ) nc.close() os.unlink(tmp_path) return True except (RuntimeError, netCDF4.NetCDF4MissingFeatureException): # Codec not available if os.path.exists(tmp_path): with contextlib.suppress(OSError): os.unlink(tmp_path) return False except Exception: # Any other error, assume codec is not available return False dask_array_type = array_type("dask") if TYPE_CHECKING: from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes def open_example_dataset(name, *args, **kwargs) -> Dataset: return open_dataset( os.path.join(os.path.dirname(__file__), "data", name), *args, **kwargs ) def open_example_mfdataset(names, *args, **kwargs) -> Dataset: return open_mfdataset( [os.path.join(os.path.dirname(__file__), "data", name) for name in names], *args, **kwargs, ) def create_masked_and_scaled_data(dtype: np.dtype) -> Dataset: x = np.array([np.nan, np.nan, 10, 10.1, 10.2], dtype=dtype) encoding = { "_FillValue": -1, "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), "dtype": "i2", } return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_masked_and_scaled_data(dtype: np.dtype) -> Dataset: attributes = { "_FillValue": -1, "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } return Dataset( {"x": ("t", np.array([-1, -1, 0, 1, 2], dtype=np.int16), attributes)} ) def create_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": -1, "_Unsigned": "true", "dtype": "i1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the signed form. attributes = { "_FillValue": -1, "_Unsigned": "true", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create unsigned data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([0, 1, 127, -128, -1], dtype="i1") return Dataset({"x": ("t", sb, attributes)}) def create_bad_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": 255, "_Unsigned": True, "dtype": "i1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_bad_encoded_unsigned_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the signed form. 
attributes = { "_FillValue": -1, "_Unsigned": True, "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create signed data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([0, 1, 127, -128, -1], dtype="i1") return Dataset({"x": ("t", sb, attributes)}) def create_signed_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": -127, "_Unsigned": "false", "dtype": "i1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([-1.0, 10.1, 22.7, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_signed_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the signed form. attributes = { "_FillValue": -127, "_Unsigned": "false", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create signed data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([-110, 1, 127, -127], dtype="i1") return Dataset({"x": ("t", sb, attributes)}) def create_unsigned_false_masked_scaled_data(dtype: np.dtype) -> Dataset: encoding = { "_FillValue": 255, "_Unsigned": "false", "dtype": "u1", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } x = np.array([-1.0, 10.1, 22.7, np.nan], dtype=dtype) return Dataset({"x": ("t", x, {}, encoding)}) def create_encoded_unsigned_false_masked_scaled_data(dtype: np.dtype) -> Dataset: # These are values as written to the file: the _FillValue will # be represented in the unsigned form. attributes = { "_FillValue": 255, "_Unsigned": "false", "add_offset": dtype.type(10), "scale_factor": dtype.type(0.1), } # Create unsigned data corresponding to [-110, 1, 127, 255] signed sb = np.asarray([146, 1, 127, 255], dtype="u1") return Dataset({"x": ("t", sb, attributes)}) def create_boolean_data() -> Dataset: attributes = {"units": "-"} return Dataset( { "x": ( ("t", "x"), [[False, True, False, True], [True, False, False, True]], attributes, ) } ) class TestCommon: def test_robust_getitem(self) -> None: class UnreliableArrayFailure(Exception): pass class UnreliableArray: def __init__(self, array, failures=1): self.array = array self.failures = failures def __getitem__(self, key): if self.failures > 0: self.failures -= 1 raise UnreliableArrayFailure return self.array[key] array = UnreliableArray([0]) with pytest.raises(UnreliableArrayFailure): array[0] assert array[0] == 0 actual = robust_getitem(array, 0, catch=UnreliableArrayFailure, initial_delay=0) assert actual == 0 class NetCDF3Only: netcdf3_formats: tuple[T_NetcdfTypes, ...] 
= ("NETCDF3_CLASSIC", "NETCDF3_64BIT") @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: pass @requires_scipy def test_dtype_coercion_error(self) -> None: """Failing dtype coercion should lead to an error""" for dtype, format in itertools.product( _nc3_dtype_coercions, self.netcdf3_formats ): if dtype == "bool": # coerced upcast (bool to int8) ==> can never fail continue # Using the largest representable value, create some data that will # no longer compare equal after the coerced downcast maxval = np.iinfo(dtype).max x = np.array([0, 1, 2, maxval], dtype=dtype) ds = Dataset({"x": ("t", x, {})}) with create_tmp_file(allow_cleanup_failure=False) as path: with pytest.raises(ValueError, match="could not safely cast"): ds.to_netcdf(path, format=format) class DatasetIOBase: engine: T_NetcdfEngine | None = None file_format: T_NetcdfTypes | None = None def create_store(self): raise NotImplementedError() @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path: self.save(data, path, **save_kwargs) with self.open(path, **open_kwargs) as ds: yield ds @contextlib.contextmanager def roundtrip_append( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path: for i, key in enumerate(data.variables): mode = "a" if i > 0 else "w" self.save(data[[key]], path, mode=mode, **save_kwargs) with self.open(path, **open_kwargs) as ds: yield ds # The save/open methods may be overwritten below def save(self, dataset, path, **kwargs): return dataset.to_netcdf( path, engine=self.engine, format=self.file_format, **kwargs ) @contextlib.contextmanager def open(self, path, **kwargs): with open_dataset(path, engine=self.engine, **kwargs) as ds: yield ds def test_zero_dimensional_variable(self) -> None: expected = create_test_data() expected["float_var"] = ([], 1.0e9, {"units": "units of awesome"}) expected["bytes_var"] = ([], b"foobar") expected["string_var"] = ([], "foobar") with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_write_store(self) -> None: expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) # we need to cf decode the store because it has time and # non-dimension coordinates with xr.decode_cf(store) as actual: assert_allclose(expected, actual) def check_dtypes_roundtripped(self, expected, actual): for k in expected.variables: expected_dtype = expected.variables[k].dtype # For NetCDF3, the backend should perform dtype coercion if ( isinstance(self, NetCDF3Only) and str(expected_dtype) in _nc3_dtype_coercions ): expected_dtype = np.dtype(_nc3_dtype_coercions[str(expected_dtype)]) actual_dtype = actual.variables[k].dtype # TODO: check expected behavior for string dtypes more carefully string_kinds = {"O", "S", "U"} assert expected_dtype == actual_dtype or ( expected_dtype.kind in string_kinds and actual_dtype.kind in string_kinds ) def test_roundtrip_test_data(self) -> None: expected = create_test_data() with self.roundtrip(expected) as actual: self.check_dtypes_roundtripped(expected, actual) assert_identical(expected, actual) def test_load(self) -> None: # Note: 
please keep this in sync with test_load_async below as much as possible! expected = create_test_data() @contextlib.contextmanager def assert_loads(vars=None): if vars is None: vars = expected with self.roundtrip(expected) as actual: for k, v in actual.variables.items(): # IndexVariables are eagerly loaded into memory assert v._in_memory == (k in actual.dims) yield actual for k, v in actual.variables.items(): if k in vars: assert v._in_memory assert_identical(expected, actual) with pytest.raises(AssertionError): # make sure the contextmanager works! with assert_loads() as ds: pass with assert_loads() as ds: ds.load() with assert_loads(["var1", "dim1", "dim2"]) as ds: ds["var1"].load() # verify we can read data even after closing the file with self.roundtrip(expected) as ds: actual = ds.load() assert_identical(expected, actual) @pytest.mark.asyncio async def test_load_async(self) -> None: # Note: please keep this in sync with test_load above as much as possible! # Copied from `test_load` on the base test class, but won't work for netcdf expected = create_test_data() @contextlib.contextmanager def assert_loads(vars=None): if vars is None: vars = expected with self.roundtrip(expected) as actual: for k, v in actual.variables.items(): # IndexVariables are eagerly loaded into memory assert v._in_memory == (k in actual.dims) yield actual for k, v in actual.variables.items(): if k in vars: assert v._in_memory assert_identical(expected, actual) with pytest.raises(AssertionError): # make sure the contextmanager works! with assert_loads() as ds: pass with assert_loads() as ds: await ds.load_async() with assert_loads(["var1", "dim1", "dim2"]) as ds: await ds["var1"].load_async() # verify we can read data even after closing the file with self.roundtrip(expected) as ds: actual = await ds.load_async() assert_identical(expected, actual) def test_dataset_compute(self) -> None: expected = create_test_data() with self.roundtrip(expected) as actual: # Test Dataset.compute() for k, v in actual.variables.items(): # IndexVariables are eagerly cached assert v._in_memory == (k in actual.dims) computed = actual.compute() for k, v in actual.variables.items(): assert v._in_memory == (k in actual.dims) for v in computed.variables.values(): assert v._in_memory assert_identical(expected, actual) assert_identical(expected, computed) def test_pickle(self) -> None: expected = Dataset({"foo": ("x", [42])}) with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped: with roundtripped: # Windows doesn't like reopening an already open file raw_pickle = pickle.dumps(roundtripped) with pickle.loads(raw_pickle) as unpickled_ds: assert_identical(expected, unpickled_ds) @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") def test_pickle_dataarray(self) -> None: expected = Dataset({"foo": ("x", [42])}) with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped: with roundtripped: raw_pickle = pickle.dumps(roundtripped["foo"]) # TODO: figure out how to explicitly close the file for the # unpickled DataArray? 
unpickled = pickle.loads(raw_pickle) assert_identical(expected["foo"], unpickled) def test_dataset_caching(self) -> None: expected = Dataset({"foo": ("x", [5, 6, 7])}) with self.roundtrip(expected) as actual: assert isinstance(actual.foo.variable._data, indexing.MemoryCachedArray) assert not actual.foo.variable._in_memory _ = actual.foo.values # cache assert actual.foo.variable._in_memory with self.roundtrip(expected, open_kwargs={"cache": False}) as actual: assert isinstance(actual.foo.variable._data, indexing.CopyOnWriteArray) assert not actual.foo.variable._in_memory _ = actual.foo.values # no caching assert not actual.foo.variable._in_memory @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") def test_roundtrip_None_variable(self) -> None: expected = Dataset({None: (("x", "y"), [[0, 1], [2, 3]])}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_object_dtype(self) -> None: floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object) floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object) bytes_ = np.array([b"ab", b"cdef", b"g"], dtype=object) bytes_nans = np.array([b"ab", b"cdef", np.nan], dtype=object) strings = np.array(["ab", "cdef", "g"], dtype=object) strings_nans = np.array(["ab", "cdef", np.nan], dtype=object) all_nans = np.array([np.nan, np.nan], dtype=object) original = Dataset( { "floats": ("a", floats), "floats_nans": ("a", floats_nans), "bytes": ("b", bytes_), "bytes_nans": ("b", bytes_nans), "strings": ("b", strings), "strings_nans": ("b", strings_nans), "all_nans": ("c", all_nans), "nan": ([], np.nan), } ) expected = original.copy(deep=True) with self.roundtrip(original) as actual: try: assert_identical(expected, actual) except AssertionError: # Most stores use '' for nans in strings, but some don't. # First try the ideal case (where the store returns exactly) # the original Dataset), then try a more realistic case. # This currently includes all netCDF files when encoding is not # explicitly set. 
# https://github.com/pydata/xarray/issues/1647 # Also Zarr expected["bytes_nans"][-1] = b"" expected["strings_nans"][-1] = "" assert_identical(expected, actual) def test_roundtrip_string_data(self) -> None: expected = Dataset({"x": ("t", ["ab", "cdef"])}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_string_encoded_characters(self) -> None: expected = Dataset({"x": ("t", ["ab", "cdef"])}) expected["x"].encoding["dtype"] = "S1" with self.roundtrip(expected) as actual: assert_identical(expected, actual) assert actual["x"].encoding["_Encoding"] == "utf-8" expected["x"].encoding["_Encoding"] = "ascii" with self.roundtrip(expected) as actual: assert_identical(expected, actual) assert actual["x"].encoding["_Encoding"] == "ascii" def test_roundtrip_numpy_datetime_data(self) -> None: times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"], unit="ns") expected = Dataset({"t": ("t", times), "t0": times[0]}) kwargs = {"encoding": {"t0": {"units": "days since 1950-01-01"}}} with self.roundtrip(expected, save_kwargs=kwargs) as actual: assert_identical(expected, actual) assert actual.t0.encoding["units"] == "days since 1950-01-01" @requires_cftime def test_roundtrip_cftime_datetime_data(self) -> None: from xarray.tests.test_coding_times import _all_cftime_date_types date_types = _all_cftime_date_types() for date_type in date_types.values(): times = [date_type(1, 1, 1), date_type(1, 1, 2)] expected = Dataset({"t": ("t", times), "t0": times[0]}) kwargs = {"encoding": {"t0": {"units": "days since 0001-01-01"}}} expected_decoded_t = np.array(times) expected_decoded_t0 = np.array([date_type(1, 1, 1)]) expected_calendar = times[0].calendar with warnings.catch_warnings(): if expected_calendar in {"proleptic_gregorian", "standard"}: warnings.filterwarnings("ignore", "Unable to decode time axis") with self.roundtrip(expected, save_kwargs=kwargs) as actual: # proleptic gregorian will be decoded into numpy datetime64 # fixing to expectations if actual.t.dtype.kind == "M": dtype = actual.t.dtype expected_decoded_t = expected_decoded_t.astype(dtype) expected_decoded_t0 = expected_decoded_t0.astype(dtype) assert_array_equal(actual.t.values, expected_decoded_t) assert ( actual.t.encoding["units"] == "days since 0001-01-01 00:00:00.000000" ) assert actual.t.encoding["calendar"] == expected_calendar assert_array_equal(actual.t0.values, expected_decoded_t0) assert actual.t0.encoding["units"] == "days since 0001-01-01" assert actual.t.encoding["calendar"] == expected_calendar def test_roundtrip_timedelta_data(self) -> None: # todo: suggestion from review: # roundtrip large microsecond or coarser resolution timedeltas, # though we cannot test that until we fix the timedelta decoding # to support large ranges time_deltas = pd.to_timedelta(["1h", "2h", "NaT"]).as_unit("s") # type: ignore[arg-type, unused-ignore] encoding = {"units": "seconds"} expected = Dataset({"td": ("td", time_deltas), "td0": time_deltas[0]}) expected["td"].encoding = encoding expected["td0"].encoding = encoding with self.roundtrip( expected, open_kwargs={"decode_timedelta": CFTimedeltaCoder(time_unit="ns")} ) as actual: assert_identical(expected, actual) def test_roundtrip_timedelta_data_via_dtype( self, time_unit: PDDatetimeUnitOptions ) -> None: time_deltas = pd.to_timedelta(["1h", "2h", "NaT"]).as_unit(time_unit) # type: ignore[arg-type, unused-ignore] expected = Dataset( {"td": ("td", time_deltas), "td0": time_deltas[0].to_numpy()} ) with self.roundtrip(expected) as actual: 
assert_identical(expected, actual) def test_roundtrip_float64_data(self) -> None: expected = Dataset({"x": ("y", np.array([1.0, 2.0, np.pi], dtype="float64"))}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) @requires_netcdf def test_roundtrip_example_1_netcdf(self) -> None: with open_example_dataset("example_1.nc") as expected: with self.roundtrip(expected) as actual: # we allow the attributes to differ since that # will depend on the encoding used. For example, # without CF encoding 'actual' will end up with # a dtype attribute. assert_equal(expected, actual) def test_roundtrip_coordinates(self) -> None: original = Dataset( {"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])} ) with self.roundtrip(original) as actual: assert_identical(original, actual) original["foo"].encoding["coordinates"] = "y" with self.roundtrip(original, open_kwargs={"decode_coords": False}) as expected: # check roundtripping when decode_coords=False with self.roundtrip( expected, open_kwargs={"decode_coords": False} ) as actual: assert_identical(expected, actual) def test_roundtrip_global_coordinates(self) -> None: original = Dataset( {"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])} ) with self.roundtrip(original) as actual: assert_identical(original, actual) # test that global "coordinates" is as expected _, attrs = encode_dataset_coordinates(original) assert attrs["coordinates"] == "y" # test warning when global "coordinates" is already set original.attrs["coordinates"] = "foo" with pytest.warns(SerializationWarning): _, attrs = encode_dataset_coordinates(original) assert attrs["coordinates"] == "foo" def test_roundtrip_coordinates_with_space(self) -> None: original = Dataset(coords={"x": 0, "y z": 1}) expected = Dataset({"y z": 1}, {"x": 0}) with pytest.warns(SerializationWarning): with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_boolean_dtype(self) -> None: original = create_boolean_data() assert original["x"].dtype == "bool" with self.roundtrip(original) as actual: assert_identical(original, actual) assert actual["x"].dtype == "bool" # this checks for preserving dtype during second roundtrip # see https://github.com/pydata/xarray/issues/7652#issuecomment-1476956975 with self.roundtrip(actual) as actual2: assert_identical(original, actual2) assert actual2["x"].dtype == "bool" with self.roundtrip(actual) as actual3: # GH10536 assert_identical(original.transpose(), actual3.transpose()) def test_orthogonal_indexing(self) -> None: in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)} expected = in_memory.isel(indexers) actual = on_disk.isel(**indexers) # make sure the array is not yet loaded into memory assert not actual["var1"].variable._in_memory assert_identical(expected, actual) # do it twice, to make sure we're switched from orthogonal -> numpy # when we cached the values actual = on_disk.isel(**indexers) assert_identical(expected, actual) def test_vectorized_indexing(self) -> None: in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: indexers = { "dim1": DataArray([0, 2, 0], dims="a"), "dim2": DataArray([0, 2, 3], dims="a"), } expected = in_memory.isel(indexers) actual = on_disk.isel(**indexers) # make sure the array is not yet loaded into memory assert not actual["var1"].variable._in_memory assert_identical(expected, actual.load()) # do it twice, to make sure we're 
switched from # vectorized -> numpy when we cached the values actual = on_disk.isel(**indexers) assert_identical(expected, actual) def multiple_indexing(indexers): # make sure a sequence of lazy indexings certainly works. with self.roundtrip(in_memory) as on_disk: actual = on_disk["var3"] expected = in_memory["var3"] for ind in indexers: actual = actual.isel(ind) expected = expected.isel(ind) # make sure the array is not yet loaded into memory assert not actual.variable._in_memory assert_identical(expected, actual.load()) # two-staged vectorized-indexing indexers2 = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": DataArray([[0, 4], [1, 3], [2, 2]], dims=["a", "b"]), }, {"a": DataArray([0, 1], dims=["c"]), "b": DataArray([0, 1], dims=["c"])}, ] multiple_indexing(indexers2) # vectorized-slice mixed indexers3 = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": slice(None, 10), } ] multiple_indexing(indexers3) # vectorized-integer mixed indexers4 = [ {"dim3": 0}, {"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])}, {"a": slice(None, None, 2)}, ] multiple_indexing(indexers4) # vectorized-integer mixed indexers5 = [ {"dim3": 0}, {"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])}, {"a": 1, "b": 0}, ] multiple_indexing(indexers5) def test_vectorized_indexing_negative_step(self) -> None: # use dask explicitly when present open_kwargs: dict[str, Any] | None if has_dask: open_kwargs = {"chunks": {}} else: open_kwargs = None in_memory = create_test_data() def multiple_indexing(indexers): # make sure a sequence of lazy indexings certainly works. with self.roundtrip(in_memory, open_kwargs=open_kwargs) as on_disk: actual = on_disk["var3"] expected = in_memory["var3"] for ind in indexers: actual = actual.isel(ind) expected = expected.isel(ind) # make sure the array is not yet loaded into memory assert not actual.variable._in_memory assert_identical(expected, actual.load()) # with negative step slice. indexers = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": slice(-1, 1, -1), } ] multiple_indexing(indexers) # with negative step slice. indexers = [ { "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]), "dim3": slice(-1, 1, -2), } ] multiple_indexing(indexers) def test_outer_indexing_reversed(self) -> None: # regression test for GH6560 ds = xr.Dataset( {"z": (("t", "p", "y", "x"), np.ones((1, 1, 31, 40)))}, ) with self.roundtrip(ds) as on_disk: subset = on_disk.isel(t=[0], p=0).z[:, ::10, ::10][:, ::-1, :] assert subset.sizes == subset.load().sizes def test_isel_dataarray(self) -> None: # Make sure isel works lazily. GH:issue:1688 in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: expected = in_memory.isel(dim2=in_memory["dim2"] < 3) actual = on_disk.isel(dim2=on_disk["dim2"] < 3) assert_identical(expected, actual) def validate_array_type(self, ds): # Make sure that only NumpyIndexingAdapter stores a bare np.ndarray. def find_and_validate_array(obj): # recursively called function. obj: array or array wrapper. 
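            # peel off indexing wrappers layer by layer: an inner ndarray,
            # dask array or pandas Index must be wrapped by the matching
            # adapter class; anything else is an unexpected storage type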
if hasattr(obj, "array"): if isinstance(obj.array, indexing.ExplicitlyIndexed): find_and_validate_array(obj.array) elif isinstance(obj.array, np.ndarray): assert isinstance(obj, indexing.NumpyIndexingAdapter) elif isinstance(obj.array, dask_array_type): assert isinstance(obj, indexing.DaskIndexingAdapter) elif isinstance(obj.array, pd.Index): assert isinstance(obj, indexing.PandasIndexingAdapter) else: raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}") for v in ds.variables.values(): find_and_validate_array(v._data) def test_array_type_after_indexing(self) -> None: in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: self.validate_array_type(on_disk) indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)} expected = in_memory.isel(indexers) actual = on_disk.isel(**indexers) assert_identical(expected, actual) self.validate_array_type(actual) # do it twice, to make sure we're switched from orthogonal -> numpy # when we cached the values actual = on_disk.isel(**indexers) assert_identical(expected, actual) self.validate_array_type(actual) def test_dropna(self) -> None: # regression test for GH:issue:1694 a = np.random.randn(4, 3) a[1, 1] = np.nan in_memory = xr.Dataset( {"a": (("y", "x"), a)}, coords={"y": np.arange(4), "x": np.arange(3)} ) assert_identical( in_memory.dropna(dim="x"), in_memory.isel(x=slice(None, None, 2)) ) with self.roundtrip(in_memory) as on_disk: self.validate_array_type(on_disk) expected = in_memory.dropna(dim="x") actual = on_disk.dropna(dim="x") assert_identical(expected, actual) def test_ondisk_after_print(self) -> None: """Make sure print does not load file into memory""" in_memory = create_test_data() with self.roundtrip(in_memory) as on_disk: repr(on_disk) assert not on_disk["var1"]._in_memory class CFEncodedBase(DatasetIOBase): def test_roundtrip_bytes_with_fill_value(self) -> None: values = np.array([b"ab", b"cdef", np.nan], dtype=object) encoding = {"_FillValue": b"X", "dtype": "S1"} original = Dataset({"x": ("t", values, {}, encoding)}) expected = original.copy(deep=True) with self.roundtrip(original) as actual: assert_identical(expected, actual) original = Dataset({"x": ("t", values, {}, {"_FillValue": b""})}) with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_string_with_fill_value_nchar(self) -> None: values = np.array(["ab", "cdef", np.nan], dtype=object) expected = Dataset({"x": ("t", values)}) encoding = {"dtype": "S1", "_FillValue": b"X"} original = Dataset({"x": ("t", values, {}, encoding)}) # Not supported yet. with pytest.raises(NotImplementedError): with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_empty_vlen_string_array(self) -> None: # checks preserving vlen dtype for empty arrays GH7862 dtype = create_vlen_dtype(str) original = Dataset({"a": np.array([], dtype=dtype)}) assert check_vlen_dtype(original["a"].dtype) is str with self.roundtrip(original) as actual: assert_identical(original, actual) if np.issubdtype(actual["a"].dtype, object): # only check metadata for capable backends # eg. 
NETCDF3 based backends do not roundtrip metadata if actual["a"].dtype.metadata is not None: assert check_vlen_dtype(actual["a"].dtype) is str else: # zarr v3 sends back " None: if hasattr(self, "zarr_version") and dtype == np.float32: pytest.skip("float32 will be treated as float64 in zarr") decoded = decoded_fn(dtype) encoded = encoded_fn(dtype) if decoded["x"].encoding["dtype"] == "u1" and not ( (self.engine == "netcdf4" and self.file_format is None) or self.file_format == "NETCDF4" ): pytest.skip("uint8 data can't be written to non-NetCDF4 data") with self.roundtrip(decoded) as actual: for k in decoded.variables: assert decoded.variables[k].dtype == actual.variables[k].dtype # CF _FillValue is always on-disk type assert ( decoded.variables[k].encoding["_FillValue"] == actual.variables[k].encoding["_FillValue"] ) assert_allclose(decoded, actual, decode_bytes=False) with self.roundtrip(decoded, open_kwargs=dict(decode_cf=False)) as actual: # TODO: this assumes that all roundtrips will first # encode. Is that something we want to test for? for k in encoded.variables: assert encoded.variables[k].dtype == actual.variables[k].dtype # CF _FillValue is always on-disk type assert ( decoded.variables[k].encoding["_FillValue"] == actual.variables[k].attrs["_FillValue"] ) assert_allclose(encoded, actual, decode_bytes=False) with self.roundtrip(encoded, open_kwargs=dict(decode_cf=False)) as actual: for k in encoded.variables: assert encoded.variables[k].dtype == actual.variables[k].dtype # CF _FillValue is always on-disk type assert ( encoded.variables[k].attrs["_FillValue"] == actual.variables[k].attrs["_FillValue"] ) assert_allclose(encoded, actual, decode_bytes=False) # make sure roundtrip encoding didn't change the # original dataset. assert_allclose(encoded, encoded_fn(dtype), decode_bytes=False) with self.roundtrip(encoded) as actual: for k in decoded.variables: assert decoded.variables[k].dtype == actual.variables[k].dtype assert_allclose(decoded, actual, decode_bytes=False) @pytest.mark.parametrize( ("fill_value", "exp_fill_warning"), [ (np.int8(-1), False), (np.uint8(255), True), (-1, False), (255, True), ], ) def test_roundtrip_unsigned(self, fill_value, exp_fill_warning): @contextlib.contextmanager def _roundtrip_with_warnings(*args, **kwargs): is_np2 = module_available("numpy", minversion="2.0.0.dev0") if exp_fill_warning and is_np2: warn_checker: contextlib.AbstractContextManager = pytest.warns( SerializationWarning, match="_FillValue attribute can't be represented", ) else: warn_checker = contextlib.nullcontext() with warn_checker: with self.roundtrip(*args, **kwargs) as actual: yield actual # regression/numpy2 test for encoding = { "_FillValue": fill_value, "_Unsigned": "true", "dtype": "i1", } x = np.array([0, 1, 127, 128, 254, np.nan], dtype=np.float32) decoded = Dataset({"x": ("t", x, {}, encoding)}) attributes = { "_FillValue": fill_value, "_Unsigned": "true", } # Create unsigned data corresponding to [0, 1, 127, 128, 255] unsigned sb = np.asarray([0, 1, 127, -128, -2, -1], dtype="i1") encoded = Dataset({"x": ("t", sb, attributes)}) unsigned_dtype = np.dtype(f"u{sb.dtype.itemsize}") with _roundtrip_with_warnings(decoded) as actual: for k in decoded.variables: assert decoded.variables[k].dtype == actual.variables[k].dtype exp_fv = decoded.variables[k].encoding["_FillValue"] if exp_fill_warning: exp_fv = np.array(exp_fv, dtype=unsigned_dtype).view(sb.dtype) assert exp_fv == actual.variables[k].encoding["_FillValue"] assert_allclose(decoded, actual, decode_bytes=False) with 
_roundtrip_with_warnings( decoded, open_kwargs=dict(decode_cf=False) ) as actual: for k in encoded.variables: assert encoded.variables[k].dtype == actual.variables[k].dtype exp_fv = encoded.variables[k].attrs["_FillValue"] if exp_fill_warning: exp_fv = np.array(exp_fv, dtype=unsigned_dtype).view(sb.dtype) assert exp_fv == actual.variables[k].attrs["_FillValue"] assert_allclose(encoded, actual, decode_bytes=False) @staticmethod def _create_cf_dataset(): original = Dataset( dict( variable=( ("ln_p", "latitude", "longitude"), np.arange(8, dtype="f4").reshape(2, 2, 2), {"ancillary_variables": "std_devs det_lim"}, ), std_devs=( ("ln_p", "latitude", "longitude"), np.arange(0.1, 0.9, 0.1).reshape(2, 2, 2), {"standard_name": "standard_error"}, ), det_lim=( (), 0.1, {"standard_name": "detection_minimum"}, ), ), dict( latitude=("latitude", [0, 1], {"units": "degrees_north"}), longitude=("longitude", [0, 1], {"units": "degrees_east"}), latlon=((), -1, {"grid_mapping_name": "latitude_longitude"}), latitude_bnds=(("latitude", "bnds2"), [[0, 1], [1, 2]]), longitude_bnds=(("longitude", "bnds2"), [[0, 1], [1, 2]]), areas=( ("latitude", "longitude"), [[1, 1], [1, 1]], {"units": "degree^2"}, ), ln_p=( "ln_p", [1.0, 0.5], { "standard_name": "atmosphere_ln_pressure_coordinate", "computed_standard_name": "air_pressure", }, ), P0=((), 1013.25, {"units": "hPa"}), ), ) original["variable"].encoding.update( {"cell_measures": "area: areas", "grid_mapping": "latlon"}, ) original.coords["latitude"].encoding.update( dict(grid_mapping="latlon", bounds="latitude_bnds") ) original.coords["longitude"].encoding.update( dict(grid_mapping="latlon", bounds="longitude_bnds") ) original.coords["ln_p"].encoding.update({"formula_terms": "p0: P0 lev : ln_p"}) return original def test_grid_mapping_and_bounds_are_not_coordinates_in_file(self) -> None: original = self._create_cf_dataset() with self.roundtrip(original, open_kwargs={"decode_coords": False}) as ds: assert ds.coords["latitude"].attrs["bounds"] == "latitude_bnds" assert ds.coords["longitude"].attrs["bounds"] == "longitude_bnds" assert "coordinates" not in ds["variable"].attrs assert "coordinates" not in ds.attrs def test_coordinate_variables_after_dataset_roundtrip(self) -> None: original = self._create_cf_dataset() with self.roundtrip(original, open_kwargs={"decode_coords": "all"}) as actual: assert_identical(actual, original) with self.roundtrip(original) as actual: expected = original.reset_coords( ["latitude_bnds", "longitude_bnds", "areas", "P0", "latlon"] ) # equal checks that coords and data_vars are equal which # should be enough # identical would require resetting a number of attributes # skip that. assert_equal(actual, expected) def test_grid_mapping_and_bounds_are_coordinates_after_dataarray_roundtrip( self, ) -> None: original = self._create_cf_dataset() # The DataArray roundtrip should have the same warnings as the # Dataset, but we already tested for those, so just go for the # new warnings. It would appear that there is no way to tell # pytest "This warning and also this warning should both be # present". # xarray/tests/test_conventions.py::TestCFEncodedDataStore # needs the to_dataset. The other backends should be fine # without it. 
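        # (Added, hedged sketch -- not executed by this test) What
        # ``decode_coords="all"`` does on open: variables referenced by CF
        # attributes/encodings such as "grid_mapping", "bounds" and
        # "cell_measures" are promoted to coordinates instead of being left as
        # data variables. For the dataset built above one would expect roughly
        #
        #     ds = xr.open_dataset(path, decode_coords="all")   # ``path`` is a placeholder
        #     assert "latlon" in ds.coords            # grid mapping variable
        #     assert "latitude_bnds" in ds.coords     # bounds variable
        #
        # while ``decode_coords=False`` keeps those references as plain attrs.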
with pytest.warns( UserWarning, match=( r"Variable\(s\) referenced in bounds not in variables: " r"\['l(at|ong)itude_bnds'\]" ), ): with self.roundtrip( original["variable"].to_dataset(), open_kwargs={"decode_coords": "all"} ) as actual: assert_identical(actual, original["variable"].to_dataset()) @requires_iris @requires_netcdf def test_coordinate_variables_after_iris_roundtrip(self) -> None: original = self._create_cf_dataset() iris_cube = original["variable"].to_iris() actual = DataArray.from_iris(iris_cube) # Bounds will be missing (xfail) del original.coords["latitude_bnds"], original.coords["longitude_bnds"] # Ancillary vars will be missing # Those are data_vars, and will be dropped when grabbing the variable assert_identical(actual, original["variable"]) def test_coordinates_encoding(self) -> None: def equals_latlon(obj): return obj in {"lat lon", "lon lat"} original = Dataset( {"temp": ("x", [0, 1]), "precip": ("x", [0, -1])}, {"lat": ("x", [2, 3]), "lon": ("x", [4, 5])}, ) with self.roundtrip(original) as actual: assert_identical(actual, original) with self.roundtrip(original, open_kwargs=dict(decode_coords=False)) as ds: assert equals_latlon(ds["temp"].attrs["coordinates"]) assert equals_latlon(ds["precip"].attrs["coordinates"]) assert "coordinates" not in ds.attrs assert "coordinates" not in ds["lat"].attrs assert "coordinates" not in ds["lon"].attrs modified = original.drop_vars(["temp", "precip"]) with self.roundtrip(modified) as actual: assert_identical(actual, modified) with self.roundtrip(modified, open_kwargs=dict(decode_coords=False)) as ds: assert equals_latlon(ds.attrs["coordinates"]) assert "coordinates" not in ds["lat"].attrs assert "coordinates" not in ds["lon"].attrs original["temp"].encoding["coordinates"] = "lat" with self.roundtrip(original) as actual: assert_identical(actual, original) original["precip"].encoding["coordinates"] = "lat" with self.roundtrip(original, open_kwargs=dict(decode_coords=True)) as ds: assert "lon" not in ds["temp"].encoding["coordinates"] assert "lon" not in ds["precip"].encoding["coordinates"] assert "coordinates" not in ds["lat"].encoding assert "coordinates" not in ds["lon"].encoding def test_roundtrip_endian(self) -> None: skip_if_zarr_format_3("zarr v3 has not implemented endian support yet") ds = Dataset( { "x": np.arange(3, 10, dtype=">i2"), "y": np.arange(3, 20, dtype=" None: te = (TypeError, "string or None") ve = (ValueError, "string must be length 1 or") data = np.random.random((2, 2)) da = xr.DataArray(data) for name, (error, msg) in zip( [0, (4, 5), True, ""], [te, te, te, ve], strict=True ): ds = Dataset({name: da}) with pytest.raises(error) as excinfo: with self.roundtrip(ds): pass excinfo.match(msg) excinfo.match(repr(name)) def test_encoding_kwarg(self) -> None: ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs: dict[str, Any] = dict(encoding={"x": {"dtype": "f4"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: encoded_dtype = actual.x.encoding["dtype"] # On OS X, dtype sometimes switches endianness for unclear reasons assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4 assert ds.x.encoding == {} kwargs = dict(encoding={"x": {"foo": "bar"}}) with pytest.raises(ValueError, match=r"unexpected encoding"): with self.roundtrip(ds, save_kwargs=kwargs) as actual: pass kwargs = dict(encoding={"x": "foo"}) with pytest.raises(ValueError, match=r"must be castable"): with self.roundtrip(ds, save_kwargs=kwargs) as actual: pass kwargs = dict(encoding={"invalid": {}}) with pytest.raises(KeyError): with 
self.roundtrip(ds, save_kwargs=kwargs) as actual: pass def test_encoding_unlimited_dims(self) -> None: if isinstance(self, ZarrBase): pytest.skip("No unlimited_dims handled in zarr.") ds = Dataset({"x": ("y", np.arange(10.0))}) with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) ds.encoding = {"unlimited_dims": ["y"]} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 ds.encoding = {"unlimited_dims": "y"} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # test unlimited_dims validation # https://github.com/pydata/xarray/issues/10549 ds.encoding = {"unlimited_dims": "z"} with pytest.warns( UserWarning, match=r"Unlimited dimension\(s\) .* declared in 'dataset.encoding'", ): with self.roundtrip(ds) as _: pass ds.encoding = {} with pytest.raises( ValueError, match=r"Unlimited dimension\(s\) .* declared in 'unlimited_dims-kwarg'", ): with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["z"])) as _: pass def test_encoding_kwarg_dates(self) -> None: ds = Dataset({"t": pd.date_range("2000-01-01", periods=3)}) units = "days since 1900-01-01" kwargs = dict(encoding={"t": {"units": units}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.t.encoding["units"] == units assert_identical(actual, ds) def test_encoding_kwarg_fixed_width_string(self) -> None: # regression test for GH2149 for strings in [[b"foo", b"bar", b"baz"], ["foo", "bar", "baz"]]: ds = Dataset({"x": strings}) kwargs = dict(encoding={"x": {"dtype": "S1"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual["x"].encoding["dtype"] == "S1" assert_identical(actual, ds) def test_default_fill_value(self) -> None: # Test default encoding for float: ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = dict(encoding={"x": {"dtype": "f4"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert math.isnan(actual.x.encoding["_FillValue"]) assert ds.x.encoding == {} # Test default encoding for int: ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = dict(encoding={"x": {"dtype": "int16"}}) with warnings.catch_warnings(): warnings.filterwarnings("ignore", ".*floating point data as an integer") with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert "_FillValue" not in actual.x.encoding assert ds.x.encoding == {} # Test default encoding for implicit int: ds = Dataset({"x": ("y", np.arange(10, dtype="int16"))}) with self.roundtrip(ds) as actual: assert "_FillValue" not in actual.x.encoding assert ds.x.encoding == {} def test_explicitly_omit_fill_value(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}) ds.x.encoding["_FillValue"] = None with self.roundtrip(ds) as actual: assert "_FillValue" not in actual.x.encoding def test_explicitly_omit_fill_value_via_encoding_kwarg(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}) kwargs = dict(encoding={"x": {"_FillValue": None}}) # _FillValue is not a valid encoding for Zarr with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert "_FillValue" not in actual.x.encoding assert ds.y.encoding == {} def 
test_explicitly_omit_fill_value_in_coord(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]}) ds.y.encoding["_FillValue"] = None with self.roundtrip(ds) as actual: assert "_FillValue" not in actual.y.encoding def test_explicitly_omit_fill_value_in_coord_via_encoding_kwarg(self) -> None: ds = Dataset({"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]}) kwargs = dict(encoding={"y": {"_FillValue": None}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert "_FillValue" not in actual.y.encoding assert ds.y.encoding == {} def test_encoding_same_dtype(self) -> None: ds = Dataset({"x": ("y", np.arange(10.0, dtype="f4"))}) kwargs = dict(encoding={"x": {"dtype": "f4"}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: encoded_dtype = actual.x.encoding["dtype"] # On OS X, dtype sometimes switches endianness for unclear reasons assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4 assert ds.x.encoding == {} def test_append_write(self) -> None: # regression for GH1215 data = create_test_data() with self.roundtrip_append(data) as actual: assert_identical(data, actual) def test_append_overwrite_values(self) -> None: # regression for GH1215 data = create_test_data() with create_tmp_file(allow_cleanup_failure=False) as tmp_file: self.save(data, tmp_file, mode="w") data["var2"][:] = -999 data["var9"] = data["var2"] * 3 self.save(data[["var2", "var9"]], tmp_file, mode="a") with self.open(tmp_file) as actual: assert_identical(data, actual) def test_append_with_invalid_dim_raises(self) -> None: data = create_test_data() with create_tmp_file(allow_cleanup_failure=False) as tmp_file: self.save(data, tmp_file, mode="w") data["var9"] = data["var2"] * 3 data = data.isel(dim1=slice(2, 6)) # modify one dimension with pytest.raises( ValueError, match=r"Unable to update size for existing dimension" ): self.save(data, tmp_file, mode="a") def test_multiindex_not_implemented(self) -> None: ds = Dataset(coords={"y": ("x", [1, 2]), "z": ("x", ["a", "b"])}).set_index( x=["y", "z"] ) with pytest.raises(NotImplementedError, match=r"MultiIndex"): with self.roundtrip(ds): pass # regression GH8628 (can serialize reset multi-index level coordinates) ds_reset = ds.reset_index("x") with self.roundtrip(ds_reset) as actual: assert_identical(actual, ds_reset) @requires_dask def test_string_object_warning(self) -> None: original = Dataset( { "x": ( [ "y", ], np.array(["foo", "bar"], dtype=object), ) } ).chunk() with pytest.warns(SerializationWarning, match="dask array with dtype=object"): with self.roundtrip(original) as actual: assert_identical(original, actual) @pytest.mark.parametrize( "indexer", ( {"y": [1]}, {"y": slice(2)}, {"y": 1}, {"x": [1], "y": [1]}, {"x": ("x0", [0, 1]), "y": ("x0", [0, 1])}, ), ) def test_indexing_roundtrip(self, indexer) -> None: # regression test for GH8909 ds = xr.Dataset() ds["A"] = xr.DataArray([[1, "a"], [2, "b"]], dims=["x", "y"]) with self.roundtrip(ds) as ds2: expected = ds2.sel(indexer) with self.roundtrip(expected) as actual: assert_identical(actual, expected) class NetCDFBase(CFEncodedBase): """Tests for all netCDF3 and netCDF4 backends.""" @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: await super().test_load_async() @pytest.mark.skipif( ON_WINDOWS, reason="Windows does not allow modifying open files" ) def test_refresh_from_disk(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4862 with create_tmp_file() as 
example_1_path: with create_tmp_file() as example_1_modified_path: with open_example_dataset("example_1.nc") as example_1: self.save(example_1, example_1_path) example_1.rh.values += 100 self.save(example_1, example_1_modified_path) a = open_dataset(example_1_path, engine=self.engine).load() # Simulate external process modifying example_1.nc while this script is running shutil.copy(example_1_modified_path, example_1_path) # Reopen example_1.nc (modified) as `b`; note that `a` has NOT been closed b = open_dataset(example_1_path, engine=self.engine).load() try: assert not np.array_equal(a.rh.values, b.rh.values) finally: a.close() b.close() def test_byte_attrs(self, byte_attrs_dataset: dict[str, Any]) -> None: # test for issue #9407 input = byte_attrs_dataset["input"] expected = byte_attrs_dataset["expected"] with self.roundtrip(input) as actual: assert_identical(actual, expected) _counter = itertools.count() @contextlib.contextmanager def create_tmp_file( suffix: str = ".nc", allow_cleanup_failure: bool = False ) -> Iterator[str]: temp_dir = tempfile.mkdtemp() path = os.path.join(temp_dir, f"temp-{next(_counter)}{suffix}") try: yield path finally: try: shutil.rmtree(temp_dir) except OSError: if not allow_cleanup_failure: raise @contextlib.contextmanager def create_tmp_files( nfiles: int, suffix: str = ".nc", allow_cleanup_failure: bool = False ) -> Iterator[list[str]]: with ExitStack() as stack: files = [ stack.enter_context(create_tmp_file(suffix, allow_cleanup_failure)) for _ in range(nfiles) ] yield files class NetCDF4Base(NetCDFBase): """Tests for both netCDF4-python and h5netcdf.""" engine: T_NetcdfEngine = "netcdf4" def test_open_group(self) -> None: # Create a netCDF file with a dataset stored within a group with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as rootgrp: foogrp = rootgrp.createGroup("foo") ds = foogrp ds.createDimension("time", size=10) x = np.arange(10) ds.createVariable("x", np.int32, dimensions=("time",)) ds.variables["x"][:] = x expected = Dataset() expected["x"] = ("time", x) # check equivalent ways to specify group for group in "foo", "/foo", "foo/", "/foo/": with self.open(tmp_file, group=group) as actual: assert_equal(actual["x"], expected["x"]) # check that missing group raises appropriate exception with pytest.raises(OSError): open_dataset(tmp_file, group="bar") with pytest.raises(ValueError, match=r"must be a string"): open_dataset(tmp_file, group=(1, 2, 3)) def test_open_subgroup(self) -> None: # Create a netCDF file with a dataset stored within a group within a # group with create_tmp_file() as tmp_file: rootgrp = nc4.Dataset(tmp_file, "w") foogrp = rootgrp.createGroup("foo") bargrp = foogrp.createGroup("bar") ds = bargrp ds.createDimension("time", size=10) x = np.arange(10) ds.createVariable("x", np.int32, dimensions=("time",)) ds.variables["x"][:] = x rootgrp.close() expected = Dataset() expected["x"] = ("time", x) # check equivalent ways to specify group for group in "foo/bar", "/foo/bar", "foo/bar/", "/foo/bar/": with self.open(tmp_file, group=group) as actual: assert_equal(actual["x"], expected["x"]) def test_write_groups(self) -> None: data1 = create_test_data() data2 = data1 * 2 with create_tmp_file() as tmp_file: self.save(data1, tmp_file, group="data/1") self.save(data2, tmp_file, group="data/2", mode="a") with self.open(tmp_file, group="data/1") as actual1: assert_identical(data1, actual1) with self.open(tmp_file, group="data/2") as actual2: assert_identical(data2, actual2) def test_child_group_with_inconsistent_dimensions(self) 
-> None: base = Dataset(coords={"x": [1, 2]}) child = Dataset(coords={"x": [1, 2, 3]}) with create_tmp_file() as tmp_file: self.save(base, tmp_file) self.save(child, tmp_file, group="child", mode="a") with self.open(tmp_file) as actual_base: assert_identical(base, actual_base) with self.open(tmp_file, group="child") as actual_child: assert_identical(child, actual_child) @pytest.mark.parametrize( "input_strings, is_bytes", [ ([b"foo", b"bar", b"baz"], True), (["foo", "bar", "baz"], False), (["foΓ³", "bΓ‘r", "baΕΊ"], False), ], ) def test_encoding_kwarg_vlen_string( self, input_strings: list[str], is_bytes: bool ) -> None: original = Dataset({"x": input_strings}) expected_string = ["foo", "bar", "baz"] if is_bytes else input_strings expected = Dataset({"x": expected_string}) kwargs = dict(encoding={"x": {"dtype": str}}) with self.roundtrip(original, save_kwargs=kwargs) as actual: assert actual["x"].encoding["dtype"] == "=U3" assert actual["x"].dtype == "=U3" assert_identical(actual, expected) @pytest.mark.parametrize("fill_value", ["XXX", "", "bΓ‘r"]) def test_roundtrip_string_with_fill_value_vlen(self, fill_value: str) -> None: values = np.array(["ab", "cdef", np.nan], dtype=object) expected = Dataset({"x": ("t", values)}) original = Dataset({"x": ("t", values, {}, {"_FillValue": fill_value})}) with self.roundtrip(original) as actual: assert_identical(expected, actual) original = Dataset({"x": ("t", values, {}, {"_FillValue": ""})}) with self.roundtrip(original) as actual: assert_identical(expected, actual) def test_roundtrip_character_array(self) -> None: with create_tmp_file() as tmp_file: values = np.array([["a", "b", "c"], ["d", "e", "f"]], dtype="S") with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 2) nc.createDimension("string3", 3) v = nc.createVariable("x", np.dtype("S1"), ("x", "string3")) v[:] = values values = np.array(["abc", "def"], dtype="S") expected = Dataset({"x": ("x", values)}) with open_dataset(tmp_file) as actual: assert_identical(expected, actual) # regression test for #157 with self.roundtrip(actual) as roundtripped: assert_identical(expected, roundtripped) def test_default_to_char_arrays(self) -> None: data = Dataset({"x": np.array(["foo", "zzzz"], dtype="S")}) with self.roundtrip(data) as actual: assert_identical(data, actual) assert actual["x"].dtype == np.dtype("S4") def test_open_encodings(self) -> None: # Create a netCDF file with explicit time units # and make sure it makes it into the encodings # and survives a round trip with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as ds: ds.createDimension("time", size=10) ds.createVariable("time", np.int32, dimensions=("time",)) units = "days since 1999-01-01" ds.variables["time"].setncattr("units", units) ds.variables["time"][:] = np.arange(10) + 4 expected = Dataset() time = pd.date_range("1999-01-05", periods=10, unit="ns") encoding = {"units": units, "dtype": np.dtype("int32")} expected["time"] = ("time", time, {}, encoding) with open_dataset(tmp_file) as actual: assert_equal(actual["time"], expected["time"]) actual_encoding = { k: v for k, v in actual["time"].encoding.items() if k in expected["time"].encoding } assert actual_encoding == expected["time"].encoding def test_dump_encodings(self) -> None: # regression test for #709 ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = dict(encoding={"x": {"zlib": True}}) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.x.encoding["zlib"] def test_dump_and_open_encodings(self) -> None: # Create a netCDF file with 
explicit time units # and make sure it makes it into the encodings # and survives a round trip with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as ds: ds.createDimension("time", size=10) ds.createVariable("time", np.int32, dimensions=("time",)) units = "days since 1999-01-01" ds.variables["time"].setncattr("units", units) ds.variables["time"][:] = np.arange(10) + 4 with open_dataset(tmp_file) as xarray_dataset: with create_tmp_file() as tmp_file2: xarray_dataset.to_netcdf(tmp_file2) with nc4.Dataset(tmp_file2, "r") as ds: assert ds.variables["time"].getncattr("units") == units assert_array_equal(ds.variables["time"], np.arange(10) + 4) def test_compression_encoding_legacy(self) -> None: data = create_test_data() data["var2"].encoding.update( { "zlib": True, "chunksizes": (5, 5), "fletcher32": True, "shuffle": True, "original_shape": data.var2.shape, } ) with self.roundtrip(data) as actual: for k, v in data["var2"].encoding.items(): assert v == actual["var2"].encoding[k] # regression test for #156 expected = data.isel(dim1=0) with self.roundtrip(expected) as actual: assert_equal(expected, actual) def test_encoding_kwarg_compression(self) -> None: ds = Dataset({"x": np.arange(10.0)}) encoding = dict( dtype="f4", zlib=True, complevel=9, fletcher32=True, chunksizes=(5,), shuffle=True, ) kwargs = dict(encoding=dict(x=encoding)) with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert_equal(actual, ds) assert actual.x.encoding["dtype"] == "f4" assert actual.x.encoding["zlib"] assert actual.x.encoding["complevel"] == 9 assert actual.x.encoding["fletcher32"] assert actual.x.encoding["chunksizes"] == (5,) assert actual.x.encoding["shuffle"] assert ds.x.encoding == {} def test_keep_chunksizes_if_no_original_shape(self) -> None: ds = Dataset({"x": [1, 2, 3]}) chunksizes = (2,) ds.variables["x"].encoding = {"chunksizes": chunksizes} with self.roundtrip(ds) as actual: assert_identical(ds, actual) assert_array_equal( ds["x"].encoding["chunksizes"], actual["x"].encoding["chunksizes"] ) def test_preferred_chunks_is_present(self) -> None: ds = Dataset({"x": [1, 2, 3]}) chunksizes = (2,) ds.variables["x"].encoding = {"chunksizes": chunksizes} with self.roundtrip(ds) as actual: assert actual["x"].encoding["preferred_chunks"] == {"x": 2} @requires_dask def test_auto_chunking_is_based_on_disk_chunk_sizes(self) -> None: x_size = y_size = 1000 y_chunksize = y_size x_chunksize = 10 with dask.config.set({"array.chunk-size": "100KiB"}): with self.chunked_roundtrip( (1, y_size, x_size), (1, y_chunksize, x_chunksize), open_kwargs={"chunks": "auto"}, ) as ds: t_chunks, y_chunks, x_chunks = ds["image"].data.chunks assert all(np.asanyarray(y_chunks) == y_chunksize) # Check that the chunk size is a multiple of the file chunk size assert all(np.asanyarray(x_chunks) % x_chunksize == 0) @requires_dask def test_base_chunking_uses_disk_chunk_sizes(self) -> None: x_size = y_size = 1000 y_chunksize = y_size x_chunksize = 10 with self.chunked_roundtrip( (1, y_size, x_size), (1, y_chunksize, x_chunksize), open_kwargs={"chunks": {}}, ) as ds: for chunksizes, expected in zip( ds["image"].data.chunks, (1, y_chunksize, x_chunksize), strict=True ): assert all(np.asanyarray(chunksizes) == expected) @contextlib.contextmanager def chunked_roundtrip( self, array_shape: tuple[int, int, int], chunk_sizes: tuple[int, int, int], open_kwargs: dict[str, Any] | None = None, ) -> Generator[Dataset, None, None]: t_size, y_size, x_size = array_shape t_chunksize, y_chunksize, x_chunksize = chunk_sizes image = xr.DataArray( 
np.arange(t_size * x_size * y_size, dtype=np.int16).reshape( (t_size, y_size, x_size) ), dims=["t", "y", "x"], ) image.encoding = {"chunksizes": (t_chunksize, y_chunksize, x_chunksize)} dataset = xr.Dataset(dict(image=image)) with self.roundtrip(dataset, open_kwargs=open_kwargs) as ds: yield ds def test_preferred_chunks_are_disk_chunk_sizes(self) -> None: x_size = y_size = 1000 y_chunksize = y_size x_chunksize = 10 with self.chunked_roundtrip( (1, y_size, x_size), (1, y_chunksize, x_chunksize) ) as ds: assert ds["image"].encoding["preferred_chunks"] == { "t": 1, "y": y_chunksize, "x": x_chunksize, } def test_encoding_chunksizes_unlimited(self) -> None: # regression test for GH1225 ds = Dataset({"x": [1, 2, 3], "y": ("x", [2, 3, 4])}) ds.variables["x"].encoding = { "zlib": False, "shuffle": False, "complevel": 0, "fletcher32": False, "contiguous": False, "chunksizes": (2**20,), "original_shape": (3,), } with self.roundtrip(ds) as actual: assert_equal(ds, actual) def test_mask_and_scale(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("t", 5) nc.createVariable("x", "int16", ("t",), fill_value=-1) v = nc.variables["x"] v.set_auto_maskandscale(False) v.add_offset = 10 v.scale_factor = 0.1 v[:] = np.array([-1, -1, 0, 1, 2]) dtype = type(v.scale_factor) # first make sure netCDF4 reads the masked and scaled data # correctly with nc4.Dataset(tmp_file, mode="r") as nc: expected = np.ma.array( [-1, -1, 10, 10.1, 10.2], mask=[True, True, False, False, False] ) actual = nc.variables["x"][:] assert_array_equal(expected, actual) # now check xarray with open_dataset(tmp_file) as ds: expected = create_masked_and_scaled_data(np.dtype(dtype)) assert_identical(expected, ds) def test_0dimensional_variable(self) -> None: # This fix verifies our work-around to this netCDF4-python bug: # https://github.com/Unidata/netcdf4-python/pull/220 with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: v = nc.createVariable("x", "int16") v[...] = 123 with open_dataset(tmp_file) as ds: expected = Dataset({"x": ((), 123)}) assert_identical(expected, ds) def test_read_variable_len_strings(self) -> None: with create_tmp_file() as tmp_file: values = np.array(["foo", "bar", "baz"], dtype=object) with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 3) v = nc.createVariable("x", str, ("x",)) v[:] = values expected = Dataset({"x": ("x", values)}) for kwargs in [{}, {"decode_cf": True}]: with open_dataset(tmp_file, **cast(dict, kwargs)) as actual: assert_identical(expected, actual) def test_raise_on_forward_slashes_in_names(self) -> None: # test for forward slash in variable names and dimensions # see GH 7943 data_vars: list[dict[str, Any]] = [ {"PASS/FAIL": (["PASSFAIL"], np.array([0]))}, {"PASS/FAIL": np.array([0])}, {"PASSFAIL": (["PASS/FAIL"], np.array([0]))}, ] for dv in data_vars: ds = Dataset(data_vars=dv) with pytest.raises(ValueError, match="Forward slashes '/' are not allowed"): with self.roundtrip(ds): pass @requires_netCDF4 def test_encoding_enum__no_fill_value(self, recwarn): with create_tmp_file() as tmp_file: cloud_type_dict = {"clear": 0, "cloudy": 1} with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("time", size=2) cloud_type = nc.createEnumType(np.uint8, "cloud_type", cloud_type_dict) v = nc.createVariable( "clouds", cloud_type, "time", fill_value=None, ) v[:] = 1 with open_dataset(tmp_file) as original: save_kwargs = {} # We don't expect any errors. 
# This is effectively a void context manager expected_warnings = 0 if self.engine == "h5netcdf": if not has_h5netcdf_1_4_0_or_above: save_kwargs["invalid_netcdf"] = True expected_warnings = 1 expected_msg = "You are writing invalid netcdf features to file" else: expected_warnings = 1 expected_msg = "Creating variable with default fill_value 0 which IS defined in enum type" with self.roundtrip(original, save_kwargs=save_kwargs) as actual: assert len(recwarn) == expected_warnings if expected_warnings: assert issubclass(recwarn[0].category, UserWarning) assert str(recwarn[0].message).startswith(expected_msg) assert_equal(original, actual) assert ( actual.clouds.encoding["dtype"].metadata["enum"] == cloud_type_dict ) if not ( self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above ): # not implemented in h5netcdf yet assert ( actual.clouds.encoding["dtype"].metadata["enum_name"] == "cloud_type" ) @requires_netCDF4 def test_encoding_enum__multiple_variable_with_enum(self): with create_tmp_file() as tmp_file: cloud_type_dict = {"clear": 0, "cloudy": 1, "missing": 255} with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("time", size=2) cloud_type = nc.createEnumType(np.uint8, "cloud_type", cloud_type_dict) nc.createVariable( "clouds", cloud_type, "time", fill_value=255, ) nc.createVariable( "tifa", cloud_type, "time", fill_value=255, ) with open_dataset(tmp_file) as original: save_kwargs = {} if self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above: save_kwargs["invalid_netcdf"] = True with self.roundtrip(original, save_kwargs=save_kwargs) as actual: assert_equal(original, actual) assert ( actual.clouds.encoding["dtype"] == actual.tifa.encoding["dtype"] ) assert ( actual.clouds.encoding["dtype"].metadata == actual.tifa.encoding["dtype"].metadata ) assert ( actual.clouds.encoding["dtype"].metadata["enum"] == cloud_type_dict ) if not ( self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above ): # not implemented in h5netcdf yet assert ( actual.clouds.encoding["dtype"].metadata["enum_name"] == "cloud_type" ) @requires_netCDF4 def test_encoding_enum__error_multiple_variable_with_changing_enum(self): """ Given 2 variables, if they share the same enum type, the 2 enum definition should be identical. 
""" with create_tmp_file() as tmp_file: cloud_type_dict = {"clear": 0, "cloudy": 1, "missing": 255} with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("time", size=2) cloud_type = nc.createEnumType(np.uint8, "cloud_type", cloud_type_dict) nc.createVariable( "clouds", cloud_type, "time", fill_value=255, ) nc.createVariable( "tifa", cloud_type, "time", fill_value=255, ) with open_dataset(tmp_file) as original: assert ( original.clouds.encoding["dtype"].metadata == original.tifa.encoding["dtype"].metadata ) modified_enum = original.clouds.encoding["dtype"].metadata["enum"] modified_enum.update({"neblig": 2}) original.clouds.encoding["dtype"] = np.dtype( "u1", metadata={"enum": modified_enum, "enum_name": "cloud_type"}, ) if not (self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above): # not implemented yet in h5netcdf with pytest.raises( ValueError, match=( "Cannot save variable .*" " because an enum `cloud_type` already exists in the Dataset .*" ), ): with self.roundtrip(original): pass @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_create_default_indexes(self, tmp_path, create_default_indexes) -> None: store_path = tmp_path / "tmp.nc" original_ds = xr.Dataset( {"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]} ) original_ds.to_netcdf(store_path, engine=self.engine, mode="w") with open_dataset( store_path, engine=self.engine, create_default_indexes=create_default_indexes, ) as loaded_ds: if create_default_indexes: assert list(loaded_ds.xindexes) == ["x"] and isinstance( loaded_ds.xindexes["x"], PandasIndex ) else: assert len(loaded_ds.xindexes) == 0 @requires_netCDF4 class TestNetCDF4Data(NetCDF4Base): @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore.open(tmp_file, mode="w") as store: yield store def test_variable_order(self) -> None: # doesn't work with scipy or h5py :( ds = Dataset() ds["a"] = 1 ds["z"] = 2 ds["b"] = 3 ds.coords["c"] = 4 with self.roundtrip(ds) as actual: assert list(ds.variables) == list(actual.variables) def test_unsorted_index_raises(self) -> None: # should be fixed in netcdf4 v1.2.1 random_data = np.random.random(size=(4, 6)) dim0 = [0, 1, 2, 3] dim1 = [0, 2, 1, 3, 5, 4] # We will sort this in a later step da = xr.DataArray( data=random_data, dims=("dim0", "dim1"), coords={"dim0": dim0, "dim1": dim1}, name="randovar", ) ds = da.to_dataset() with self.roundtrip(ds) as ondisk: inds = np.argsort(dim1) ds2 = ondisk.isel(dim1=inds) # Older versions of NetCDF4 raise an exception here, and if so we # want to ensure we improve (that is, replace) the error message try: _ = ds2.randovar.values except IndexError as err: assert "first by calling .load" in str(err) def test_setncattr_string(self) -> None: list_of_strings = ["list", "of", "strings"] one_element_list_of_strings = ["one element"] one_string = "one string" attrs = { "foo": list_of_strings, "bar": one_element_list_of_strings, "baz": one_string, } ds = Dataset({"x": ("y", [1, 2, 3], attrs)}, attrs=attrs) with self.roundtrip(ds) as actual: for totest in [actual, actual["x"]]: assert_array_equal(list_of_strings, totest.attrs["foo"]) assert_array_equal(one_element_list_of_strings, totest.attrs["bar"]) assert one_string == totest.attrs["baz"] @pytest.mark.parametrize( "compression", [ None, "zlib", "szip", pytest.param( "zstd", marks=pytest.mark.xfail( not _check_compression_codec_available("zstd"), reason="zstd codec not available in netCDF4 installation", ), ), pytest.param( "blosc_lz", 
marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_lz"), reason="blosc_lz codec not available in netCDF4 installation", ), ), pytest.param( "blosc_lz4", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_lz4"), reason="blosc_lz4 codec not available in netCDF4 installation", ), ), pytest.param( "blosc_lz4hc", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_lz4hc"), reason="blosc_lz4hc codec not available in netCDF4 installation", ), ), pytest.param( "blosc_zlib", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_zlib"), reason="blosc_zlib codec not available in netCDF4 installation", ), ), pytest.param( "blosc_zstd", marks=pytest.mark.xfail( not _check_compression_codec_available("blosc_zstd"), reason="blosc_zstd codec not available in netCDF4 installation", ), ), ], ) @requires_netCDF4_1_6_2_or_above @pytest.mark.xfail(ON_WINDOWS, reason="new compression not yet implemented") def test_compression_encoding(self, compression: str | None) -> None: data = create_test_data(dim_sizes=(20, 80, 10)) encoding_params: dict[str, Any] = dict(compression=compression, blosc_shuffle=1) data["var2"].encoding.update(encoding_params) data["var2"].encoding.update( { "chunksizes": (20, 40), "original_shape": data.var2.shape, "blosc_shuffle": 1, "fletcher32": False, } ) with self.roundtrip(data) as actual: expected_encoding = data["var2"].encoding.copy() # compression does not appear in the retrieved encoding, that differs # from the input encoding. shuffle also chantges. Here we modify the # expected encoding to account for this compression = expected_encoding.pop("compression") blosc_shuffle = expected_encoding.pop("blosc_shuffle") if compression is not None: if "blosc" in compression and blosc_shuffle: expected_encoding["blosc"] = { "compressor": compression, "shuffle": blosc_shuffle, } expected_encoding["shuffle"] = False elif compression == "szip": expected_encoding["szip"] = { "coding": "nn", "pixels_per_block": 8, } expected_encoding["shuffle"] = False else: # This will set a key like zlib=true which is what appears in # the encoding when we read it. expected_encoding[compression] = True if compression == "zstd": expected_encoding["shuffle"] = False else: expected_encoding["shuffle"] = False actual_encoding = actual["var2"].encoding assert expected_encoding.items() <= actual_encoding.items() if ( encoding_params["compression"] is not None and "blosc" not in encoding_params["compression"] ): # regression test for #156 expected = data.isel(dim1=0) with self.roundtrip(expected) as actual: assert_equal(expected, actual) @pytest.mark.skip(reason="https://github.com/Unidata/netcdf4-python/issues/1195") def test_refresh_from_disk(self) -> None: super().test_refresh_from_disk() @requires_netCDF4_1_7_0_or_above def test_roundtrip_complex(self): expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) skwargs = dict(auto_complex=True) okwargs = dict(auto_complex=True) with self.roundtrip( expected, save_kwargs=skwargs, open_kwargs=okwargs ) as actual: assert_equal(expected, actual) @requires_netCDF4 class TestNetCDF4AlreadyOpen: def test_base_case(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: v = nc.createVariable("x", "int") v[...] 
= 42 nc = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(nc) with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) def test_group(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: group = nc.createGroup("g") v = group.createVariable("x", "int") v[...] = 42 nc = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(nc.groups["g"]) with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) nc = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(nc, group="g") with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) with nc4.Dataset(tmp_file, mode="r") as nc: with pytest.raises(ValueError, match="must supply a root"): backends.NetCDF4DataStore(nc.groups["g"], group="g") def test_deepcopy(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4425 with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 10) v = nc.createVariable("y", np.int32, ("x",)) v[:] = np.arange(10) h5 = nc4.Dataset(tmp_file, mode="r") store = backends.NetCDF4DataStore(h5) with open_dataset(store) as ds: copied = ds.copy(deep=True) expected = Dataset({"y": ("x", np.arange(10))}) assert_identical(expected, copied) @requires_netCDF4 @requires_dask @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") class TestNetCDF4ViaDaskData(TestNetCDF4Data): @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if open_kwargs is None: open_kwargs = {} if save_kwargs is None: save_kwargs = {} open_kwargs.setdefault("chunks", -1) with TestNetCDF4Data.roundtrip( self, data, save_kwargs, open_kwargs, allow_cleanup_failure ) as ds: yield ds def test_unsorted_index_raises(self) -> None: # Skip when using dask because dask rewrites indexers to getitem, # dask first pulls items by block. pass @pytest.mark.skip(reason="caching behavior differs for dask") def test_dataset_caching(self) -> None: pass def test_write_inconsistent_chunks(self) -> None: # Construct two variables with the same dimensions, but different # chunk sizes. x = da.zeros((100, 100), dtype="f4", chunks=(50, 100)) x = DataArray(data=x, dims=("lat", "lon"), name="x") x.encoding["chunksizes"] = (50, 100) x.encoding["original_shape"] = (100, 100) y = da.ones((100, 100), dtype="f4", chunks=(100, 50)) y = DataArray(data=y, dims=("lat", "lon"), name="y") y.encoding["chunksizes"] = (100, 50) y.encoding["original_shape"] = (100, 100) # Put them both into the same dataset ds = Dataset({"x": x, "y": y}) with self.roundtrip(ds) as actual: assert actual["x"].encoding["chunksizes"] == (50, 100) assert actual["y"].encoding["chunksizes"] == (100, 50) # Flaky test. 
Very open to contributions on fixing this @pytest.mark.flaky def test_roundtrip_coordinates(self) -> None: super().test_roundtrip_coordinates() @requires_cftime def test_roundtrip_cftime_bnds(self): # Regression test for issue #7794 import cftime original = xr.Dataset( { "foo": ("time", [0.0]), "time_bnds": ( ("time", "bnds"), [ [ cftime.Datetime360Day(2005, 12, 1, 0, 0, 0, 0), cftime.Datetime360Day(2005, 12, 2, 0, 0, 0, 0), ] ], ), }, {"time": [cftime.Datetime360Day(2005, 12, 1, 12, 0, 0, 0)]}, ) with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with open_dataset(tmp_file) as actual: # Operation to load actual time_bnds into memory assert_array_equal(actual.time_bnds.values, original.time_bnds.values) chunked = actual.chunk(time=1) with create_tmp_file() as tmp_file_chunked: chunked.to_netcdf(tmp_file_chunked) @requires_zarr @pytest.mark.usefixtures("default_zarr_format") class ZarrBase(CFEncodedBase): DIMENSION_KEY = "_ARRAY_DIMENSIONS" zarr_version = 2 version_kwargs: dict[str, Any] = {} def create_zarr_target(self): raise NotImplementedError @contextlib.contextmanager def create_store(self, cache_members: bool = False): with self.create_zarr_target() as store_target: yield backends.ZarrStore.open_group( store_target, mode="w", cache_members=cache_members, **self.version_kwargs, ) def save(self, dataset, store_target, **kwargs): # type: ignore[override] return dataset.to_zarr(store=store_target, **kwargs, **self.version_kwargs) @contextlib.contextmanager def open(self, path, **kwargs): with xr.open_dataset( path, engine="zarr", mode="r", **kwargs, **self.version_kwargs ) as ds: yield ds @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with self.create_zarr_target() as store_target: self.save(data, store_target, **save_kwargs) with self.open(store_target, **open_kwargs) as ds: yield ds @pytest.mark.asyncio @pytest.mark.skipif( not has_zarr_v3, reason="zarr-python <3 did not support async loading", ) async def test_load_async(self) -> None: await super().test_load_async() def test_roundtrip_bytes_with_fill_value(self): pytest.xfail("Broken by Zarr 3.0.7") @pytest.mark.parametrize("consolidated", [False, True, None]) def test_roundtrip_consolidated(self, consolidated) -> None: expected = create_test_data() with self.roundtrip( expected, save_kwargs={"consolidated": consolidated}, open_kwargs={"backend_kwargs": {"consolidated": consolidated}}, ) as actual: self.check_dtypes_roundtripped(expected, actual) assert_identical(expected, actual) def test_read_non_consolidated_warning(self) -> None: expected = create_test_data() with self.create_zarr_target() as store: self.save( expected, store_target=store, consolidated=False, **self.version_kwargs ) if getattr(store, "supports_consolidated_metadata", True): with pytest.warns( RuntimeWarning, match="Failed to open Zarr store with consolidated", ): with xr.open_zarr(store, **self.version_kwargs) as ds: assert_identical(ds, expected) def test_non_existent_store(self) -> None: with pytest.raises( FileNotFoundError, match="(No such file or directory|Unable to find group|No group found in store|does not exist)", ): xr.open_zarr(f"{uuid.uuid4()}") @pytest.mark.skipif(has_zarr_v3, reason="chunk_store not implemented in zarr v3") def test_with_chunkstore(self) -> None: expected = create_test_data() with ( self.create_zarr_target() as store_target, self.create_zarr_target() as chunk_store, 
): save_kwargs = {"chunk_store": chunk_store} self.save(expected, store_target, **save_kwargs) # the chunk store must have been populated with some entries assert len(chunk_store) > 0 open_kwargs = {"backend_kwargs": {"chunk_store": chunk_store}} with self.open(store_target, **open_kwargs) as ds: assert_equal(ds, expected) @requires_dask def test_auto_chunk(self) -> None: original = create_test_data().chunk() with self.roundtrip(original, open_kwargs={"chunks": None}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # there should be no chunks assert v.chunks is None with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # chunk size should be the same as original assert v.chunks == original[k].chunks @requires_dask @pytest.mark.filterwarnings("ignore:The specified chunks separate:UserWarning") def test_manual_chunk(self) -> None: original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3}) # Using chunks = None should return non-chunked arrays open_kwargs: dict[str, Any] = {"chunks": None} with self.roundtrip(original, open_kwargs=open_kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # there should be no chunks assert v.chunks is None # uniform arrays for i in range(2, 6): rechunked = original.chunk(chunks=i) open_kwargs = {"chunks": i} with self.roundtrip(original, open_kwargs=open_kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # chunk size should be the same as rechunked assert v.chunks == rechunked[k].chunks chunks = {"dim1": 2, "dim2": 3, "dim3": 5} rechunked = original.chunk(chunks=chunks) open_kwargs = { "chunks": chunks, "backend_kwargs": {"overwrite_encoded_chunks": True}, } with self.roundtrip(original, open_kwargs=open_kwargs) as actual: for k, v in actual.variables.items(): assert v.chunks == rechunked[k].chunks with self.roundtrip(actual) as auto: # encoding should have changed for k, v in actual.variables.items(): assert v.chunks == rechunked[k].chunks assert_identical(actual, auto) assert_identical(actual.load(), auto.load()) def test_unlimited_dims_encoding_is_ignored(self) -> None: ds = Dataset({"x": np.arange(10)}) ds.encoding = {"unlimited_dims": ["x"]} with self.roundtrip(ds) as actual: assert_identical(ds, actual) @requires_dask @pytest.mark.filterwarnings("ignore:.*does not have a Zarr V3 specification.*") def test_warning_on_bad_chunks(self) -> None: original = create_test_data().chunk({"dim1": 4, "dim2": 3, "dim3": 3}) bad_chunks = (2, {"dim2": (3, 3, 2, 1)}) for chunks in bad_chunks: kwargs = {"chunks": chunks} with pytest.warns(UserWarning): with self.roundtrip(original, open_kwargs=kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) good_chunks: tuple[dict[str, Any], ...] 
= ({"dim2": 3}, {"dim3": (6, 4)}, {}) for chunks in good_chunks: kwargs = {"chunks": chunks} with assert_no_warnings(): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".*Zarr format 3 specification.*", category=UserWarning, ) with self.roundtrip(original, open_kwargs=kwargs) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) @requires_dask def test_deprecate_auto_chunk(self) -> None: original = create_test_data().chunk() with pytest.raises(TypeError): with self.roundtrip(original, open_kwargs={"auto_chunk": True}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # chunk size should be the same as original assert v.chunks == original[k].chunks with pytest.raises(TypeError): with self.roundtrip(original, open_kwargs={"auto_chunk": False}) as actual: for k, v in actual.variables.items(): # only index variables should be in memory assert v._in_memory == (k in actual.dims) # there should be no chunks assert v.chunks is None @requires_dask def test_write_uneven_dask_chunks(self) -> None: # regression for GH#2225 original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3}) with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual: for k, v in actual.data_vars.items(): assert v.chunks == actual[k].chunks def test_chunk_encoding(self) -> None: # These datasets have no dask chunks. All chunking specified in # encoding data = create_test_data() chunks = (5, 5) data["var2"].encoding.update({"chunks": chunks}) with self.roundtrip(data) as actual: assert chunks == actual["var2"].encoding["chunks"] # expect an error with non-integer chunks data["var2"].encoding.update({"chunks": (5, 4.5)}) with pytest.raises(TypeError): with self.roundtrip(data) as actual: pass def test_shard_encoding(self) -> None: # These datasets have no dask chunks. All chunking/sharding specified in # encoding if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3: data = create_test_data() chunks = (1, 1) shards = (5, 5) data["var2"].encoding.update({"chunks": chunks}) data["var2"].encoding.update({"shards": shards}) with self.roundtrip(data) as actual: assert shards == actual["var2"].encoding["shards"] # expect an error with shards not divisible by chunks data["var2"].encoding.update({"chunks": (2, 2)}) with pytest.raises(ValueError): with self.roundtrip(data) as actual: pass @requires_dask @pytest.mark.skipif( ON_WINDOWS, reason="Very flaky on Windows CI. Can re-enable assuming it starts consistently passing.", ) def test_chunk_encoding_with_dask(self) -> None: # These datasets DO have dask chunks. Need to check for various # interactions between dask and zarr chunks ds = xr.DataArray((np.arange(12)), dims="x", name="var1").to_dataset() # - no encoding specified - # zarr automatically gets chunk information from dask chunks ds_chunk4 = ds.chunk({"x": 4}) with self.roundtrip(ds_chunk4) as actual: assert (4,) == actual["var1"].encoding["chunks"] # should fail if dask_chunks are irregular... ds_chunk_irreg = ds.chunk({"x": (5, 4, 3)}) with pytest.raises(ValueError, match=r"uniform chunk sizes."): with self.roundtrip(ds_chunk_irreg) as actual: pass # should fail if encoding["chunks"] clashes with dask_chunks badenc = ds.chunk({"x": 4}) badenc.var1.encoding["chunks"] = (6,) with pytest.raises(ValueError, match=r"named 'var1' would overlap"): with self.roundtrip(badenc) as actual: pass # unless... 
with self.roundtrip(badenc, save_kwargs={"safe_chunks": False}) as actual: # don't actually check equality because the data could be corrupted pass # if dask chunks (4) are an integer multiple of zarr chunks (2) it should not fail... goodenc = ds.chunk({"x": 4}) goodenc.var1.encoding["chunks"] = (2,) with self.roundtrip(goodenc) as actual: pass # if initial dask chunks are aligned, size of last dask chunk doesn't matter goodenc = ds.chunk({"x": (3, 3, 6)}) goodenc.var1.encoding["chunks"] = (3,) with self.roundtrip(goodenc) as actual: pass goodenc = ds.chunk({"x": (3, 6, 3)}) goodenc.var1.encoding["chunks"] = (3,) with self.roundtrip(goodenc) as actual: pass # ... also if the last chunk is irregular ds_chunk_irreg = ds.chunk({"x": (5, 5, 2)}) with self.roundtrip(ds_chunk_irreg) as actual: assert (5,) == actual["var1"].encoding["chunks"] # re-save Zarr arrays with self.roundtrip(ds_chunk_irreg) as original: with self.roundtrip(original) as actual: assert_identical(original, actual) # but intermediate unaligned chunks are bad badenc = ds.chunk({"x": (3, 5, 3, 1)}) badenc.var1.encoding["chunks"] = (3,) with pytest.raises(ValueError, match=r"would overlap multiple Dask chunks"): with self.roundtrip(badenc) as actual: pass # - encoding specified - # specify compatible encodings for chunk_enc in 4, (4,): ds_chunk4["var1"].encoding.update({"chunks": chunk_enc}) with self.roundtrip(ds_chunk4) as actual: assert (4,) == actual["var1"].encoding["chunks"] # TODO: remove this failure once synchronized overlapping writes are # supported by xarray ds_chunk4["var1"].encoding.update({"chunks": 5}) with pytest.raises(ValueError, match=r"named 'var1' would overlap"): with self.roundtrip(ds_chunk4) as actual: pass # override option with self.roundtrip(ds_chunk4, save_kwargs={"safe_chunks": False}) as actual: # don't actually check equality because the data could be corrupted pass @requires_netcdf def test_drop_encoding(self): with open_example_dataset("example_1.nc") as ds: encodings = {v: {**ds[v].encoding} for v in ds.data_vars} with self.create_zarr_target() as store: ds.to_zarr(store, encoding=encodings) def test_hidden_zarr_keys(self) -> None: skip_if_zarr_format_3("This test is unnecessary; no hidden Zarr keys") expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) zarr_group = store.ds # check that a variable hidden attribute is present and correct # JSON only has a single array type, which maps to list in Python. # In contrast, dims in xarray is always a tuple. 
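        # (Added note) For reference, a Zarr v2 store written by xarray keeps
        # the dimension names in a hidden per-array attribute; roughly
        # (values illustrative):
        #
        #     zarr_group["var2"].attrs["_ARRAY_DIMENSIONS"]   # ["dim1", "dim2"]
        #
        # The assertions below check that this key is present on disk but is
        # stripped from ``attrs`` once the store is decoded.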
for var in expected.variables.keys(): dims = zarr_group[var].attrs[self.DIMENSION_KEY] assert dims == list(expected[var].dims) with xr.decode_cf(store): # make sure it is hidden for var in expected.variables.keys(): assert self.DIMENSION_KEY not in expected[var].attrs # put it back and try removing from a variable attrs = dict(zarr_group["var2"].attrs) del attrs[self.DIMENSION_KEY] zarr_group["var2"].attrs.put(attrs) with pytest.raises(KeyError): with xr.decode_cf(store): pass def test_dimension_names(self) -> None: skip_if_zarr_format_2("No dimension names in V2") expected = create_test_data() with self.create_store() as store: expected.dump_to_store(store) zarr_group = store.ds for var in zarr_group: assert expected[var].dims == zarr_group[var].metadata.dimension_names @pytest.mark.parametrize("group", [None, "group1"]) def test_write_persistence_modes(self, group) -> None: original = create_test_data() # overwrite mode with self.roundtrip( original, save_kwargs={"mode": "w", "group": group}, open_kwargs={"group": group}, ) as actual: assert_identical(original, actual) # don't overwrite mode with self.roundtrip( original, save_kwargs={"mode": "w-", "group": group}, open_kwargs={"group": group}, ) as actual: assert_identical(original, actual) # make sure overwriting works as expected with self.create_zarr_target() as store: self.save(original, store) # should overwrite with no error self.save(original, store, mode="w", group=group) with self.open(store, group=group) as actual: assert_identical(original, actual) with pytest.raises((ValueError, FileExistsError)): self.save(original, store, mode="w-") # check append mode for normal write with self.roundtrip( original, save_kwargs={"mode": "a", "group": group}, open_kwargs={"group": group}, ) as actual: assert_identical(original, actual) # check append mode for append write ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", group=group, **self.version_kwargs) ds_to_append.to_zarr( store_target, append_dim="time", group=group, **self.version_kwargs ) original = xr.concat([ds, ds_to_append], dim="time") actual = xr.open_dataset( store_target, group=group, engine="zarr", **self.version_kwargs ) assert_identical(original, actual) def test_compressor_encoding(self) -> None: # specify a custom compressor original = create_test_data() if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3: encoding_key = "compressors" # all parameters need to be explicitly specified in order for the comparison to pass below encoding = { "serializer": zarr.codecs.BytesCodec(endian="little"), encoding_key: ( zarr.codecs.BloscCodec( cname="zstd", clevel=3, shuffle="shuffle", typesize=8, blocksize=0, ), ), } else: from numcodecs.blosc import Blosc encoding_key = "compressors" if has_zarr_v3 else "compressor" comp = Blosc(cname="zstd", clevel=3, shuffle=2) encoding = {encoding_key: (comp,) if has_zarr_v3 else comp} save_kwargs = dict(encoding={"var1": encoding}) with self.roundtrip(original, save_kwargs=save_kwargs) as ds: enc = ds["var1"].encoding[encoding_key] assert enc == encoding[encoding_key] def test_group(self) -> None: original = create_test_data() group = "some/random/path" with self.roundtrip( original, save_kwargs={"group": group}, open_kwargs={"group": group} ) as actual: assert_identical(original, actual) def test_zarr_mode_w_overwrites_encoding(self) -> None: data = Dataset({"foo": ("x", [1.0, 1.0, 1.0])}) with self.create_zarr_target() as store: data.to_zarr( store, 
**self.version_kwargs, encoding={"foo": {"add_offset": 1}} ) np.testing.assert_equal( zarr.open_group(store, **self.version_kwargs)["foo"], data.foo.data - 1 ) data.to_zarr( store, **self.version_kwargs, encoding={"foo": {"add_offset": 0}}, mode="w", ) np.testing.assert_equal( zarr.open_group(store, **self.version_kwargs)["foo"], data.foo.data ) def test_encoding_kwarg_fixed_width_string(self) -> None: # not relevant for zarr, since we don't use EncodedStringCoder pass def test_dataset_caching(self) -> None: super().test_dataset_caching() def test_append_write(self) -> None: super().test_append_write() def test_append_with_mode_rplus_success(self) -> None: original = Dataset({"foo": ("x", [1])}) modified = Dataset({"foo": ("x", [2])}) with self.create_zarr_target() as store: original.to_zarr(store, **self.version_kwargs) modified.to_zarr(store, mode="r+", **self.version_kwargs) with self.open(store) as actual: assert_identical(actual, modified) def test_append_with_mode_rplus_fails(self) -> None: original = Dataset({"foo": ("x", [1])}) modified = Dataset({"bar": ("x", [2])}) with self.create_zarr_target() as store: original.to_zarr(store, **self.version_kwargs) with pytest.raises( ValueError, match="dataset contains non-pre-existing variables" ): modified.to_zarr(store, mode="r+", **self.version_kwargs) def test_append_with_invalid_dim_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises( ValueError, match="does not match any existing dataset dimensions" ): ds_to_append.to_zarr( store_target, append_dim="notvalid", **self.version_kwargs ) def test_append_with_no_dims_raises(self) -> None: with self.create_zarr_target() as store_target: Dataset({"foo": ("x", [1])}).to_zarr( store_target, mode="w", **self.version_kwargs ) with pytest.raises(ValueError, match="different dimension names"): Dataset({"foo": ("y", [2])}).to_zarr( store_target, mode="a", **self.version_kwargs ) def test_append_with_append_dim_not_set_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises(ValueError, match="different dimension sizes"): ds_to_append.to_zarr(store_target, mode="a", **self.version_kwargs) def test_append_with_mode_not_a_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises(ValueError, match="cannot set append_dim unless"): ds_to_append.to_zarr( store_target, mode="w", append_dim="time", **self.version_kwargs ) def test_append_with_existing_encoding_raises(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) with pytest.raises(ValueError, match="but encoding was provided"): ds_to_append.to_zarr( store_target, append_dim="time", encoding={"da": {"compressor": None}}, **self.version_kwargs, ) @pytest.mark.parametrize("dtype", ["U", "S"]) def test_append_string_length_mismatch_raises(self, dtype) -> None: if has_zarr_v3 and not has_zarr_v3_dtypes: skip_if_zarr_format_3("This actually works fine with Zarr format 3") ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype) with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", 
**self.version_kwargs) with pytest.raises(ValueError, match="Mismatched dtypes for variable"): ds_to_append.to_zarr( store_target, append_dim="time", **self.version_kwargs ) @pytest.mark.parametrize("dtype", ["U", "S"]) def test_append_string_length_mismatch_works(self, dtype) -> None: skip_if_zarr_format_2("This doesn't work with Zarr format 2") # ...but it probably would if we used object dtype if has_zarr_v3_dtypes: pytest.skip("This works on pre ZDtype Zarr-Python, but fails after.") ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype) expected = xr.concat([ds, ds_to_append], dim="time") with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs) actual = xr.open_dataset(store_target, engine="zarr") xr.testing.assert_identical(expected, actual) def test_check_encoding_is_consistent_after_append(self) -> None: ds, ds_to_append, _ = create_append_test_data() # check encoding consistency with self.create_zarr_target() as store_target: import numcodecs encoding_value: Any if has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3: compressor = zarr.codecs.BloscCodec() else: compressor = numcodecs.Blosc() encoding_key = "compressors" if has_zarr_v3 else "compressor" encoding_value = (compressor,) if has_zarr_v3 else compressor encoding = {"da": {encoding_key: encoding_value}} ds.to_zarr(store_target, mode="w", encoding=encoding, **self.version_kwargs) original_ds = xr.open_dataset( store_target, engine="zarr", **self.version_kwargs ) original_encoding = original_ds["da"].encoding[encoding_key] ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs) actual_ds = xr.open_dataset( store_target, engine="zarr", **self.version_kwargs ) actual_encoding = actual_ds["da"].encoding[encoding_key] assert original_encoding == actual_encoding assert_identical( xr.open_dataset( store_target, engine="zarr", **self.version_kwargs ).compute(), xr.concat([ds, ds_to_append], dim="time"), ) def test_append_with_new_variable(self) -> None: ds, ds_to_append, ds_with_new_var = create_append_test_data() # check append mode for new variable with self.create_zarr_target() as store_target: combined = xr.concat([ds, ds_to_append], dim="time") combined.to_zarr(store_target, mode="w", **self.version_kwargs) assert_identical( combined, xr.open_dataset(store_target, engine="zarr", **self.version_kwargs), ) ds_with_new_var.to_zarr(store_target, mode="a", **self.version_kwargs) combined = xr.concat([ds, ds_to_append], dim="time") combined["new_var"] = ds_with_new_var["new_var"] assert_identical( combined, xr.open_dataset(store_target, engine="zarr", **self.version_kwargs), ) def test_append_with_append_dim_no_overwrite(self) -> None: ds, ds_to_append, _ = create_append_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, mode="w", **self.version_kwargs) original = xr.concat([ds, ds_to_append], dim="time") original2 = xr.concat([original, ds_to_append], dim="time") # overwrite a coordinate; # for mode='a-', this will not get written to the store # because it does not have the append_dim as a dim lon = ds_to_append.lon.to_numpy().copy() lon[:] = -999 ds_to_append["lon"] = lon ds_to_append.to_zarr( store_target, mode="a-", append_dim="time", **self.version_kwargs ) actual = xr.open_dataset(store_target, engine="zarr", **self.version_kwargs) assert_identical(original, actual) # by default, mode="a" will overwrite all coordinates. 
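            # (the append below passes append_dim without an explicit mode, so to_zarr
            # defaults to mode="a"; the modified ``lon`` coordinate therefore does get
            # written, which is what the comparison against ``original2`` further down checks)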
ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs) actual = xr.open_dataset(store_target, engine="zarr", **self.version_kwargs) lon = original2.lon.to_numpy().copy() lon[:] = -999 original2["lon"] = lon assert_identical(original2, actual) @requires_dask def test_to_zarr_compute_false_roundtrip(self) -> None: from dask.delayed import Delayed original = create_test_data().chunk() with self.create_zarr_target() as store: delayed_obj = self.save(original, store, compute=False) assert isinstance(delayed_obj, Delayed) # make sure target store has not been written to yet with pytest.raises(AssertionError): with self.open(store) as actual: assert_identical(original, actual) delayed_obj.compute() with self.open(store) as actual: assert_identical(original, actual) @requires_dask def test_to_zarr_append_compute_false_roundtrip(self) -> None: from dask.delayed import Delayed ds, ds_to_append, _ = create_append_test_data() ds, ds_to_append = ds.chunk(), ds_to_append.chunk() with pytest.warns(SerializationWarning): with self.create_zarr_target() as store: delayed_obj = self.save(ds, store, compute=False, mode="w") assert isinstance(delayed_obj, Delayed) with pytest.raises(AssertionError): with self.open(store) as actual: assert_identical(ds, actual) delayed_obj.compute() with self.open(store) as actual: assert_identical(ds, actual) delayed_obj = self.save( ds_to_append, store, compute=False, append_dim="time" ) assert isinstance(delayed_obj, Delayed) with pytest.raises(AssertionError): with self.open(store) as actual: assert_identical( xr.concat([ds, ds_to_append], dim="time"), actual ) delayed_obj.compute() with self.open(store) as actual: assert_identical(xr.concat([ds, ds_to_append], dim="time"), actual) @pytest.mark.parametrize("chunk", [False, True]) def test_save_emptydim(self, chunk) -> None: if chunk and not has_dask: pytest.skip("requires dask") ds = Dataset({"x": (("a", "b"), np.empty((5, 0))), "y": ("a", [1, 2, 5, 8, 9])}) if chunk: ds = ds.chunk({}) # chunk dataset to save dask array with self.roundtrip(ds) as ds_reload: assert_identical(ds, ds_reload) @requires_dask def test_no_warning_from_open_emptydim_with_chunks(self) -> None: ds = Dataset({"x": (("a", "b"), np.empty((5, 0)))}).chunk({"a": 1}) with assert_no_warnings(): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=".*Zarr format 3 specification.*", category=UserWarning, ) with self.roundtrip(ds, open_kwargs=dict(chunks={"a": 1})) as ds_reload: assert_identical(ds, ds_reload) @pytest.mark.parametrize("consolidated", [False, True, None]) @pytest.mark.parametrize("compute", [False, True]) @pytest.mark.parametrize("use_dask", [False, True]) @pytest.mark.parametrize("write_empty", [False, True, None]) def test_write_region(self, consolidated, compute, use_dask, write_empty) -> None: if (use_dask or not compute) and not has_dask: pytest.skip("requires dask") zeros = Dataset({"u": (("x",), np.zeros(10))}) nonzeros = Dataset({"u": (("x",), np.arange(1, 11))}) if use_dask: zeros = zeros.chunk(2) nonzeros = nonzeros.chunk(2) with self.create_zarr_target() as store: zeros.to_zarr( store, consolidated=consolidated, compute=compute, encoding={"u": dict(chunks=2)}, **self.version_kwargs, ) if compute: with xr.open_zarr( store, consolidated=consolidated, **self.version_kwargs ) as actual: assert_identical(actual, zeros) for i in range(0, 10, 2): region = {"x": slice(i, i + 2)} nonzeros.isel(region).to_zarr( store, region=region, consolidated=consolidated, write_empty_chunks=write_empty, 
**self.version_kwargs, ) with xr.open_zarr( store, consolidated=consolidated, **self.version_kwargs ) as actual: assert_identical(actual, nonzeros) @pytest.mark.parametrize("mode", [None, "r+", "a"]) def test_write_region_mode(self, mode) -> None: zeros = Dataset({"u": (("x",), np.zeros(10))}) nonzeros = Dataset({"u": (("x",), np.arange(1, 11))}) with self.create_zarr_target() as store: zeros.to_zarr(store, **self.version_kwargs) for region in [{"x": slice(5)}, {"x": slice(5, 10)}]: nonzeros.isel(region).to_zarr( store, region=region, mode=mode, **self.version_kwargs ) with xr.open_zarr(store, **self.version_kwargs) as actual: assert_identical(actual, nonzeros) @requires_dask def test_write_preexisting_override_metadata(self) -> None: """Metadata should be overridden if mode="a" but not in mode="r+".""" original = Dataset( {"u": (("x",), np.zeros(10), {"variable": "original"})}, attrs={"global": "original"}, ) both_modified = Dataset( {"u": (("x",), np.ones(10), {"variable": "modified"})}, attrs={"global": "modified"}, ) global_modified = Dataset( {"u": (("x",), np.ones(10), {"variable": "original"})}, attrs={"global": "modified"}, ) only_new_data = Dataset( {"u": (("x",), np.ones(10), {"variable": "original"})}, attrs={"global": "original"}, ) with self.create_zarr_target() as store: original.to_zarr(store, compute=False, **self.version_kwargs) both_modified.to_zarr(store, mode="a", **self.version_kwargs) with self.open(store) as actual: # NOTE: this is arguably incorrect -- we should probably be # overriding the variable metadata, too. See the TODO note in # ZarrStore.set_variables. assert_identical(actual, global_modified) with self.create_zarr_target() as store: original.to_zarr(store, compute=False, **self.version_kwargs) both_modified.to_zarr(store, mode="r+", **self.version_kwargs) with self.open(store) as actual: assert_identical(actual, only_new_data) with self.create_zarr_target() as store: original.to_zarr(store, compute=False, **self.version_kwargs) # with region, the default mode becomes r+ both_modified.to_zarr( store, region={"x": slice(None)}, **self.version_kwargs ) with self.open(store) as actual: assert_identical(actual, only_new_data) def test_write_region_errors(self) -> None: data = Dataset({"u": (("x",), np.arange(5))}) data2 = Dataset({"u": (("x",), np.array([10, 11]))}) @contextlib.contextmanager def setup_and_verify_store(expected=data): with self.create_zarr_target() as store: data.to_zarr(store, **self.version_kwargs) yield store with self.open(store) as actual: assert_identical(actual, expected) # verify the base case works expected = Dataset({"u": (("x",), np.array([10, 11, 2, 3, 4]))}) with setup_and_verify_store(expected) as store: data2.to_zarr(store, region={"x": slice(2)}, **self.version_kwargs) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=re.escape( "cannot set region unless mode='a', mode='a-', mode='r+' or mode=None" ), ): data.to_zarr( store, region={"x": slice(None)}, mode="w", **self.version_kwargs ) with setup_and_verify_store() as store: with pytest.raises(TypeError, match=r"must be a dict"): data.to_zarr(store, region=slice(None), **self.version_kwargs) # type: ignore[call-overload] with setup_and_verify_store() as store: with pytest.raises(TypeError, match=r"must be slice objects"): data2.to_zarr(store, region={"x": [0, 1]}, **self.version_kwargs) # type: ignore[dict-item] with setup_and_verify_store() as store: with pytest.raises(ValueError, match=r"step on all slices"): data2.to_zarr( store, region={"x":
slice(None, None, 2)}, **self.version_kwargs ) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"all keys in ``region`` are not in Dataset dimensions", ): data.to_zarr(store, region={"y": slice(None)}, **self.version_kwargs) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"all variables in the dataset to write must have at least one dimension in common", ): data2.assign(v=2).to_zarr( store, region={"x": slice(2)}, **self.version_kwargs ) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"cannot list the same dimension in both" ): data.to_zarr( store, region={"x": slice(None)}, append_dim="x", **self.version_kwargs, ) with setup_and_verify_store() as store: with pytest.raises( ValueError, match=r"variable 'u' already exists with different dimension sizes", ): data2.to_zarr(store, region={"x": slice(3)}, **self.version_kwargs) @requires_dask def test_encoding_chunksizes(self) -> None: # regression test for GH2278 # see also test_encoding_chunksizes_unlimited nx, ny, nt = 4, 4, 5 original = xr.Dataset( {}, coords={ "x": np.arange(nx), "y": np.arange(ny), "t": np.arange(nt), }, ) original["v"] = xr.Variable(("x", "y", "t"), np.zeros((nx, ny, nt))) original = original.chunk({"t": 1, "x": 2, "y": 2}) with self.roundtrip(original) as ds1: assert_equal(ds1, original) with self.roundtrip(ds1.isel(t=0)) as ds2: assert_equal(ds2, original.isel(t=0)) @requires_dask def test_chunk_encoding_with_partial_dask_chunks(self) -> None: original = xr.Dataset( {"x": xr.DataArray(np.random.random(size=(6, 8)), dims=("a", "b"))} ).chunk({"a": 3}) with self.roundtrip( original, save_kwargs={"encoding": {"x": {"chunks": [3, 2]}}} ) as ds1: assert_equal(ds1, original) @requires_dask def test_chunk_encoding_with_larger_dask_chunks(self) -> None: original = xr.Dataset({"a": ("x", [1, 2, 3, 4])}).chunk({"x": 2}) with self.roundtrip( original, save_kwargs={"encoding": {"a": {"chunks": [1]}}} ) as ds1: assert_equal(ds1, original) @requires_cftime def test_open_zarr_use_cftime(self) -> None: ds = create_test_data() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, **self.version_kwargs) ds_a = xr.open_zarr(store_target, **self.version_kwargs) assert_identical(ds, ds_a) decoder = CFDatetimeCoder(use_cftime=True) ds_b = xr.open_zarr( store_target, decode_times=decoder, **self.version_kwargs ) assert xr.coding.times.contains_cftime_datetimes(ds_b.time.variable) def test_write_read_select_write(self) -> None: # Test for https://github.com/pydata/xarray/issues/4084 ds = create_test_data() # NOTE: using self.roundtrip, which uses open_dataset, will not trigger the bug. with self.create_zarr_target() as initial_store: ds.to_zarr(initial_store, mode="w", **self.version_kwargs) ds1 = xr.open_zarr(initial_store, **self.version_kwargs) # Combination of where+squeeze triggers error on write. 
ds_sel = ds1.where(ds1.coords["dim3"] == "a", drop=True).squeeze("dim3") with self.create_zarr_target() as final_store: ds_sel.to_zarr(final_store, mode="w", **self.version_kwargs) @pytest.mark.parametrize("obj", [Dataset(), DataArray(name="foo")]) def test_attributes(self, obj) -> None: obj = obj.copy() obj.attrs["good"] = {"key": "value"} ds = obj if isinstance(obj, Dataset) else obj.to_dataset() with self.create_zarr_target() as store_target: ds.to_zarr(store_target, **self.version_kwargs) assert_identical(ds, xr.open_zarr(store_target, **self.version_kwargs)) obj.attrs["bad"] = DataArray() ds = obj if isinstance(obj, Dataset) else obj.to_dataset() with self.create_zarr_target() as store_target: with pytest.raises(TypeError, match=r"Invalid attribute in Dataset.attrs."): ds.to_zarr(store_target, **self.version_kwargs) @requires_dask @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) def test_chunked_datetime64_or_timedelta64(self, dtype) -> None: # Generalized from @malmans2's test in PR #8253 original = create_test_data().astype(dtype).chunk(1) with self.roundtrip( original, open_kwargs={ "chunks": {}, "decode_timedelta": CFTimedeltaCoder(time_unit="ns"), }, ) as actual: for name, actual_var in actual.variables.items(): assert original[name].chunks == actual_var.chunks assert original.chunks == actual.chunks @requires_cftime @requires_dask def test_chunked_cftime_datetime(self) -> None: # Based on @malmans2's test in PR #8253 times = date_range("2000", freq="D", periods=3, use_cftime=True) original = xr.Dataset(data_vars={"chunked_times": (["time"], times)}) original = original.chunk({"time": 1}) with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual: for name, actual_var in actual.variables.items(): assert original[name].chunks == actual_var.chunks assert original.chunks == actual.chunks def test_cache_members(self) -> None: """ Ensure that if `ZarrStore` is created with `cache_members` set to `True`, a `ZarrStore` only inspects the underlying zarr group once, and that the results of that inspection are cached. 
Otherwise, `ZarrStore.members` should inspect the underlying zarr group each time it is invoked """ with self.create_zarr_target() as store_target: zstore_mut = backends.ZarrStore.open_group( store_target, mode="w", cache_members=False ) # ensure that the keys are sorted array_keys = sorted(("foo", "bar")) # create some arrays for ak in array_keys: zstore_mut.zarr_group.create(name=ak, shape=(1,), dtype="uint8") zstore_stat = backends.ZarrStore.open_group( store_target, mode="r", cache_members=True ) observed_keys_0 = sorted(zstore_stat.array_keys()) assert observed_keys_0 == array_keys # create a new array new_key = "baz" zstore_mut.zarr_group.create(name=new_key, shape=(1,), dtype="uint8") observed_keys_1 = sorted(zstore_stat.array_keys()) assert observed_keys_1 == array_keys observed_keys_2 = sorted(zstore_mut.array_keys()) assert observed_keys_2 == sorted(array_keys + [new_key]) @requires_dask @pytest.mark.parametrize("dtype", [int, float]) def test_zarr_fill_value_setting(self, dtype): # When zarr_format=2, _FillValue sets fill_value # When zarr_format=3, fill_value is set independently # We test this by writing a dask array with compute=False, # on read we should receive chunks filled with `fill_value` fv = -1 ds = xr.Dataset( {"foo": ("x", dask.array.from_array(np.array([0, 0, 0], dtype=dtype)))} ) expected = xr.Dataset({"foo": ("x", [fv] * 3)}) zarr_format_2 = ( has_zarr_v3 and zarr.config.get("default_zarr_format") == 2 ) or not has_zarr_v3 if zarr_format_2: attr = "_FillValue" expected.foo.attrs[attr] = fv else: attr = "fill_value" if dtype is float: # for floats, Xarray inserts a default `np.nan` expected.foo.attrs["_FillValue"] = np.nan # turn off all decoding so we see what Zarr returns to us. # Since chunks are not written, we should receive only `fill_value` open_kwargs = { "mask_and_scale": False, "consolidated": False, "use_zarr_fill_value_as_mask": False, } save_kwargs = dict(compute=False, consolidated=False) with self.roundtrip( ds, save_kwargs=ChainMap(save_kwargs, dict(encoding={"foo": {attr: fv}})), open_kwargs=open_kwargs, ) as actual: assert_identical(actual, expected) ds.foo.encoding[attr] = fv with self.roundtrip( ds, save_kwargs=save_kwargs, open_kwargs=open_kwargs ) as actual: assert_identical(actual, expected) if zarr_format_2: ds = ds.drop_encoding() with pytest.raises(ValueError, match="_FillValue"): with self.roundtrip( ds, save_kwargs=ChainMap( save_kwargs, dict(encoding={"foo": {"fill_value": fv}}) ), open_kwargs=open_kwargs, ): pass # TODO: this doesn't fail because of the # ``raise_on_invalid=vn in check_encoding_set`` line in zarr.py # ds.foo.encoding["fill_value"] = fv @requires_zarr @pytest.mark.skipif( KVStore is None, reason="zarr-python 2.x or ZARR_V3_EXPERIMENTAL_API is unset."
) class TestInstrumentedZarrStore: if has_zarr_v3: methods = [ "get", "set", "list_dir", "list_prefix", ] else: methods = [ "__iter__", "__contains__", "__setitem__", "__getitem__", "listdir", "list_prefix", ] @contextlib.contextmanager def create_zarr_target(self): if Version(zarr.__version__) < Version("2.18.0"): pytest.skip("Instrumented tests only work on latest Zarr.") if has_zarr_v3: kwargs = {"read_only": False} else: kwargs = {} # type: ignore[arg-type,unused-ignore] store = KVStore({}, **kwargs) # type: ignore[arg-type,unused-ignore] yield store def make_patches(self, store): from unittest.mock import MagicMock return { method: MagicMock( f"KVStore.{method}", side_effect=getattr(store, method), autospec=True, ) for method in self.methods } def summarize(self, patches): summary = {} for name, patch_ in patches.items(): count = 0 for call in patch_.mock_calls: if "zarr.json" not in call.args: count += 1 summary[name.strip("_")] = count return summary def check_requests(self, expected, patches): summary = self.summarize(patches) for k in summary: assert summary[k] <= expected[k], (k, summary) def test_append(self) -> None: original = Dataset({"foo": ("x", [1])}, coords={"x": [0]}) modified = Dataset({"foo": ("x", [2])}, coords={"x": [1]}) with self.create_zarr_target() as store: if has_zarr_v3: # TODO: verify these expected = { "set": 5, "get": 4, "list_dir": 2, "list_prefix": 1, } else: expected = { "iter": 1, "contains": 18, "setitem": 10, "getitem": 13, "listdir": 0, "list_prefix": 3, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): original.to_zarr(store) self.check_requests(expected, patches) patches = self.make_patches(store) # v2024.03.0: {'iter': 6, 'contains': 2, 'setitem': 5, 'getitem': 10, 'listdir': 6, 'list_prefix': 0} # 6057128b: {'iter': 5, 'contains': 2, 'setitem': 5, 'getitem': 10, "listdir": 5, "list_prefix": 0} if has_zarr_v3: expected = { "set": 4, "get": 9, # TODO: fixme upstream (should be 8) "list_dir": 2, # TODO: fixme upstream (should be 2) "list_prefix": 0, } else: expected = { "iter": 1, "contains": 11, "setitem": 6, "getitem": 15, "listdir": 0, "list_prefix": 1, } with patch.multiple(KVStore, **patches): modified.to_zarr(store, mode="a", append_dim="x") self.check_requests(expected, patches) patches = self.make_patches(store) if has_zarr_v3: expected = { "set": 4, "get": 9, # TODO: fixme upstream (should be 8) "list_dir": 2, # TODO: fixme upstream (should be 2) "list_prefix": 0, } else: expected = { "iter": 1, "contains": 11, "setitem": 6, "getitem": 15, "listdir": 0, "list_prefix": 1, } with patch.multiple(KVStore, **patches): modified.to_zarr(store, mode="a-", append_dim="x") self.check_requests(expected, patches) with open_dataset(store, engine="zarr") as actual: assert_identical( actual, xr.concat([original, modified, modified], dim="x") ) @requires_dask def test_region_write(self) -> None: ds = Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}).chunk() with self.create_zarr_target() as store: if has_zarr_v3: expected = { "set": 5, "get": 2, "list_dir": 2, "list_prefix": 4, } else: expected = { "iter": 1, "contains": 16, "setitem": 9, "getitem": 13, "listdir": 0, "list_prefix": 5, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): ds.to_zarr(store, mode="w", compute=False) self.check_requests(expected, patches) # v2024.03.0: {'iter': 5, 'contains': 2, 'setitem': 1, 'getitem': 6, 'listdir': 5, 'list_prefix': 0} # 6057128b: {'iter': 4, 'contains': 2, 'setitem': 1, 'getitem': 5, 'listdir': 
4, 'list_prefix': 0} if has_zarr_v3: expected = { "set": 1, "get": 3, "list_dir": 0, "list_prefix": 0, } else: expected = { "iter": 1, "contains": 6, "setitem": 1, "getitem": 7, "listdir": 0, "list_prefix": 0, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): ds.to_zarr(store, region={"x": slice(None)}) self.check_requests(expected, patches) # v2024.03.0: {'iter': 6, 'contains': 4, 'setitem': 1, 'getitem': 11, 'listdir': 6, 'list_prefix': 0} # 6057128b: {'iter': 4, 'contains': 2, 'setitem': 1, 'getitem': 7, 'listdir': 4, 'list_prefix': 0} if has_zarr_v3: expected = { "set": 1, "get": 4, "list_dir": 0, "list_prefix": 0, } else: expected = { "iter": 1, "contains": 6, "setitem": 1, "getitem": 8, "listdir": 0, "list_prefix": 0, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): ds.to_zarr(store, region="auto") self.check_requests(expected, patches) if has_zarr_v3: expected = { "set": 0, "get": 5, "list_dir": 0, "list_prefix": 0, } else: expected = { "iter": 1, "contains": 6, "setitem": 0, "getitem": 8, "listdir": 0, "list_prefix": 0, } patches = self.make_patches(store) with patch.multiple(KVStore, **patches): with open_dataset(store, engine="zarr") as actual: assert_identical(actual, ds) self.check_requests(expected, patches) @requires_zarr class TestZarrDictStore(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): if has_zarr_v3: yield zarr.storage.MemoryStore({}, read_only=False) else: yield {} def test_chunk_key_encoding_v2(self) -> None: encoding = {"name": "v2", "configuration": {"separator": "/"}} # Create a dataset with a single 2D variable data = np.ones((4, 4)) original = Dataset({"var1": (("x", "y"), data)}) # Set up chunk key encoding with slash separator encoding = { "var1": { "chunk_key_encoding": encoding, "chunks": (2, 2), } } # Write to store with custom encoding with self.create_zarr_target() as store: original.to_zarr(store, encoding=encoding) # Verify the chunk keys in store use the slash separator if not has_zarr_v3: chunk_keys = [k for k in store.keys() if k.startswith("var1/")] assert len(chunk_keys) > 0 for key in chunk_keys: assert "/" in key assert "." not in key.split("/")[1:] # No dots in chunk coordinates # Read back and verify data with xr.open_zarr(store) as actual: assert_identical(original, actual) # Verify chunks are preserved assert actual["var1"].encoding["chunks"] == (2, 2) @pytest.mark.asyncio @requires_zarr_v3 async def test_async_load_multiple_variables(self) -> None: target_class = zarr.AsyncArray method_name = "getitem" original_method = getattr(target_class, method_name) # the indexed coordinate variables are not lazy, so the dataset from create_test_data() has 4 lazy variables in total N_LAZY_VARS = 4 original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, zarr_format=3, consolidated=False) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: # blocks upon loading the coordinate variables here ds = xr.open_zarr(store, consolidated=False, chunks=None) # TODO we're not actually testing that these indexing methods are not blocking...
result_ds = await ds.load_async() mocked_meth.assert_called() assert mocked_meth.call_count == N_LAZY_VARS mocked_meth.assert_awaited() xrt.assert_identical(result_ds, ds.load()) @pytest.mark.asyncio @requires_zarr_v3 @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"]) async def test_concurrent_load_multiple_objects( self, cls_name, ) -> None: N_OBJECTS = 5 N_LAZY_VARS = { "Variable": 1, "DataArray": 1, "Dataset": 4, } # specific to the create_test_data() used target_class = zarr.AsyncArray method_name = "getitem" original_method = getattr(target_class, method_name) original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=3) with patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: xr_obj = get_xr_obj(store, cls_name) # TODO we're not actually testing that these indexing methods are not blocking... coros = [xr_obj.load_async() for _ in range(N_OBJECTS)] results = await asyncio.gather(*coros) mocked_meth.assert_called() assert mocked_meth.call_count == N_OBJECTS * N_LAZY_VARS[cls_name] mocked_meth.assert_awaited() for result in results: xrt.assert_identical(result, xr_obj.load()) @pytest.mark.asyncio @requires_zarr_v3 @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"]) @pytest.mark.parametrize( "indexer, method, target_zarr_class", [ pytest.param({}, "sel", "zarr.AsyncArray", id="no-indexing-sel"), pytest.param({}, "isel", "zarr.AsyncArray", id="no-indexing-isel"), pytest.param({"dim2": 1.0}, "sel", "zarr.AsyncArray", id="basic-int-sel"), pytest.param({"dim2": 2}, "isel", "zarr.AsyncArray", id="basic-int-isel"), pytest.param( {"dim2": slice(1.0, 3.0)}, "sel", "zarr.AsyncArray", id="basic-slice-sel", ), pytest.param( {"dim2": slice(1, 3)}, "isel", "zarr.AsyncArray", id="basic-slice-isel" ), pytest.param( {"dim2": [1.0, 3.0]}, "sel", "zarr.core.indexing.AsyncOIndex", id="outer-sel", ), pytest.param( {"dim2": [1, 3]}, "isel", "zarr.core.indexing.AsyncOIndex", id="outer-isel", ), pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1.0, 2.0], dims="points"), }, "sel", "zarr.core.indexing.AsyncVIndex", id="vectorized-sel", ), pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1, 3], dims="points"), }, "isel", "zarr.core.indexing.AsyncVIndex", id="vectorized-isel", ), ], ) async def test_indexing( self, cls_name, method, indexer, target_zarr_class, ) -> None: if not has_zarr_v3_async_oindex and target_zarr_class in ( "zarr.core.indexing.AsyncOIndex", "zarr.core.indexing.AsyncVIndex", ): pytest.skip( "current version of zarr does not support orthogonal or vectorized async indexing" ) if cls_name == "Variable" and method == "sel": pytest.skip("Variable doesn't have a .sel method") # Each type of indexing ends up calling a different zarr indexing method # They all use a method named .getitem, but on a different internal zarr class def _resolve_class_from_string(class_path: str) -> type[Any]: """Resolve a string class path like 'zarr.AsyncArray' to the actual class.""" module_path, class_name = class_path.rsplit(".", 1) module = import_module(module_path) return getattr(module, class_name) target_class = _resolve_class_from_string(target_zarr_class) method_name = "getitem" original_method = getattr(target_class, method_name) original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=3) with 
patch.object( target_class, method_name, wraps=original_method, autospec=True ) as mocked_meth: xr_obj = get_xr_obj(store, cls_name) # TODO we're not actually testing that these indexing methods are not blocking... result = await getattr(xr_obj, method)(**indexer).load_async() mocked_meth.assert_called() mocked_meth.assert_awaited() assert mocked_meth.call_count > 0 expected = getattr(xr_obj, method)(**indexer).load() xrt.assert_identical(result, expected) @pytest.mark.asyncio @pytest.mark.parametrize( ("indexer", "expected_err_msg"), [ pytest.param( {"dim2": 2}, "basic async indexing", marks=pytest.mark.skipif( has_zarr_v3, reason="current version of zarr has basic async indexing", ), ), # tests basic indexing pytest.param( {"dim2": [1, 3]}, "orthogonal async indexing", marks=pytest.mark.skipif( has_zarr_v3_async_oindex, reason="current version of zarr has async orthogonal indexing", ), ), # tests oindexing pytest.param( { "dim1": xr.Variable(data=[2, 3], dims="points"), "dim2": xr.Variable(data=[1, 3], dims="points"), }, "vectorized async indexing", marks=pytest.mark.skipif( has_zarr_v3_async_oindex, reason="current version of zarr has async vectorized indexing", ), ), # tests vindexing ], ) @parametrize_zarr_format async def test_raise_on_older_zarr_version( self, indexer, expected_err_msg, zarr_format, ): """Test that trying to use async load with insufficiently new version of zarr raises a clear error""" original = create_test_data() with self.create_zarr_target() as store: original.to_zarr(store, consolidated=False, zarr_format=zarr_format) ds = xr.open_zarr(store, consolidated=False, chunks=None) var = ds["var1"].variable with pytest.raises(NotImplementedError, match=expected_err_msg): await var.isel(**indexer).load_async() def get_xr_obj( store: zarr.abc.store.Store, cls_name: Literal["Variable", "DataArray", "Dataset"] ): ds = xr.open_zarr(store, consolidated=False, chunks=None) match cls_name: case "Variable": return ds["var1"].variable case "DataArray": return ds["var1"] case "Dataset": return ds class NoConsolidatedMetadataSupportStore(WrapperStore): """ Store that explicitly does not support consolidated metadata. Useful as a proxy for stores like Icechunk, see https://github.com/zarr-developers/zarr-python/pull/3119. """ supports_consolidated_metadata = False def __init__( self, store, *, read_only: bool = False, ) -> None: self._store = store.with_read_only(read_only=read_only) def with_read_only( self, read_only: bool = False ) -> NoConsolidatedMetadataSupportStore: return type(self)( store=self._store, read_only=read_only, ) @requires_zarr_v3 class TestZarrNoConsolidatedMetadataSupport(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): # TODO the zarr version would need to be >3.08 for the supports_consolidated_metadata property to have any effect yield NoConsolidatedMetadataSupportStore( zarr.storage.MemoryStore({}, read_only=False) ) @requires_zarr @pytest.mark.skipif( ON_WINDOWS, reason="Very flaky on Windows CI. 
Can re-enable assuming it starts consistently passing.", ) class TestZarrDirectoryStore(ZarrBase): @contextlib.contextmanager def create_zarr_target(self): with create_tmp_file(suffix=".zarr") as tmp: yield tmp @requires_zarr class TestZarrWriteEmpty(TestZarrDirectoryStore): @contextlib.contextmanager def temp_dir(self) -> Iterator[tuple[str, str]]: with tempfile.TemporaryDirectory() as d: store = os.path.join(d, "test.zarr") yield d, store @contextlib.contextmanager def roundtrip_dir( self, data, store, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False, ) -> Iterator[Dataset]: if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} data.to_zarr(store, **save_kwargs, **self.version_kwargs) with xr.open_dataset( store, engine="zarr", **open_kwargs, **self.version_kwargs ) as ds: yield ds @pytest.mark.parametrize("consolidated", [True, False, None]) @pytest.mark.parametrize("write_empty", [True, False, None]) def test_write_empty( self, consolidated: bool | None, write_empty: bool | None, ) -> None: def assert_expected_files(expected: list[str], store: str) -> None: """Convenience for comparing with actual files written""" ls = [] test_root = os.path.join(store, "test") for root, _, files in os.walk(test_root): ls.extend( [ os.path.join(root, f).removeprefix(test_root).lstrip("/") for f in files ] ) assert set(expected) == { file.lstrip("c/") for file in ls if (file not in (".zattrs", ".zarray", "zarr.json")) } # The zarr format is set by the `default_zarr_format` # pytest fixture that acts on a superclass zarr_format_3 = has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3 if (write_empty is False) or (write_empty is None and has_zarr_v3): expected = ["0.1.0"] else: expected = [ "0.0.0", "0.0.1", "0.1.0", "0.1.1", ] if zarr_format_3: data = np.array([0.0, 0, 1.0, 0]).reshape((1, 2, 2)) # transform to the path style of zarr 3 # e.g. 0/0/1 expected = [e.replace(".", "/") for e in expected] else: # use nan for default fill_value behaviour data = np.array([np.nan, np.nan, 1.0, np.nan]).reshape((1, 2, 2)) ds = xr.Dataset(data_vars={"test": (("Z", "Y", "X"), data)}) if has_dask: ds["test"] = ds["test"].chunk(1) encoding = None else: encoding = {"test": {"chunks": (1, 1, 1)}} with self.temp_dir() as (d, store): ds.to_zarr( store, mode="w", encoding=encoding, write_empty_chunks=write_empty, ) # check expected files after a write assert_expected_files(expected, store) with self.roundtrip_dir( ds, store, save_kwargs={ "mode": "a", "append_dim": "Z", "write_empty_chunks": write_empty, }, ) as a_ds: expected_ds = xr.concat([ds, ds], dim="Z") assert_identical(a_ds, expected_ds.compute()) # add the new files we expect to be created by the append # that was performed by the roundtrip_dir if (write_empty is False) or (write_empty is None and has_zarr_v3): expected.append("1.1.0") elif not has_zarr_v3 or has_zarr_v3_async_oindex: # this was broken from zarr 3.0.0 until 3.1.2 # async oindex released in 3.1.2 along with a fix # for write_empty_chunks in append expected.extend( [ "1.1.0", "1.0.0", "1.0.1", "1.1.1", ] ) else: expected.append("1.1.0") if zarr_format_3: expected = [e.replace(".", "/") for e in expected] assert_expected_files(expected, store) def test_avoid_excess_metadata_calls(self) -> None: """Test that chunk requests do not trigger redundant metadata requests. This test targets logic in backends.zarr.ZarrArrayWrapper, asserting that calls to retrieve chunk data after initialization do not trigger additional metadata requests. 
https://github.com/pydata/xarray/issues/8290 """ ds = xr.Dataset(data_vars={"test": (("Z",), np.array([123]).reshape(1))}) # The call to retrieve metadata performs a group lookup. We patch Group.__getitem__ # so that we can inspect calls to this method - specifically count of calls. # Use of side_effect means that calls are passed through to the original method # rather than a mocked method. Group: Any if has_zarr_v3: Group = zarr.AsyncGroup patched = patch.object( Group, "getitem", side_effect=Group.getitem, autospec=True ) else: Group = zarr.Group patched = patch.object( Group, "__getitem__", side_effect=Group.__getitem__, autospec=True ) with self.create_zarr_target() as store, patched as mock: ds.to_zarr(store, mode="w") # We expect this to request array metadata information, so call_count should be == 1. xrds = xr.open_zarr(store) call_count = mock.call_count assert call_count == 1 # compute() requests array data, which should not trigger additional metadata requests # we assert that the number of calls has not increased after fetching the array xrds.test.compute(scheduler="sync") assert mock.call_count == call_count @requires_zarr @requires_fsspec @pytest.mark.skipif(has_zarr_v3, reason="Difficult to test.") def test_zarr_storage_options() -> None: pytest.importorskip("aiobotocore") ds = create_test_data() store_target = "memory://test.zarr" ds.to_zarr(store_target, storage_options={"test": "zarr_write"}) ds_a = xr.open_zarr(store_target, storage_options={"test": "zarr_read"}) assert_identical(ds, ds_a) @requires_zarr def test_zarr_version_deprecated() -> None: ds = create_test_data() store: Any if has_zarr_v3: store = KVStore() else: store = {} with pytest.warns(FutureWarning, match="zarr_version"): ds.to_zarr(store=store, zarr_version=2) with pytest.warns(FutureWarning, match="zarr_version"): xr.open_zarr(store=store, zarr_version=2) with pytest.raises(ValueError, match="zarr_format"): xr.open_zarr(store=store, zarr_version=2, zarr_format=3) @requires_scipy class TestScipyInMemoryData(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "scipy" @contextlib.contextmanager def create_store(self): fobj = BytesIO() yield backends.ScipyDataStore(fobj, "w") @pytest.mark.asyncio @pytest.mark.skip(reason="NetCDF backends don't support async loading") async def test_load_async(self) -> None: await super().test_load_async() def test_to_netcdf_explicit_engine(self) -> None: Dataset({"foo": 42}).to_netcdf(engine="scipy") def test_roundtrip_via_bytes(self) -> None: original = create_test_data() netcdf_bytes = original.to_netcdf(engine="scipy") roundtrip = open_dataset(netcdf_bytes, engine="scipy") assert_identical(roundtrip, original) def test_to_bytes_compute_false(self) -> None: original = create_test_data() with pytest.raises( NotImplementedError, match=re.escape("to_netcdf() with compute=False is not yet implemented"), ): original.to_netcdf(engine="scipy", compute=False) def test_bytes_pickle(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}) fobj = data.to_netcdf(engine="scipy") with self.open(fobj) as ds: unpickled = pickle.loads(pickle.dumps(ds)) assert_identical(unpickled, data) @requires_scipy class TestScipyFileObject(NetCDF3Only, CFEncodedBase): # TODO: Consider consolidating some of these cases (e.g., # test_file_remains_open) with TestH5NetCDFFileObject engine: T_NetcdfEngine = "scipy" @contextlib.contextmanager def create_store(self): fobj = BytesIO() yield backends.ScipyDataStore(fobj, "w") @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None,
open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} with create_tmp_file() as tmp_file: with open(tmp_file, "wb") as f: self.save(data, f, **save_kwargs) with open(tmp_file, "rb") as f: with self.open(f, **open_kwargs) as ds: yield ds @pytest.mark.xfail( reason="scipy.io.netcdf_file closes files upon garbage collection" ) def test_file_remains_open(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}) f = BytesIO() data.to_netcdf(f, engine="scipy") assert not f.closed restored = open_dataset(f, engine="scipy") assert not f.closed assert_identical(restored, data) restored.close() assert not f.closed @pytest.mark.skip(reason="cannot pickle file objects") def test_pickle(self) -> None: pass @pytest.mark.skip(reason="cannot pickle file objects") def test_pickle_dataarray(self) -> None: pass @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_create_default_indexes(self, tmp_path, create_default_indexes) -> None: store_path = tmp_path / "tmp.nc" original_ds = xr.Dataset( {"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]} ) original_ds.to_netcdf(store_path, engine=self.engine, mode="w") with open_dataset( store_path, engine=self.engine, create_default_indexes=create_default_indexes, ) as loaded_ds: if create_default_indexes: assert list(loaded_ds.xindexes) == ["x"] and isinstance( loaded_ds.xindexes["x"], PandasIndex ) else: assert len(loaded_ds.xindexes) == 0 @requires_scipy class TestScipyFilePath(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "scipy" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.ScipyDataStore(tmp_file, mode="w") as store: yield store def test_array_attrs(self) -> None: ds = Dataset(attrs={"foo": [[1, 2], [3, 4]]}) with pytest.raises(ValueError, match=r"must be 1-dimensional"): with self.roundtrip(ds): pass def test_roundtrip_example_1_netcdf_gz(self) -> None: with open_example_dataset("example_1.nc.gz") as expected: with open_example_dataset("example_1.nc") as actual: assert_identical(expected, actual) def test_netcdf3_endianness(self) -> None: # regression test for GH416 with open_example_dataset("bears.nc", engine="scipy") as expected: for var in expected.variables.values(): assert var.dtype.isnative @requires_netCDF4 def test_nc4_scipy(self) -> None: with create_tmp_file(allow_cleanup_failure=True) as tmp_file: with nc4.Dataset(tmp_file, "w", format="NETCDF4") as rootgrp: rootgrp.createGroup("foo") with pytest.raises(TypeError, match=r"pip install netcdf4"): open_dataset(tmp_file, engine="scipy") @requires_netCDF4 class TestNetCDF3ViaNetCDF4Data(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "netcdf4" file_format: T_NetcdfTypes = "NETCDF3_CLASSIC" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore.open( tmp_file, mode="w", format="NETCDF3_CLASSIC" ) as store: yield store def test_encoding_kwarg_vlen_string(self) -> None: original = Dataset({"x": ["foo", "bar", "baz"]}) kwargs = dict(encoding={"x": {"dtype": str}}) with pytest.raises(ValueError, match=r"encoding dtype=str for vlen"): with self.roundtrip(original, save_kwargs=kwargs): pass @requires_netCDF4 class TestNetCDF4ClassicViaNetCDF4Data(NetCDF3Only, CFEncodedBase): engine: T_NetcdfEngine = "netcdf4" file_format: T_NetcdfTypes = "NETCDF4_CLASSIC" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: with backends.NetCDF4DataStore.open( 
tmp_file, mode="w", format="NETCDF4_CLASSIC" ) as store: yield store @requires_scipy_or_netCDF4 class TestGenericNetCDFData(NetCDF3Only, CFEncodedBase): # verify that we can read and write netCDF3 files as long as we have scipy # or netCDF4-python installed file_format: T_NetcdfTypes = "NETCDF3_64BIT" def test_write_store(self) -> None: # there's no specific store to test here pass @requires_scipy @requires_netCDF4 def test_engine(self) -> None: data = create_test_data() with pytest.raises(ValueError, match=r"unrecognized engine"): data.to_netcdf("foo.nc", engine="foobar") # type: ignore[call-overload] with pytest.raises( ValueError, match=re.escape( "can only read bytes or file-like objects with engine='scipy' or 'h5netcdf'" ), ): data.to_netcdf(engine="netcdf4") with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file) with pytest.raises(ValueError, match=r"unrecognized engine"): open_dataset(tmp_file, engine="foobar") bytes_io = BytesIO() data.to_netcdf(bytes_io, engine="scipy") with pytest.raises(ValueError, match=r"unrecognized engine"): open_dataset(bytes_io, engine="foobar") def test_cross_engine_read_write_netcdf3(self) -> None: data = create_test_data() valid_engines: set[T_NetcdfEngine] = set() if has_netCDF4: valid_engines.add("netcdf4") if has_scipy: valid_engines.add("scipy") for write_engine in valid_engines: for format in self.netcdf3_formats: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, format=format, engine=write_engine) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: # hack to allow test to work: # coord comes back as DataArray rather than coord, # and so need to loop through here rather than in # the test function (or we get recursion) [ assert_allclose(data[k].variable, actual[k].variable) for k in data.variables ] def test_encoding_unlimited_dims(self) -> None: ds = Dataset({"x": ("y", np.arange(10.0))}) with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) ds.encoding = {"unlimited_dims": ["y"]} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) # Regression test for https://github.com/pydata/xarray/issues/2134 ds.encoding = {"unlimited_dims": "y"} with self.roundtrip(ds) as actual: assert actual.encoding["unlimited_dims"] == set("y") assert_equal(ds, actual) @requires_scipy def test_roundtrip_via_bytes(self) -> None: original = create_test_data() netcdf_bytes = original.to_netcdf() roundtrip = load_dataset(netcdf_bytes) assert_identical(roundtrip, original) @pytest.mark.xfail( reason="scipy.io.netcdf_file closes files upon garbage collection" ) @requires_scipy def test_roundtrip_via_file_object(self) -> None: original = create_test_data() f = BytesIO() original.to_netcdf(f) assert not f.closed restored = open_dataset(f) assert not f.closed assert_identical(restored, original) restored.close() assert not f.closed @requires_h5netcdf @requires_netCDF4 @pytest.mark.filterwarnings("ignore:use make_scale(name) instead") class TestH5NetCDFData(NetCDF4Base): engine: T_NetcdfEngine = "h5netcdf" @contextlib.contextmanager def create_store(self): with create_tmp_file() as tmp_file: yield backends.H5NetCDFStore.open(tmp_file, "w") 
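    # The complex-valued tests below target h5netcdf < 1.4.0, where complex numbers can
    # only be written as an "invalid netCDF" extension: with invalid_netcdf=True a warning
    # is expected, and without it h5netcdf raises CompatibilityError. h5netcdf >= 1.4.0 is
    # covered separately by test_roundtrip_complex further down.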
@pytest.mark.skipif( has_h5netcdf_1_4_0_or_above, reason="only valid for h5netcdf < 1.4.0" ) def test_complex(self) -> None: expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) save_kwargs = {"invalid_netcdf": True} with pytest.warns(UserWarning, match="You are writing invalid netcdf features"): with self.roundtrip(expected, save_kwargs=save_kwargs) as actual: assert_equal(expected, actual) @pytest.mark.skipif( has_h5netcdf_1_4_0_or_above, reason="only valid for h5netcdf < 1.4.0" ) @pytest.mark.parametrize("invalid_netcdf", [None, False]) def test_complex_error(self, invalid_netcdf) -> None: import h5netcdf expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) save_kwargs = {"invalid_netcdf": invalid_netcdf} with pytest.raises( h5netcdf.CompatibilityError, match="are not a supported NetCDF feature" ): with self.roundtrip(expected, save_kwargs=save_kwargs) as actual: assert_equal(expected, actual) def test_numpy_bool_(self) -> None: # h5netcdf loads booleans as numpy.bool_, this type needs to be supported # when writing invalid_netcdf datasets in order to support a roundtrip expected = Dataset({"x": ("y", np.ones(5), {"numpy_bool": np.bool_(True)})}) save_kwargs = {"invalid_netcdf": True} with pytest.warns(UserWarning, match="You are writing invalid netcdf features"): with self.roundtrip(expected, save_kwargs=save_kwargs) as actual: assert_identical(expected, actual) def test_cross_engine_read_write_netcdf4(self) -> None: # Drop dim3, because its labels include strings. These appear to be # not properly read with python-netCDF4, which converts them into # unicode instead of leaving them as bytes. data = create_test_data().drop_vars("dim3") data.attrs["foo"] = "bar" valid_engines: list[T_NetcdfEngine] = ["netcdf4", "h5netcdf"] for write_engine in valid_engines: with create_tmp_file() as tmp_file: data.to_netcdf(tmp_file, engine=write_engine) for read_engine in valid_engines: with open_dataset(tmp_file, engine=read_engine) as actual: assert_identical(data, actual) def test_read_byte_attrs_as_unicode(self) -> None: with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as nc: nc.foo = b"bar" with open_dataset(tmp_file) as actual: expected = Dataset(attrs={"foo": "bar"}) assert_identical(expected, actual) def test_compression_encoding_h5py(self) -> None: ENCODINGS: tuple[tuple[dict[str, Any], dict[str, Any]], ...] 
= ( # h5py style compression with gzip codec will be converted to # NetCDF4-Python style on round-trip ( {"compression": "gzip", "compression_opts": 9}, {"zlib": True, "complevel": 9}, ), # What can't be expressed in NetCDF4-Python style is # round-tripped unaltered ( {"compression": "lzf", "compression_opts": None}, {"compression": "lzf", "compression_opts": None}, ), # If both styles are used together, h5py format takes precedence ( { "compression": "lzf", "compression_opts": None, "zlib": True, "complevel": 9, }, {"compression": "lzf", "compression_opts": None}, ), ) for compr_in, compr_out in ENCODINGS: data = create_test_data() compr_common = { "chunksizes": (5, 5), "fletcher32": True, "shuffle": True, "original_shape": data.var2.shape, } data["var2"].encoding.update(compr_in) data["var2"].encoding.update(compr_common) compr_out.update(compr_common) data["scalar"] = ("scalar_dim", np.array([2.0])) data["scalar"] = data["scalar"][0] with self.roundtrip(data) as actual: for k, v in compr_out.items(): assert v == actual["var2"].encoding[k] def test_compression_check_encoding_h5py(self) -> None: """When mismatched h5py and NetCDF4-Python encodings are expressed in to_netcdf(encoding=...), must raise ValueError """ data = Dataset({"x": ("y", np.arange(10.0))}) # Compatible encodings are graciously supported with create_tmp_file() as tmp_file: data.to_netcdf( tmp_file, engine="h5netcdf", encoding={ "x": { "compression": "gzip", "zlib": True, "compression_opts": 6, "complevel": 6, } }, ) with open_dataset(tmp_file, engine="h5netcdf") as actual: assert actual.x.encoding["zlib"] is True assert actual.x.encoding["complevel"] == 6 # Incompatible encodings cause a crash with create_tmp_file() as tmp_file: with pytest.raises( ValueError, match=r"'zlib' and 'compression' encodings mismatch" ): data.to_netcdf( tmp_file, engine="h5netcdf", encoding={"x": {"compression": "lzf", "zlib": True}}, ) with create_tmp_file() as tmp_file: with pytest.raises( ValueError, match=r"'complevel' and 'compression_opts' encodings mismatch", ): data.to_netcdf( tmp_file, engine="h5netcdf", encoding={ "x": { "compression": "gzip", "compression_opts": 5, "complevel": 6, } }, ) def test_dump_encodings_h5py(self) -> None: # regression test for #709 ds = Dataset({"x": ("y", np.arange(10.0))}) kwargs = {"encoding": {"x": {"compression": "gzip", "compression_opts": 9}}} with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.x.encoding["zlib"] assert actual.x.encoding["complevel"] == 9 kwargs = {"encoding": {"x": {"compression": "lzf", "compression_opts": None}}} with self.roundtrip(ds, save_kwargs=kwargs) as actual: assert actual.x.encoding["compression"] == "lzf" assert actual.x.encoding["compression_opts"] is None def test_decode_utf8_warning(self) -> None: title = b"\xc3" with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, "w") as f: f.title = title with pytest.warns(UnicodeWarning, match="returning bytes undecoded") as w: ds = xr.load_dataset(tmp_file, engine="h5netcdf") assert ds.title == title assert "attribute 'title' of h5netcdf object '/'" in str(w[0].message) def test_byte_attrs(self, byte_attrs_dataset: dict[str, Any]) -> None: with pytest.raises(ValueError, match=byte_attrs_dataset["h5netcdf_error"]): super().test_byte_attrs(byte_attrs_dataset) @requires_h5netcdf_1_4_0_or_above def test_roundtrip_complex(self): expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))}) with self.roundtrip(expected) as actual: assert_equal(expected, actual) def test_phony_dims_warning(self) -> None: 
import h5py foo_data = np.arange(125).reshape(5, 5, 5) bar_data = np.arange(625).reshape(25, 5, 5) var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data} with create_tmp_file() as tmp_file: with h5py.File(tmp_file, "w") as f: grps = ["bar", "baz"] for grp in grps: fx = f.create_group(grp) for k, v in var.items(): fx.create_dataset(k, data=v) with pytest.warns(UserWarning, match="The 'phony_dims' kwarg"): with xr.open_dataset(tmp_file, engine="h5netcdf", group="bar") as ds: assert ds.sizes == { "phony_dim_0": 5, "phony_dim_1": 5, "phony_dim_2": 5, "phony_dim_3": 25, } @requires_h5netcdf @requires_netCDF4 class TestH5NetCDFAlreadyOpen: def test_open_dataset_group(self) -> None: import h5netcdf with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: group = nc.createGroup("g") v = group.createVariable("x", "int") v[...] = 42 kwargs = {"decode_vlen_strings": True} h5 = h5netcdf.File(tmp_file, mode="r", **kwargs) store = backends.H5NetCDFStore(h5["g"]) with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) h5 = h5netcdf.File(tmp_file, mode="r", **kwargs) store = backends.H5NetCDFStore(h5, group="g") with open_dataset(store) as ds: expected = Dataset({"x": ((), 42)}) assert_identical(expected, ds) def test_deepcopy(self) -> None: import h5netcdf with create_tmp_file() as tmp_file: with nc4.Dataset(tmp_file, mode="w") as nc: nc.createDimension("x", 10) v = nc.createVariable("y", np.int32, ("x",)) v[:] = np.arange(10) kwargs = {"decode_vlen_strings": True} h5 = h5netcdf.File(tmp_file, mode="r", **kwargs) store = backends.H5NetCDFStore(h5) with open_dataset(store) as ds: copied = ds.copy(deep=True) expected = Dataset({"y": ("x", np.arange(10))}) assert_identical(expected, copied) @requires_h5netcdf class TestH5NetCDFFileObject(TestH5NetCDFData): engine: T_NetcdfEngine = "h5netcdf" def test_open_badbytes(self) -> None: with pytest.raises( ValueError, match=r"match in any of xarray's currently installed IO" ): with open_dataset(b"garbage"): pass with pytest.raises(ValueError, match=r"can only read bytes"): with open_dataset(b"garbage", engine="netcdf4"): pass with pytest.raises( ValueError, match=r"not the signature of a valid netCDF4 file" ): with open_dataset(BytesIO(b"garbage"), engine="h5netcdf"): pass def test_open_twice(self) -> None: expected = create_test_data() expected.attrs["foo"] = "bar" with create_tmp_file() as tmp_file: expected.to_netcdf(tmp_file, engine="h5netcdf") with open(tmp_file, "rb") as f: with open_dataset(f, engine="h5netcdf"): with open_dataset(f, engine="h5netcdf"): pass @requires_scipy def test_open_fileobj(self) -> None: # open in-memory datasets instead of local file paths expected = create_test_data().drop_vars("dim3") expected.attrs["foo"] = "bar" with create_tmp_file() as tmp_file: expected.to_netcdf(tmp_file, engine="h5netcdf") with open(tmp_file, "rb") as f: with open_dataset(f, engine="h5netcdf") as actual: assert_identical(expected, actual) f.seek(0) with open_dataset(f) as actual: assert_identical(expected, actual) f.seek(0) with BytesIO(f.read()) as bio: with open_dataset(bio, engine="h5netcdf") as actual: assert_identical(expected, actual) f.seek(0) with pytest.raises(TypeError, match="not a valid NetCDF 3"): open_dataset(f, engine="scipy") # TODO: this additional open is required since scipy seems to close the file # when it fails on the TypeError (though didn't when we used # `raises_regex`?). 
Ref https://github.com/pydata/xarray/pull/5191 with open(tmp_file, "rb") as f: f.seek(8) with open_dataset(f): # ensure file gets closed pass def test_file_remains_open(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}) f = BytesIO() data.to_netcdf(f, engine="h5netcdf") assert not f.closed restored = open_dataset(f, engine="h5netcdf") assert not f.closed assert_identical(restored, data) restored.close() assert not f.closed @requires_h5netcdf class TestH5NetCDFInMemoryData: def test_roundtrip_via_bytes(self) -> None: original = create_test_data() netcdf_bytes = original.to_netcdf(engine="h5netcdf") roundtrip = open_dataset(netcdf_bytes, engine="h5netcdf") assert_identical(roundtrip, original) def test_roundtrip_group_via_bytes(self) -> None: original = create_test_data() netcdf_bytes = original.to_netcdf(group="sub", engine="h5netcdf") roundtrip = open_dataset(netcdf_bytes, group="sub", engine="h5netcdf") assert_identical(roundtrip, original) @requires_h5netcdf @requires_dask @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") class TestH5NetCDFViaDaskData(TestH5NetCDFData): @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} open_kwargs.setdefault("chunks", -1) with TestH5NetCDFData.roundtrip( self, data, save_kwargs, open_kwargs, allow_cleanup_failure ) as ds: yield ds @pytest.mark.skip(reason="caching behavior differs for dask") def test_dataset_caching(self) -> None: pass def test_write_inconsistent_chunks(self) -> None: # Construct two variables with the same dimensions, but different # chunk sizes. x = da.zeros((100, 100), dtype="f4", chunks=(50, 100)) x = DataArray(data=x, dims=("lat", "lon"), name="x") x.encoding["chunksizes"] = (50, 100) x.encoding["original_shape"] = (100, 100) y = da.ones((100, 100), dtype="f4", chunks=(100, 50)) y = DataArray(data=y, dims=("lat", "lon"), name="y") y.encoding["chunksizes"] = (100, 50) y.encoding["original_shape"] = (100, 100) # Put them both into the same dataset ds = Dataset({"x": x, "y": y}) with self.roundtrip(ds) as actual: assert actual["x"].encoding["chunksizes"] == (50, 100) assert actual["y"].encoding["chunksizes"] == (100, 50) @requires_h5netcdf_ros3 class TestH5NetCDFDataRos3Driver(TestCommon): engine: T_NetcdfEngine = "h5netcdf" test_remote_dataset: str = ( "https://www.unidata.ucar.edu/software/netcdf/examples/OMI-Aura_L2-example.nc" ) @pytest.mark.filterwarnings("ignore:Duplicate dimension names") def test_get_variable_list(self) -> None: with open_dataset( self.test_remote_dataset, engine="h5netcdf", backend_kwargs={"driver": "ros3"}, ) as actual: assert "Temperature" in list(actual) @pytest.mark.filterwarnings("ignore:Duplicate dimension names") def test_get_variable_list_empty_driver_kwds(self) -> None: driver_kwds = { "secret_id": b"", "secret_key": b"", } backend_kwargs = {"driver": "ros3", "driver_kwds": driver_kwds} with open_dataset( self.test_remote_dataset, engine="h5netcdf", backend_kwargs=backend_kwargs ) as actual: assert "Temperature" in list(actual) @pytest.fixture(params=["scipy", "netcdf4", "h5netcdf", "zarr"]) def readengine(request): return request.param @pytest.fixture(params=[1, 20]) def nfiles(request): return request.param @pytest.fixture(params=[5, None]) def file_cache_maxsize(request): maxsize = request.param if maxsize is not None: with set_options(file_cache_maxsize=maxsize): yield maxsize else: yield maxsize 
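# Fixtures below (``parallel``, ``chunks``, ``tmp_store``) are shared module-level
# parametrizations: ``parallel`` and ``chunks`` feed the open_mfdataset stress test
# that follows, while ``tmp_store`` supplies a path, ZipStore or dict store for the
# zarr DataArray round-trip tests further down.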
@pytest.fixture(params=[True, False])
def parallel(request):
    return request.param


@pytest.fixture(params=[None, 5])
def chunks(request):
    return request.param


@pytest.fixture(params=["tmp_path", "ZipStore", "Dict"])
def tmp_store(request, tmp_path):
    if request.param == "tmp_path":
        return tmp_path
    elif request.param == "ZipStore":
        from zarr.storage import ZipStore

        path = tmp_path / "store.zip"
        return ZipStore(path)
    elif request.param == "Dict":
        return dict()
    else:
        raise ValueError("not supported")


# using pytest.mark.skipif does not work, so this is a workaround
def skip_if_not_engine(engine):
    if engine == "netcdf4":
        pytest.importorskip("netCDF4")
    else:
        pytest.importorskip(engine)


@requires_dask
@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
@pytest.mark.skip(
    reason="Flaky test which can cause the worker to crash (so don't xfail). Very open to contributions fixing this"
)
def test_open_mfdataset_manyfiles(
    readengine, nfiles, parallel, chunks, file_cache_maxsize
):
    # skip certain combinations
    skip_if_not_engine(readengine)

    randdata = np.random.randn(nfiles)
    original = Dataset({"foo": ("x", randdata)})
    # test standard open_mfdataset approach with too many files
    with create_tmp_files(nfiles) as tmpfiles:
        # split into multiple sets of temp files
        for ii in original.x.values:
            subds = original.isel(x=slice(ii, ii + 1))
            if readengine != "zarr":
                subds.to_netcdf(tmpfiles[ii], engine=readengine)
            else:  # if writeengine == "zarr":
                subds.to_zarr(store=tmpfiles[ii])

        # check that calculation on opened datasets works properly
        with open_mfdataset(
            tmpfiles,
            combine="nested",
            concat_dim="x",
            engine=readengine,
            parallel=parallel,
            chunks=chunks if (not chunks and readengine != "zarr") else "auto",
        ) as actual:
            # check that using open_mfdataset returns dask arrays for variables
            assert isinstance(actual["foo"].data, dask_array_type)

            assert_identical(original, actual)


@requires_netCDF4
@requires_dask
def test_open_mfdataset_can_open_path_objects() -> None:
    dataset = os.path.join(os.path.dirname(__file__), "data", "example_1.nc")
    with open_mfdataset(Path(dataset)) as actual:
        assert isinstance(actual, Dataset)


@requires_netCDF4
@requires_dask
def test_open_mfdataset_list_attr() -> None:
    """
    Case when an attribute of type list differs across the multiple files
    """
    from netCDF4 import Dataset

    with create_tmp_files(2) as nfiles:
        for i in range(2):
            with Dataset(nfiles[i], "w") as f:
                f.createDimension("x", 3)
                vlvar = f.createVariable("test_var", np.int32, ("x"))
                # here create an attribute as a list
                vlvar.test_attr = [f"string a {i}", f"string b {i}"]
                vlvar[:] = np.arange(3)
        with open_dataset(nfiles[0]) as ds1:
            with open_dataset(nfiles[1]) as ds2:
                original = xr.concat([ds1, ds2], dim="x")
                with xr.open_mfdataset(
                    [nfiles[0], nfiles[1]], combine="nested", concat_dim="x"
                ) as actual:
                    assert_identical(actual, original)


@requires_scipy_or_netCDF4
@requires_dask
class TestOpenMFDatasetWithDataVarsAndCoordsKw:
    coord_name = "lon"
    var_name = "v1"

    @contextlib.contextmanager
    def setup_files_and_datasets(self, *, fuzz=0, new_combine_kwargs: bool = False):
        ds1, ds2 = self.gen_datasets_with_common_coord_and_time()

        # to test join='exact'
        ds1["x"] = ds1.x + fuzz

        with create_tmp_file() as tmpfile1:
            with create_tmp_file() as tmpfile2:
                # save data to the temporary files
                ds1.to_netcdf(tmpfile1)
                ds2.to_netcdf(tmpfile2)

                with set_options(use_new_combine_kwarg_defaults=new_combine_kwargs):
                    yield [tmpfile1, tmpfile2], [ds1, ds2]

    def gen_datasets_with_common_coord_and_time(self):
        # create coordinate data
        nx = 10
        nt = 10
        x =
np.arange(nx) t1 = np.arange(nt) t2 = np.arange(nt, 2 * nt, 1) v1 = np.random.randn(nt, nx) v2 = np.random.randn(nt, nx) ds1 = Dataset( data_vars={self.var_name: (["t", "x"], v1), self.coord_name: ("x", 2 * x)}, coords={"t": (["t"], t1), "x": (["x"], x)}, ) ds2 = Dataset( data_vars={self.var_name: (["t", "x"], v2), self.coord_name: ("x", 2 * x)}, coords={"t": (["t"], t2), "x": (["x"], x)}, ) return ds1, ds2 @pytest.mark.parametrize( "combine, concat_dim", [("nested", "t"), ("by_coords", None)] ) @pytest.mark.parametrize("opt", ["all", "minimal", "different"]) @pytest.mark.parametrize("join", ["outer", "inner", "left", "right"]) def test_open_mfdataset_does_same_as_concat( self, combine, concat_dim, opt, join ) -> None: with self.setup_files_and_datasets() as (files, [ds1, ds2]): if combine == "by_coords": files.reverse() with open_mfdataset( files, data_vars=opt, combine=combine, concat_dim=concat_dim, join=join, compat="equals", ) as ds: ds_expect = xr.concat( [ds1, ds2], data_vars=opt, dim="t", join=join, compat="equals" ) assert_identical(ds, ds_expect) @pytest.mark.parametrize("use_new_combine_kwarg_defaults", [True, False]) @pytest.mark.parametrize( ["combine_attrs", "attrs", "expected", "expect_error"], ( pytest.param("drop", [{"a": 1}, {"a": 2}], {}, False, id="drop"), pytest.param( "override", [{"a": 1}, {"a": 2}], {"a": 1}, False, id="override" ), pytest.param( "no_conflicts", [{"a": 1}, {"a": 2}], None, True, id="no_conflicts" ), pytest.param( "identical", [{"a": 1, "b": 2}, {"a": 1, "c": 3}], None, True, id="identical", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": -1, "c": 3}], {"a": 1, "c": 3}, False, id="drop_conflicts", ), ), ) def test_open_mfdataset_dataset_combine_attrs( self, use_new_combine_kwarg_defaults, combine_attrs, attrs, expected, expect_error, ): with self.setup_files_and_datasets() as (files, [ds1, ds2]): # Give the files an inconsistent attribute for i, f in enumerate(files): ds = open_dataset(f).load() ds.attrs = attrs[i] ds.close() ds.to_netcdf(f) with set_options( use_new_combine_kwarg_defaults=use_new_combine_kwarg_defaults ): warning: contextlib.AbstractContextManager = ( pytest.warns(FutureWarning) if not use_new_combine_kwarg_defaults else contextlib.nullcontext() ) error: contextlib.AbstractContextManager = ( pytest.raises(xr.MergeError) if expect_error else contextlib.nullcontext() ) with warning: with error: with xr.open_mfdataset( files, combine="nested", concat_dim="t", combine_attrs=combine_attrs, ) as ds: assert ds.attrs == expected def test_open_mfdataset_dataset_attr_by_coords(self) -> None: """ Case when an attribute differs across the multiple files """ with self.setup_files_and_datasets() as (files, [ds1, ds2]): # Give the files an inconsistent attribute for i, f in enumerate(files): ds = open_dataset(f).load() ds.attrs["test_dataset_attr"] = 10 + i ds.close() ds.to_netcdf(f) with set_options(use_new_combine_kwarg_defaults=True): with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds: assert ds.test_dataset_attr == 10 def test_open_mfdataset_dataarray_attr_by_coords(self) -> None: """ Case when an attribute of a member DataArray differs across the multiple files """ with self.setup_files_and_datasets(new_combine_kwargs=True) as ( files, [ds1, ds2], ): # Give the files an inconsistent attribute for i, f in enumerate(files): ds = open_dataset(f).load() ds["v1"].attrs["test_dataarray_attr"] = i ds.close() ds.to_netcdf(f) with xr.open_mfdataset( files, data_vars=None, combine="nested", concat_dim="t" ) as ds: assert 
ds["v1"].test_dataarray_attr == 0 @pytest.mark.parametrize( "combine, concat_dim", [("nested", "t"), ("by_coords", None)] ) @pytest.mark.parametrize( "kwargs", [ {"data_vars": "all"}, {"data_vars": "minimal"}, { "data_vars": "all", "coords": "different", "compat": "no_conflicts", }, # old defaults { "data_vars": None, "coords": "minimal", "compat": "override", }, # new defaults {"data_vars": "different", "compat": "no_conflicts"}, {}, ], ) def test_open_mfdataset_exact_join_raises_error( self, combine, concat_dim, kwargs ) -> None: with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as ( files, _, ): if combine == "by_coords": files.reverse() with pytest.raises( ValueError, match="cannot align objects with join='exact'" ): open_mfdataset( files, **kwargs, combine=combine, concat_dim=concat_dim, join="exact", ) def test_open_mfdataset_defaults_with_exact_join_warns_as_well_as_raising( self, ) -> None: with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as ( files, _, ): files.reverse() with pytest.raises( ValueError, match="cannot align objects with join='exact'" ): open_mfdataset(files, combine="by_coords") def test_common_coord_when_datavars_all(self) -> None: opt: Final = "all" with self.setup_files_and_datasets() as (files, [ds1, ds2]): # open the files with the data_var option with open_mfdataset( files, data_vars=opt, combine="nested", concat_dim="t" ) as ds: coord_shape = ds[self.coord_name].shape coord_shape1 = ds1[self.coord_name].shape coord_shape2 = ds2[self.coord_name].shape var_shape = ds[self.var_name].shape assert var_shape == coord_shape assert coord_shape1 != coord_shape assert coord_shape2 != coord_shape def test_common_coord_when_datavars_minimal(self) -> None: opt: Final = "minimal" with self.setup_files_and_datasets(new_combine_kwargs=True) as ( files, [ds1, ds2], ): # open the files using data_vars option with open_mfdataset( files, data_vars=opt, combine="nested", concat_dim="t" ) as ds: coord_shape = ds[self.coord_name].shape coord_shape1 = ds1[self.coord_name].shape coord_shape2 = ds2[self.coord_name].shape var_shape = ds[self.var_name].shape assert var_shape != coord_shape assert coord_shape1 == coord_shape assert coord_shape2 == coord_shape def test_invalid_data_vars_value_should_fail(self) -> None: with self.setup_files_and_datasets() as (files, _): with pytest.raises(ValueError): with open_mfdataset(files, data_vars="minimum", combine="by_coords"): # type: ignore[arg-type] pass # test invalid coord parameter with pytest.raises(ValueError): with open_mfdataset(files, coords="minimum", combine="by_coords"): pass @pytest.mark.parametrize( "combine, concat_dim", [("nested", "t"), ("by_coords", None)] ) @pytest.mark.parametrize( "kwargs", [{"data_vars": "different"}, {"coords": "different"}] ) def test_open_mfdataset_warns_when_kwargs_set_to_different( self, combine, concat_dim, kwargs ) -> None: with self.setup_files_and_datasets(new_combine_kwargs=True) as ( files, [ds1, ds2], ): if combine == "by_coords": files.reverse() with pytest.raises( ValueError, match="Previously the default was `compat='no_conflicts'`" ): open_mfdataset(files, combine=combine, concat_dim=concat_dim, **kwargs) with pytest.raises( ValueError, match="Previously the default was `compat='equals'`" ): xr.concat([ds1, ds2], dim="t", **kwargs) with set_options(use_new_combine_kwarg_defaults=False): expectation: contextlib.AbstractContextManager = ( pytest.warns( FutureWarning, match="will change from data_vars='all'", ) if "data_vars" not in kwargs else 
contextlib.nullcontext() ) with pytest.warns( FutureWarning, match="will change from compat='equals'", ): with expectation: ds_expect = xr.concat([ds1, ds2], dim="t", **kwargs) with pytest.warns( FutureWarning, match="will change from compat='no_conflicts'" ): with expectation: with open_mfdataset( files, combine=combine, concat_dim=concat_dim, **kwargs ) as ds: assert_identical(ds, ds_expect) @requires_dask @requires_scipy @requires_netCDF4 class TestDask(DatasetIOBase): @contextlib.contextmanager def create_store(self): yield Dataset() @contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): yield data.chunk() # Override methods in DatasetIOBase - not applicable to dask def test_roundtrip_string_encoded_characters(self) -> None: pass def test_roundtrip_coordinates_with_space(self) -> None: pass def test_roundtrip_numpy_datetime_data(self) -> None: # Override method in DatasetIOBase - remove not applicable # save_kwargs times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"], unit="ns") expected = Dataset({"t": ("t", times), "t0": times[0]}) with self.roundtrip(expected) as actual: assert_identical(expected, actual) def test_roundtrip_cftime_datetime_data(self) -> None: # Override method in DatasetIOBase - remove not applicable # save_kwargs from xarray.tests.test_coding_times import _all_cftime_date_types date_types = _all_cftime_date_types() for date_type in date_types.values(): times = [date_type(1, 1, 1), date_type(1, 1, 2)] expected = Dataset({"t": ("t", times), "t0": times[0]}) expected_decoded_t = np.array(times) expected_decoded_t0 = np.array([date_type(1, 1, 1)]) with self.roundtrip(expected) as actual: assert_array_equal(actual.t.values, expected_decoded_t) assert_array_equal(actual.t0.values, expected_decoded_t0) def test_write_store(self) -> None: # Override method in DatasetIOBase - not applicable to dask pass def test_dataset_caching(self) -> None: expected = Dataset({"foo": ("x", [5, 6, 7])}) with self.roundtrip(expected) as actual: assert not actual.foo.variable._in_memory _ = actual.foo.values # no caching assert not actual.foo.variable._in_memory def test_open_mfdataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert isinstance(actual.foo.variable.data, da.Array) assert actual.foo.variable.data.chunks == ((5, 5),) assert_identical(original, actual) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested", chunks={"x": 3} ) as actual: assert actual.foo.variable.data.chunks == ((3, 2, 3, 2),) with pytest.raises(OSError, match=r"no files to open"): open_mfdataset("foo-bar-baz-*.nc") with pytest.raises(ValueError, match=r"wild-card"): open_mfdataset("http://some/remote/uri") @requires_fsspec def test_open_mfdataset_no_files(self) -> None: pytest.importorskip("aiobotocore") # glob is attempted as of #4823, but finds no files with pytest.raises(OSError, match=r"no files"): open_mfdataset("http://some/remote/uri", engine="zarr") def test_open_mfdataset_2d(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: with create_tmp_file() as tmp3: with create_tmp_file() as tmp4: original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), 
y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], ) as actual: assert isinstance(actual.foo.variable.data, da.Array) assert actual.foo.variable.data.chunks == ((5, 5), (4, 4)) assert_identical(original, actual) with open_mfdataset( [[tmp1, tmp2], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], chunks={"x": 3, "y": 2}, ) as actual: assert actual.foo.variable.data.chunks == ( (3, 2, 3, 2), (2, 2, 2, 2), ) def test_open_mfdataset_pathlib(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmps1: with create_tmp_file() as tmps2: tmp1 = Path(tmps1) tmp2 = Path(tmps2) original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(original, actual) def test_open_mfdataset_2d_pathlib(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with create_tmp_file() as tmps1: with create_tmp_file() as tmps2: with create_tmp_file() as tmps3: with create_tmp_file() as tmps4: tmp1 = Path(tmps1) tmp2 = Path(tmps2) tmp3 = Path(tmps3) tmp4 = Path(tmps4) original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], ) as actual: assert_identical(original, actual) def test_open_mfdataset_2(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(original, actual) def test_open_mfdataset_with_ignore(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_files(2) as (tmp1, tmp2): ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, "non-existent-file.nc", tmp2], concat_dim="x", combine="nested", errors="ignore", ) as actual: assert_identical(original, actual) def test_open_mfdataset_with_warn(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with pytest.warns(UserWarning, match="Ignoring."): with create_tmp_files(2) as (tmp1, tmp2): ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, "non-existent-file.nc", tmp2], concat_dim="x", combine="nested", errors="warn", ) as actual: assert_identical(original, actual) def test_open_mfdataset_2d_with_ignore(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4): original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2], ["non-existent-file.nc", tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], errors="ignore", ) as actual: assert_identical(original, actual) def 
test_open_mfdataset_2d_with_warn(self) -> None: original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))}) with pytest.warns(UserWarning, match="Ignoring."): with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4): original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1) original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2) original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3) original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4) with open_mfdataset( [[tmp1, tmp2, "non-existent-file.nc"], [tmp3, tmp4]], combine="nested", concat_dim=["y", "x"], errors="warn", ) as actual: assert_identical(original, actual) def test_attrs_mfdataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.attrs["test1"] = "foo" ds2.attrs["test2"] = "bar" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: # presumes that attributes inherited from # first dataset loaded assert actual.test1 == ds1.test1 # attributes from ds2 are not retained, e.g., with pytest.raises(AttributeError, match=r"no attribute"): _ = actual.test2 def test_open_mfdataset_attrs_file(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_files(2) as (tmp1, tmp2): ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.attrs["test1"] = "foo" ds2.attrs["test2"] = "bar" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2 ) as actual: # attributes are inherited from the master file assert actual.attrs["test2"] == ds2.attrs["test2"] # attributes from ds1 are not retained, e.g., assert "test1" not in actual.attrs def test_open_mfdataset_attrs_file_path(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_files(2) as (tmps1, tmps2): tmp1 = Path(tmps1) tmp2 = Path(tmps2) ds1 = original.isel(x=slice(5)) ds2 = original.isel(x=slice(5, 10)) ds1.attrs["test1"] = "foo" ds2.attrs["test2"] = "bar" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2 ) as actual: # attributes are inherited from the master file assert actual.attrs["test2"] == ds2.attrs["test2"] # attributes from ds1 are not retained, e.g., assert "test1" not in actual.attrs def test_open_mfdataset_auto_combine(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with open_mfdataset([tmp2, tmp1], combine="by_coords") as actual: assert_identical(original, actual) def test_open_mfdataset_raise_on_bad_combine_args(self) -> None: # Regression test for unhelpful error shown in #5230 original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)}) with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: original.isel(x=slice(5)).to_netcdf(tmp1) original.isel(x=slice(5, 10)).to_netcdf(tmp2) with pytest.raises(ValueError, match="`concat_dim` has no effect"): open_mfdataset([tmp1, tmp2], concat_dim="x") def test_encoding_mfdataset(self) -> None: original = Dataset( { "foo": ("t", np.random.randn(10)), "t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")), } ) original.t.encoding["units"] = "days since 2010-01-01" with 
create_tmp_file() as tmp1: with create_tmp_file() as tmp2: ds1 = original.isel(t=slice(5)) ds2 = original.isel(t=slice(5, 10)) ds1.t.encoding["units"] = "days since 2010-01-01" ds2.t.encoding["units"] = "days since 2000-01-01" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], combine="nested", concat_dim="t" ) as actual: assert actual.t.encoding["units"] == original.t.encoding["units"] assert actual.t.encoding["units"] == ds1.t.encoding["units"] assert actual.t.encoding["units"] != ds2.t.encoding["units"] def test_encoding_mfdataset_new_defaults(self) -> None: original = Dataset( { "foo": ("t", np.random.randn(10)), "t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")), } ) original.t.encoding["units"] = "days since 2010-01-01" with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: ds1 = original.isel(t=slice(5)) ds2 = original.isel(t=slice(5, 10)) ds1.t.encoding["units"] = "days since 2010-01-01" ds2.t.encoding["units"] = "days since 2000-01-01" ds1.to_netcdf(tmp1) ds2.to_netcdf(tmp2) for setting in [True, False]: with set_options(use_new_combine_kwarg_defaults=setting): with open_mfdataset( [tmp1, tmp2], combine="nested", concat_dim="t" ) as old: assert ( old.t.encoding["units"] == original.t.encoding["units"] ) assert old.t.encoding["units"] == ds1.t.encoding["units"] assert old.t.encoding["units"] != ds2.t.encoding["units"] with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises( AlignmentError, match="If you are intending to concatenate" ): open_mfdataset([tmp1, tmp2], combine="nested") def test_preprocess_mfdataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) def preprocess(ds): return ds.assign_coords(z=0) expected = preprocess(original) with open_mfdataset( tmp, preprocess=preprocess, combine="by_coords" ) as actual: assert_identical(expected, actual) def test_save_mfdataset_roundtrip(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: save_mfdataset(datasets, [tmp1, tmp2]) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(actual, original) def test_save_mfdataset_invalid(self) -> None: ds = Dataset() with pytest.raises(ValueError, match=r"cannot use mode"): save_mfdataset([ds, ds], ["same", "same"]) with pytest.raises(ValueError, match=r"same length"): save_mfdataset([ds, ds], ["only one path"]) def test_save_mfdataset_invalid_dataarray(self) -> None: # regression test for GH1555 da = DataArray([1, 2]) with pytest.raises(TypeError, match=r"supports writing Dataset"): save_mfdataset([da], ["dataarray"]) def test_save_mfdataset_pathlib_roundtrip(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file() as tmps1: with create_tmp_file() as tmps2: tmp1 = Path(tmps1) tmp2 = Path(tmps2) save_mfdataset(datasets, [tmp1, tmp2]) with open_mfdataset( [tmp1, tmp2], concat_dim="x", combine="nested" ) as actual: assert_identical(actual, original) def test_save_mfdataset_pass_kwargs(self) -> None: # create a timeseries to store in a netCDF file times = [0, 1] time = xr.DataArray(times, dims=("time",)) # create a simple dataset to write using save_mfdataset test_ds = xr.Dataset() test_ds["time"] = time # make sure the times are written as double and # 
turn off fill values encoding = dict(time=dict(dtype="double")) unlimited_dims = ["time"] # set the output file name output_path = "test.nc" # attempt to write the dataset with the encoding and unlimited args # passed through xr.save_mfdataset( [test_ds], [output_path], encoding=encoding, unlimited_dims=unlimited_dims ) def test_open_and_do_math(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_mfdataset(tmp, combine="by_coords") as ds: actual = 1.0 * ds assert_allclose(original, actual, decode_bytes=False) @pytest.mark.parametrize( "kwargs", [pytest.param({"concat_dim": None}, id="none"), pytest.param({}, id="default")], ) def test_open_mfdataset_concat_dim(self, kwargs) -> None: with set_options(use_new_combine_kwarg_defaults=True): with create_tmp_file() as tmp1: with create_tmp_file() as tmp2: data = Dataset({"x": 0}) data.to_netcdf(tmp1) Dataset({"x": np.nan}).to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], **kwargs, combine="nested" ) as actual: assert_identical(data, actual) def test_open_dataset(self) -> None: original = Dataset({"foo": ("x", np.random.randn(10))}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(tmp, chunks={"x": 5}) as actual: assert isinstance(actual.foo.variable.data, da.Array) assert actual.foo.variable.data.chunks == ((5, 5),) assert_identical(original, actual) with open_dataset(tmp, chunks=5) as actual: assert_identical(original, actual) with open_dataset(tmp) as actual: assert isinstance(actual.foo.variable.data, np.ndarray) assert_identical(original, actual) def test_open_single_dataset(self) -> None: # Test for issue GH #1988. This makes sure that the # concat_dim is utilized when specified in open_mfdataset(). rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) dim = DataArray([100], name="baz", dims="baz") expected = Dataset( {"foo": (("baz", "x"), rnddata[np.newaxis, :])}, {"baz": [100]} ) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_mfdataset( [tmp], concat_dim=dim, data_vars="all", combine="nested" ) as actual: assert_identical(expected, actual) def test_open_multi_dataset(self) -> None: # Test for issue GH #1988 and #2647. This makes sure that the # concat_dim is utilized when specified in open_mfdataset(). # The additional wrinkle is to ensure that a length greater # than one is tested as well due to numpy's implicit casting # of 1-length arrays to booleans in tests, which allowed # #2647 to still pass the test_open_single_dataset(), # which is itself still needed as-is because the original # bug caused one-length arrays to not be used correctly # in concatenation. rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) dim = DataArray([100, 150], name="baz", dims="baz") expected = Dataset( {"foo": (("baz", "x"), np.tile(rnddata[np.newaxis, :], (2, 1)))}, {"baz": [100, 150]}, ) with create_tmp_file() as tmp1, create_tmp_file() as tmp2: original.to_netcdf(tmp1) original.to_netcdf(tmp2) with open_mfdataset( [tmp1, tmp2], concat_dim=dim, data_vars="all", combine="nested" ) as actual: assert_identical(expected, actual) # Flaky test. 
Very open to contributions on fixing this @pytest.mark.flaky def test_dask_roundtrip(self) -> None: with create_tmp_file() as tmp: data = create_test_data() data.to_netcdf(tmp) chunks = {"dim1": 4, "dim2": 4, "dim3": 4, "time": 10} with open_dataset(tmp, chunks=chunks) as dask_ds: assert_identical(data, dask_ds) with create_tmp_file() as tmp2: dask_ds.to_netcdf(tmp2) with open_dataset(tmp2) as on_disk: assert_identical(data, on_disk) def test_deterministic_names(self) -> None: with create_tmp_file() as tmp: data = create_test_data() data.to_netcdf(tmp) with open_mfdataset(tmp, combine="by_coords") as ds: original_names = {k: v.data.name for k, v in ds.data_vars.items()} with open_mfdataset(tmp, combine="by_coords") as ds: repeat_names = {k: v.data.name for k, v in ds.data_vars.items()} for var_name, dask_name in original_names.items(): assert var_name in dask_name assert dask_name[:13] == "open_dataset-" assert original_names == repeat_names def test_dataarray_compute(self) -> None: # Test DataArray.compute() on dask backend. # The test for Dataset.compute() is already in DatasetIOBase; # however dask is the only tested backend which supports DataArrays actual = DataArray([1, 2]).chunk() computed = actual.compute() assert not actual._in_memory assert computed._in_memory assert_allclose(actual, computed, decode_bytes=False) def test_save_mfdataset_compute_false_roundtrip(self) -> None: from dask.delayed import Delayed original = Dataset({"foo": ("x", np.random.randn(10))}).chunk() datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))] with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp1: with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp2: delayed_obj = save_mfdataset( datasets, [tmp1, tmp2], engine=self.engine, compute=False ) assert isinstance(delayed_obj, Delayed) delayed_obj.compute() with open_mfdataset( [tmp1, tmp2], combine="nested", concat_dim="x" ) as actual: assert_identical(actual, original) def test_load_dataset(self) -> None: with create_tmp_file() as tmp: original = Dataset({"foo": ("x", np.random.randn(10))}) original.to_netcdf(tmp) ds = load_dataset(tmp) assert_identical(original, ds) # this would fail if we used open_dataset instead of load_dataset ds.to_netcdf(tmp) def test_load_dataarray(self) -> None: with create_tmp_file() as tmp: original = DataArray(np.random.randn(10), dims=["x"]) original.to_netcdf(tmp) da = load_dataarray(tmp) assert_identical(original, da) # this would fail if we used open_dataarray instead of # load_dataarray da.to_netcdf(tmp) def test_load_datatree(self) -> None: with create_tmp_file() as tmp: original = DataTree(Dataset({"foo": ("x", np.random.randn(10))})) original.to_netcdf(tmp) dt = load_datatree(tmp) xr.testing.assert_identical(original, dt) # this would fail if we used open_datatree instead of # load_datatree dt.to_netcdf(tmp) @pytest.mark.skipif( ON_WINDOWS, reason="counting number of tasks in graph fails on windows for some reason", ) def test_inline_array(self) -> None: with create_tmp_file() as tmp: original = Dataset({"foo": ("x", np.random.randn(10))}) original.to_netcdf(tmp) chunks = {"time": 10} def num_graph_nodes(obj): return len(obj.__dask_graph__()) with ( open_dataset(tmp, inline_array=False, chunks=chunks) as not_inlined_ds, open_dataset(tmp, inline_array=True, chunks=chunks) as inlined_ds, ): assert num_graph_nodes(inlined_ds) < num_graph_nodes(not_inlined_ds) with ( open_dataarray( tmp, inline_array=False, chunks=chunks ) as not_inlined_da, open_dataarray(tmp, inline_array=True, 
chunks=chunks) as inlined_da, ): assert num_graph_nodes(inlined_da) < num_graph_nodes(not_inlined_da) @requires_scipy_or_netCDF4 @requires_pydap @pytest.mark.filterwarnings("ignore:The binary mode of fromstring is deprecated") class TestPydap: def convert_to_pydap_dataset(self, original): from pydap.model import BaseType, DatasetType ds = DatasetType("bears", **original.attrs) for key, var in original.data_vars.items(): ds[key] = BaseType( key, var.values, dtype=var.values.dtype.kind, dims=var.dims, **var.attrs ) # check all dims are stored in ds for d in original.coords: ds[d] = BaseType(d, original[d].values, dims=(d,), **original[d].attrs) return ds @contextlib.contextmanager def create_datasets(self, **kwargs): with open_example_dataset("bears.nc") as expected: # print("QQ0:", expected["bears"].load()) pydap_ds = self.convert_to_pydap_dataset(expected) actual = open_dataset(PydapDataStore(pydap_ds)) # netcdf converts string to byte not unicode # fixed in pydap 3.5.6. https://github.com/pydap/pydap/issues/510 actual["bears"].values = actual["bears"].values.astype("S") yield actual, expected def test_cmp_local_file(self) -> None: with self.create_datasets() as (actual, expected): assert_equal(actual, expected) # global attributes should be global attributes on the dataset assert "NC_GLOBAL" not in actual.attrs assert "history" in actual.attrs # we don't check attributes exactly with assertDatasetIdentical() # because the test DAP server seems to insert some extra # attributes not found in the netCDF file. assert actual.attrs.keys() == expected.attrs.keys() with self.create_datasets() as (actual, expected): assert_equal(actual[{"l": 2}], expected[{"l": 2}]) with self.create_datasets() as (actual, expected): # always return arrays and not scalars # scalars will be promoted to unicode for numpy >= 2.3.0 assert_equal(actual.isel(i=[0], j=[-1]), expected.isel(i=[0], j=[-1])) with self.create_datasets() as (actual, expected): assert_equal(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2))) with self.create_datasets() as (actual, expected): indexers = {"i": [1, 0, 0], "j": [1, 2, 0, 1]} assert_equal(actual.isel(**indexers), expected.isel(**indexers)) with self.create_datasets() as (actual, expected): indexers2 = { "i": DataArray([0, 1, 0], dims="a"), "j": DataArray([0, 2, 1], dims="a"), } assert_equal(actual.isel(**indexers2), expected.isel(**indexers2)) def test_compatible_to_netcdf(self) -> None: # make sure it can be saved as a netcdf with self.create_datasets() as (actual, expected): with create_tmp_file() as tmp_file: actual.to_netcdf(tmp_file) with open_dataset(tmp_file) as actual2: assert_equal(actual2, expected) @requires_dask def test_dask(self) -> None: with self.create_datasets(chunks={"j": 2}) as (actual, expected): assert_equal(actual, expected) @network @requires_scipy_or_netCDF4 @requires_pydap class TestPydapOnline(TestPydap): @contextlib.contextmanager def create_dap2_datasets(self, **kwargs): # in pydap 3.5.0, urls defaults to dap2. url = "http://test.opendap.org/opendap/data/nc/bears.nc" actual = open_dataset(url, engine="pydap", **kwargs) # pydap <3.5.6 converts to unicode dtype=|U. Not what # xarray expects. Thus force to bytes dtype. pydap >=3.5.6 # does not convert to unicode. 
https://github.com/pydap/pydap/issues/510 actual["bears"].values = actual["bears"].values.astype("S") with open_example_dataset("bears.nc") as expected: yield actual, expected def output_grid_deprecation_warning_dap2dataset(self): with pytest.warns(DeprecationWarning, match="`output_grid` is deprecated"): with self.create_dap2_datasets(output_grid=True) as (actual, expected): assert_equal(actual, expected) def create_dap4_dataset(self, **kwargs): url = "dap4://test.opendap.org/opendap/data/nc/bears.nc" actual = open_dataset(url, engine="pydap", **kwargs) with open_example_dataset("bears.nc") as expected: # workaround to restore string which is converted to byte # only needed for pydap <3.5.6 https://github.com/pydap/pydap/issues/510 expected["bears"].values = expected["bears"].values.astype("S") yield actual, expected def test_session(self) -> None: from requests import Session session = Session() # blank requests.Session object with mock.patch("pydap.client.open_url") as mock_func: xr.backends.PydapDataStore.open("http://test.url", session=session) mock_func.assert_called_with( url="http://test.url", application=None, session=session, output_grid=False, timeout=120, verify=True, user_charset=None, ) class TestEncodingInvalid: def test_extract_nc4_variable_encoding(self) -> None: var = xr.Variable(("x",), [1, 2, 3], {}, {"foo": "bar"}) with pytest.raises(ValueError, match=r"unexpected encoding"): _extract_nc4_variable_encoding(var, raise_on_invalid=True) var = xr.Variable(("x",), [1, 2, 3], {}, {"chunking": (2, 1)}) encoding = _extract_nc4_variable_encoding(var) assert {} == encoding # regression test var = xr.Variable(("x",), [1, 2, 3], {}, {"shuffle": True}) encoding = _extract_nc4_variable_encoding(var, raise_on_invalid=True) assert {"shuffle": True} == encoding # Variables with unlim dims must be chunked on output. var = xr.Variable(("x",), [1, 2, 3], {}, {"contiguous": True}) encoding = _extract_nc4_variable_encoding(var, unlimited_dims=("x",)) assert {} == encoding @requires_netCDF4 def test_extract_nc4_variable_encoding_netcdf4(self): # New netCDF4 1.6.0 compression argument. 
var = xr.Variable(("x",), [1, 2, 3], {}, {"compression": "szlib"}) _extract_nc4_variable_encoding(var, backend="netCDF4", raise_on_invalid=True) @pytest.mark.xfail def test_extract_h5nc_encoding(self) -> None: # not supported with h5netcdf (yet) var = xr.Variable(("x",), [1, 2, 3], {}, {"least_significant_digit": 2}) with pytest.raises(ValueError, match=r"unexpected encoding"): _extract_nc4_variable_encoding(var, raise_on_invalid=True) class MiscObject: pass @requires_netCDF4 class TestValidateAttrs: def test_validating_attrs(self) -> None: def new_dataset(): return Dataset({"data": ("y", np.arange(10.0))}, {"y": np.arange(10)}) def new_dataset_and_dataset_attrs(): ds = new_dataset() return ds, ds.attrs def new_dataset_and_data_attrs(): ds = new_dataset() return ds, ds.data.attrs def new_dataset_and_coord_attrs(): ds = new_dataset() return ds, ds.coords["y"].attrs for new_dataset_and_attrs in [ new_dataset_and_dataset_attrs, new_dataset_and_data_attrs, new_dataset_and_coord_attrs, ]: ds, attrs = new_dataset_and_attrs() attrs[123] = "test" with pytest.raises(TypeError, match=r"Invalid name for attr: 123"): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs[MiscObject()] = "test" with pytest.raises(TypeError, match=r"Invalid name for attr: "): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs[""] = "test" with pytest.raises(ValueError, match=r"Invalid name for attr '':"): ds.to_netcdf("test.nc") # This one should work ds, attrs = new_dataset_and_attrs() attrs["test"] = "test" with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = {"a": 5} with pytest.raises(TypeError, match=r"Invalid value for attr 'test'"): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs["test"] = MiscObject() with pytest.raises(TypeError, match=r"Invalid value for attr 'test'"): ds.to_netcdf("test.nc") ds, attrs = new_dataset_and_attrs() attrs["test"] = 5 with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = 3.14 with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = [1, 2, 3, 4] with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = (1.9, 2.5) with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = np.arange(5) with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = "This is a string" with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) ds, attrs = new_dataset_and_attrs() attrs["test"] = "" with create_tmp_file() as tmp_file: ds.to_netcdf(tmp_file) @requires_scipy_or_netCDF4 class TestDataArrayToNetCDF: def test_dataarray_to_netcdf_no_name(self) -> None: original_da = DataArray(np.arange(12).reshape((3, 4))) with create_tmp_file() as tmp: original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_netcdf_with_name(self) -> None: original_da = DataArray(np.arange(12).reshape((3, 4)), name="test") with create_tmp_file() as tmp: original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_netcdf_coord_name_clash(self) -> None: original_da = DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x" ) with create_tmp_file() as tmp: original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: 
assert_identical(original_da, loaded_da) def test_open_dataarray_options(self) -> None: data = DataArray(np.arange(5), coords={"y": ("x", range(5))}, dims=["x"]) with create_tmp_file() as tmp: data.to_netcdf(tmp) expected = data.drop_vars("y") with open_dataarray(tmp, drop_variables=["y"]) as loaded: assert_identical(expected, loaded) @requires_scipy def test_dataarray_to_netcdf_return_bytes(self) -> None: # regression test for GH1410 data = xr.DataArray([1, 2, 3]) output = data.to_netcdf(engine="scipy") assert isinstance(output, memoryview) def test_dataarray_to_netcdf_no_name_pathlib(self) -> None: original_da = DataArray(np.arange(12).reshape((3, 4))) with create_tmp_file() as tmps: tmp = Path(tmps) original_da.to_netcdf(tmp) with open_dataarray(tmp) as loaded_da: assert_identical(original_da, loaded_da) @requires_zarr class TestDataArrayToZarr: def skip_if_zarr_python_3_and_zip_store(self, store) -> None: if has_zarr_v3 and isinstance(store, zarr.storage.ZipStore): pytest.skip( reason="zarr-python 3.x doesn't support reopening ZipStore with a new mode." ) def test_dataarray_to_zarr_no_name(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) original_da = DataArray(np.arange(12).reshape((3, 4))) original_da.to_zarr(tmp_store) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_zarr_with_name(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) original_da = DataArray(np.arange(12).reshape((3, 4)), name="test") original_da.to_zarr(tmp_store) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) def test_dataarray_to_zarr_coord_name_clash(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) original_da = DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x" ) original_da.to_zarr(tmp_store) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) def test_open_dataarray_options(self, tmp_store) -> None: self.skip_if_zarr_python_3_and_zip_store(tmp_store) data = DataArray(np.arange(5), coords={"y": ("x", range(1, 6))}, dims=["x"]) data.to_zarr(tmp_store) expected = data.drop_vars("y") with open_dataarray(tmp_store, engine="zarr", drop_variables=["y"]) as loaded: assert_identical(expected, loaded) @requires_dask def test_dataarray_to_zarr_compute_false(self, tmp_store) -> None: from dask.delayed import Delayed skip_if_zarr_format_3(tmp_store) original_da = DataArray(np.arange(12).reshape((3, 4))) output = original_da.to_zarr(tmp_store, compute=False) assert isinstance(output, Delayed) output.compute() with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) @requires_dask def test_dataarray_to_zarr_align_chunks_true(self, tmp_store) -> None: # TODO: Improve data integrity checks when using Dask. # Detecting automatic alignment issues in Dask can be tricky, # as unintended misalignment might lead to subtle data corruption. # For now, ensure that the parameter is present, but explore # more robust verification methods to confirm data consistency. 
skip_if_zarr_format_3(tmp_store) arr = DataArray( np.arange(4), dims=["a"], coords={"a": np.arange(4)}, name="foo" ).chunk(a=(2, 1, 1)) arr.to_zarr( tmp_store, align_chunks=True, encoding={"foo": {"chunks": (3,)}}, ) with open_dataarray(tmp_store, engine="zarr") as loaded_da: assert_identical(arr, loaded_da) @requires_scipy_or_netCDF4 def test_no_warning_from_dask_effective_get() -> None: with create_tmp_file() as tmpfile: with assert_no_warnings(): ds = Dataset() ds.to_netcdf(tmpfile) @requires_scipy_or_netCDF4 def test_source_encoding_always_present() -> None: # Test for GH issue #2550. rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(tmp) as ds: assert ds.encoding["source"] == tmp @requires_scipy_or_netCDF4 def test_source_encoding_always_present_with_pathlib() -> None: # Test for GH issue #5888. rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) with create_tmp_file() as tmp: original.to_netcdf(tmp) with open_dataset(Path(tmp)) as ds: assert ds.encoding["source"] == tmp @requires_h5netcdf @requires_fsspec def test_source_encoding_always_present_with_fsspec() -> None: import fsspec rnddata = np.random.randn(10) original = Dataset({"foo": ("x", rnddata)}) with create_tmp_file() as tmp: original.to_netcdf(tmp) fs = fsspec.filesystem("file") with fs.open(tmp) as f, open_dataset(f) as ds: assert ds.encoding["source"] == tmp with fs.open(tmp) as f, open_mfdataset([f]) as ds: assert "foo" in ds def _assert_no_dates_out_of_range_warning(record): undesired_message = "dates out of range" for warning in record: assert undesired_message not in str(warning.message) @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_use_cftime_standard_calendar_default_in_range(calendar) -> None: x = [0, 1] time = [0, 720] units_date = "2000-01-01" units = "days since 2000-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar x_timedeltas = np.array(x).astype("timedelta64[D]") time_timedeltas = np.array(time).astype("timedelta64[D]") decoded_x = np.datetime64(units_date, "ns") + x_timedeltas decoded_time = np.datetime64(units_date, "ns") + time_timedeltas expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with warnings.catch_warnings(record=True) as record: with open_dataset(tmp_file) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) _assert_no_dates_out_of_range_warning(record) @requires_cftime @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) def test_use_cftime_standard_calendar_default_out_of_range(calendar) -> None: # todo: check, if we still need to test for two dates import cftime x = [0, 1] time = [0, 720] units = "days since 1582-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True) decoded_time = cftime.num2date( time, units, calendar, only_use_cftime_datetimes=True ) expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], 
name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with pytest.warns(SerializationWarning): with open_dataset(tmp_file) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) @requires_cftime @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_true(calendar, units_year) -> None: import cftime x = [0, 1] time = [0, 720] units = f"days since {units_year}-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True) decoded_time = cftime.num2date( time, units, calendar, only_use_cftime_datetimes=True ) expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with warnings.catch_warnings(record=True) as record: decoder = CFDatetimeCoder(use_cftime=True) with open_dataset(tmp_file, decode_times=decoder) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) _assert_no_dates_out_of_range_warning(record) @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) @pytest.mark.xfail( has_numpy_2, reason="https://github.com/pandas-dev/pandas/issues/56996" ) def test_use_cftime_false_standard_calendar_in_range(calendar) -> None: x = [0, 1] time = [0, 720] units_date = "2000-01-01" units = "days since 2000-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar x_timedeltas = np.array(x).astype("timedelta64[D]") time_timedeltas = np.array(time).astype("timedelta64[D]") decoded_x = np.datetime64(units_date, "ns") + x_timedeltas decoded_time = np.datetime64(units_date, "ns") + time_timedeltas expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x") expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time") with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with warnings.catch_warnings(record=True) as record: coder = xr.coders.CFDatetimeCoder(use_cftime=False) with open_dataset(tmp_file, decode_times=coder) as ds: assert_identical(expected_x, ds.x) assert_identical(expected_time, ds.time) _assert_no_dates_out_of_range_warning(record) @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) def test_use_cftime_false_standard_calendar_out_of_range(calendar) -> None: x = [0, 1] time = [0, 720] units = "days since 1582-01-01" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with pytest.raises((OutOfBoundsDatetime, ValueError)): decoder = CFDatetimeCoder(use_cftime=False) open_dataset(tmp_file, decode_times=decoder) @requires_scipy_or_netCDF4 @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_false_nonstandard_calendar(calendar, units_year) -> None: x = [0, 1] time = [0, 720] units = f"days since {units_year}" original = DataArray(x, [("time", time)], name="x").to_dataset() for v in ["x", "time"]: 
original[v].attrs["units"] = units original[v].attrs["calendar"] = calendar with create_tmp_file() as tmp_file: original.to_netcdf(tmp_file) with pytest.raises((OutOfBoundsDatetime, ValueError)): decoder = CFDatetimeCoder(use_cftime=False) open_dataset(tmp_file, decode_times=decoder) @pytest.mark.parametrize("engine", ["netcdf4", "scipy"]) def test_invalid_netcdf_raises(engine) -> None: data = create_test_data() with pytest.raises(ValueError, match=r"unrecognized option 'invalid_netcdf'"): data.to_netcdf("foo.nc", engine=engine, invalid_netcdf=True) @requires_zarr def test_encode_zarr_attr_value() -> None: # array -> list arr = np.array([1, 2, 3]) expected1 = [1, 2, 3] actual1 = backends.zarr.encode_zarr_attr_value(arr) assert isinstance(actual1, list) assert actual1 == expected1 # scalar array -> scalar sarr = np.array(1)[()] expected2 = 1 actual2 = backends.zarr.encode_zarr_attr_value(sarr) assert isinstance(actual2, int) assert actual2 == expected2 # string -> string (no change) expected3 = "foo" actual3 = backends.zarr.encode_zarr_attr_value(expected3) assert isinstance(actual3, str) assert actual3 == expected3 @requires_zarr def test_extract_zarr_variable_encoding() -> None: var = xr.Variable("x", [1, 2]) actual = backends.zarr.extract_zarr_variable_encoding(var, zarr_format=3) assert "chunks" in actual assert actual["chunks"] == ("auto" if has_zarr_v3 else None) var = xr.Variable("x", [1, 2], encoding={"chunks": (1,)}) actual = backends.zarr.extract_zarr_variable_encoding(var, zarr_format=3) assert actual["chunks"] == (1,) # does not raise on invalid var = xr.Variable("x", [1, 2], encoding={"foo": (1,)}) actual = backends.zarr.extract_zarr_variable_encoding(var, zarr_format=3) # raises on invalid var = xr.Variable("x", [1, 2], encoding={"foo": (1,)}) with pytest.raises(ValueError, match=r"unexpected encoding parameters"): actual = backends.zarr.extract_zarr_variable_encoding( var, raise_on_invalid=True, zarr_format=3 ) @requires_zarr @requires_fsspec @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager") def test_open_fsspec() -> None: import fsspec if not ( ( hasattr(zarr.storage, "FSStore") and hasattr(zarr.storage.FSStore, "getitems") ) # zarr v2 or hasattr(zarr.storage, "FsspecStore") # zarr v3 ): pytest.skip("zarr too old") ds = open_dataset(os.path.join(os.path.dirname(__file__), "data", "example_1.nc")) m = fsspec.filesystem("memory") mm = m.get_mapper("out1.zarr") ds.to_zarr(mm) # old interface ds0 = ds.copy() # pd.to_timedelta returns ns-precision, but the example data is in second precision # so we need to fix this ds0["time"] = ds.time + np.timedelta64(1, "D") mm = m.get_mapper("out2.zarr") ds0.to_zarr(mm) # old interface # single dataset url = "memory://out2.zarr" ds2 = open_dataset(url, engine="zarr") xr.testing.assert_equal(ds0, ds2) # single dataset with caching url = "simplecache::memory://out2.zarr" ds2 = open_dataset(url, engine="zarr") xr.testing.assert_equal(ds0, ds2) # open_mfdataset requires dask if has_dask: # multi dataset url = "memory://out*.zarr" ds2 = open_mfdataset(url, engine="zarr") xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2) # multi dataset with caching url = "simplecache::memory://out*.zarr" ds2 = open_mfdataset(url, engine="zarr") xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2) @requires_h5netcdf @requires_netCDF4 def test_load_single_value_h5netcdf(tmp_path: Path) -> None: """Test that numeric single-element vector attributes are handled fine. 
At present (h5netcdf v0.8.1), the h5netcdf backend exposes single-valued numeric variable attributes as arrays of length 1, as opposed to scalars for the NetCDF4 backend. This led to a ValueError upon loading a single value from a file, see #4471. Test that loading causes no failure. """ ds = xr.Dataset( { "test": xr.DataArray( np.array([0]), dims=("x",), attrs={"scale_factor": 1, "add_offset": 0} ) } ) ds.to_netcdf(tmp_path / "test.nc") with xr.open_dataset(tmp_path / "test.nc", engine="h5netcdf") as ds2: ds2["test"][0].load() @requires_zarr @requires_dask @pytest.mark.parametrize( "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}] ) def test_open_dataset_chunking_zarr(chunks, tmp_path: Path) -> None: encoded_chunks = 100 dask_arr = da.from_array( np.ones((500, 500), dtype="float64"), chunks=encoded_chunks ) ds = xr.Dataset( { "test": xr.DataArray( dask_arr, dims=("x", "y"), ) } ) ds["test"].encoding["chunks"] = encoded_chunks ds.to_zarr(tmp_path / "test.zarr") with dask.config.set({"array.chunk-size": "1MiB"}): expected = ds.chunk(chunks) with open_dataset( tmp_path / "test.zarr", engine="zarr", chunks=chunks ) as actual: xr.testing.assert_chunks_equal(actual, expected) @requires_zarr @requires_dask @pytest.mark.parametrize( "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}] ) @pytest.mark.filterwarnings("ignore:The specified chunks separate") def test_chunking_consistency(chunks, tmp_path: Path) -> None: encoded_chunks: dict[str, Any] = {} dask_arr = da.from_array( np.ones((500, 500), dtype="float64"), chunks=encoded_chunks ) ds = xr.Dataset( { "test": xr.DataArray( dask_arr, dims=("x", "y"), ) } ) ds["test"].encoding["chunks"] = encoded_chunks ds.to_zarr(tmp_path / "test.zarr") ds.to_netcdf(tmp_path / "test.nc") with dask.config.set({"array.chunk-size": "1MiB"}): expected = ds.chunk(chunks) with xr.open_dataset( tmp_path / "test.zarr", engine="zarr", chunks=chunks ) as actual: xr.testing.assert_chunks_equal(actual, expected) with xr.open_dataset(tmp_path / "test.nc", chunks=chunks) as actual: xr.testing.assert_chunks_equal(actual, expected) def _check_guess_can_open_and_open(entrypoint, obj, engine, expected): assert entrypoint.guess_can_open(obj) with open_dataset(obj, engine=engine) as actual: assert_identical(expected, actual) @requires_netCDF4 def test_netcdf4_entrypoint(tmp_path: Path) -> None: entrypoint = NetCDF4BackendEntrypoint() ds = create_test_data() path = tmp_path / "foo" ds.to_netcdf(path, format="NETCDF3_CLASSIC") _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="netcdf4", expected=ds) path = tmp_path / "bar" ds.to_netcdf(path, format="NETCDF4_CLASSIC") _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="netcdf4", expected=ds) assert entrypoint.guess_can_open("http://something/remote") assert entrypoint.guess_can_open("something-local.nc") assert entrypoint.guess_can_open("something-local.nc4") assert entrypoint.guess_can_open("something-local.cdf") assert not entrypoint.guess_can_open("not-found-and-no-extension") path = tmp_path / "baz" with open(path, "wb") as f: f.write(b"not-a-netcdf-file") assert not entrypoint.guess_can_open(path) @requires_scipy def test_scipy_entrypoint(tmp_path: Path) -> None: entrypoint = ScipyBackendEntrypoint() ds = create_test_data() path = tmp_path / "foo" ds.to_netcdf(path, engine="scipy")
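# The scipy backend entrypoint is exercised below against paths, path strings, open file objects, in-memory bytes, and gzip-compressed files.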
_check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds) with open(path, "rb") as f: _check_guess_can_open_and_open(entrypoint, f, engine="scipy", expected=ds) contents = ds.to_netcdf(engine="scipy") _check_guess_can_open_and_open(entrypoint, contents, engine="scipy", expected=ds) _check_guess_can_open_and_open( entrypoint, BytesIO(contents), engine="scipy", expected=ds ) path = tmp_path / "foo.nc.gz" with gzip.open(path, mode="wb") as f: f.write(contents) _check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds) assert entrypoint.guess_can_open("something-local.nc") assert entrypoint.guess_can_open("something-local.nc.gz") assert not entrypoint.guess_can_open("not-found-and-no-extension") assert not entrypoint.guess_can_open(b"not-a-netcdf-file") @requires_h5netcdf def test_h5netcdf_entrypoint(tmp_path: Path) -> None: entrypoint = H5netcdfBackendEntrypoint() ds = create_test_data() path = tmp_path / "foo" ds.to_netcdf(path, engine="h5netcdf") _check_guess_can_open_and_open(entrypoint, path, engine="h5netcdf", expected=ds) _check_guess_can_open_and_open( entrypoint, str(path), engine="h5netcdf", expected=ds ) with open(path, "rb") as f: _check_guess_can_open_and_open(entrypoint, f, engine="h5netcdf", expected=ds) assert entrypoint.guess_can_open("something-local.nc") assert entrypoint.guess_can_open("something-local.nc4") assert entrypoint.guess_can_open("something-local.cdf") assert not entrypoint.guess_can_open("not-found-and-no-extension") @requires_zarr def test_zarr_entrypoint(tmp_path: Path) -> None: from xarray.backends.zarr import ZarrBackendEntrypoint entrypoint = ZarrBackendEntrypoint() ds = create_test_data() path = tmp_path / "foo.zarr" ds.to_zarr(path) _check_guess_can_open_and_open(entrypoint, path, engine="zarr", expected=ds) _check_guess_can_open_and_open(entrypoint, str(path), engine="zarr", expected=ds) # add a trailing slash to the path and check again _check_guess_can_open_and_open( entrypoint, str(path) + "/", engine="zarr", expected=ds ) # Test the new functionality: .zarr with trailing slash assert entrypoint.guess_can_open("something-local.zarr") assert entrypoint.guess_can_open("something-local.zarr/") # With trailing slash assert not entrypoint.guess_can_open("something-local.nc") assert not entrypoint.guess_can_open("not-found-and-no-extension") assert not entrypoint.guess_can_open("something.zarr.txt") @requires_netCDF4 @pytest.mark.parametrize("str_type", (str, np.str_)) def test_write_file_from_np_str(str_type: type[str | np.str_], tmpdir: str) -> None: # https://github.com/pydata/xarray/pull/5264 scenarios = [str_type(v) for v in ["scenario_a", "scenario_b", "scenario_c"]] years = range(2015, 2100 + 1) tdf = pd.DataFrame( data=np.random.random((len(scenarios), len(years))), columns=years, index=scenarios, ) tdf.index.name = "scenario" tdf.columns.name = "year" tdf = cast(pd.DataFrame, tdf.stack()) tdf.name = "tas" txr = tdf.to_xarray() txr.to_netcdf(tmpdir.join("test.nc")) @requires_zarr @requires_netCDF4 class TestNCZarr: @property def netcdfc_version(self): return Version(nc4.getlibversion().split()[0].split("-development")[0]) def _create_nczarr(self, filename): if self.netcdfc_version < Version("4.8.1"): pytest.skip("requires netcdf-c>=4.8.1") if platform.system() == "Windows" and self.netcdfc_version == Version("4.8.1"): # Bug in netcdf-c==4.8.1 
(typo: Nan instead of NaN) # https://github.com/Unidata/netcdf-c/issues/2265 pytest.skip("netcdf-c==4.8.1 has issues on Windows") ds = create_test_data() # Drop dim3: netcdf-c does not support dtype='<U1' ds = ds.drop_vars("dim3") ds.to_netcdf(f"file://{filename}#mode=nczarr") return ds def test_open_nczarr(self) -> None: with create_tmp_file(suffix=".zarr") as tmp: expected = self._create_nczarr(tmp) actual = xr.open_zarr(tmp, consolidated=False) assert_identical(expected, actual) def test_overwriting_nczarr(self) -> None: with create_tmp_file(suffix=".zarr") as tmp: ds = self._create_nczarr(tmp) expected = ds[["var1"]] expected.to_zarr(tmp, mode="w") actual = xr.open_zarr(tmp, consolidated=False) assert_identical(expected, actual) @pytest.mark.parametrize("mode", ["a", "r+"]) @pytest.mark.filterwarnings("ignore:.*non-consolidated metadata.*") def test_raise_writing_to_nczarr(self, mode) -> None: if self.netcdfc_version > Version("4.8.1"): pytest.skip("netcdf-c>4.8.1 adds the _ARRAY_DIMENSIONS attribute") with create_tmp_file(suffix=".zarr") as tmp: ds = self._create_nczarr(tmp) with pytest.raises( KeyError, match="missing the attribute `_ARRAY_DIMENSIONS`," ): ds.to_zarr(tmp, mode=mode) @requires_netCDF4 @requires_dask @pytest.mark.usefixtures("default_zarr_format") def test_pickle_open_mfdataset_dataset(): with open_example_mfdataset(["bears.nc"]) as ds: assert_identical(ds, pickle.loads(pickle.dumps(ds))) @requires_zarr @pytest.mark.usefixtures("default_zarr_format") def test_zarr_closing_internal_zip_store(): store_name = "tmp.zarr.zip" original_da = DataArray(np.arange(12).reshape((3, 4))) original_da.to_zarr(store_name, mode="w") with open_dataarray(store_name, engine="zarr") as loaded_da: assert_identical(original_da, loaded_da) @requires_zarr @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_zarr_create_default_indexes(tmp_path, create_default_indexes) -> None: from xarray.core.indexes import PandasIndex store_path = tmp_path / "tmp.zarr" original_ds = xr.Dataset({"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]}) original_ds.to_zarr(store_path, mode="w") with open_dataset( store_path, engine="zarr", create_default_indexes=create_default_indexes ) as loaded_ds: if create_default_indexes: assert list(loaded_ds.xindexes) == ["x"] and isinstance( loaded_ds.xindexes["x"], PandasIndex ) else: assert len(loaded_ds.xindexes) == 0 @requires_zarr @pytest.mark.usefixtures("default_zarr_format") def test_raises_key_error_on_invalid_zarr_store(tmp_path): root = zarr.open_group(tmp_path / "tmp.zarr") if Version(zarr.__version__) < Version("3.0.0"): root.create_dataset("bar", shape=(3, 5), dtype=np.float32) else: root.create_array("bar", shape=(3, 5), dtype=np.float32) with pytest.raises(KeyError, match=r"xarray to determine variable dimensions"): xr.open_zarr(tmp_path / "tmp.zarr", consolidated=False) @requires_zarr @pytest.mark.usefixtures("default_zarr_format") class TestZarrRegionAuto: """These are separated out since we should not need to test this logic with every store.""" @contextlib.contextmanager def create_zarr_target(self): with create_tmp_file(suffix=".zarr") as tmp: yield tmp @contextlib.contextmanager def create(self): x = np.arange(0, 50, 10) y = np.arange(0, 20, 2) data = np.ones((5, 10)) ds = xr.Dataset( {"test": xr.DataArray(data, dims=("x", "y"), coords={"x": x, "y": y})} ) with self.create_zarr_target() as target: self.save(target, ds) yield target, ds def save(self, target, ds, **kwargs): ds.to_zarr(target, **kwargs) @pytest.mark.parametrize( "region", [ pytest.param("auto", id="full-auto"), pytest.param({"x": "auto", "y": slice(6, 8)}, id="mixed-auto"), ],
) def test_zarr_region_auto(self, region): with self.create() as (target, ds): ds_region = 1 + ds.isel(x=slice(2, 4), y=slice(6, 8)) self.save(target, ds_region, region=region) ds_updated = xr.open_zarr(target) expected = ds.copy() expected["test"][2:4, 6:8] += 1 assert_identical(ds_updated, expected) def test_zarr_region_auto_noncontiguous(self): with self.create() as (target, ds): with pytest.raises(ValueError): self.save(target, ds.isel(x=[0, 2, 3], y=[5, 6]), region="auto") dsnew = ds.copy() dsnew["x"] = dsnew.x + 5 with pytest.raises(KeyError): self.save(target, dsnew, region="auto") def test_zarr_region_index_write(self, tmp_path): region: Mapping[str, slice] | Literal["auto"] region_slice = dict(x=slice(2, 4), y=slice(6, 8)) with self.create() as (target, ds): ds_region = 1 + ds.isel(region_slice) for region in [region_slice, "auto"]: # type: ignore[assignment] with patch.object( ZarrStore, "set_variables", side_effect=ZarrStore.set_variables, autospec=True, ) as mock: self.save(target, ds_region, region=region, mode="r+") # should write the data vars but never the index vars with auto mode for call in mock.call_args_list: written_variables = call.args[1].keys() assert "test" in written_variables assert "x" not in written_variables assert "y" not in written_variables def test_zarr_region_append(self): with self.create() as (target, ds): x_new = np.arange(40, 70, 10) data_new = np.ones((3, 10)) ds_new = xr.Dataset( { "test": xr.DataArray( data_new, dims=("x", "y"), coords={"x": x_new, "y": ds.y}, ) } ) # Now it is valid to use auto region detection with the append mode, # but it is still unsafe to modify dimensions or metadata using the region # parameter. with pytest.raises(KeyError): self.save(target, ds_new, mode="a", append_dim="x", region="auto") def test_zarr_region(self): with self.create() as (target, ds): ds_transposed = ds.transpose("y", "x") ds_region = 1 + ds_transposed.isel(x=[0], y=[0]) self.save(target, ds_region, region={"x": slice(0, 1), "y": slice(0, 1)}) # Write without region self.save(target, ds_transposed, mode="r+") @requires_dask def test_zarr_region_chunk_partial(self): """ Check that writing to partial chunks with `region` fails, assuming `safe_chunks=False`. 
""" ds = ( xr.DataArray(np.arange(120).reshape(4, 3, -1), dims=list("abc")) .rename("var1") .to_dataset() ) with self.create_zarr_target() as target: self.save(target, ds.chunk(5), compute=False, mode="w") with pytest.raises(ValueError): for r in range(ds.sizes["a"]): self.save( target, ds.chunk(3).isel(a=[r]), region=dict(a=slice(r, r + 1)) ) @requires_dask def test_zarr_append_chunk_partial(self): t_coords = np.array([np.datetime64("2020-01-01").astype("datetime64[ns]")]) data = np.ones((10, 10)) da = xr.DataArray( data.reshape((-1, 10, 10)), dims=["time", "x", "y"], coords={"time": t_coords}, name="foo", ) new_time = np.array([np.datetime64("2021-01-01").astype("datetime64[ns]")]) da2 = xr.DataArray( data.reshape((-1, 10, 10)), dims=["time", "x", "y"], coords={"time": new_time}, name="foo", ) with self.create_zarr_target() as target: self.save(target, da, mode="w", encoding={"foo": {"chunks": (5, 5, 1)}}) with pytest.raises(ValueError, match="encoding was provided"): self.save( target, da2, append_dim="time", mode="a", encoding={"foo": {"chunks": (1, 1, 1)}}, ) # chunking with dask sidesteps the encoding check, so we need a different check with pytest.raises(ValueError, match="Specified Zarr chunks"): self.save( target, da2.chunk({"x": 1, "y": 1, "time": 1}), append_dim="time", mode="a", ) @requires_dask def test_zarr_region_chunk_partial_offset(self): # https://github.com/pydata/xarray/pull/8459#issuecomment-1819417545 with self.create_zarr_target() as store: data = np.ones((30,)) da = xr.DataArray( data, dims=["x"], coords={"x": range(30)}, name="foo" ).chunk(x=10) self.save(store, da, compute=False) self.save(store, da.isel(x=slice(10)).chunk(x=(10,)), region="auto") self.save( store, da.isel(x=slice(5, 25)).chunk(x=(10, 10)), safe_chunks=False, region="auto", ) with pytest.raises(ValueError): self.save( store, da.isel(x=slice(5, 25)).chunk(x=(10, 10)), region="auto" ) @requires_dask def test_zarr_safe_chunk_append_dim(self): with self.create_zarr_target() as store: data = np.ones((20,)) da = xr.DataArray( data, dims=["x"], coords={"x": range(20)}, name="foo" ).chunk(x=5) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") with pytest.raises(ValueError): # If the first chunk is smaller than the border size then raise an error self.save( store, da.isel(x=slice(7, 11)).chunk(x=(2, 2)), append_dim="x", safe_chunks=True, ) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") # If the first chunk is of the size of the border size then it is valid self.save( store, da.isel(x=slice(7, 11)).chunk(x=(3, 1)), safe_chunks=True, append_dim="x", ) assert xr.open_zarr(store)["foo"].equals(da.isel(x=slice(0, 11))) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") # If the first chunk is of the size of the border size + N * zchunk then it is valid self.save( store, da.isel(x=slice(7, 17)).chunk(x=(8, 2)), safe_chunks=True, append_dim="x", ) assert xr.open_zarr(store)["foo"].equals(da.isel(x=slice(0, 17))) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") with pytest.raises(ValueError): # If the first chunk is valid but the other are not then raise an error self.save( store, da.isel(x=slice(7, 14)).chunk(x=(3, 3, 1)), append_dim="x", safe_chunks=True, ) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") with pytest.raises(ValueError): # If the first chunk have a size bigger than the border size but not enough # to complete the size of the next chunk then an error must be raised self.save( store, 
da.isel(x=slice(7, 14)).chunk(x=(4, 3)), append_dim="x", safe_chunks=True, ) self.save(store, da.isel(x=slice(0, 7)), safe_chunks=True, mode="w") # Append with a single chunk it's totally valid, # and it does not matter the size of the chunk self.save( store, da.isel(x=slice(7, 19)).chunk(x=-1), append_dim="x", safe_chunks=True, ) assert xr.open_zarr(store)["foo"].equals(da.isel(x=slice(0, 19))) @requires_dask @pytest.mark.parametrize("mode", ["r+", "a"]) def test_zarr_safe_chunk_region(self, mode: Literal["r+", "a"]): with self.create_zarr_target() as store: arr = xr.DataArray( list(range(11)), dims=["a"], coords={"a": list(range(11))}, name="foo" ).chunk(a=3) self.save(store, arr, mode="w") with pytest.raises(ValueError): # There are two Dask chunks on the same Zarr chunk, # which means that it is unsafe in any mode self.save( store, arr.isel(a=slice(0, 3)).chunk(a=(2, 1)), region="auto", mode=mode, ) with pytest.raises(ValueError): # the first chunk is covering the border size, but it is not # completely covering the second chunk, which means that it is # unsafe in any mode self.save( store, arr.isel(a=slice(1, 5)).chunk(a=(3, 1)), region="auto", mode=mode, ) with pytest.raises(ValueError): # The first chunk is safe but the other two chunks are overlapping with # the same Zarr chunk self.save( store, arr.isel(a=slice(0, 5)).chunk(a=(3, 1, 1)), region="auto", mode=mode, ) # Fully update two contiguous chunks is safe in any mode self.save(store, arr.isel(a=slice(3, 9)), region="auto", mode=mode) # The last chunk is considered full based on their current size (2) self.save(store, arr.isel(a=slice(9, 11)), region="auto", mode=mode) self.save( store, arr.isel(a=slice(6, None)).chunk(a=-1), region="auto", mode=mode ) # Write the last chunk of a region partially is safe in "a" mode self.save(store, arr.isel(a=slice(3, 8)), region="auto", mode="a") with pytest.raises(ValueError): # with "r+" mode it is invalid to write partial chunk self.save(store, arr.isel(a=slice(3, 8)), region="auto", mode="r+") # This is safe with mode "a", the border size is covered by the first chunk of Dask self.save( store, arr.isel(a=slice(1, 4)).chunk(a=(2, 1)), region="auto", mode="a" ) with pytest.raises(ValueError): # This is considered unsafe in mode "r+" because it is writing in a partial chunk self.save( store, arr.isel(a=slice(1, 4)).chunk(a=(2, 1)), region="auto", mode="r+", ) # This is safe on mode "a" because there is a single dask chunk self.save( store, arr.isel(a=slice(1, 5)).chunk(a=(4,)), region="auto", mode="a" ) with pytest.raises(ValueError): # This is unsafe on mode "r+", because the Dask chunk is partially writing # in the first chunk of Zarr self.save( store, arr.isel(a=slice(1, 5)).chunk(a=(4,)), region="auto", mode="r+", ) # The first chunk is completely covering the first Zarr chunk # and the last chunk is a partial one self.save( store, arr.isel(a=slice(0, 5)).chunk(a=(3, 2)), region="auto", mode="a" ) with pytest.raises(ValueError): # The last chunk is partial, so it is considered unsafe on mode "r+" self.save( store, arr.isel(a=slice(0, 5)).chunk(a=(3, 2)), region="auto", mode="r+", ) # The first chunk is covering the border size (2 elements) # and also the second chunk (3 elements), so it is valid self.save( store, arr.isel(a=slice(1, 8)).chunk(a=(5, 2)), region="auto", mode="a" ) with pytest.raises(ValueError): # The first chunk is not fully covering the first zarr chunk self.save( store, arr.isel(a=slice(1, 8)).chunk(a=(5, 2)), region="auto", mode="r+", ) with pytest.raises(ValueError): 
# Validate that the border condition is not affecting the "r+" mode self.save(store, arr.isel(a=slice(1, 9)), region="auto", mode="r+") self.save(store, arr.isel(a=slice(10, 11)), region="auto", mode="a") with pytest.raises(ValueError): # Validate that even if we write with a single Dask chunk on the last Zarr # chunk it is still unsafe if it is not fully covering it # (the last Zarr chunk has size 2) self.save(store, arr.isel(a=slice(10, 11)), region="auto", mode="r+") # Validate the same as the above test but in the beginning of the last chunk self.save(store, arr.isel(a=slice(9, 10)), region="auto", mode="a") with pytest.raises(ValueError): self.save(store, arr.isel(a=slice(9, 10)), region="auto", mode="r+") self.save( store, arr.isel(a=slice(7, None)).chunk(a=-1), region="auto", mode="a" ) with pytest.raises(ValueError): # Test that even a Dask chunk that covers the last Zarr chunk can be unsafe # if it is partial covering other Zarr chunks self.save( store, arr.isel(a=slice(7, None)).chunk(a=-1), region="auto", mode="r+", ) with pytest.raises(ValueError): # If the chunk is of size equal to the one in the Zarr encoding, but # it is partially writing in the first chunk then raise an error self.save( store, arr.isel(a=slice(8, None)).chunk(a=3), region="auto", mode="r+", ) with pytest.raises(ValueError): self.save( store, arr.isel(a=slice(5, -1)).chunk(a=5), region="auto", mode="r+" ) # Test if the code is detecting the last chunk correctly data = np.random.default_rng(0).random((2920, 25, 53)) ds = xr.Dataset({"temperature": (("time", "lat", "lon"), data)}) chunks = {"time": 1000, "lat": 25, "lon": 53} self.save(store, ds.chunk(chunks), compute=False, mode="w") region = {"time": slice(1000, 2000, 1)} chunk = ds.isel(region) chunk = chunk.chunk() self.save(store, chunk.chunk(), region=region) @requires_h5netcdf @requires_fsspec def test_h5netcdf_storage_options() -> None: with create_tmp_files(2, allow_cleanup_failure=ON_WINDOWS) as (f1, f2): ds1 = create_test_data() ds1.to_netcdf(f1, engine="h5netcdf") ds2 = create_test_data() ds2.to_netcdf(f2, engine="h5netcdf") files = [f"file://{f}" for f in [f1, f2]] with xr.open_mfdataset( files, engine="h5netcdf", concat_dim="time", data_vars="all", combine="nested", storage_options={"skip_instance_cache": False}, ) as ds: assert_identical(xr.concat([ds1, ds2], dim="time", data_vars="all"), ds) xarray-2025.09.0/xarray/tests/test_backends_api.py000066400000000000000000000232331505620616400220330ustar00rootroot00000000000000from __future__ import annotations import re import sys from numbers import Number import numpy as np import pytest import xarray as xr from xarray.backends.api import get_default_netcdf_write_engine from xarray.tests import ( assert_identical, assert_no_warnings, requires_dask, requires_h5netcdf, requires_netCDF4, requires_scipy, ) @requires_netCDF4 @requires_scipy @requires_h5netcdf def test_get_default_netcdf_write_engine() -> None: engine = get_default_netcdf_write_engine( format=None, to_fileobject_or_memoryview=False ) assert engine == "netcdf4" engine = get_default_netcdf_write_engine( format="NETCDF4", to_fileobject_or_memoryview=False ) assert engine == "netcdf4" engine = get_default_netcdf_write_engine( format="NETCDF4", to_fileobject_or_memoryview=True ) assert engine == "h5netcdf" engine = get_default_netcdf_write_engine( format="NETCDF3_CLASSIC", to_fileobject_or_memoryview=True ) assert engine == "scipy" @requires_h5netcdf def test_default_engine_h5netcdf(monkeypatch): """Test the default netcdf engine when h5netcdf 
is the only importable module.""" monkeypatch.delitem(sys.modules, "netCDF4", raising=False) monkeypatch.delitem(sys.modules, "scipy", raising=False) monkeypatch.setattr(sys, "meta_path", []) engine = get_default_netcdf_write_engine( format=None, to_fileobject_or_memoryview=False ) assert engine == "h5netcdf" with pytest.raises( ValueError, match=re.escape( "cannot write NetCDF files with format='NETCDF3_CLASSIC' because none of the suitable backend libraries (netCDF4, scipy) are installed" ), ): get_default_netcdf_write_engine( format="NETCDF3_CLASSIC", to_fileobject_or_memoryview=False ) def test_custom_engine() -> None: expected = xr.Dataset( dict(a=2 * np.arange(5)), coords=dict(x=("x", np.arange(5), dict(units="s"))) ) class CustomBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj, drop_variables=None, **kwargs, ) -> xr.Dataset: return expected.copy(deep=True) actual = xr.open_dataset("fake_filename", engine=CustomBackend) assert_identical(expected, actual) def test_multiindex() -> None: # GH7139 # Check that we properly handle backends that change index variables dataset = xr.Dataset(coords={"coord1": ["A", "B"], "coord2": [1, 2]}) dataset = dataset.stack(z=["coord1", "coord2"]) class MultiindexBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj, drop_variables=None, **kwargs, ) -> xr.Dataset: return dataset.copy(deep=True) loaded = xr.open_dataset("fake_filename", engine=MultiindexBackend) assert_identical(dataset, loaded) class PassThroughBackendEntrypoint(xr.backends.BackendEntrypoint): """Access an object passed to the `open_dataset` method.""" def open_dataset(self, dataset, *, drop_variables=None): """Return the first argument.""" return dataset def explicit_chunks(chunks, shape): """Return explicit chunks, expanding any integer member to a tuple of integers.""" # Emulate `dask.array.core.normalize_chunks` but for simpler inputs. return tuple( ( ( (size // chunk) * (chunk,) + ((size % chunk,) if size % chunk or size == 0 else ()) ) if isinstance(chunk, Number) else chunk ) for chunk, size in zip(chunks, shape, strict=True) ) @requires_dask class TestPreferredChunks: """Test behaviors related to the backend's preferred chunks.""" var_name = "data" def create_dataset(self, shape, pref_chunks): """Return a dataset with a variable with the given shape and preferred chunks.""" dims = tuple(f"dim_{idx}" for idx in range(len(shape))) return xr.Dataset( { self.var_name: xr.Variable( dims, np.empty(shape, dtype=np.dtype("V1")), encoding={ "preferred_chunks": dict(zip(dims, pref_chunks, strict=True)) }, ) } ) def check_dataset(self, initial, final, expected_chunks): assert_identical(initial, final) assert final[self.var_name].chunks == expected_chunks @pytest.mark.parametrize( "shape,pref_chunks", [ # Represent preferred chunking with int. ((5,), (2,)), # Represent preferred chunking with tuple. ((5,), ((2, 2, 1),)), # Represent preferred chunking with int in two dims. ((5, 6), (4, 2)), # Represent preferred chunking with tuple in second dim. ((5, 6), (4, (2, 2, 2))), ], ) @pytest.mark.parametrize("request_with_empty_map", [False, True]) def test_honor_chunks(self, shape, pref_chunks, request_with_empty_map): """Honor the backend's preferred chunks when opening a dataset.""" initial = self.create_dataset(shape, pref_chunks) # To keep the backend's preferred chunks, the `chunks` argument must be an # empty mapping or map dimensions to `None`. 
chunks = ( {} if request_with_empty_map else dict.fromkeys(initial[self.var_name].dims, None) ) final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, chunks=chunks ) self.check_dataset(initial, final, explicit_chunks(pref_chunks, shape)) @pytest.mark.parametrize( "shape,pref_chunks,req_chunks", [ # Preferred chunking is int; requested chunking is int. ((5,), (2,), (3,)), # Preferred chunking is int; requested chunking is tuple. ((5,), (2,), ((2, 1, 1, 1),)), # Preferred chunking is tuple; requested chunking is int. ((5,), ((2, 2, 1),), (3,)), # Preferred chunking is tuple; requested chunking is tuple. ((5,), ((2, 2, 1),), ((2, 1, 1, 1),)), # Split chunks along a dimension other than the first. ((1, 5), (1, 2), (1, 3)), ], ) def test_split_chunks(self, shape, pref_chunks, req_chunks): """Warn when the requested chunks separate the backend's preferred chunks.""" initial = self.create_dataset(shape, pref_chunks) with pytest.warns(UserWarning): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)), ) self.check_dataset(initial, final, explicit_chunks(req_chunks, shape)) @pytest.mark.parametrize( "shape,pref_chunks,req_chunks", [ # Keep preferred chunks using int representation. ((5,), (2,), (2,)), # Keep preferred chunks using tuple representation. ((5,), (2,), ((2, 2, 1),)), # Join chunks, leaving a final short chunk. ((5,), (2,), (4,)), # Join all chunks with an int larger than the dimension size. ((5,), (2,), (6,)), # Join one chunk using tuple representation. ((5,), (1,), ((1, 1, 2, 1),)), # Join one chunk using int representation. ((5,), ((1, 1, 2, 1),), (2,)), # Join multiple chunks using tuple representation. ((5,), ((1, 1, 2, 1),), ((2, 3),)), # Join chunks in multiple dimensions. 
((5, 5), (2, (1, 1, 2, 1)), (4, (2, 3))), ], ) def test_join_chunks(self, shape, pref_chunks, req_chunks): """Don't warn when the requested chunks join or keep the preferred chunks.""" initial = self.create_dataset(shape, pref_chunks) with assert_no_warnings(): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)), ) self.check_dataset(initial, final, explicit_chunks(req_chunks, shape)) @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_default_indexes(self, create_default_indexes): """Create default indexes if the backend does not create them.""" coords = xr.Coordinates({"x": ("x", [0, 1]), "y": list("abc")}, indexes={}) initial = xr.Dataset({"a": ("x", [1, 2])}, coords=coords) with assert_no_warnings(): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, create_default_indexes=create_default_indexes, ) if create_default_indexes: assert all(name in final.xindexes for name in ["x", "y"]) else: assert len(final.xindexes) == 0 @pytest.mark.parametrize("create_default_indexes", [True, False]) def test_default_indexes_passthrough(self, create_default_indexes): """Allow creating indexes in the backend.""" initial = xr.Dataset( {"a": (["x", "y"], [[1, 2, 3], [4, 5, 6]])}, coords={"x": ("x", [0, 1]), "y": ("y", list("abc"))}, ).stack(z=["x", "y"]) with assert_no_warnings(): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, create_default_indexes=create_default_indexes, ) assert initial.coords.equals(final.coords) xarray-2025.09.0/xarray/tests/test_backends_chunks.py000066400000000000000000000060641505620616400225600ustar00rootroot00000000000000import numpy as np import pytest import xarray as xr from xarray.backends.chunks import align_nd_chunks, build_grid_chunks, grid_rechunk from xarray.tests import requires_dask @pytest.mark.parametrize( "size, chunk_size, region, expected_chunks", [ (10, 3, slice(1, 11), (2, 3, 3, 2)), (10, 3, slice(None, None), (3, 3, 3, 1)), (10, 3, None, (3, 3, 3, 1)), (10, 3, slice(None, 10), (3, 3, 3, 1)), (10, 3, slice(0, None), (3, 3, 3, 1)), ], ) def test_build_grid_chunks(size, chunk_size, region, expected_chunks): grid_chunks = build_grid_chunks( size, chunk_size=chunk_size, region=region, ) assert grid_chunks == expected_chunks @pytest.mark.parametrize( "nd_var_chunks, nd_backend_chunks, expected_chunks", [ (((2, 2, 2, 2),), ((3, 3, 2),), ((3, 3, 2),)), # ND cases (((2, 4), (2, 3)), ((2, 2, 2), (3, 2)), ((2, 4), (3, 2))), ], ) def test_align_nd_chunks(nd_var_chunks, nd_backend_chunks, expected_chunks): aligned_nd_chunks = align_nd_chunks( nd_var_chunks=nd_var_chunks, nd_backend_chunks=nd_backend_chunks, ) assert aligned_nd_chunks == expected_chunks @requires_dask @pytest.mark.parametrize( "enc_chunks, region, nd_var_chunks, expected_chunks", [ ( (3,), (slice(2, 14),), ((6, 6),), ( ( 4, 6, 2, ), ), ), ( (6,), (slice(0, 13),), ((6, 7),), ( ( 6, 7, ), ), ), ((6,), (slice(0, 13),), ((6, 6, 1),), ((6, 6, 1),)), ((3,), (slice(2, 14),), ((1, 3, 2, 6),), ((1, 3, 6, 2),)), ((3,), (slice(2, 14),), ((2, 2, 2, 6),), ((4, 6, 2),)), ((3,), (slice(2, 14),), ((3, 1, 3, 5),), ((4, 3, 5),)), ((4,), (slice(1, 13),), ((1, 1, 1, 4, 3, 2),), ((3, 4, 4, 1),)), ((5,), (slice(4, 16),), ((5, 7),), ((6, 6),)), # ND cases ( (3, 6), (slice(2, 14), slice(0, 13)), ((6, 6), (6, 7)), ( ( 4, 6, 2, ), ( 6, 7, ), ), ), ], ) def test_grid_rechunk(enc_chunks, region, nd_var_chunks, expected_chunks): dims = [f"dim_{i}" for i in range(len(region))] 
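# Build coordinates that span exactly the requested region slices.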
coords = { dim: list(range(r.start, r.stop)) for dim, r in zip(dims, region, strict=False) } shape = tuple(r.stop - r.start for r in region) arr = xr.DataArray( np.arange(np.prod(shape)).reshape(shape), dims=dims, coords=coords, ) arr = arr.chunk(dict(zip(dims, nd_var_chunks, strict=False))) result = grid_rechunk( arr.variable, enc_chunks=enc_chunks, region=region, ) assert result.chunks == expected_chunks xarray-2025.09.0/xarray/tests/test_backends_common.py000066400000000000000000000031641505620616400225530ustar00rootroot00000000000000from __future__ import annotations import io import re import numpy as np import pytest import xarray as xr from xarray.backends.common import _infer_dtype, robust_getitem from xarray.tests import requires_scipy class DummyFailure(Exception): pass class DummyArray: def __init__(self, failures): self.failures = failures def __getitem__(self, key): if self.failures: self.failures -= 1 raise DummyFailure return "success" def test_robust_getitem() -> None: array = DummyArray(failures=2) with pytest.raises(DummyFailure): array[...] result = robust_getitem(array, ..., catch=DummyFailure, initial_delay=1) assert result == "success" array = DummyArray(failures=3) with pytest.raises(DummyFailure): robust_getitem(array, ..., catch=DummyFailure, initial_delay=1, max_retries=2) @pytest.mark.parametrize( "data", [ np.array([["ab", "cdef", b"X"], [1, 2, "c"]], dtype=object), np.array([["x", 1], ["y", 2]], dtype="object"), ], ) def test_infer_dtype_error_on_mixed_types(data): with pytest.raises(ValueError, match="unable to infer dtype on variable"): _infer_dtype(data, "test") @requires_scipy def test_encoding_failure_note(): # Create an arbitrary value that cannot be encoded in netCDF3 ds = xr.Dataset({"invalid": np.array([2**63 - 1], dtype=np.int64)}) f = io.BytesIO() with pytest.raises( ValueError, match=re.escape( "Raised while encoding variable 'invalid' with value None: with pytest.raises( ValueError, match=r"group '/child' is not aligned with its parents" ): super().test_child_group_with_inconsistent_dimensions() def diff_chunks( comparison: dict[tuple[str, Hashable], bool], tree1: DataTree, tree2: DataTree ) -> str: mismatching_variables = [loc for loc, equals in comparison.items() if not equals] variable_messages = [ "\n".join( [ f"L {path}:{name}: {tree1[path].variables[name].chunksizes}", f"R {path}:{name}: {tree2[path].variables[name].chunksizes}", ] ) for path, name in mismatching_variables ] return "\n".join(["Differing chunk sizes:"] + variable_messages) def assert_chunks_equal( actual: DataTree, expected: DataTree, enforce_dask: bool = False ) -> None: __tracebackhide__ = True from xarray.namedarray.pycompat import array_type dask_array_type = array_type("dask") comparison = { (path, name): ( ( not enforce_dask or isinstance(node1.variables[name].data, dask_array_type) ) and node1.variables[name].chunksizes == node2.variables[name].chunksizes ) for path, (node1, node2) in xr.group_subtrees(actual, expected) for name in node1.variables.keys() } assert all(comparison.values()), diff_chunks(comparison, actual, expected) @pytest.fixture(scope="module") def unaligned_datatree_nc(tmp_path_factory): """Creates a test netCDF4 file with the following unaligned structure, writes it to a /tmp directory and returns the file path of the netCDF4 file. Group: / β”‚ Dimensions: (lat: 1, lon: 2) β”‚ Dimensions without coordinates: lat, lon β”‚ Data variables: β”‚ root_variable (lat, lon) float64 16B ... 
└── Group: /Group1 β”‚ Dimensions: (lat: 1, lon: 2) β”‚ Dimensions without coordinates: lat, lon β”‚ Data variables: β”‚ group_1_var (lat, lon) float64 16B ... └── Group: /Group1/subgroup1 Dimensions: (lat: 2, lon: 2) Dimensions without coordinates: lat, lon Data variables: subgroup1_var (lat, lon) float64 32B ... """ filepath = tmp_path_factory.mktemp("data") / "unaligned_subgroups.nc" with nc4.Dataset(filepath, "w", format="NETCDF4") as root_group: group_1 = root_group.createGroup("/Group1") subgroup_1 = group_1.createGroup("/subgroup1") root_group.createDimension("lat", 1) root_group.createDimension("lon", 2) root_group.createVariable("root_variable", np.float64, ("lat", "lon")) group_1_var = group_1.createVariable("group_1_var", np.float64, ("lat", "lon")) group_1_var[:] = np.array([[0.1, 0.2]]) group_1_var.units = "K" group_1_var.long_name = "air_temperature" subgroup_1.createDimension("lat", 2) subgroup1_var = subgroup_1.createVariable( "subgroup1_var", np.float64, ("lat", "lon") ) subgroup1_var[:] = np.array([[0.1, 0.2]]) yield filepath @pytest.fixture(scope="module") def unaligned_datatree_zarr_factory( tmp_path_factory, ) -> Generator[ Callable[[Literal[2, 3]], Path], None, None, ]: """Creates a zarr store with the following unaligned group hierarchy: Group: / β”‚ Dimensions: (y: 3, x: 2) β”‚ Dimensions without coordinates: y, x β”‚ Data variables: β”‚ a (y) int64 24B ... β”‚ set0 (x) int64 16B ... └── Group: /Group1 β”‚ β”‚ Dimensions: () β”‚ β”‚ Data variables: β”‚ β”‚ a int64 8B ... β”‚ β”‚ b int64 8B ... β”‚ └── /Group1/subgroup1 β”‚ Dimensions: () β”‚ Data variables: β”‚ a int64 8B ... β”‚ b int64 8B ... └── Group: /Group2 Dimensions: (y: 2, x: 2) Dimensions without coordinates: y, x Data variables: a (y) int64 16B ... b (x) float64 16B ... 
""" def _unaligned_datatree_zarr(zarr_format: Literal[2, 3]) -> Path: filepath = tmp_path_factory.mktemp("data") / "unaligned_simple_datatree.zarr" root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": 0, "b": 1}) set2_data = xr.Dataset({"a": ("y", [2, 3]), "b": ("x", [0.1, 0.2])}) root_data.to_zarr( filepath, mode="w", zarr_format=zarr_format, ) set1_data.to_zarr( filepath, group="/Group1", mode="a", zarr_format=zarr_format, ) set2_data.to_zarr( filepath, group="/Group2", mode="a", zarr_format=zarr_format, ) set1_data.to_zarr( filepath, group="/Group1/subgroup1", mode="a", zarr_format=zarr_format, ) return filepath yield _unaligned_datatree_zarr class DatatreeIOBase: engine: T_DataTreeNetcdfEngine | None = None def test_to_netcdf(self, tmpdir, simple_datatree): filepath = tmpdir / "test.nc" original_dt = simple_datatree original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert roundtrip_dt._close is not None assert_equal(original_dt, roundtrip_dt) def test_decode_cf(self, tmpdir): filepath = tmpdir / "test-cf-convention.nc" original_dt = xr.DataTree( xr.Dataset( { "test": xr.DataArray( data=np.array([0, 1, 2], dtype=np.uint16), attrs={"_FillValue": 99}, ), } ) ) original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree( filepath, engine=self.engine, decode_cf=False ) as roundtrip_dt: assert original_dt["test"].dtype == roundtrip_dt["test"].dtype def test_to_netcdf_inherited_coords(self, tmpdir) -> None: filepath = tmpdir / "test.nc" original_dt = DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1, 2])}, coords={"x": [3, 4]}), "/sub": xr.Dataset({"b": (("x",), [5, 6])}), } ) original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) subtree = cast(DataTree, roundtrip_dt["/sub"]) assert "x" not in subtree.to_dataset(inherit=False).coords def test_netcdf_encoding(self, tmpdir, simple_datatree) -> None: filepath = tmpdir / "test.nc" original_dt = simple_datatree # add compression comp = dict(zlib=True, complevel=9) enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)} original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert roundtrip_dt["/set2/a"].encoding["zlib"] == comp["zlib"] assert roundtrip_dt["/set2/a"].encoding["complevel"] == comp["complevel"] enc["/not/a/group"] = {"foo": "bar"} # type: ignore[dict-item] with pytest.raises(ValueError, match="unexpected encoding group.*"): original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine) def test_write_subgroup(self, tmpdir) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ).children["child"] expected_dt = original_dt.copy() expected_dt.name = None filepath = tmpdir / "test.zarr" original_dt.to_netcdf(filepath, engine=self.engine) with open_datatree(filepath, engine=self.engine) as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) assert_identical(expected_dt, roundtrip_dt) @requires_netCDF4 def test_no_redundant_dimensions(self, tmpdir) -> None: # regression test for https://github.com/pydata/xarray/issues/10241 original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = tmpdir / "test.zarr" original_dt.to_netcdf(filepath, 
engine=self.engine) root = nc4.Dataset(str(filepath)) child = root.groups["child"] assert list(root.dimensions) == ["x"] assert list(child.dimensions) == [] @requires_dask def test_compute_false(self, tmpdir, simple_datatree): filepath = tmpdir / "test.nc" original_dt = simple_datatree.chunk() result = original_dt.to_netcdf(filepath, engine=self.engine, compute=False) with open_datatree(filepath, engine=self.engine) as in_progress_dt: assert in_progress_dt.isomorphic(original_dt) assert not in_progress_dt.equals(original_dt) result.compute() with open_datatree(filepath, engine=self.engine) as written_dt: assert_identical(written_dt, original_dt) def test_default_write_engine(self, tmpdir, simple_datatree, monkeypatch): # Ensure the other netCDF library are not installed exclude = "netCDF4" if self.engine == "h5netcdf" else "h5netcdf" monkeypatch.delitem(sys.modules, exclude, raising=False) monkeypatch.setattr(sys, "meta_path", []) filepath = tmpdir + "/phony_dims.nc" original_dt = simple_datatree original_dt.to_netcdf(filepath) # should not raise @requires_netCDF4 class TestNetCDF4DatatreeIO(DatatreeIOBase): engine: T_DataTreeNetcdfEngine | None = "netcdf4" def test_open_datatree(self, unaligned_datatree_nc) -> None: """Test if `open_datatree` fails to open a netCDF4 with an unaligned group hierarchy.""" with pytest.raises( ValueError, match=( re.escape( "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n" ) + ".*" ), ): open_datatree(unaligned_datatree_nc) @requires_dask def test_open_datatree_chunks(self, tmpdir, simple_datatree) -> None: filepath = tmpdir / "test.nc" chunks = {"x": 2, "y": 1} root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_netcdf(filepath, engine="netcdf4") with open_datatree(filepath, engine="netcdf4", chunks=chunks) as tree: xr.testing.assert_identical(tree, original_tree) assert_chunks_equal(tree, original_tree, enforce_dask=True) def test_open_groups(self, unaligned_datatree_nc) -> None: """Test `open_groups` with a netCDF4 file with an unaligned group hierarchy.""" unaligned_dict_of_datasets = open_groups(unaligned_datatree_nc) # Check that group names are keys in the dictionary of `xr.Datasets` assert "/" in unaligned_dict_of_datasets.keys() assert "/Group1" in unaligned_dict_of_datasets.keys() assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys() # Check that group name returns the correct datasets with xr.open_dataset(unaligned_datatree_nc, group="/") as expected: assert_identical(unaligned_dict_of_datasets["/"], expected) with xr.open_dataset(unaligned_datatree_nc, group="Group1") as expected: assert_identical(unaligned_dict_of_datasets["/Group1"], expected) with xr.open_dataset( unaligned_datatree_nc, group="/Group1/subgroup1" ) as expected: assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected) for ds in unaligned_dict_of_datasets.values(): ds.close() @requires_dask def test_open_groups_chunks(self, tmpdir) -> None: """Test `open_groups` with chunks on a netcdf4 file.""" chunks = {"x": 2, "y": 1} filepath = tmpdir / "test.nc" chunks = {"x": 2, "y": 1} root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 
6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_netcdf(filepath, mode="w") dict_of_datasets = open_groups(filepath, engine="netcdf4", chunks=chunks) for path, ds in dict_of_datasets.items(): assert {k: max(vs) for k, vs in ds.chunksizes.items()} == chunks, ( f"unexpected chunking for {path}" ) for ds in dict_of_datasets.values(): ds.close() def test_open_groups_to_dict(self, tmpdir) -> None: """Create an aligned netCDF4 with the following structure to test `open_groups` and `DataTree.from_dict`. Group: / β”‚ Dimensions: (lat: 1, lon: 2) β”‚ Dimensions without coordinates: lat, lon β”‚ Data variables: β”‚ root_variable (lat, lon) float64 16B ... └── Group: /Group1 β”‚ Dimensions: (lat: 1, lon: 2) β”‚ Dimensions without coordinates: lat, lon β”‚ Data variables: β”‚ group_1_var (lat, lon) float64 16B ... └── Group: /Group1/subgroup1 Dimensions: (lat: 1, lon: 2) Dimensions without coordinates: lat, lon Data variables: subgroup1_var (lat, lon) float64 16B ... """ filepath = tmpdir + "/all_aligned_child_nodes.nc" with nc4.Dataset(filepath, "w", format="NETCDF4") as root_group: group_1 = root_group.createGroup("/Group1") subgroup_1 = group_1.createGroup("/subgroup1") root_group.createDimension("lat", 1) root_group.createDimension("lon", 2) root_group.createVariable("root_variable", np.float64, ("lat", "lon")) group_1_var = group_1.createVariable( "group_1_var", np.float64, ("lat", "lon") ) group_1_var[:] = np.array([[0.1, 0.2]]) group_1_var.units = "K" group_1_var.long_name = "air_temperature" subgroup1_var = subgroup_1.createVariable( "subgroup1_var", np.float64, ("lat", "lon") ) subgroup1_var[:] = np.array([[0.1, 0.2]]) aligned_dict_of_datasets = open_groups(filepath) aligned_dt = DataTree.from_dict(aligned_dict_of_datasets) with open_datatree(filepath) as opened_tree: assert opened_tree.identical(aligned_dt) for ds in aligned_dict_of_datasets.values(): ds.close() def test_open_datatree_specific_group(self, tmpdir, simple_datatree) -> None: """Test opening a specific group within a NetCDF file using `open_datatree`.""" filepath = tmpdir / "test.nc" group = "/set1" original_dt = simple_datatree original_dt.to_netcdf(filepath) expected_subtree = original_dt[group].copy() expected_subtree.orphan() with open_datatree(filepath, group=group, engine=self.engine) as subgroup_tree: assert subgroup_tree.root.parent is None assert_equal(subgroup_tree, expected_subtree) @network @requires_pydap class TestPyDAPDatatreeIO: """Test PyDAP backend for DataTree.""" engine: T_DataTreeNetcdfEngine | None = "pydap" # you can check these by adding a .dmr to urls, and replacing dap4 with http unaligned_datatree_url = ( "dap4://test.opendap.org/opendap/dap4/unaligned_simple_datatree.nc.h5" ) all_aligned_child_nodes_url = ( "dap4://test.opendap.org/opendap/dap4/all_aligned_child_nodes.nc.h5" ) simplegroup_datatree_url = "dap4://test.opendap.org/opendap/dap4/SimpleGroup.nc4.h5" def test_open_datatree(self, url=unaligned_datatree_url) -> None: """Test if `open_datatree` fails to open a netCDF4 with an unaligned group hierarchy.""" with pytest.raises( ValueError, match=( re.escape( "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n" ) + ".*" ), ): open_datatree(url, engine=self.engine) def test_open_groups(self, url=unaligned_datatree_url) -> None: """Test `open_groups` with a netCDF4/HDF5 file with an unaligned 
group hierarchy.""" unaligned_dict_of_datasets = open_groups(url, engine=self.engine) # Check that group names are keys in the dictionary of `xr.Datasets` assert "/" in unaligned_dict_of_datasets.keys() assert "/Group1" in unaligned_dict_of_datasets.keys() assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys() # Check that group name returns the correct datasets with xr.open_dataset(url, engine=self.engine, group="/") as expected: assert_identical(unaligned_dict_of_datasets["/"], expected) with xr.open_dataset(url, group="Group1", engine=self.engine) as expected: assert_identical(unaligned_dict_of_datasets["/Group1"], expected) with xr.open_dataset( url, group="/Group1/subgroup1", engine=self.engine, ) as expected: assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected) def test_inherited_coords(self, url=simplegroup_datatree_url) -> None: """Test that `open_datatree` inherits coordinates from root tree. This particular h5 file is a test file that inherits the time coordinate from the root dataset to the child dataset. Group: / β”‚ Dimensions: (time: 1, Z: 1000, nv: 2) β”‚ Coordinates: | time: (time) float32 0.5 | Z: (Z) float32 -0.0 -1.0 -2.0 ... β”‚ Data variables: β”‚ Pressure (Z) float32 ... | time_bnds (time, nv) float32 ... └── Group: /SimpleGroup β”‚ Dimensions: (time: 1, Z: 1000, nv: 2, Y: 40, X: 40) β”‚ Coordinates: | Y: (Y) int16 1 2 3 4 ... | X: (X) int16 1 2 3 4 ... | Inherited coordinates: | time: (time) float32 0.5 | Z: (Z) float32 -0.0 -1.0 -2.0 ... β”‚ Data variables: β”‚ Temperature (time, Z, Y, X) float32 ... | Salinity (time, Z, Y, X) float32 ... """ tree = open_datatree(url, engine=self.engine) assert set(tree.dims) == {"time", "Z", "nv"} assert tree["/SimpleGroup"].coords["time"].dims == ("time",) assert tree["/SimpleGroup"].coords["Z"].dims == ("Z",) assert tree["/SimpleGroup"].coords["Y"].dims == ("Y",) assert tree["/SimpleGroup"].coords["X"].dims == ("X",) with xr.open_dataset(url, engine=self.engine, group="/SimpleGroup") as expected: assert set(tree["/SimpleGroup"].dims) == set( list(expected.dims) + ["Z", "nv"] ) def test_open_groups_to_dict(self, url=all_aligned_child_nodes_url) -> None: aligned_dict_of_datasets = open_groups(url, engine=self.engine) aligned_dt = DataTree.from_dict(aligned_dict_of_datasets) with open_datatree(url, engine=self.engine) as opened_tree: assert opened_tree.identical(aligned_dt) @requires_h5netcdf class TestH5NetCDFDatatreeIO(DatatreeIOBase): engine: T_DataTreeNetcdfEngine | None = "h5netcdf" def test_phony_dims_warning(self, tmpdir) -> None: filepath = tmpdir + "/phony_dims.nc" import h5py foo_data = np.arange(125).reshape(5, 5, 5) bar_data = np.arange(625).reshape(25, 5, 5) var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data} with h5py.File(filepath, "w") as f: grps = ["bar", "baz"] for grp in grps: fx = f.create_group(grp) for k, v in var.items(): fx.create_dataset(k, data=v) with pytest.warns(UserWarning, match="The 'phony_dims' kwarg"): with open_datatree(filepath, engine=self.engine) as tree: assert tree.bar.dims == { "phony_dim_0": 5, "phony_dim_1": 5, "phony_dim_2": 5, "phony_dim_3": 25, } def test_roundtrip_via_bytes(self, simple_datatree) -> None: original_dt = simple_datatree roundtrip_dt = open_datatree(original_dt.to_netcdf()) assert_equal(original_dt, roundtrip_dt) def test_roundtrip_via_bytes_engine_specified(self, simple_datatree) -> None: original_dt = simple_datatree roundtrip_dt = open_datatree(original_dt.to_netcdf(engine=self.engine)) assert_equal(original_dt, 
roundtrip_dt) def test_to_bytes_compute_false(self, simple_datatree) -> None: original_dt = simple_datatree with pytest.raises( NotImplementedError, match=re.escape("to_netcdf() with compute=False is not yet implemented"), ): original_dt.to_netcdf(compute=False) def test_roundtrip_using_filelike_object(self, tmpdir, simple_datatree) -> None: original_dt = simple_datatree filepath = tmpdir + "/test.nc" # h5py requires both read and write access when writing, it will # work with file-like objects provided they support both, and are # seekable. with open(filepath, "wb+") as file: original_dt.to_netcdf(file, engine=self.engine) with open(filepath, "rb") as file: with open_datatree(file, engine=self.engine) as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) @requires_zarr @parametrize_zarr_format class TestZarrDatatreeIO: engine = "zarr" def test_to_zarr(self, tmpdir, simple_datatree, zarr_format) -> None: filepath = str(tmpdir / "test.zarr") original_dt = simple_datatree original_dt.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) @pytest.mark.filterwarnings( "ignore:Numcodecs codecs are not in the Zarr version 3 specification" ) def test_zarr_encoding(self, tmpdir, simple_datatree, zarr_format) -> None: filepath = str(tmpdir / "test.zarr") original_dt = simple_datatree if zarr_format == 2: from numcodecs.blosc import Blosc codec = Blosc(cname="zstd", clevel=3, shuffle=2) comp = {"compressors": (codec,)} if has_zarr_v3 else {"compressor": codec} elif zarr_format == 3: # specifying codecs in zarr_format=3 requires importing from zarr 3 namespace import numcodecs.zarr3 comp = {"compressors": (numcodecs.zarr3.Blosc(cname="zstd", clevel=3),)} enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)} original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: compressor_key = "compressors" if has_zarr_v3 else "compressor" assert ( roundtrip_dt["/set2/a"].encoding[compressor_key] == comp[compressor_key] ) enc["/not/a/group"] = {"foo": "bar"} # type: ignore[dict-item] with pytest.raises(ValueError, match="unexpected encoding group.*"): original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format) @pytest.mark.xfail(reason="upstream zarr read-only changes have broken this test") @pytest.mark.filterwarnings("ignore:Duplicate name") def test_to_zarr_zip_store(self, tmpdir, simple_datatree, zarr_format) -> None: from zarr.storage import ZipStore filepath = str(tmpdir / "test.zarr.zip") original_dt = simple_datatree store = ZipStore(filepath, mode="w") original_dt.to_zarr(store, zarr_format=zarr_format) with open_datatree(store, engine="zarr") as roundtrip_dt: # type: ignore[arg-type, unused-ignore] assert_equal(original_dt, roundtrip_dt) def test_to_zarr_not_consolidated( self, tmpdir, simple_datatree, zarr_format ) -> None: filepath = tmpdir / "test.zarr" zmetadata = filepath / ".zmetadata" s1zmetadata = filepath / "set1" / ".zmetadata" filepath = str(filepath) # casting to str avoids a pathlib bug in xarray original_dt = simple_datatree original_dt.to_zarr(filepath, consolidated=False, zarr_format=zarr_format) assert not zmetadata.exists() assert not s1zmetadata.exists() with pytest.warns(RuntimeWarning, match="consolidated"): with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) def test_to_zarr_default_write_mode( self, tmpdir, simple_datatree, zarr_format ) -> 
None: simple_datatree.to_zarr(str(tmpdir), zarr_format=zarr_format) import zarr # expected exception type changed in zarr-python v2->v3, see https://github.com/zarr-developers/zarr-python/issues/2821 expected_exception_type = ( FileExistsError if has_zarr_v3 else zarr.errors.ContainsGroupError ) # with default settings, to_zarr should not overwrite an existing dir with pytest.raises(expected_exception_type): simple_datatree.to_zarr(str(tmpdir)) @requires_dask def test_to_zarr_compute_false( self, tmp_path: Path, simple_datatree: DataTree, zarr_format: Literal[2, 3] ) -> None: import dask.array as da storepath = tmp_path / "test.zarr" original_dt = simple_datatree.chunk() result = original_dt.to_zarr( str(storepath), compute=False, zarr_format=zarr_format ) def assert_expected_zarr_files_exist( arr_dir: Path, chunks_expected: bool, is_scalar: bool, zarr_format: Literal[2, 3], ) -> None: """For one zarr array, check that all expected metadata and chunk data files exist.""" # TODO: This function is now so complicated that it's practically checking compliance with the whole zarr spec... # TODO: Perhaps it would be better to instead trust that zarr-python is spec-compliant and check `DataTree` against zarr-python? # TODO: The way to do that would ideally be to use zarr-pythons ability to determine how many chunks have been initialized. if zarr_format == 2: zarray_file, zattrs_file = (arr_dir / ".zarray"), (arr_dir / ".zattrs") assert zarray_file.exists() and zarray_file.is_file() assert zattrs_file.exists() and zattrs_file.is_file() chunk_file = arr_dir / "0" if chunks_expected: # assumes empty chunks were written # (i.e. they did not contain only fill_value and write_empty_chunks was False) assert chunk_file.exists() and chunk_file.is_file() else: # either dask array or array of all fill_values assert not chunk_file.exists() elif zarr_format == 3: metadata_file = arr_dir / "zarr.json" assert metadata_file.exists() and metadata_file.is_file() chunks_dir = arr_dir / "c" chunk_file = chunks_dir / "0" if chunks_expected: # assumes empty chunks were written # (i.e. 
they did not contain only fill_value and write_empty_chunks was False) if is_scalar: # this is the expected behaviour for storing scalars in zarr 3, see https://github.com/pydata/xarray/issues/10147 assert chunks_dir.exists() and chunks_dir.is_file() else: assert chunks_dir.exists() and chunks_dir.is_dir() assert chunk_file.exists() and chunk_file.is_file() else: assert not chunks_dir.exists() assert not chunk_file.exists() DEFAULT_ZARR_FILL_VALUE = 0 # The default value of write_empty_chunks changed from True->False in zarr-python v2->v3 WRITE_EMPTY_CHUNKS_DEFAULT = not has_zarr_v3 for node in original_dt.subtree: # inherited variables aren't meant to be written to zarr local_node_variables = node.to_dataset(inherit=False).variables for name, var in local_node_variables.items(): var_dir = storepath / node.path.removeprefix("/") / name # type: ignore[operator] assert_expected_zarr_files_exist( arr_dir=var_dir, # don't expect dask.Arrays to be written to disk, as compute=False # also don't expect numpy arrays containing only zarr's fill_value to be written to disk chunks_expected=( not isinstance(var.data, da.Array) and ( var.data != DEFAULT_ZARR_FILL_VALUE or WRITE_EMPTY_CHUNKS_DEFAULT ) ), is_scalar=not bool(var.dims), zarr_format=zarr_format, ) with open_datatree(str(storepath), engine="zarr") as in_progress_dt: assert in_progress_dt.isomorphic(original_dt) assert not in_progress_dt.equals(original_dt) result.compute() with open_datatree(str(storepath), engine="zarr") as written_dt: assert_identical(written_dt, original_dt) @requires_dask def test_rplus_mode( self, tmp_path: Path, simple_datatree: DataTree, zarr_format: Literal[2, 3] ) -> None: storepath = tmp_path / "test.zarr" original_dt = simple_datatree.chunk() original_dt.to_zarr(storepath, compute=False, zarr_format=zarr_format) original_dt.to_zarr(storepath, mode="r+") with open_datatree(str(storepath), engine="zarr") as written_dt: assert_identical(written_dt, original_dt) @requires_dask def test_to_zarr_no_redundant_computation(self, tmpdir, zarr_format) -> None: import dask.array as da eval_count = 0 def expensive_func(x): nonlocal eval_count eval_count += 1 return x + 1 base = da.random.random((), chunks=()) derived1 = da.map_blocks(expensive_func, base, meta=np.array((), np.float64)) derived2 = derived1 + 1 # depends on derived1 tree = DataTree.from_dict( { "group1": xr.Dataset({"derived": derived1}), "group2": xr.Dataset({"derived": derived2}), } ) filepath = str(tmpdir / "test.zarr") tree.to_zarr(filepath, zarr_format=zarr_format) assert eval_count == 1 # not 2 def test_to_zarr_inherited_coords(self, tmpdir, zarr_format): original_dt = DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1, 2])}, coords={"x": [3, 4]}), "/sub": xr.Dataset({"b": (("x",), [5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) subtree = cast(DataTree, roundtrip_dt["/sub"]) assert "x" not in subtree.to_dataset(inherit=False).coords def test_open_groups_round_trip(self, tmpdir, simple_datatree, zarr_format) -> None: """Test `open_groups` opens a zarr store with the `simple_datatree` structure.""" filepath = str(tmpdir / "test.zarr") original_dt = simple_datatree original_dt.to_zarr(filepath, zarr_format=zarr_format) roundtrip_dict = open_groups(filepath, engine="zarr") roundtrip_dt = DataTree.from_dict(roundtrip_dict) with open_datatree(filepath, engine="zarr") as opened_tree: assert 
opened_tree.identical(roundtrip_dt) for ds in roundtrip_dict.values(): ds.close() @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_open_datatree(self, unaligned_datatree_zarr_factory, zarr_format) -> None: """Test if `open_datatree` fails to open a zarr store with an unaligned group hierarchy.""" storepath = unaligned_datatree_zarr_factory(zarr_format=zarr_format) with pytest.raises( ValueError, match=( re.escape("group '/Group2' is not aligned with its parents:") + ".*" ), ): open_datatree(storepath, engine="zarr") @requires_dask def test_open_datatree_chunks(self, tmpdir, zarr_format) -> None: filepath = str(tmpdir / "test.zarr") chunks = {"x": 2, "y": 1} root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr", chunks=chunks) as tree: xr.testing.assert_identical(tree, original_tree) assert_chunks_equal(tree, original_tree, enforce_dask=True) # https://github.com/pydata/xarray/issues/10098 # If the open tasks are not give unique tokens per node, and the # dask graph is computed in one go, data won't be uniquely loaded # from each node. xr.testing.assert_identical(tree.compute(), original_tree) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_open_groups(self, unaligned_datatree_zarr_factory, zarr_format) -> None: """Test `open_groups` with a zarr store of an unaligned group hierarchy.""" storepath = unaligned_datatree_zarr_factory(zarr_format=zarr_format) unaligned_dict_of_datasets = open_groups(storepath, engine="zarr") assert "/" in unaligned_dict_of_datasets.keys() assert "/Group1" in unaligned_dict_of_datasets.keys() assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys() assert "/Group2" in unaligned_dict_of_datasets.keys() # Check that group name returns the correct datasets with xr.open_dataset(storepath, group="/", engine="zarr") as expected: assert_identical(unaligned_dict_of_datasets["/"], expected) with xr.open_dataset(storepath, group="Group1", engine="zarr") as expected: assert_identical(unaligned_dict_of_datasets["/Group1"], expected) with xr.open_dataset( storepath, group="/Group1/subgroup1", engine="zarr" ) as expected: assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected) with xr.open_dataset(storepath, group="/Group2", engine="zarr") as expected: assert_identical(unaligned_dict_of_datasets["/Group2"], expected) for ds in unaligned_dict_of_datasets.values(): ds.close() @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) @pytest.mark.parametrize("write_consolidated_metadata", [True, False, None]) def test_open_datatree_specific_group( self, tmpdir, simple_datatree, write_consolidated_metadata, zarr_format, ) -> None: """Test opening a specific group within a Zarr store using `open_datatree`.""" filepath = str(tmpdir / "test.zarr") group = "/set2" original_dt = simple_datatree original_dt.to_zarr( filepath, consolidated=write_consolidated_metadata, zarr_format=zarr_format ) expected_subtree = original_dt[group].copy() expected_subtree.orphan() with 
open_datatree(filepath, group=group, engine=self.engine) as subgroup_tree: assert subgroup_tree.root.parent is None assert_equal(subgroup_tree, expected_subtree) @requires_dask def test_open_groups_chunks(self, tmpdir, zarr_format) -> None: """Test `open_groups` with chunks on a zarr store.""" chunks = {"x": 2, "y": 1} filepath = str(tmpdir / "test.zarr") root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": ("y", [-1, 0, 1]), "b": ("x", [-10, 6])}) set2_data = xr.Dataset({"a": ("y", [1, 2, 3]), "b": ("x", [0.1, 0.2])}) original_tree = DataTree.from_dict( { "/": root_data.chunk(chunks), "/group1": set1_data.chunk(chunks), "/group2": set2_data.chunk(chunks), } ) original_tree.to_zarr(filepath, mode="w", zarr_format=zarr_format) dict_of_datasets = open_groups(filepath, engine="zarr", chunks=chunks) for path, ds in dict_of_datasets.items(): assert {k: max(vs) for k, vs in ds.chunksizes.items()} == chunks, ( f"unexpected chunking for {path}" ) for ds in dict_of_datasets.values(): ds.close() def test_write_subgroup(self, tmpdir, zarr_format) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ).children["child"] expected_dt = original_dt.copy() expected_dt.name = None filepath = str(tmpdir / "test.zarr") original_dt.to_zarr(filepath, zarr_format=zarr_format) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_equal(original_dt, roundtrip_dt) assert_identical(expected_dt, roundtrip_dt) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_write_inherited_coords_false(self, tmpdir, zarr_format) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr( filepath, write_inherited_coords=False, zarr_format=zarr_format ) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_identical(original_dt, roundtrip_dt) expected_child = original_dt.children["child"].copy(inherit=False) expected_child.name = None with open_datatree(filepath, group="child", engine="zarr") as roundtrip_child: assert_identical(expected_child, roundtrip_child) @pytest.mark.filterwarnings( "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning" ) def test_write_inherited_coords_true(self, tmpdir, zarr_format) -> None: original_dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2, 3]}), "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}), } ) filepath = str(tmpdir / "test.zarr") original_dt.to_zarr( filepath, write_inherited_coords=True, zarr_format=zarr_format ) with open_datatree(filepath, engine="zarr") as roundtrip_dt: assert_identical(original_dt, roundtrip_dt) expected_child = original_dt.children["child"].copy(inherit=True) expected_child.name = None with open_datatree(filepath, group="child", engine="zarr") as roundtrip_child: assert_identical(expected_child, roundtrip_child) xarray-2025.09.0/xarray/tests/test_backends_file_manager.py000066400000000000000000000160241505620616400236730ustar00rootroot00000000000000from __future__ import annotations import gc import pickle import threading from unittest import mock import pytest from xarray.backends.file_manager import CachingFileManager from xarray.backends.lru_cache import LRUCache from xarray.core.options import set_options from xarray.tests import assert_no_warnings 
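# The tests below exercise CachingFileManager, which opens files lazily via an
# opener callable, keeps the resulting file objects in a shared (optionally
# LRU-bounded) cache, and closes them when evicted or when the manager is closed.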
@pytest.fixture(params=[1, 2, 3, None]) def file_cache(request): maxsize = request.param if maxsize is None: yield {} else: yield LRUCache(maxsize) def test_file_manager_mock_write(file_cache) -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) lock = mock.MagicMock(spec=threading.Lock()) manager = CachingFileManager(opener, "filename", lock=lock, cache=file_cache) f = manager.acquire() f.write("contents") manager.close() assert not file_cache opener.assert_called_once_with("filename") mock_file.write.assert_called_once_with("contents") mock_file.close.assert_called_once_with() lock.__enter__.assert_has_calls([mock.call(), mock.call()]) @pytest.mark.parametrize("warn_for_unclosed_files", [True, False]) def test_file_manager_autoclose(warn_for_unclosed_files) -> None: mock_file = mock.Mock() opener = mock.Mock(return_value=mock_file) cache: dict = {} manager = CachingFileManager(opener, "filename", cache=cache) manager.acquire() assert cache # can no longer use pytest.warns(None) if warn_for_unclosed_files: ctx = pytest.warns(RuntimeWarning) else: ctx = assert_no_warnings() # type: ignore[assignment] with set_options(warn_for_unclosed_files=warn_for_unclosed_files): with ctx: del manager gc.collect() assert not cache mock_file.close.assert_called_once_with() def test_file_manager_autoclose_while_locked() -> None: opener = mock.Mock() lock = threading.Lock() cache: dict = {} manager = CachingFileManager(opener, "filename", lock=lock, cache=cache) manager.acquire() assert cache lock.acquire() with set_options(warn_for_unclosed_files=False): del manager gc.collect() # can't clear the cache while locked, but also don't block in __del__ assert cache def test_file_manager_repr() -> None: opener = mock.Mock() manager = CachingFileManager(opener, "my-file") assert "my-file" in repr(manager) def test_file_manager_cache_and_refcounts() -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) cache: dict = {} ref_counts: dict = {} manager = CachingFileManager(opener, "filename", cache=cache, ref_counts=ref_counts) assert ref_counts[manager._key] == 1 assert not cache manager.acquire() assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager gc.collect() assert not ref_counts assert not cache def test_file_manager_cache_repeated_open() -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) cache: dict = {} manager = CachingFileManager(opener, "filename", cache=cache) manager.acquire() assert len(cache) == 1 manager2 = CachingFileManager(opener, "filename", cache=cache) manager2.acquire() assert len(cache) == 2 with set_options(warn_for_unclosed_files=False): del manager gc.collect() assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager2 gc.collect() assert not cache def test_file_manager_cache_with_pickle(tmpdir) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: f.write("data") cache: dict = {} with mock.patch("xarray.backends.file_manager.FILE_CACHE", cache): assert not cache manager = CachingFileManager(open, path, mode="r") manager.acquire() assert len(cache) == 1 manager2 = pickle.loads(pickle.dumps(manager)) manager2.acquire() assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager gc.collect() # assert len(cache) == 1 with set_options(warn_for_unclosed_files=False): del manager2 gc.collect() assert not cache def test_file_manager_write_consecutive(tmpdir, file_cache) -> None: path1 = 
str(tmpdir.join("testing1.txt")) path2 = str(tmpdir.join("testing2.txt")) manager1 = CachingFileManager(open, path1, mode="w", cache=file_cache) manager2 = CachingFileManager(open, path2, mode="w", cache=file_cache) f1a = manager1.acquire() f1a.write("foo") f1a.flush() f2 = manager2.acquire() f2.write("bar") f2.flush() f1b = manager1.acquire() f1b.write("baz") assert (getattr(file_cache, "maxsize", float("inf")) > 1) == (f1a is f1b) manager1.close() manager2.close() with open(path1) as f: assert f.read() == "foobaz" with open(path2) as f: assert f.read() == "bar" def test_file_manager_write_concurrent(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) manager = CachingFileManager(open, path, mode="w", cache=file_cache) f1 = manager.acquire() f2 = manager.acquire() f3 = manager.acquire() assert f1 is f2 assert f2 is f3 f1.write("foo") f1.flush() f2.write("bar") f2.flush() f3.write("baz") f3.flush() manager.close() with open(path) as f: assert f.read() == "foobarbaz" def test_file_manager_write_pickle(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) manager = CachingFileManager(open, path, mode="w", cache=file_cache) f = manager.acquire() f.write("foo") f.flush() manager2 = pickle.loads(pickle.dumps(manager)) f2 = manager2.acquire() f2.write("bar") manager2.close() manager.close() with open(path) as f: assert f.read() == "foobar" def test_file_manager_read(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: f.write("foobar") manager = CachingFileManager(open, path, cache=file_cache) f = manager.acquire() assert f.read() == "foobar" manager.close() def test_file_manager_acquire_context(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: f.write("foobar") class AcquisitionError(Exception): pass manager = CachingFileManager(open, path, cache=file_cache) with pytest.raises(AcquisitionError): with manager.acquire_context() as f: assert f.read() == "foobar" raise AcquisitionError assert not file_cache # file was *not* already open with manager.acquire_context() as f: assert f.read() == "foobar" with pytest.raises(AcquisitionError): with manager.acquire_context() as f: f.seek(0) assert f.read() == "foobar" raise AcquisitionError assert file_cache # file *was* already open manager.close() xarray-2025.09.0/xarray/tests/test_backends_locks.py000066400000000000000000000005561505620616400224000ustar00rootroot00000000000000from __future__ import annotations import threading from xarray.backends import locks def test_threaded_lock() -> None: lock1 = locks._get_threaded_lock("foo") assert isinstance(lock1, type(threading.Lock())) lock2 = locks._get_threaded_lock("foo") assert lock1 is lock2 lock3 = locks._get_threaded_lock("bar") assert lock1 is not lock3 xarray-2025.09.0/xarray/tests/test_backends_lru_cache.py000066400000000000000000000044061505620616400232100ustar00rootroot00000000000000from __future__ import annotations from typing import Any from unittest import mock import pytest from xarray.backends.lru_cache import LRUCache def test_simple() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 assert cache["x"] == 1 assert cache["y"] == 2 assert len(cache) == 2 assert dict(cache) == {"x": 1, "y": 2} assert list(cache.keys()) == ["x", "y"] assert list(cache.items()) == [("x", 1), ("y", 2)] cache["z"] = 3 assert len(cache) == 2 assert list(cache.items()) == [("y", 2), ("z", 3)] def test_trivial() -> None: cache: LRUCache[Any, Any] = 
LRUCache(maxsize=0) cache["x"] = 1 assert len(cache) == 0 def test_invalid() -> None: with pytest.raises(TypeError): LRUCache(maxsize=None) # type: ignore[arg-type] with pytest.raises(ValueError): LRUCache(maxsize=-1) def test_update_priority() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 assert list(cache) == ["x", "y"] assert "x" in cache # contains assert list(cache) == ["y", "x"] assert cache["y"] == 2 # getitem assert list(cache) == ["x", "y"] cache["x"] = 3 # setitem assert list(cache.items()) == [("y", 2), ("x", 3)] def test_del() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 del cache["x"] assert dict(cache) == {"y": 2} def test_on_evict() -> None: on_evict = mock.Mock() cache = LRUCache(maxsize=1, on_evict=on_evict) cache["x"] = 1 cache["y"] = 2 on_evict.assert_called_once_with("x", 1) def test_on_evict_trivial() -> None: on_evict = mock.Mock() cache = LRUCache(maxsize=0, on_evict=on_evict) cache["x"] = 1 on_evict.assert_called_once_with("x", 1) def test_resize() -> None: cache: LRUCache[Any, Any] = LRUCache(maxsize=2) assert cache.maxsize == 2 cache["w"] = 0 cache["x"] = 1 cache["y"] = 2 assert list(cache.items()) == [("x", 1), ("y", 2)] cache.maxsize = 10 cache["z"] = 3 assert list(cache.items()) == [("x", 1), ("y", 2), ("z", 3)] cache.maxsize = 1 assert list(cache.items()) == [("z", 3)] with pytest.raises(ValueError): cache.maxsize = -1 xarray-2025.09.0/xarray/tests/test_calendar_ops.py000066400000000000000000000246141505620616400220660ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest from xarray import CFTimeIndex, DataArray, Dataset, infer_freq from xarray.coding.calendar_ops import convert_calendar, interp_calendar from xarray.coding.cftime_offsets import date_range from xarray.testing import assert_identical from xarray.tests import requires_cftime cftime = pytest.importorskip("cftime") @pytest.mark.parametrize( "source, target, use_cftime, freq", [ ("standard", "noleap", None, "D"), ("noleap", "proleptic_gregorian", True, "D"), ("noleap", "all_leap", None, "D"), ("all_leap", "proleptic_gregorian", False, "4h"), ], ) def test_convert_calendar(source, target, use_cftime, freq): src = DataArray( date_range("2004-01-01", "2004-12-31", freq=freq, calendar=source), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ) conv = convert_calendar(da_src, target, use_cftime=use_cftime) assert conv.time.dt.calendar == target if source != "noleap": expected_times = date_range( "2004-01-01", "2004-12-31", freq=freq, use_cftime=use_cftime, calendar=target, ) else: expected_times_pre_leap = date_range( "2004-01-01", "2004-02-28", freq=freq, use_cftime=use_cftime, calendar=target, ) expected_times_post_leap = date_range( "2004-03-01", "2004-12-31", freq=freq, use_cftime=use_cftime, calendar=target, ) expected_times = expected_times_pre_leap.append(expected_times_post_leap) np.testing.assert_array_equal(conv.time, expected_times) def test_convert_calendar_dataset(): # Check that variables without a time dimension are not modified src = DataArray( date_range("2004-01-01", "2004-12-31", freq="D", calendar="standard"), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ).expand_dims(lat=[0, 1]) ds_src = Dataset({"hastime": da_src, "notime": (("lat",), [0, 1])}) conv = convert_calendar(ds_src, "360_day", 
align_on="date") assert conv.time.dt.calendar == "360_day" assert_identical(ds_src.notime, conv.notime) @pytest.mark.parametrize( "source,target,freq", [ ("standard", "360_day", "D"), ("360_day", "proleptic_gregorian", "D"), ("proleptic_gregorian", "360_day", "4h"), ], ) @pytest.mark.parametrize("align_on", ["date", "year"]) def test_convert_calendar_360_days(source, target, freq, align_on): src = DataArray( date_range("2004-01-01", "2004-12-30", freq=freq, calendar=source), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ) conv = convert_calendar(da_src, target, align_on=align_on) assert conv.time.dt.calendar == target if align_on == "date": np.testing.assert_array_equal( conv.time.resample(time="ME").last().dt.day, [30, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30], ) elif target == "360_day": np.testing.assert_array_equal( conv.time.resample(time="ME").last().dt.day, [30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 29], ) else: np.testing.assert_array_equal( conv.time.resample(time="ME").last().dt.day, [30, 29, 30, 30, 31, 30, 30, 31, 30, 31, 29, 31], ) if source == "360_day" and align_on == "year": assert conv.size == 360 if freq == "D" else 360 * 4 else: assert conv.size == 359 if freq == "D" else 359 * 4 def test_convert_calendar_360_days_random(): da_std = DataArray( np.linspace(0, 1, 366), dims=("time",), coords={ "time": date_range( "2004-01-01", "2004-12-31", freq="D", calendar="standard", use_cftime=False, ) }, ) da_360 = DataArray( np.linspace(0, 1, 360), dims=("time",), coords={ "time": date_range("2004-01-01", "2004-12-30", freq="D", calendar="360_day") }, ) conv = convert_calendar(da_std, "360_day", align_on="random") conv2 = convert_calendar(da_std, "360_day", align_on="random") assert (conv != conv2).any() conv = convert_calendar(da_360, "standard", use_cftime=False, align_on="random") assert np.datetime64("2004-02-29") not in conv.time conv2 = convert_calendar(da_360, "standard", use_cftime=False, align_on="random") assert (conv2 != conv).any() # Ensure that added days are evenly distributed in the 5 fifths of each year conv = convert_calendar(da_360, "noleap", align_on="random", missing=np.nan) conv = conv.where(conv.isnull(), drop=True) nandoys = conv.time.dt.dayofyear[:366] assert all(nandoys < np.array([74, 147, 220, 293, 366])) assert all(nandoys > np.array([0, 73, 146, 219, 292])) @requires_cftime @pytest.mark.parametrize( "source,target,freq", [ ("standard", "noleap", "D"), ("noleap", "proleptic_gregorian", "4h"), ("noleap", "all_leap", "ME"), ("360_day", "noleap", "D"), ("noleap", "360_day", "D"), ], ) def test_convert_calendar_missing(source, target, freq): src = DataArray( date_range( "2004-01-01", "2004-12-31" if source != "360_day" else "2004-12-30", freq=freq, calendar=source, ), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ) out = convert_calendar(da_src, target, missing=np.nan, align_on="date") expected_freq = freq assert infer_freq(out.time) == expected_freq expected = date_range( "2004-01-01", "2004-12-31" if target != "360_day" else "2004-12-30", freq=freq, calendar=target, ) np.testing.assert_array_equal(out.time, expected) if freq != "ME": out_without_missing = convert_calendar(da_src, target, align_on="date") expected_nan = out.isel(time=~out.time.isin(out_without_missing.time)) assert expected_nan.isnull().all() expected_not_nan = out.sel(time=out_without_missing.time) assert_identical(expected_not_nan, 
out_without_missing) @requires_cftime def test_convert_calendar_errors(): src_nl = DataArray( date_range("0000-01-01", "0000-12-31", freq="D", calendar="noleap"), dims=("time",), name="time", ) # no align_on for conversion to 360_day with pytest.raises(ValueError, match="Argument `align_on` must be specified"): convert_calendar(src_nl, "360_day") # Standard doesn't support year 0 with pytest.raises( ValueError, match="Source time coordinate contains dates with year 0" ): convert_calendar(src_nl, "standard") # no align_on for conversion from 360 day src_360 = convert_calendar(src_nl, "360_day", align_on="year") with pytest.raises(ValueError, match="Argument `align_on` must be specified"): convert_calendar(src_360, "noleap") # Datetime objects da = DataArray([0, 1, 2], dims=("x",), name="x") with pytest.raises(ValueError, match="Coordinate x must contain datetime objects."): convert_calendar(da, "standard", dim="x") def test_convert_calendar_dimension_name(): src = DataArray( date_range("2004-01-01", "2004-01-31", freq="D", calendar="noleap"), dims=("date",), name="date", ) out = convert_calendar(src, "proleptic_gregorian", dim="date") np.testing.assert_array_equal(src, out) def test_convert_calendar_same_calendar(): src = DataArray( date_range("2000-01-01", periods=12, freq="6h", use_cftime=False), dims=("time",), name="time", ) out = convert_calendar(src, "proleptic_gregorian") assert src is out @pytest.mark.parametrize( "source,target", [ ("standard", "noleap"), ("noleap", "proleptic_gregorian"), ("standard", "360_day"), ("360_day", "proleptic_gregorian"), ("noleap", "all_leap"), ("360_day", "noleap"), ], ) def test_interp_calendar(source, target): src = DataArray( date_range("2004-01-01", "2004-07-30", freq="D", calendar=source), dims=("time",), name="time", ) tgt = DataArray( date_range("2004-01-01", "2004-07-30", freq="D", calendar=target), dims=("time",), name="time", ) da_src = DataArray( np.linspace(0, 1, src.size), dims=("time",), coords={"time": src} ) conv = interp_calendar(da_src, tgt) assert_identical(tgt.time, conv.time) np.testing.assert_almost_equal(conv.max(), 1, 2) assert conv.min() == 0 @requires_cftime def test_interp_calendar_errors(): src_nl = DataArray( [1] * 100, dims=("time",), coords={ "time": date_range("0000-01-01", periods=100, freq="MS", calendar="noleap") }, ) tgt_360 = date_range("0001-01-01", "0001-12-30", freq="MS", calendar="standard") with pytest.raises( ValueError, match="Source time coordinate contains dates with year 0" ): interp_calendar(src_nl, tgt_360) da1 = DataArray([0, 1, 2], dims=("x",), name="x") da2 = da1 + 1 with pytest.raises( ValueError, match="Both 'source.x' and 'target' must contain datetime objects." 
): interp_calendar(da1, da2, dim="x") @requires_cftime @pytest.mark.parametrize( ("source_calendar", "target_calendar", "expected_index"), [("standard", "noleap", CFTimeIndex), ("all_leap", "standard", pd.DatetimeIndex)], ) def test_convert_calendar_produces_time_index( source_calendar, target_calendar, expected_index ): # https://github.com/pydata/xarray/issues/9138 time = date_range("2000-01-01", "2002-01-01", freq="D", calendar=source_calendar) temperature = np.ones(len(time)) da = DataArray( data=temperature, dims=["time"], coords=dict( time=time, ), ) converted = da.convert_calendar(target_calendar) assert isinstance(converted.indexes["time"], expected_index) xarray-2025.09.0/xarray/tests/test_cftime_offsets.py000066400000000000000000001540201505620616400224270ustar00rootroot00000000000000from __future__ import annotations import warnings from itertools import product, starmap from typing import TYPE_CHECKING, Literal import numpy as np import pandas as pd import pytest from xarray import CFTimeIndex from xarray.coding.cftime_offsets import ( _MONTH_ABBREVIATIONS, BaseCFTimeOffset, Day, Hour, Microsecond, Millisecond, Minute, MonthBegin, MonthEnd, QuarterBegin, QuarterEnd, Second, Tick, YearBegin, YearEnd, _legacy_to_new_freq, _new_to_legacy_freq, cftime_range, date_range, date_range_like, get_date_type, to_cftime_datetime, to_offset, ) from xarray.coding.frequencies import infer_freq from xarray.core.dataarray import DataArray from xarray.tests import ( _CFTIME_CALENDARS, assert_no_warnings, has_cftime, has_pandas_ge_2_2, requires_cftime, requires_pandas_3, ) cftime = pytest.importorskip("cftime") def _id_func(param): """Called on each parameter passed to pytest.mark.parametrize""" return str(param) @pytest.fixture(params=_CFTIME_CALENDARS) def calendar(request): return request.param @pytest.mark.parametrize( ("offset", "expected_n"), [ (BaseCFTimeOffset(), 1), (YearBegin(), 1), (YearEnd(), 1), (QuarterBegin(), 1), (QuarterEnd(), 1), (Tick(), 1), (Day(), 1), (Hour(), 1), (Minute(), 1), (Second(), 1), (Millisecond(), 1), (Microsecond(), 1), (BaseCFTimeOffset(n=2), 2), (YearBegin(n=2), 2), (YearEnd(n=2), 2), (QuarterBegin(n=2), 2), (QuarterEnd(n=2), 2), (Tick(n=2), 2), (Day(n=2), 2), (Hour(n=2), 2), (Minute(n=2), 2), (Second(n=2), 2), (Millisecond(n=2), 2), (Microsecond(n=2), 2), ], ids=_id_func, ) def test_cftime_offset_constructor_valid_n(offset, expected_n): assert offset.n == expected_n @pytest.mark.parametrize( ("offset", "invalid_n"), [ (BaseCFTimeOffset, 1.5), (YearBegin, 1.5), (YearEnd, 1.5), (QuarterBegin, 1.5), (QuarterEnd, 1.5), (MonthBegin, 1.5), (MonthEnd, 1.5), (Tick, 1.5), (Day, 1.5), (Hour, 1.5), (Minute, 1.5), (Second, 1.5), (Millisecond, 1.5), (Microsecond, 1.5), ], ids=_id_func, ) def test_cftime_offset_constructor_invalid_n(offset, invalid_n): with pytest.raises(TypeError): offset(n=invalid_n) @pytest.mark.parametrize( ("offset", "expected_month"), [ (YearBegin(), 1), (YearEnd(), 12), (YearBegin(month=5), 5), (YearEnd(month=5), 5), (QuarterBegin(), 3), (QuarterEnd(), 3), (QuarterBegin(month=5), 5), (QuarterEnd(month=5), 5), ], ids=_id_func, ) def test_year_offset_constructor_valid_month(offset, expected_month): assert offset.month == expected_month @pytest.mark.parametrize( ("offset", "invalid_month", "exception"), [ (YearBegin, 0, ValueError), (YearEnd, 0, ValueError), (YearBegin, 13, ValueError), (YearEnd, 13, ValueError), (YearBegin, 1.5, TypeError), (YearEnd, 1.5, TypeError), (QuarterBegin, 0, ValueError), (QuarterEnd, 0, ValueError), (QuarterBegin, 1.5, 
TypeError), (QuarterEnd, 1.5, TypeError), (QuarterBegin, 13, ValueError), (QuarterEnd, 13, ValueError), ], ids=_id_func, ) def test_year_offset_constructor_invalid_month(offset, invalid_month, exception): with pytest.raises(exception): offset(month=invalid_month) @pytest.mark.parametrize( ("offset", "expected"), [ (BaseCFTimeOffset(), None), (MonthBegin(), "MS"), (MonthEnd(), "ME"), (YearBegin(), "YS-JAN"), (YearEnd(), "YE-DEC"), (QuarterBegin(), "QS-MAR"), (QuarterEnd(), "QE-MAR"), (Day(), "D"), (Hour(), "h"), (Minute(), "min"), (Second(), "s"), (Millisecond(), "ms"), (Microsecond(), "us"), ], ids=_id_func, ) def test_rule_code(offset, expected): assert offset.rule_code() == expected @pytest.mark.parametrize( ("offset", "expected"), [ (BaseCFTimeOffset(), ""), (YearBegin(), ""), (QuarterBegin(), ""), ], ids=_id_func, ) def test_str_and_repr(offset, expected): assert str(offset) == expected assert repr(offset) == expected @pytest.mark.parametrize( "offset", [BaseCFTimeOffset(), MonthBegin(), QuarterBegin(), YearBegin()], ids=_id_func, ) def test_to_offset_offset_input(offset): assert to_offset(offset) == offset @pytest.mark.parametrize( ("freq", "expected"), [ ("M", MonthEnd()), ("2M", MonthEnd(n=2)), ("ME", MonthEnd()), ("2ME", MonthEnd(n=2)), ("MS", MonthBegin()), ("2MS", MonthBegin(n=2)), ("D", Day()), ("2D", Day(n=2)), ("H", Hour()), ("2H", Hour(n=2)), ("h", Hour()), ("2h", Hour(n=2)), ("T", Minute()), ("2T", Minute(n=2)), ("min", Minute()), ("2min", Minute(n=2)), ("S", Second()), ("2S", Second(n=2)), ("L", Millisecond(n=1)), ("2L", Millisecond(n=2)), ("ms", Millisecond(n=1)), ("2ms", Millisecond(n=2)), ("U", Microsecond(n=1)), ("2U", Microsecond(n=2)), ("us", Microsecond(n=1)), ("2us", Microsecond(n=2)), # negative ("-2M", MonthEnd(n=-2)), ("-2ME", MonthEnd(n=-2)), ("-2MS", MonthBegin(n=-2)), ("-2D", Day(n=-2)), ("-2H", Hour(n=-2)), ("-2h", Hour(n=-2)), ("-2T", Minute(n=-2)), ("-2min", Minute(n=-2)), ("-2S", Second(n=-2)), ("-2L", Millisecond(n=-2)), ("-2ms", Millisecond(n=-2)), ("-2U", Microsecond(n=-2)), ("-2us", Microsecond(n=-2)), ], ids=_id_func, ) @pytest.mark.filterwarnings("ignore::FutureWarning") # Deprecation of "M" etc. def test_to_offset_sub_annual(freq, expected): assert to_offset(freq) == expected _ANNUAL_OFFSET_TYPES = { "A": YearEnd, "AS": YearBegin, "Y": YearEnd, "YS": YearBegin, "YE": YearEnd, } @pytest.mark.parametrize( ("month_int", "month_label"), list(_MONTH_ABBREVIATIONS.items()) + [(0, "")] ) @pytest.mark.parametrize("multiple", [None, 2, -1]) @pytest.mark.parametrize("offset_str", ["AS", "A", "YS", "Y"]) @pytest.mark.filterwarnings("ignore::FutureWarning") # Deprecation of "A" etc. def test_to_offset_annual(month_label, month_int, multiple, offset_str): freq = offset_str offset_type = _ANNUAL_OFFSET_TYPES[offset_str] if month_label: freq = f"{freq}-{month_label}" if multiple: freq = f"{multiple}{freq}" result = to_offset(freq) if multiple and month_int: expected = offset_type(n=multiple, month=month_int) elif multiple: expected = offset_type(n=multiple) elif month_int: expected = offset_type(month=month_int) else: expected = offset_type() assert result == expected _QUARTER_OFFSET_TYPES = {"Q": QuarterEnd, "QS": QuarterBegin, "QE": QuarterEnd} @pytest.mark.parametrize( ("month_int", "month_label"), list(_MONTH_ABBREVIATIONS.items()) + [(0, "")] ) @pytest.mark.parametrize("multiple", [None, 2, -1]) @pytest.mark.parametrize("offset_str", ["QS", "Q", "QE"]) @pytest.mark.filterwarnings("ignore::FutureWarning") # Deprecation of "Q" etc. 
def test_to_offset_quarter(month_label, month_int, multiple, offset_str): freq = offset_str offset_type = _QUARTER_OFFSET_TYPES[offset_str] if month_label: freq = f"{freq}-{month_label}" if multiple: freq = f"{multiple}{freq}" result = to_offset(freq) if multiple and month_int: expected = offset_type(n=multiple, month=month_int) elif multiple: if month_int: expected = offset_type(n=multiple) elif offset_type == QuarterBegin: expected = offset_type(n=multiple, month=1) elif offset_type == QuarterEnd: expected = offset_type(n=multiple, month=12) elif month_int: expected = offset_type(month=month_int) elif offset_type == QuarterBegin: expected = offset_type(month=1) elif offset_type == QuarterEnd: expected = offset_type(month=12) assert result == expected @pytest.mark.parametrize("freq", ["Z", "7min2", "AM", "M-", "AS-", "QS-", "1H1min"]) def test_invalid_to_offset_str(freq): with pytest.raises(ValueError): to_offset(freq) @pytest.mark.parametrize( ("argument", "expected_date_args"), [("2000-01-01", (2000, 1, 1)), ((2000, 1, 1), (2000, 1, 1))], ids=_id_func, ) def test_to_cftime_datetime(calendar, argument, expected_date_args): date_type = get_date_type(calendar) expected = date_type(*expected_date_args) if isinstance(argument, tuple): argument = date_type(*argument) result = to_cftime_datetime(argument, calendar=calendar) assert result == expected def test_to_cftime_datetime_error_no_calendar(): with pytest.raises(ValueError): to_cftime_datetime("2000") def test_to_cftime_datetime_error_type_error(): with pytest.raises(TypeError): to_cftime_datetime(1) _EQ_TESTS_A = [ BaseCFTimeOffset(), YearBegin(), YearEnd(), YearBegin(month=2), YearEnd(month=2), QuarterBegin(), QuarterEnd(), QuarterBegin(month=2), QuarterEnd(month=2), MonthBegin(), MonthEnd(), Day(), Hour(), Minute(), Second(), Millisecond(), Microsecond(), ] _EQ_TESTS_B = [ BaseCFTimeOffset(n=2), YearBegin(n=2), YearEnd(n=2), YearBegin(n=2, month=2), YearEnd(n=2, month=2), QuarterBegin(n=2), QuarterEnd(n=2), QuarterBegin(n=2, month=2), QuarterEnd(n=2, month=2), MonthBegin(n=2), MonthEnd(n=2), Day(n=2), Hour(n=2), Minute(n=2), Second(n=2), Millisecond(n=2), Microsecond(n=2), ] @pytest.mark.parametrize(("a", "b"), product(_EQ_TESTS_A, _EQ_TESTS_B), ids=_id_func) def test_neq(a, b): assert a != b _EQ_TESTS_B_COPY = [ BaseCFTimeOffset(n=2), YearBegin(n=2), YearEnd(n=2), YearBegin(n=2, month=2), YearEnd(n=2, month=2), QuarterBegin(n=2), QuarterEnd(n=2), QuarterBegin(n=2, month=2), QuarterEnd(n=2, month=2), MonthBegin(n=2), MonthEnd(n=2), Day(n=2), Hour(n=2), Minute(n=2), Second(n=2), Millisecond(n=2), Microsecond(n=2), ] @pytest.mark.parametrize( ("a", "b"), zip(_EQ_TESTS_B, _EQ_TESTS_B_COPY, strict=True), ids=_id_func ) def test_eq(a, b): assert a == b _MUL_TESTS = [ (BaseCFTimeOffset(), 3, BaseCFTimeOffset(n=3)), (BaseCFTimeOffset(), -3, BaseCFTimeOffset(n=-3)), (YearEnd(), 3, YearEnd(n=3)), (YearBegin(), 3, YearBegin(n=3)), (QuarterEnd(), 3, QuarterEnd(n=3)), (QuarterBegin(), 3, QuarterBegin(n=3)), (MonthEnd(), 3, MonthEnd(n=3)), (MonthBegin(), 3, MonthBegin(n=3)), (Tick(), 3, Tick(n=3)), (Day(), 3, Day(n=3)), (Hour(), 3, Hour(n=3)), (Minute(), 3, Minute(n=3)), (Second(), 3, Second(n=3)), (Millisecond(), 3, Millisecond(n=3)), (Microsecond(), 3, Microsecond(n=3)), (Hour(), 0.5, Minute(n=30)), (Hour(), -0.5, Minute(n=-30)), (Minute(), 0.5, Second(n=30)), (Second(), 0.5, Millisecond(n=500)), (Millisecond(), 0.5, Microsecond(n=500)), ] @pytest.mark.parametrize(("offset", "multiple", "expected"), _MUL_TESTS, ids=_id_func) def test_mul(offset, 
multiple, expected): assert offset * multiple == expected @pytest.mark.parametrize(("offset", "multiple", "expected"), _MUL_TESTS, ids=_id_func) def test_rmul(offset, multiple, expected): assert multiple * offset == expected def test_mul_float_multiple_next_higher_resolution(): """Test more than one iteration through _next_higher_resolution is required.""" assert 1e-6 * Second() == Microsecond() assert 1e-6 / 60 * Minute() == Microsecond() @pytest.mark.parametrize( "offset", [ YearBegin(), YearEnd(), QuarterBegin(), QuarterEnd(), MonthBegin(), MonthEnd(), Day(), ], ids=_id_func, ) def test_nonTick_offset_multiplied_float_error(offset): """Test that the appropriate error is raised if a non-Tick offset is multiplied by a float.""" with pytest.raises(TypeError, match="unsupported operand type"): offset * 0.5 def test_Microsecond_multiplied_float_error(): """Test that the appropriate error is raised if a Tick offset is multiplied by a float which causes it not to be representable by a microsecond-precision timedelta.""" with pytest.raises( ValueError, match="Could not convert to integer offset at any resolution" ): Microsecond() * 0.5 @pytest.mark.parametrize( ("offset", "expected"), [ (BaseCFTimeOffset(), BaseCFTimeOffset(n=-1)), (YearEnd(), YearEnd(n=-1)), (YearBegin(), YearBegin(n=-1)), (QuarterEnd(), QuarterEnd(n=-1)), (QuarterBegin(), QuarterBegin(n=-1)), (MonthEnd(), MonthEnd(n=-1)), (MonthBegin(), MonthBegin(n=-1)), (Day(), Day(n=-1)), (Hour(), Hour(n=-1)), (Minute(), Minute(n=-1)), (Second(), Second(n=-1)), (Millisecond(), Millisecond(n=-1)), (Microsecond(), Microsecond(n=-1)), ], ids=_id_func, ) def test_neg(offset: BaseCFTimeOffset, expected: BaseCFTimeOffset) -> None: assert -offset == expected _ADD_TESTS = [ (Day(n=2), (1, 1, 3)), (Hour(n=2), (1, 1, 1, 2)), (Minute(n=2), (1, 1, 1, 0, 2)), (Second(n=2), (1, 1, 1, 0, 0, 2)), (Millisecond(n=2), (1, 1, 1, 0, 0, 0, 2000)), (Microsecond(n=2), (1, 1, 1, 0, 0, 0, 2)), ] @pytest.mark.parametrize(("offset", "expected_date_args"), _ADD_TESTS, ids=_id_func) def test_add_sub_monthly(offset, expected_date_args, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 1) expected = date_type(*expected_date_args) result = offset + initial assert result == expected def test_add_daily_offsets() -> None: offset = Day(n=2) expected = Day(n=4) result = offset + offset assert result == expected def test_subtract_daily_offsets() -> None: offset = Day(n=2) expected = Day(n=0) result = offset - offset assert result == expected @pytest.mark.parametrize(("offset", "expected_date_args"), _ADD_TESTS, ids=_id_func) def test_radd_sub_monthly(offset, expected_date_args, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 1) expected = date_type(*expected_date_args) result = initial + offset assert result == expected @pytest.mark.parametrize( ("offset", "expected_date_args"), [ (Day(n=2), (1, 1, 1)), (Hour(n=2), (1, 1, 2, 22)), (Minute(n=2), (1, 1, 2, 23, 58)), (Second(n=2), (1, 1, 2, 23, 59, 58)), (Millisecond(n=2), (1, 1, 2, 23, 59, 59, 998000)), (Microsecond(n=2), (1, 1, 2, 23, 59, 59, 999998)), ], ids=_id_func, ) def test_rsub_sub_monthly(offset, expected_date_args, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 3) expected = date_type(*expected_date_args) result = initial - offset assert result == expected @pytest.mark.parametrize("offset", _EQ_TESTS_A, ids=_id_func) def test_sub_error(offset, calendar): date_type = get_date_type(calendar) initial = date_type(1, 1, 1) with 
pytest.raises(TypeError): offset - initial @pytest.mark.parametrize( ("a", "b"), zip(_EQ_TESTS_A, _EQ_TESTS_B, strict=True), ids=_id_func ) def test_minus_offset(a, b): result = b - a expected = a assert result == expected @pytest.mark.parametrize( ("a", "b"), list(zip(np.roll(_EQ_TESTS_A, 1), _EQ_TESTS_B, strict=True)) # type: ignore[arg-type] + [(YearEnd(month=1), YearEnd(month=2))], ids=_id_func, ) def test_minus_offset_error(a, b): with pytest.raises(TypeError): b - a @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_date_args"), [ ((1, 1, 1), MonthBegin(), (1, 2, 1)), ((1, 1, 1), MonthBegin(n=2), (1, 3, 1)), ((1, 1, 7), MonthBegin(), (1, 2, 1)), ((1, 1, 7), MonthBegin(n=2), (1, 3, 1)), ((1, 3, 1), MonthBegin(n=-1), (1, 2, 1)), ((1, 3, 1), MonthBegin(n=-2), (1, 1, 1)), ((1, 3, 3), MonthBegin(n=-1), (1, 3, 1)), ((1, 3, 3), MonthBegin(n=-2), (1, 2, 1)), ((1, 2, 1), MonthBegin(n=14), (2, 4, 1)), ((2, 4, 1), MonthBegin(n=-14), (1, 2, 1)), ((1, 1, 1, 5, 5, 5, 5), MonthBegin(), (1, 2, 1, 5, 5, 5, 5)), ((1, 1, 3, 5, 5, 5, 5), MonthBegin(), (1, 2, 1, 5, 5, 5, 5)), ((1, 1, 3, 5, 5, 5, 5), MonthBegin(n=-1), (1, 1, 1, 5, 5, 5, 5)), ], ids=_id_func, ) def test_add_month_begin(calendar, initial_date_args, offset, expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_year_month", "expected_sub_day"), [ ((1, 1, 1), MonthEnd(), (1, 1), ()), ((1, 1, 1), MonthEnd(n=2), (1, 2), ()), ((1, 3, 1), MonthEnd(n=-1), (1, 2), ()), ((1, 3, 1), MonthEnd(n=-2), (1, 1), ()), ((1, 2, 1), MonthEnd(n=14), (2, 3), ()), ((2, 4, 1), MonthEnd(n=-14), (1, 2), ()), ((1, 1, 1, 5, 5, 5, 5), MonthEnd(), (1, 1), (5, 5, 5, 5)), ((1, 2, 1, 5, 5, 5, 5), MonthEnd(n=-1), (1, 1), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_month_end( calendar, initial_date_args, offset, expected_year_month, expected_sub_day ): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ( "initial_year_month", "initial_sub_day", "offset", "expected_year_month", "expected_sub_day", ), [ ((1, 1), (), MonthEnd(), (1, 2), ()), ((1, 1), (), MonthEnd(n=2), (1, 3), ()), ((1, 3), (), MonthEnd(n=-1), (1, 2), ()), ((1, 3), (), MonthEnd(n=-2), (1, 1), ()), ((1, 2), (), MonthEnd(n=14), (2, 4), ()), ((2, 4), (), MonthEnd(n=-14), (1, 2), ()), ((1, 1), (5, 5, 5, 5), MonthEnd(), (1, 2), (5, 5, 5, 5)), ((1, 2), (5, 5, 5, 5), MonthEnd(n=-1), (1, 1), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_month_end_onOffset( calendar, initial_year_month, initial_sub_day, offset, expected_year_month, expected_sub_day, ): date_type = get_date_type(calendar) reference_args = initial_year_month + (1,) reference = date_type(*reference_args) initial_date_args = initial_year_month + (reference.daysinmonth,) + initial_sub_day initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month 
+ (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_date_args"), [ ((1, 1, 1), YearBegin(), (2, 1, 1)), ((1, 1, 1), YearBegin(n=2), (3, 1, 1)), ((1, 1, 1), YearBegin(month=2), (1, 2, 1)), ((1, 1, 7), YearBegin(n=2), (3, 1, 1)), ((2, 2, 1), YearBegin(n=-1), (2, 1, 1)), ((1, 1, 2), YearBegin(n=-1), (1, 1, 1)), ((1, 1, 1, 5, 5, 5, 5), YearBegin(), (2, 1, 1, 5, 5, 5, 5)), ((2, 1, 1, 5, 5, 5, 5), YearBegin(n=-1), (1, 1, 1, 5, 5, 5, 5)), ], ids=_id_func, ) def test_add_year_begin(calendar, initial_date_args, offset, expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_year_month", "expected_sub_day"), [ ((1, 1, 1), YearEnd(), (1, 12), ()), ((1, 1, 1), YearEnd(n=2), (2, 12), ()), ((1, 1, 1), YearEnd(month=1), (1, 1), ()), ((2, 3, 1), YearEnd(n=-1), (1, 12), ()), ((1, 3, 1), YearEnd(n=-1, month=2), (1, 2), ()), ((1, 1, 1, 5, 5, 5, 5), YearEnd(), (1, 12), (5, 5, 5, 5)), ((1, 1, 1, 5, 5, 5, 5), YearEnd(n=2), (2, 12), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_year_end( calendar, initial_date_args, offset, expected_year_month, expected_sub_day ): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ( "initial_year_month", "initial_sub_day", "offset", "expected_year_month", "expected_sub_day", ), [ ((1, 12), (), YearEnd(), (2, 12), ()), ((1, 12), (), YearEnd(n=2), (3, 12), ()), ((2, 12), (), YearEnd(n=-1), (1, 12), ()), ((3, 12), (), YearEnd(n=-2), (1, 12), ()), ((1, 1), (), YearEnd(month=2), (1, 2), ()), ((1, 12), (5, 5, 5, 5), YearEnd(), (2, 12), (5, 5, 5, 5)), ((2, 12), (5, 5, 5, 5), YearEnd(n=-1), (1, 12), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_year_end_onOffset( calendar, initial_year_month, initial_sub_day, offset, expected_year_month, expected_sub_day, ): date_type = get_date_type(calendar) reference_args = initial_year_month + (1,) reference = date_type(*reference_args) initial_date_args = initial_year_month + (reference.daysinmonth,) + initial_sub_day initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_date_args"), [ ((1, 1, 1), QuarterBegin(), (1, 3, 1)), ((1, 1, 1), QuarterBegin(n=2), (1, 6, 1)), ((1, 1, 1), QuarterBegin(month=2), (1, 2, 1)), ((1, 1, 7), QuarterBegin(n=2), (1, 6, 1)), ((2, 2, 1), QuarterBegin(n=-1), (1, 12, 1)), ((1, 3, 2), QuarterBegin(n=-1), (1, 3, 1)), ((1, 1, 1, 5, 5, 5, 5), QuarterBegin(), (1, 3, 1, 5, 5, 5, 5)), ((2, 1, 1, 5, 5, 5, 5), QuarterBegin(n=-1), (1, 12, 1, 5, 5, 5, 5)), ], ids=_id_func, ) def test_add_quarter_begin(calendar, 
initial_date_args, offset, expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ("initial_date_args", "offset", "expected_year_month", "expected_sub_day"), [ ((1, 1, 1), QuarterEnd(), (1, 3), ()), ((1, 1, 1), QuarterEnd(n=2), (1, 6), ()), ((1, 1, 1), QuarterEnd(month=1), (1, 1), ()), ((2, 3, 1), QuarterEnd(n=-1), (1, 12), ()), ((1, 3, 1), QuarterEnd(n=-1, month=2), (1, 2), ()), ((1, 1, 1, 5, 5, 5, 5), QuarterEnd(), (1, 3), (5, 5, 5, 5)), ((1, 1, 1, 5, 5, 5, 5), QuarterEnd(n=2), (1, 6), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_quarter_end( calendar, initial_date_args, offset, expected_year_month, expected_sub_day ): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected @pytest.mark.parametrize( ( "initial_year_month", "initial_sub_day", "offset", "expected_year_month", "expected_sub_day", ), [ ((1, 12), (), QuarterEnd(), (2, 3), ()), ((1, 12), (), QuarterEnd(n=2), (2, 6), ()), ((1, 12), (), QuarterEnd(n=-1), (1, 9), ()), ((1, 12), (), QuarterEnd(n=-2), (1, 6), ()), ((1, 1), (), QuarterEnd(month=2), (1, 2), ()), ((1, 12), (5, 5, 5, 5), QuarterEnd(), (2, 3), (5, 5, 5, 5)), ((1, 12), (5, 5, 5, 5), QuarterEnd(n=-1), (1, 9), (5, 5, 5, 5)), ], ids=_id_func, ) def test_add_quarter_end_onOffset( calendar, initial_year_month, initial_sub_day, offset, expected_year_month, expected_sub_day, ): date_type = get_date_type(calendar) reference_args = initial_year_month + (1,) reference = date_type(*reference_args) initial_date_args = initial_year_month + (reference.daysinmonth,) + initial_sub_day initial = date_type(*initial_date_args) result = initial + offset reference_args = expected_year_month + (1,) reference = date_type(*reference_args) # Here the days at the end of each month varies based on the calendar used expected_date_args = ( expected_year_month + (reference.daysinmonth,) + expected_sub_day ) expected = date_type(*expected_date_args) assert result == expected # Note for all sub-monthly offsets, pandas always returns True for onOffset @pytest.mark.parametrize( ("date_args", "offset", "expected"), [ ((1, 1, 1), MonthBegin(), True), ((1, 1, 1, 1), MonthBegin(), True), ((1, 1, 5), MonthBegin(), False), ((1, 1, 5), MonthEnd(), False), ((1, 3, 1), QuarterBegin(), True), ((1, 3, 1, 1), QuarterBegin(), True), ((1, 3, 5), QuarterBegin(), False), ((1, 12, 1), QuarterEnd(), False), ((1, 1, 1), YearBegin(), True), ((1, 1, 1, 1), YearBegin(), True), ((1, 1, 5), YearBegin(), False), ((1, 12, 1), YearEnd(), False), ((1, 1, 1), Day(), True), ((1, 1, 1, 1), Day(), True), ((1, 1, 1), Hour(), True), ((1, 1, 1), Minute(), True), ((1, 1, 1), Second(), True), ((1, 1, 1), Millisecond(), True), ((1, 1, 1), Microsecond(), True), ], ids=_id_func, ) def test_onOffset(calendar, date_args, offset, expected): date_type = get_date_type(calendar) date = date_type(*date_args) result = offset.onOffset(date) assert result == expected @pytest.mark.parametrize( ("year_month_args", "sub_day_args", "offset"), [ ((1, 1), (), MonthEnd()), ((1, 1), (1,), MonthEnd()), ((1, 12), (), QuarterEnd()), ((1, 1), (), 
QuarterEnd(month=1)), ((1, 12), (), YearEnd()), ((1, 1), (), YearEnd(month=1)), ], ids=_id_func, ) def test_onOffset_month_or_quarter_or_year_end( calendar, year_month_args, sub_day_args, offset ): date_type = get_date_type(calendar) reference_args = year_month_args + (1,) reference = date_type(*reference_args) date_args = year_month_args + (reference.daysinmonth,) + sub_day_args date = date_type(*date_args) result = offset.onOffset(date) assert result @pytest.mark.parametrize( ("offset", "initial_date_args", "partial_expected_date_args"), [ (YearBegin(), (1, 3, 1), (2, 1)), (YearBegin(), (1, 1, 1), (1, 1)), (YearBegin(n=2), (1, 3, 1), (2, 1)), (YearBegin(n=2, month=2), (1, 3, 1), (2, 2)), (YearEnd(), (1, 3, 1), (1, 12)), (YearEnd(n=2), (1, 3, 1), (1, 12)), (YearEnd(n=2, month=2), (1, 3, 1), (2, 2)), (YearEnd(n=2, month=4), (1, 4, 30), (1, 4)), (QuarterBegin(), (1, 3, 2), (1, 6)), (QuarterBegin(), (1, 4, 1), (1, 6)), (QuarterBegin(n=2), (1, 4, 1), (1, 6)), (QuarterBegin(n=2, month=2), (1, 4, 1), (1, 5)), (QuarterEnd(), (1, 3, 1), (1, 3)), (QuarterEnd(n=2), (1, 3, 1), (1, 3)), (QuarterEnd(n=2, month=2), (1, 3, 1), (1, 5)), (QuarterEnd(n=2, month=4), (1, 4, 30), (1, 4)), (MonthBegin(), (1, 3, 2), (1, 4)), (MonthBegin(), (1, 3, 1), (1, 3)), (MonthBegin(n=2), (1, 3, 2), (1, 4)), (MonthEnd(), (1, 3, 2), (1, 3)), (MonthEnd(), (1, 4, 30), (1, 4)), (MonthEnd(n=2), (1, 3, 2), (1, 3)), (Day(), (1, 3, 2, 1), (1, 3, 2, 1)), (Hour(), (1, 3, 2, 1, 1), (1, 3, 2, 1, 1)), (Minute(), (1, 3, 2, 1, 1, 1), (1, 3, 2, 1, 1, 1)), (Second(), (1, 3, 2, 1, 1, 1, 1), (1, 3, 2, 1, 1, 1, 1)), (Millisecond(), (1, 3, 2, 1, 1, 1, 1000), (1, 3, 2, 1, 1, 1, 1000)), (Microsecond(), (1, 3, 2, 1, 1, 1, 1), (1, 3, 2, 1, 1, 1, 1)), ], ids=_id_func, ) def test_rollforward(calendar, offset, initial_date_args, partial_expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) if isinstance(offset, MonthBegin | QuarterBegin | YearBegin): expected_date_args = partial_expected_date_args + (1,) elif isinstance(offset, MonthEnd | QuarterEnd | YearEnd): reference_args = partial_expected_date_args + (1,) reference = date_type(*reference_args) expected_date_args = partial_expected_date_args + (reference.daysinmonth,) else: expected_date_args = partial_expected_date_args expected = date_type(*expected_date_args) result = offset.rollforward(initial) assert result == expected @pytest.mark.parametrize( ("offset", "initial_date_args", "partial_expected_date_args"), [ (YearBegin(), (1, 3, 1), (1, 1)), (YearBegin(n=2), (1, 3, 1), (1, 1)), (YearBegin(n=2, month=2), (1, 3, 1), (1, 2)), (YearBegin(), (1, 1, 1), (1, 1)), (YearBegin(n=2, month=2), (1, 2, 1), (1, 2)), (YearEnd(), (2, 3, 1), (1, 12)), (YearEnd(n=2), (2, 3, 1), (1, 12)), (YearEnd(n=2, month=2), (2, 3, 1), (2, 2)), (YearEnd(month=4), (1, 4, 30), (1, 4)), (QuarterBegin(), (1, 3, 2), (1, 3)), (QuarterBegin(), (1, 4, 1), (1, 3)), (QuarterBegin(n=2), (1, 4, 1), (1, 3)), (QuarterBegin(n=2, month=2), (1, 4, 1), (1, 2)), (QuarterEnd(), (2, 3, 1), (1, 12)), (QuarterEnd(n=2), (2, 3, 1), (1, 12)), (QuarterEnd(n=2, month=2), (2, 3, 1), (2, 2)), (QuarterEnd(n=2, month=4), (1, 4, 30), (1, 4)), (MonthBegin(), (1, 3, 2), (1, 3)), (MonthBegin(n=2), (1, 3, 2), (1, 3)), (MonthBegin(), (1, 3, 1), (1, 3)), (MonthEnd(), (1, 3, 2), (1, 2)), (MonthEnd(n=2), (1, 3, 2), (1, 2)), (MonthEnd(), (1, 4, 30), (1, 4)), (Day(), (1, 3, 2, 1), (1, 3, 2, 1)), (Hour(), (1, 3, 2, 1, 1), (1, 3, 2, 1, 1)), (Minute(), (1, 3, 2, 1, 1, 1), (1, 3, 2, 1, 1, 1)), (Second(), (1, 3, 2, 1, 1, 1, 1), 
(1, 3, 2, 1, 1, 1, 1)), (Millisecond(), (1, 3, 2, 1, 1, 1, 1000), (1, 3, 2, 1, 1, 1, 1000)), (Microsecond(), (1, 3, 2, 1, 1, 1, 1), (1, 3, 2, 1, 1, 1, 1)), ], ids=_id_func, ) def test_rollback(calendar, offset, initial_date_args, partial_expected_date_args): date_type = get_date_type(calendar) initial = date_type(*initial_date_args) if isinstance(offset, MonthBegin | QuarterBegin | YearBegin): expected_date_args = partial_expected_date_args + (1,) elif isinstance(offset, MonthEnd | QuarterEnd | YearEnd): reference_args = partial_expected_date_args + (1,) reference = date_type(*reference_args) expected_date_args = partial_expected_date_args + (reference.daysinmonth,) else: expected_date_args = partial_expected_date_args expected = date_type(*expected_date_args) result = offset.rollback(initial) assert result == expected _CFTIME_RANGE_TESTS = [ ( "0001-01-01", "0001-01-04", None, "D", "neither", False, [(1, 1, 2), (1, 1, 3)], ), ( "0001-01-01", "0001-01-04", None, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-01", "0001-01-04", None, "D", "left", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3)], ), ( "0001-01-01", "0001-01-04", None, "D", "right", False, [(1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-01T01:00:00", "0001-01-04", None, "D", "both", False, [(1, 1, 1, 1), (1, 1, 2, 1), (1, 1, 3, 1)], ), ( "0001-01-01 01:00:00", "0001-01-04", None, "D", "both", False, [(1, 1, 1, 1), (1, 1, 2, 1), (1, 1, 3, 1)], ), ( "0001-01-01T01:00:00", "0001-01-04", None, "D", "both", True, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-01", None, 4, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( None, "0001-01-04", 4, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( (1, 1, 1), "0001-01-04", None, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( (1, 1, 1), (1, 1, 4), None, "D", "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-01-30", "0011-02-01", None, "3YS-JUN", "both", False, [(1, 6, 1), (4, 6, 1), (7, 6, 1), (10, 6, 1)], ), ("0001-01-04", "0001-01-01", None, "D", "both", False, []), ( "0010", None, 4, YearBegin(n=-2), "both", False, [(10, 1, 1), (8, 1, 1), (6, 1, 1), (4, 1, 1)], ), ( "0010", None, 4, "-2YS", "both", False, [(10, 1, 1), (8, 1, 1), (6, 1, 1), (4, 1, 1)], ), ( "0001-01-01", "0001-01-04", 4, None, "both", False, [(1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 4)], ), ( "0001-06-01", None, 4, "3QS-JUN", "both", False, [(1, 6, 1), (2, 3, 1), (2, 12, 1), (3, 9, 1)], ), ( "0001-06-01", None, 4, "-1MS", "both", False, [(1, 6, 1), (1, 5, 1), (1, 4, 1), (1, 3, 1)], ), ( "0001-01-30", None, 4, "-1D", "both", False, [(1, 1, 30), (1, 1, 29), (1, 1, 28), (1, 1, 27)], ), ] @pytest.mark.parametrize( ("start", "end", "periods", "freq", "inclusive", "normalize", "expected_date_args"), _CFTIME_RANGE_TESTS, ids=_id_func, ) def test_cftime_range( start, end, periods, freq, inclusive, normalize, calendar, expected_date_args ): date_type = get_date_type(calendar) expected_dates = list(starmap(date_type, expected_date_args)) if isinstance(start, tuple): start = date_type(*start) if isinstance(end, tuple): end = date_type(*end) with pytest.warns(DeprecationWarning): result = cftime_range( start=start, end=end, periods=periods, freq=freq, inclusive=inclusive, normalize=normalize, calendar=calendar, ) resulting_dates = result.values assert isinstance(result, CFTimeIndex) if freq is not None: np.testing.assert_equal(resulting_dates, expected_dates) else: # If we create a linear range of dates 
using cftime.num2date # we will not get exact round number dates. This is because # datetime arithmetic in cftime is accurate approximately to # 1 millisecond (see https://unidata.github.io/cftime/api.html). deltas = resulting_dates - expected_dates deltas = np.array([delta.total_seconds() for delta in deltas]) assert np.max(np.abs(deltas)) < 0.001 def test_date_range_name(): result = date_range(start="2000", periods=4, name="foo") assert result.name == "foo" result = date_range(start="2000", periods=4) assert result.name is None @pytest.mark.parametrize( ("start", "end", "periods", "freq", "inclusive"), [ (None, None, 5, "YE", None), ("2000", None, None, "YE", None), (None, "2000", None, "YE", None), (None, None, None, None, None), ("2000", "2001", None, "YE", "up"), ("2000", "2001", 5, "YE", None), ], ) def test_invalid_date_range_cftime_inputs( start: str | None, end: str | None, periods: int | None, freq: str | None, inclusive: Literal["up"] | None, ) -> None: with pytest.raises(ValueError): date_range(start, end, periods, freq, inclusive=inclusive, use_cftime=True) # type: ignore[arg-type] _CALENDAR_SPECIFIC_MONTH_END_TESTS = [ ("noleap", [(2, 28), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("all_leap", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("360_day", [(2, 30), (4, 30), (6, 30), (8, 30), (10, 30), (12, 30)]), ("standard", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("gregorian", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ("julian", [(2, 29), (4, 30), (6, 30), (8, 31), (10, 31), (12, 31)]), ] @pytest.mark.parametrize( ("calendar", "expected_month_day"), _CALENDAR_SPECIFIC_MONTH_END_TESTS, ids=_id_func, ) def test_calendar_specific_month_end( calendar: str, expected_month_day: list[tuple[int, int]] ) -> None: year = 2000 # Use a leap-year to highlight calendar differences date_type = get_date_type(calendar) expected = [date_type(year, *args) for args in expected_month_day] result = date_range( start="2000-02", end="2001", freq="2ME", calendar=calendar, use_cftime=True, ).values np.testing.assert_equal(result, expected) @pytest.mark.parametrize( ("calendar", "expected_month_day"), _CALENDAR_SPECIFIC_MONTH_END_TESTS, ids=_id_func, ) def test_calendar_specific_month_end_negative_freq( calendar: str, expected_month_day: list[tuple[int, int]] ) -> None: year = 2000 # Use a leap-year to highlight calendar differences date_type = get_date_type(calendar) expected = [date_type(year, *args) for args in expected_month_day[::-1]] result = date_range( start="2001", end="2000", freq="-2ME", calendar=calendar, use_cftime=True ).values np.testing.assert_equal(result, expected) @pytest.mark.parametrize( ("calendar", "start", "end", "expected_number_of_days"), [ ("noleap", "2000", "2001", 365), ("all_leap", "2000", "2001", 366), ("360_day", "2000", "2001", 360), ("standard", "2000", "2001", 366), ("gregorian", "2000", "2001", 366), ("julian", "2000", "2001", 366), ("noleap", "2001", "2002", 365), ("all_leap", "2001", "2002", 366), ("360_day", "2001", "2002", 360), ("standard", "2001", "2002", 365), ("gregorian", "2001", "2002", 365), ("julian", "2001", "2002", 365), ], ) def test_calendar_year_length( calendar: str, start: str, end: str, expected_number_of_days: int ) -> None: result = date_range( start, end, freq="D", inclusive="left", calendar=calendar, use_cftime=True ) assert len(result) == expected_number_of_days @pytest.mark.parametrize("freq", ["YE", "ME", "D"]) def test_dayofweek_after_cftime(freq: str) -> None: result = 
date_range("2000-02-01", periods=3, freq=freq, use_cftime=True).dayofweek # TODO: remove once requiring pandas 2.2+ freq = _new_to_legacy_freq(freq) expected = pd.date_range("2000-02-01", periods=3, freq=freq).dayofweek np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize("freq", ["YE", "ME", "D"]) def test_dayofyear_after_cftime(freq: str) -> None: result = date_range("2000-02-01", periods=3, freq=freq, use_cftime=True).dayofyear # TODO: remove once requiring pandas 2.2+ freq = _new_to_legacy_freq(freq) expected = pd.date_range("2000-02-01", periods=3, freq=freq).dayofyear np.testing.assert_array_equal(result, expected) def test_cftime_range_standard_calendar_refers_to_gregorian() -> None: from cftime import DatetimeGregorian (result,) = date_range("2000", periods=1, use_cftime=True) assert isinstance(result, DatetimeGregorian) @pytest.mark.parametrize( "start,calendar,use_cftime,expected_type", [ ("1990-01-01", "standard", None, pd.DatetimeIndex), ("1990-01-01", "proleptic_gregorian", True, CFTimeIndex), ("1990-01-01", "noleap", None, CFTimeIndex), ("1990-01-01", "gregorian", False, pd.DatetimeIndex), ("1400-01-01", "standard", None, CFTimeIndex), ("3400-01-01", "standard", None, CFTimeIndex), ], ) def test_date_range( start: str, calendar: str, use_cftime: bool | None, expected_type ) -> None: dr = date_range( start, periods=14, freq="D", calendar=calendar, use_cftime=use_cftime ) assert isinstance(dr, expected_type) def test_date_range_errors() -> None: with pytest.raises(ValueError, match="Date range is invalid"): date_range( "1400-01-01", periods=1, freq="D", calendar="standard", use_cftime=False ) with pytest.raises(ValueError, match="Date range is invalid"): date_range( "2480-01-01", periods=1, freq="D", calendar="proleptic_gregorian", use_cftime=False, ) with pytest.raises(ValueError, match="Invalid calendar "): date_range( "1900-01-01", periods=1, freq="D", calendar="noleap", use_cftime=False ) @requires_cftime @pytest.mark.parametrize( "start,freq,cal_src,cal_tgt,use_cftime,exp0,exp_pd", [ ("2020-02-01", "4ME", "standard", "noleap", None, "2020-02-28", False), ("2020-02-01", "ME", "noleap", "gregorian", True, "2020-02-29", True), ("2020-02-01", "QE-DEC", "noleap", "gregorian", True, "2020-03-31", True), ("2020-02-01", "YS-FEB", "noleap", "gregorian", True, "2020-02-01", True), ("2020-02-01", "YE-FEB", "noleap", "gregorian", True, "2020-02-29", True), ("2020-02-01", "-1YE-FEB", "noleap", "gregorian", True, "2019-02-28", True), ("2020-02-28", "3h", "all_leap", "gregorian", False, "2020-02-28", True), ("2020-03-30", "ME", "360_day", "gregorian", False, "2020-03-31", True), ("2020-03-31", "ME", "gregorian", "360_day", None, "2020-03-30", False), ("2020-03-31", "-1ME", "gregorian", "360_day", None, "2020-03-30", False), ], ) def test_date_range_like(start, freq, cal_src, cal_tgt, use_cftime, exp0, exp_pd): expected_freq = freq source = date_range(start, periods=12, freq=freq, calendar=cal_src) out = date_range_like(source, cal_tgt, use_cftime=use_cftime) assert len(out) == 12 assert infer_freq(out) == expected_freq assert out[0].isoformat().startswith(exp0) if exp_pd: assert isinstance(out, pd.DatetimeIndex) else: assert isinstance(out, CFTimeIndex) assert out.calendar == cal_tgt @requires_cftime @pytest.mark.parametrize( "freq", ("YE", "YS", "YE-MAY", "MS", "ME", "QS", "h", "min", "s") ) @pytest.mark.parametrize("use_cftime", (True, False)) def test_date_range_like_no_deprecation(freq, use_cftime): # ensure no internal warnings # TODO: remove once freq string 
deprecation is finished
    source = date_range("2000", periods=3, freq=freq, use_cftime=False)
    with assert_no_warnings():
        date_range_like(source, "standard", use_cftime=use_cftime)


def test_date_range_like_same_calendar():
    src = date_range("2000-01-01", periods=12, freq="6h", use_cftime=False)
    out = date_range_like(src, "standard", use_cftime=False)
    assert src is out


@pytest.mark.filterwarnings("ignore:Converting non-default")
def test_date_range_like_errors():
    src = date_range("1899-02-03", periods=20, freq="D", use_cftime=False)
    src = src[np.arange(20) != 10]  # Remove 1 day so the frequency is not inferable.

    with pytest.raises(
        ValueError,
        match="`date_range_like` was unable to generate a range as the source frequency was not inferable.",
    ):
        date_range_like(src, "gregorian")

    src = DataArray(
        np.array(
            [["1999-01-01", "1999-01-02"], ["1999-01-03", "1999-01-04"]],
            dtype=np.datetime64,
        ),
        dims=("x", "y"),
    )
    with pytest.raises(
        ValueError,
        match="'source' must be a 1D array of datetime objects for inferring its range.",
    ):
        date_range_like(src, "noleap")

    da = DataArray([1, 2, 3, 4], dims=("time",))
    with pytest.raises(
        ValueError,
        match="'source' must be a 1D array of datetime objects for inferring its range.",
    ):
        date_range_like(da, "noleap")


def test_as_timedelta_not_implemented_error():
    tick = Tick()
    with pytest.raises(NotImplementedError):
        tick.as_timedelta()


@pytest.mark.parametrize("use_cftime", [True, False])
def test_cftime_or_date_range_invalid_inclusive_value(use_cftime: bool) -> None:
    if use_cftime and not has_cftime:
        pytest.skip("requires cftime")

    if TYPE_CHECKING:
        pytest.skip("inclusive type checked internally")

    with pytest.raises(ValueError, match="nclusive"):
        date_range("2000", periods=3, inclusive="foo", use_cftime=use_cftime)


@pytest.mark.parametrize("use_cftime", [True, False])
def test_cftime_or_date_range_inclusive_None(use_cftime: bool) -> None:
    if use_cftime and not has_cftime:
        pytest.skip("requires cftime")

    result_None = date_range("2000-01-01", "2000-01-04", use_cftime=use_cftime)
    result_both = date_range(
        "2000-01-01", "2000-01-04", inclusive="both", use_cftime=use_cftime
    )
    np.testing.assert_equal(result_None.values, result_both.values)


@pytest.mark.parametrize(
    "freq", ["A", "AS", "Q", "M", "H", "T", "S", "L", "U", "Y", "A-MAY"]
)
def test_to_offset_deprecation_warning(freq):
    # Test for deprecations outlined in GitHub issue #8394
    with pytest.warns(FutureWarning, match="is deprecated"):
        to_offset(freq)


@pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2")
@pytest.mark.parametrize(
    "freq, expected",
    (
        ["Y", "YE"],
        ["A", "YE"],
        ["Q", "QE"],
        ["M", "ME"],
        ["AS", "YS"],
        ["YE", "YE"],
        ["QE", "QE"],
        ["ME", "ME"],
        ["YS", "YS"],
    ),
)
@pytest.mark.parametrize("n", ("", "2"))
def test_legacy_to_new_freq(freq, expected, n):
    freq = f"{n}{freq}"
    result = _legacy_to_new_freq(freq)
    expected = f"{n}{expected}"
    assert result == expected


@pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2")
@pytest.mark.parametrize("year_alias", ("YE", "Y", "A"))
@pytest.mark.parametrize("n", ("", "2"))
def test_legacy_to_new_freq_anchored(year_alias, n):
    for month in _MONTH_ABBREVIATIONS.values():
        freq = f"{n}{year_alias}-{month}"
        result = _legacy_to_new_freq(freq)
        expected = f"{n}YE-{month}"
        assert result == expected


@pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2")
@pytest.mark.filterwarnings("ignore:'[AY]' is deprecated")
@pytest.mark.parametrize(
    "freq, expected",
    (["A", "A"], ["YE", "A"], ["Y", "A"], ["QE", "Q"], ["ME",
"M"], ["YS", "AS"]), ) @pytest.mark.parametrize("n", ("", "2")) def test_new_to_legacy_freq(freq, expected, n): freq = f"{n}{freq}" result = _new_to_legacy_freq(freq) expected = f"{n}{expected}" assert result == expected @pytest.mark.skipif(has_pandas_ge_2_2, reason="only relevant for pandas lt 2.2") @pytest.mark.filterwarnings("ignore:'[AY]-.{3}' is deprecated") @pytest.mark.parametrize("year_alias", ("A", "Y", "YE")) @pytest.mark.parametrize("n", ("", "2")) def test_new_to_legacy_freq_anchored(year_alias, n): for month in _MONTH_ABBREVIATIONS.values(): freq = f"{n}{year_alias}-{month}" result = _new_to_legacy_freq(freq) expected = f"{n}A-{month}" assert result == expected @pytest.mark.skipif(has_pandas_ge_2_2, reason="only for pandas lt 2.2") @pytest.mark.parametrize( "freq, expected", ( # pandas-only freq strings are passed through ("BH", "BH"), ("CBH", "CBH"), ("N", "N"), ), ) def test_legacy_to_new_freq_pd_freq_passthrough(freq, expected): result = _legacy_to_new_freq(freq) assert result == expected @pytest.mark.filterwarnings("ignore:'.' is deprecated ") @pytest.mark.skipif(has_pandas_ge_2_2, reason="only for pandas lt 2.2") @pytest.mark.parametrize( "freq, expected", ( # these are each valid in pandas lt 2.2 ("T", "T"), ("min", "min"), ("S", "S"), ("s", "s"), ("L", "L"), ("ms", "ms"), ("U", "U"), ("us", "us"), # pandas-only freq strings are passed through ("bh", "bh"), ("cbh", "cbh"), ("ns", "ns"), ), ) def test_new_to_legacy_freq_pd_freq_passthrough(freq, expected): result = _new_to_legacy_freq(freq) assert result == expected @pytest.mark.filterwarnings("ignore:Converting a CFTimeIndex with:") @pytest.mark.parametrize("start", ("2000", "2001")) @pytest.mark.parametrize("end", ("2000", "2001")) @pytest.mark.parametrize( "freq", ( "MS", pytest.param("-1MS", marks=requires_pandas_3), "YS", pytest.param("-1YS", marks=requires_pandas_3), "ME", pytest.param("-1ME", marks=requires_pandas_3), "YE", pytest.param("-1YE", marks=requires_pandas_3), ), ) def test_cftime_range_same_as_pandas(start, end, freq) -> None: result = date_range(start, end, freq=freq, calendar="standard", use_cftime=True) result = result.to_datetimeindex(time_unit="ns") expected = date_range(start, end, freq=freq, use_cftime=False) np.testing.assert_array_equal(result, expected) @pytest.mark.filterwarnings("ignore:Converting a CFTimeIndex with:") @pytest.mark.parametrize( "start, end, periods", [ ("2022-01-01", "2022-01-10", 2), ("2022-03-01", "2022-03-31", 2), ("2022-01-01", "2022-01-10", None), ("2022-03-01", "2022-03-31", None), ], ) def test_cftime_range_no_freq(start, end, periods): """ Test whether date_range produces the same result as Pandas when freq is not provided, but start, end and periods are. """ # Generate date ranges using cftime_range cftimeindex = date_range(start=start, end=end, periods=periods, use_cftime=True) result = cftimeindex.to_datetimeindex(time_unit="ns") expected = pd.date_range(start=start, end=end, periods=periods) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize( "start, end, periods", [ ("2022-01-01", "2022-01-10", 2), ("2022-03-01", "2022-03-31", 2), ("2022-01-01", "2022-01-10", None), ("2022-03-01", "2022-03-31", None), ], ) def test_date_range_no_freq(start, end, periods): """ Test whether date_range produces the same result as Pandas when freq is not provided, but start, end and periods are. 
""" # Generate date ranges using date_range result = date_range(start=start, end=end, periods=periods) expected = pd.date_range(start=start, end=end, periods=periods) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize( "offset", [ MonthBegin(n=1), MonthEnd(n=1), QuarterBegin(n=1), QuarterEnd(n=1), YearBegin(n=1), YearEnd(n=1), ], ids=lambda x: f"{x}", ) @pytest.mark.parametrize("has_year_zero", [False, True]) def test_offset_addition_preserves_has_year_zero(offset, has_year_zero): with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="this date/calendar/year zero") datetime = cftime.DatetimeGregorian(-1, 12, 31, has_year_zero=has_year_zero) result = datetime + offset assert result.has_year_zero == datetime.has_year_zero if has_year_zero: assert result.year == 0 else: assert result.year == 1 @pytest.mark.parametrize( "offset", [ MonthBegin(n=1), MonthEnd(n=1), QuarterBegin(n=1), QuarterEnd(n=1), YearBegin(n=1), YearEnd(n=1), ], ids=lambda x: f"{x}", ) @pytest.mark.parametrize("has_year_zero", [False, True]) def test_offset_subtraction_preserves_has_year_zero(offset, has_year_zero): datetime = cftime.DatetimeGregorian(1, 1, 1, has_year_zero=has_year_zero) result = datetime - offset assert result.has_year_zero == datetime.has_year_zero if has_year_zero: assert result.year == 0 else: assert result.year == -1 @pytest.mark.parametrize("has_year_zero", [False, True]) def test_offset_day_option_end_accounts_for_has_year_zero(has_year_zero): offset = MonthEnd(n=1) with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="this date/calendar/year zero") datetime = cftime.DatetimeGregorian(-1, 1, 31, has_year_zero=has_year_zero) result = datetime + offset assert result.has_year_zero == datetime.has_year_zero if has_year_zero: assert result.day == 28 else: assert result.day == 29 xarray-2025.09.0/xarray/tests/test_cftimeindex.py000066400000000000000000001305731505620616400217350ustar00rootroot00000000000000from __future__ import annotations import pickle from datetime import timedelta from textwrap import dedent import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding.cftimeindex import ( CFTimeIndex, _parse_array_of_cftime_strings, _parsed_string_to_bounds, assert_all_valid_date_type, ) from xarray.coding.times import ( _parse_iso8601, parse_iso8601_like, ) from xarray.core.types import PDDatetimeUnitOptions from xarray.tests import ( _ALL_CALENDARS, _NON_STANDARD_CALENDAR_NAMES, _all_cftime_date_types, assert_array_equal, assert_identical, has_cftime, requires_cftime, ) # cftime 1.5.2 renames "gregorian" to "standard" standard_or_gregorian = "" if has_cftime: standard_or_gregorian = "standard" def date_dict( year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, ): return dict( year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond, ) ISO8601_LIKE_STRING_TESTS = { "year": ("1999", date_dict(year="1999")), "month": ("199901", date_dict(year="1999", month="01")), "month-dash": ("1999-01", date_dict(year="1999", month="01")), "day": ("19990101", date_dict(year="1999", month="01", day="01")), "day-dash": ("1999-01-01", date_dict(year="1999", month="01", day="01")), "hour": ("19990101T12", date_dict(year="1999", month="01", day="01", hour="12")), "hour-dash": ( "1999-01-01T12", date_dict(year="1999", month="01", day="01", hour="12"), ), "hour-space-separator": ( "1999-01-01 12", date_dict(year="1999", month="01", day="01", hour="12"), ), 
"minute": ( "19990101T1234", date_dict(year="1999", month="01", day="01", hour="12", minute="34"), ), "minute-dash": ( "1999-01-01T12:34", date_dict(year="1999", month="01", day="01", hour="12", minute="34"), ), "minute-space-separator": ( "1999-01-01 12:34", date_dict(year="1999", month="01", day="01", hour="12", minute="34"), ), "second": ( "19990101T123456", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56" ), ), "second-dash": ( "1999-01-01T12:34:56", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56" ), ), "second-space-separator": ( "1999-01-01 12:34:56", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56" ), ), "microsecond-1": ( "19990101T123456.123456", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56", microsecond="123456", ), ), "microsecond-2": ( "19990101T123456.1", date_dict( year="1999", month="01", day="01", hour="12", minute="34", second="56", microsecond="1", ), ), } @pytest.mark.parametrize( ("string", "expected"), list(ISO8601_LIKE_STRING_TESTS.values()), ids=list(ISO8601_LIKE_STRING_TESTS.keys()), ) @pytest.mark.parametrize( "five_digit_year", [False, True], ids=["four-digit-year", "five-digit-year"] ) @pytest.mark.parametrize("sign", ["", "+", "-"], ids=["None", "plus", "minus"]) def test_parse_iso8601_like( five_digit_year: bool, sign: str, string: str, expected: dict ) -> None: pre = "1" if five_digit_year else "" datestring = sign + pre + string result = parse_iso8601_like(datestring) expected = expected.copy() expected.update(year=sign + pre + expected["year"]) assert result == expected # check malformed single digit addendum # this check is only performed when we have at least "hour" given # like "1999010101", where a single added digit should raise # for "1999" (year), "199901" (month) and "19990101" (day) # and a single added digit the string would just be interpreted # as having a 5-digit year. 
if result["microsecond"] is None and result["hour"] is not None: with pytest.raises(ValueError): parse_iso8601_like(datestring + "3") # check malformed floating point addendum if result["second"] is None or result["microsecond"] is not None: with pytest.raises(ValueError): parse_iso8601_like(datestring + ".3") _CFTIME_CALENDARS = [ "365_day", "360_day", "julian", "all_leap", "366_day", "gregorian", "proleptic_gregorian", ] @pytest.fixture(params=_CFTIME_CALENDARS) def date_type(request): return _all_cftime_date_types()[request.param] @pytest.fixture def index(date_type): dates = [ date_type(1, 1, 1), date_type(1, 2, 1), date_type(2, 1, 1), date_type(2, 2, 1), ] return CFTimeIndex(dates) @pytest.fixture def monotonic_decreasing_index(date_type): dates = [ date_type(2, 2, 1), date_type(2, 1, 1), date_type(1, 2, 1), date_type(1, 1, 1), ] return CFTimeIndex(dates) @pytest.fixture def length_one_index(date_type): dates = [date_type(1, 1, 1)] return CFTimeIndex(dates) @pytest.fixture def da(index): return xr.DataArray([1, 2, 3, 4], coords=[index], dims=["time"]) @pytest.fixture def series(index): return pd.Series([1, 2, 3, 4], index=index) @pytest.fixture def df(index): return pd.DataFrame([1, 2, 3, 4], index=index) @pytest.fixture def feb_days(date_type): import cftime if date_type is cftime.DatetimeAllLeap: return 29 elif date_type is cftime.Datetime360Day: return 30 else: return 28 @pytest.fixture def dec_days(date_type): import cftime if date_type is cftime.Datetime360Day: return 30 else: return 31 @pytest.fixture def index_with_name(date_type): dates = [ date_type(1, 1, 1), date_type(1, 2, 1), date_type(2, 1, 1), date_type(2, 2, 1), ] return CFTimeIndex(dates, name="foo") @requires_cftime @pytest.mark.parametrize(("name", "expected_name"), [("bar", "bar"), (None, "foo")]) def test_constructor_with_name(index_with_name, name, expected_name): result = CFTimeIndex(index_with_name, name=name).name assert result == expected_name @requires_cftime def test_assert_all_valid_date_type(date_type, index): import cftime if date_type is cftime.DatetimeNoLeap: mixed_date_types = np.array( [date_type(1, 1, 1), cftime.DatetimeAllLeap(1, 2, 1)] ) else: mixed_date_types = np.array( [date_type(1, 1, 1), cftime.DatetimeNoLeap(1, 2, 1)] ) with pytest.raises(TypeError): assert_all_valid_date_type(mixed_date_types) with pytest.raises(TypeError): assert_all_valid_date_type(np.array([1, date_type(1, 1, 1)])) assert_all_valid_date_type(np.array([date_type(1, 1, 1), date_type(1, 2, 1)])) @requires_cftime @pytest.mark.parametrize( ("field", "expected"), [ ("year", [1, 1, 2, 2]), ("month", [1, 2, 1, 2]), ("day", [1, 1, 1, 1]), ("hour", [0, 0, 0, 0]), ("minute", [0, 0, 0, 0]), ("second", [0, 0, 0, 0]), ("microsecond", [0, 0, 0, 0]), ], ) def test_cftimeindex_field_accessors(index, field, expected): result = getattr(index, field) expected = np.array(expected, dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize( ("field"), [ "year", "month", "day", "hour", "minute", "second", "microsecond", "dayofyear", "dayofweek", "days_in_month", ], ) def test_empty_cftimeindex_field_accessors(field): index = CFTimeIndex([]) result = getattr(index, field) expected = np.array([], dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime def test_cftimeindex_dayofyear_accessor(index): result = index.dayofyear expected = np.array([date.dayofyr for date in index], dtype=np.int64) assert_array_equal(result, 
expected) assert result.dtype == expected.dtype @requires_cftime def test_cftimeindex_dayofweek_accessor(index): result = index.dayofweek expected = np.array([date.dayofwk for date in index], dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime def test_cftimeindex_days_in_month_accessor(index): result = index.days_in_month expected = np.array([date.daysinmonth for date in index], dtype=np.int64) assert_array_equal(result, expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize( ("string", "date_args", "reso"), [ ("1999", (1999, 1, 1), "year"), ("199902", (1999, 2, 1), "month"), ("19990202", (1999, 2, 2), "day"), ("19990202T01", (1999, 2, 2, 1), "hour"), ("19990202T0101", (1999, 2, 2, 1, 1), "minute"), ("19990202T010156", (1999, 2, 2, 1, 1, 56), "second"), ("19990202T010156.123456", (1999, 2, 2, 1, 1, 56, 123456), "microsecond"), ], ) def test_parse_iso8601_with_reso(date_type, string, date_args, reso): expected_date = date_type(*date_args) expected_reso = reso result_date, result_reso = _parse_iso8601(date_type, string) assert result_date == expected_date assert result_reso == expected_reso @requires_cftime def test_parse_string_to_bounds_year(date_type, dec_days): parsed = date_type(2, 2, 10, 6, 2, 8, 1) expected_start = date_type(2, 1, 1) expected_end = date_type(2, 12, dec_days, 23, 59, 59, 999999) result_start, result_end = _parsed_string_to_bounds(date_type, "year", parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime def test_parse_string_to_bounds_month_feb(date_type, feb_days): parsed = date_type(2, 2, 10, 6, 2, 8, 1) expected_start = date_type(2, 2, 1) expected_end = date_type(2, 2, feb_days, 23, 59, 59, 999999) result_start, result_end = _parsed_string_to_bounds(date_type, "month", parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime def test_parse_string_to_bounds_month_dec(date_type, dec_days): parsed = date_type(2, 12, 1) expected_start = date_type(2, 12, 1) expected_end = date_type(2, 12, dec_days, 23, 59, 59, 999999) result_start, result_end = _parsed_string_to_bounds(date_type, "month", parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime @pytest.mark.parametrize( ("reso", "ex_start_args", "ex_end_args"), [ ("day", (2, 2, 10), (2, 2, 10, 23, 59, 59, 999999)), ("hour", (2, 2, 10, 6), (2, 2, 10, 6, 59, 59, 999999)), ("minute", (2, 2, 10, 6, 2), (2, 2, 10, 6, 2, 59, 999999)), ("second", (2, 2, 10, 6, 2, 8), (2, 2, 10, 6, 2, 8, 999999)), ], ) def test_parsed_string_to_bounds_sub_monthly( date_type, reso, ex_start_args, ex_end_args ): parsed = date_type(2, 2, 10, 6, 2, 8, 123456) expected_start = date_type(*ex_start_args) expected_end = date_type(*ex_end_args) result_start, result_end = _parsed_string_to_bounds(date_type, reso, parsed) assert result_start == expected_start assert result_end == expected_end @requires_cftime def test_parsed_string_to_bounds_raises(date_type): with pytest.raises(KeyError): _parsed_string_to_bounds(date_type, "a", date_type(1, 1, 1)) @requires_cftime def test_get_loc(date_type, index): result = index.get_loc("0001") assert result == slice(0, 2) result = index.get_loc(date_type(1, 2, 1)) assert result == 1 result = index.get_loc("0001-02-01") assert result == slice(1, 2) with pytest.raises(KeyError, match=r"1234"): index.get_loc("1234") @requires_cftime def test_get_slice_bound(date_type, index): result = 
index.get_slice_bound("0001", "left") expected = 0 assert result == expected result = index.get_slice_bound("0001", "right") expected = 2 assert result == expected result = index.get_slice_bound(date_type(1, 3, 1), "left") expected = 2 assert result == expected result = index.get_slice_bound(date_type(1, 3, 1), "right") expected = 2 assert result == expected @requires_cftime def test_get_slice_bound_decreasing_index(date_type, monotonic_decreasing_index): result = monotonic_decreasing_index.get_slice_bound("0001", "left") expected = 2 assert result == expected result = monotonic_decreasing_index.get_slice_bound("0001", "right") expected = 4 assert result == expected result = monotonic_decreasing_index.get_slice_bound(date_type(1, 3, 1), "left") expected = 2 assert result == expected result = monotonic_decreasing_index.get_slice_bound(date_type(1, 3, 1), "right") expected = 2 assert result == expected @requires_cftime def test_get_slice_bound_length_one_index(date_type, length_one_index): result = length_one_index.get_slice_bound("0001", "left") expected = 0 assert result == expected result = length_one_index.get_slice_bound("0001", "right") expected = 1 assert result == expected result = length_one_index.get_slice_bound(date_type(1, 3, 1), "left") expected = 1 assert result == expected result = length_one_index.get_slice_bound(date_type(1, 3, 1), "right") expected = 1 assert result == expected @requires_cftime def test_string_slice_length_one_index(length_one_index): da = xr.DataArray([1], coords=[length_one_index], dims=["time"]) result = da.sel(time=slice("0001", "0001")) assert_identical(result, da) @requires_cftime def test_date_type_property(date_type, index): assert index.date_type is date_type @requires_cftime def test_contains(date_type, index): assert "0001-01-01" in index assert "0001" in index assert "0003" not in index assert date_type(1, 1, 1) in index assert date_type(3, 1, 1) not in index @requires_cftime def test_groupby(da): result = da.groupby("time.month").sum("time") expected = xr.DataArray([4, 6], coords=[[1, 2]], dims=["month"]) assert_identical(result, expected) SEL_STRING_OR_LIST_TESTS = { "string": "0001", "string-slice": slice("0001-01-01", "0001-12-30"), "bool-list": [True, True, False, False], } @requires_cftime @pytest.mark.parametrize( "sel_arg", list(SEL_STRING_OR_LIST_TESTS.values()), ids=list(SEL_STRING_OR_LIST_TESTS.keys()), ) def test_sel_string_or_list(da, index, sel_arg): expected = xr.DataArray([1, 2], coords=[index[:2]], dims=["time"]) result = da.sel(time=sel_arg) assert_identical(result, expected) @requires_cftime def test_sel_date_slice_or_list(da, index, date_type): expected = xr.DataArray([1, 2], coords=[index[:2]], dims=["time"]) result = da.sel(time=slice(date_type(1, 1, 1), date_type(1, 12, 30))) assert_identical(result, expected) result = da.sel(time=[date_type(1, 1, 1), date_type(1, 2, 1)]) assert_identical(result, expected) @requires_cftime def test_sel_date_scalar(da, date_type, index): expected = xr.DataArray(1).assign_coords(time=index[0]) result = da.sel(time=date_type(1, 1, 1)) assert_identical(result, expected) @requires_cftime def test_sel_date_distant_date(da, date_type, index): expected = xr.DataArray(4).assign_coords(time=index[3]) result = da.sel(time=date_type(2000, 1, 1), method="nearest") assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [ {"method": "nearest"}, {"method": "nearest", "tolerance": timedelta(days=70)}, {"method": "nearest", "tolerance": timedelta(days=1800000)}, ], ) def 
test_sel_date_scalar_nearest(da, date_type, index, sel_kwargs): expected = xr.DataArray(2).assign_coords(time=index[1]) result = da.sel(time=date_type(1, 4, 1), **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray(3).assign_coords(time=index[2]) result = da.sel(time=date_type(1, 11, 1), **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "pad"}, {"method": "pad", "tolerance": timedelta(days=365)}], ) def test_sel_date_scalar_pad(da, date_type, index, sel_kwargs): expected = xr.DataArray(2).assign_coords(time=index[1]) result = da.sel(time=date_type(1, 4, 1), **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray(2).assign_coords(time=index[1]) result = da.sel(time=date_type(1, 11, 1), **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "backfill"}, {"method": "backfill", "tolerance": timedelta(days=365)}], ) def test_sel_date_scalar_backfill(da, date_type, index, sel_kwargs): expected = xr.DataArray(3).assign_coords(time=index[2]) result = da.sel(time=date_type(1, 4, 1), **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray(3).assign_coords(time=index[2]) result = da.sel(time=date_type(1, 11, 1), **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [ {"method": "pad", "tolerance": timedelta(days=20)}, {"method": "backfill", "tolerance": timedelta(days=20)}, {"method": "nearest", "tolerance": timedelta(days=20)}, ], ) def test_sel_date_scalar_tolerance_raises(da, date_type, sel_kwargs): with pytest.raises(KeyError): da.sel(time=date_type(1, 5, 1), **sel_kwargs) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "nearest"}, {"method": "nearest", "tolerance": timedelta(days=70)}], ) def test_sel_date_list_nearest(da, date_type, index, sel_kwargs): expected = xr.DataArray([2, 2], coords=[[index[1], index[1]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 4, 1)], **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray([2, 3], coords=[[index[1], index[2]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 12, 1)], **sel_kwargs) assert_identical(result, expected) expected = xr.DataArray([3, 3], coords=[[index[2], index[2]]], dims=["time"]) result = da.sel(time=[date_type(1, 11, 1), date_type(1, 12, 1)], **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "pad"}, {"method": "pad", "tolerance": timedelta(days=365)}], ) def test_sel_date_list_pad(da, date_type, index, sel_kwargs): expected = xr.DataArray([2, 2], coords=[[index[1], index[1]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 4, 1)], **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [{"method": "backfill"}, {"method": "backfill", "tolerance": timedelta(days=365)}], ) def test_sel_date_list_backfill(da, date_type, index, sel_kwargs): expected = xr.DataArray([3, 3], coords=[[index[2], index[2]]], dims=["time"]) result = da.sel(time=[date_type(1, 3, 1), date_type(1, 4, 1)], **sel_kwargs) assert_identical(result, expected) @requires_cftime @pytest.mark.parametrize( "sel_kwargs", [ {"method": "pad", "tolerance": timedelta(days=20)}, {"method": "backfill", "tolerance": timedelta(days=20)}, {"method": "nearest", "tolerance": timedelta(days=20)}, ], ) def 
test_sel_date_list_tolerance_raises(da, date_type, sel_kwargs): with pytest.raises(KeyError): da.sel(time=[date_type(1, 2, 1), date_type(1, 5, 1)], **sel_kwargs) @requires_cftime def test_isel(da, index): expected = xr.DataArray(1).assign_coords(time=index[0]) result = da.isel(time=0) assert_identical(result, expected) expected = xr.DataArray([1, 2], coords=[index[:2]], dims=["time"]) result = da.isel(time=[0, 1]) assert_identical(result, expected) @pytest.fixture def scalar_args(date_type): return [date_type(1, 1, 1)] @pytest.fixture def range_args(date_type): return [ "0001", slice("0001-01-01", "0001-12-30"), slice(None, "0001-12-30"), slice(date_type(1, 1, 1), date_type(1, 12, 30)), slice(None, date_type(1, 12, 30)), ] @requires_cftime def test_indexing_in_series_getitem(series, index, scalar_args, range_args): for arg in scalar_args: assert series[arg] == 1 expected = pd.Series([1, 2], index=index[:2]) for arg in range_args: assert series[arg].equals(expected) @requires_cftime def test_indexing_in_series_loc(series, index, scalar_args, range_args): for arg in scalar_args: assert series.loc[arg] == 1 expected = pd.Series([1, 2], index=index[:2]) for arg in range_args: assert series.loc[arg].equals(expected) @requires_cftime def test_indexing_in_series_iloc(series, index): expected1 = 1 assert series.iloc[0] == expected1 expected2 = pd.Series([1, 2], index=index[:2]) assert series.iloc[:2].equals(expected2) @requires_cftime def test_series_dropna(index): series = pd.Series([0.0, 1.0, np.nan, np.nan], index=index) expected = series.iloc[:2] result = series.dropna() assert result.equals(expected) @requires_cftime def test_indexing_in_dataframe_loc(df, index, scalar_args, range_args): expected_s = pd.Series([1], name=index[0]) for arg in scalar_args: result_s = df.loc[arg] assert result_s.equals(expected_s) expected_df = pd.DataFrame([1, 2], index=index[:2]) for arg in range_args: result_df = df.loc[arg] assert result_df.equals(expected_df) @requires_cftime def test_indexing_in_dataframe_iloc(df, index): expected_s = pd.Series([1], name=index[0]) result_s = df.iloc[0] assert result_s.equals(expected_s) assert result_s.equals(expected_s) expected_df = pd.DataFrame([1, 2], index=index[:2]) result_df = df.iloc[:2] assert result_df.equals(expected_df) @requires_cftime def test_concat_cftimeindex(date_type): da1 = xr.DataArray( [1.0, 2.0], coords=[[date_type(1, 1, 1), date_type(1, 2, 1)]], dims=["time"] ) da2 = xr.DataArray( [3.0, 4.0], coords=[[date_type(1, 3, 1), date_type(1, 4, 1)]], dims=["time"] ) da = xr.concat([da1, da2], dim="time") assert isinstance(da.xindexes["time"].to_pandas_index(), CFTimeIndex) @requires_cftime def test_empty_cftimeindex(): index = CFTimeIndex([]) assert index.date_type is None @requires_cftime def test_cftimeindex_add(index): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = index + timedelta(days=1) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_add_timedeltaindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) result = a + deltas expected = a.shift(2, "D") assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("n", [2.0, 1.5]) @pytest.mark.parametrize( 
"freq,units", [ ("h", "h"), ("min", "min"), ("s", "s"), ("ms", "ms"), ], ) @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_shift_float(n, freq, units, calendar) -> None: a = xr.date_range("2000", periods=3, calendar=calendar, freq="D", use_cftime=True) result = a + pd.Timedelta(n, units) expected = a.shift(n, freq) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime def test_cftimeindex_shift_float_us() -> None: a = xr.date_range("2000", periods=3, freq="D", use_cftime=True) with pytest.raises( ValueError, match="Could not convert to integer offset at any resolution" ): a.shift(2.5, "us") @requires_cftime @pytest.mark.parametrize("freq", ["YS", "YE", "QS", "QE", "MS", "ME", "D"]) def test_cftimeindex_shift_float_fails_for_non_tick_freqs(freq) -> None: a = xr.date_range("2000", periods=3, freq="D", use_cftime=True) with pytest.raises(TypeError, match="unsupported operand type"): a.shift(2.5, freq) @requires_cftime def test_cftimeindex_radd(index): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = timedelta(days=1) + index assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_timedeltaindex_add_cftimeindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) result = deltas + a expected = a.shift(2, "D") assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime def test_cftimeindex_sub_timedelta(index): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = index + timedelta(days=2) result = result - timedelta(days=1) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize( "other", [np.array(4 * [timedelta(days=1)]), np.array(timedelta(days=1))], ids=["1d-array", "scalar-array"], ) def test_cftimeindex_sub_timedelta_array(index, other): date_type = index.date_type expected_dates = [ date_type(1, 1, 2), date_type(1, 2, 2), date_type(2, 1, 2), date_type(2, 2, 2), ] expected = CFTimeIndex(expected_dates) result = index + timedelta(days=2) result = result - other assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_cftimeindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) b = a.shift(2, "D") result = b - a expected = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_cftime_datetime(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) result = a - a[0] expected = pd.TimedeltaIndex([timedelta(days=i) for i in range(5)]) assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftime_datetime_sub_cftimeindex(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) result = a[0] - a expected = 
pd.TimedeltaIndex([timedelta(days=-i) for i in range(5)]) assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_distant_cftime_datetime_sub_cftimeindex(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) with pytest.raises(ValueError, match="difference exceeds"): a.date_type(1, 1, 1) - a @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_timedeltaindex(calendar) -> None: a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)]) result = a - deltas expected = a.shift(-2, "D") assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_index_of_cftime_datetimes(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) b = pd.Index(a.values) expected = a - a result = a - b assert result.equals(expected) assert isinstance(result, pd.TimedeltaIndex) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_cftimeindex_sub_not_implemented(calendar): a = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) with pytest.raises(TypeError, match="unsupported operand"): a - 1 @requires_cftime def test_cftimeindex_rsub(index): with pytest.raises(TypeError): timedelta(days=1) - index @requires_cftime @pytest.mark.parametrize("freq", ["D", timedelta(days=1)]) def test_cftimeindex_shift(index, freq) -> None: date_type = index.date_type expected_dates = [ date_type(1, 1, 3), date_type(1, 2, 3), date_type(2, 1, 3), date_type(2, 2, 3), ] expected = CFTimeIndex(expected_dates) result = index.shift(2, freq) assert result.equals(expected) assert isinstance(result, CFTimeIndex) @requires_cftime def test_cftimeindex_shift_invalid_periods() -> None: index = xr.date_range("2000", periods=3, use_cftime=True) with pytest.raises(TypeError): index.shift("a", "D") @requires_cftime def test_cftimeindex_shift_invalid_freq() -> None: index = xr.date_range("2000", periods=3, use_cftime=True) with pytest.raises(TypeError): index.shift(1, 1) @requires_cftime @pytest.mark.parametrize( ("calendar", "expected"), [ ("noleap", "noleap"), ("365_day", "noleap"), ("360_day", "360_day"), ("julian", "julian"), ("gregorian", standard_or_gregorian), ("standard", standard_or_gregorian), ("proleptic_gregorian", "proleptic_gregorian"), ], ) def test_cftimeindex_calendar_property(calendar, expected): index = xr.date_range(start="2000", periods=3, calendar=calendar, use_cftime=True) assert index.calendar == expected @requires_cftime def test_empty_cftimeindex_calendar_property(): index = CFTimeIndex([]) assert index.calendar is None @requires_cftime @pytest.mark.parametrize( "calendar", [ "noleap", "365_day", "360_day", "julian", "gregorian", "standard", "proleptic_gregorian", ], ) def test_cftimeindex_freq_property_none_size_lt_3(calendar): for periods in range(3): index = xr.date_range( start="2000", periods=periods, calendar=calendar, use_cftime=True ) assert index.freq is None @requires_cftime @pytest.mark.parametrize( ("calendar", "expected"), [ ("noleap", "noleap"), ("365_day", "noleap"), ("360_day", "360_day"), ("julian", "julian"), ("gregorian", standard_or_gregorian), ("standard", standard_or_gregorian), ("proleptic_gregorian", "proleptic_gregorian"), ], ) def test_cftimeindex_calendar_repr(calendar, 
expected): """Test that cftimeindex has calendar property in repr.""" index = xr.date_range(start="2000", periods=3, calendar=calendar, use_cftime=True) repr_str = index.__repr__() assert f" calendar='{expected}'" in repr_str assert "2000-01-01 00:00:00, 2000-01-02 00:00:00" in repr_str @requires_cftime @pytest.mark.parametrize("periods", [2, 40]) def test_cftimeindex_periods_repr(periods): """Test that cftimeindex has periods property in repr.""" index = xr.date_range(start="2000", periods=periods, use_cftime=True) repr_str = index.__repr__() assert f" length={periods}" in repr_str @requires_cftime @pytest.mark.parametrize("calendar", ["noleap", "360_day", "standard"]) @pytest.mark.parametrize("freq", ["D", "h"]) def test_cftimeindex_freq_in_repr(freq, calendar): """Test that cftimeindex has frequency property in repr.""" index = xr.date_range( start="2000", periods=3, freq=freq, calendar=calendar, use_cftime=True ) repr_str = index.__repr__() assert f", freq='{freq}'" in repr_str @requires_cftime @pytest.mark.parametrize( "periods,expected", [ ( 2, f"""\ CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object', length=2, calendar='{standard_or_gregorian}', freq=None)""", ), ( 4, f"""\ CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00, 2000-01-03 00:00:00, 2000-01-04 00:00:00], dtype='object', length=4, calendar='{standard_or_gregorian}', freq='D')""", ), ( 101, f"""\ CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00, 2000-01-03 00:00:00, 2000-01-04 00:00:00, 2000-01-05 00:00:00, 2000-01-06 00:00:00, 2000-01-07 00:00:00, 2000-01-08 00:00:00, 2000-01-09 00:00:00, 2000-01-10 00:00:00, ... 2000-04-01 00:00:00, 2000-04-02 00:00:00, 2000-04-03 00:00:00, 2000-04-04 00:00:00, 2000-04-05 00:00:00, 2000-04-06 00:00:00, 2000-04-07 00:00:00, 2000-04-08 00:00:00, 2000-04-09 00:00:00, 2000-04-10 00:00:00], dtype='object', length=101, calendar='{standard_or_gregorian}', freq='D')""", ), ], ) def test_cftimeindex_repr_formatting(periods, expected): """Test that cftimeindex.__repr__ is formatted similar to pd.Index.__repr__.""" index = xr.date_range(start="2000", periods=periods, freq="D", use_cftime=True) expected = dedent(expected) assert expected == repr(index) @requires_cftime @pytest.mark.parametrize("display_width", [40, 80, 100]) @pytest.mark.parametrize("periods", [2, 3, 4, 100, 101]) def test_cftimeindex_repr_formatting_width(periods, display_width): """Test that cftimeindex is sensitive to OPTIONS['display_width'].""" index = xr.date_range(start="2000", periods=periods, use_cftime=True) len_intro_str = len("CFTimeIndex(") with xr.set_options(display_width=display_width): repr_str = index.__repr__() splitted = repr_str.split("\n") for i, s in enumerate(splitted): # check that lines not longer than OPTIONS['display_width'] assert len(s) <= display_width, f"{len(s)} {s} {display_width}" if i > 0: # check for initial spaces assert s[:len_intro_str] == " " * len_intro_str @requires_cftime @pytest.mark.parametrize("periods", [22, 50, 100]) def test_cftimeindex_repr_101_shorter(periods): index_101 = xr.date_range(start="2000", periods=101, use_cftime=True) index_periods = xr.date_range(start="2000", periods=periods, use_cftime=True) index_101_repr_str = index_101.__repr__() index_periods_repr_str = index_periods.__repr__() assert len(index_101_repr_str) < len(index_periods_repr_str) @requires_cftime def test_parse_array_of_cftime_strings(): from cftime import DatetimeNoLeap strings = np.array([["2000-01-01", "2000-01-02"], ["2000-01-03", "2000-01-04"]]) expected = np.array( [ 
[DatetimeNoLeap(2000, 1, 1), DatetimeNoLeap(2000, 1, 2)], [DatetimeNoLeap(2000, 1, 3), DatetimeNoLeap(2000, 1, 4)], ] ) result = _parse_array_of_cftime_strings(strings, DatetimeNoLeap) np.testing.assert_array_equal(result, expected) # Test scalar array case strings = np.array("2000-01-01") expected = np.array(DatetimeNoLeap(2000, 1, 1)) result = _parse_array_of_cftime_strings(strings, DatetimeNoLeap) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_strftime_of_cftime_array(calendar): date_format = "%Y%m%d%H%M" cf_values = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) dt_values = pd.date_range("2000", periods=5) expected = pd.Index(dt_values.strftime(date_format)) result = cf_values.strftime(date_format) assert result.equals(expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("unsafe", [False, True]) def test_to_datetimeindex(calendar, unsafe) -> None: index = xr.date_range("2000", periods=5, calendar=calendar, use_cftime=True) expected = pd.date_range("2000", periods=5, unit="ns") if calendar in _NON_STANDARD_CALENDAR_NAMES and not unsafe: with pytest.warns(RuntimeWarning, match="non-standard"): result = index.to_datetimeindex(time_unit="ns") else: result = index.to_datetimeindex(unsafe=unsafe, time_unit="ns") assert result.equals(expected) np.testing.assert_array_equal(result, expected) assert isinstance(result, pd.DatetimeIndex) @requires_cftime def test_to_datetimeindex_future_warning() -> None: index = xr.date_range("2000", periods=5, use_cftime=True) expected = pd.date_range("2000", periods=5, unit="ns") with pytest.warns(FutureWarning, match="In a future version"): result = index.to_datetimeindex() assert result.equals(expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_to_datetimeindex_out_of_range(calendar) -> None: index = xr.date_range("0001", periods=5, calendar=calendar, use_cftime=True) with pytest.raises(ValueError, match="0001"): index.to_datetimeindex(time_unit="ns") @requires_cftime @pytest.mark.parametrize("unsafe", [False, True]) def test_to_datetimeindex_gregorian_pre_reform(unsafe) -> None: index = xr.date_range("1582", periods=5, calendar="gregorian", use_cftime=True) if unsafe: result = index.to_datetimeindex(time_unit="us", unsafe=unsafe) else: with pytest.warns(RuntimeWarning, match="reform"): result = index.to_datetimeindex(time_unit="us", unsafe=unsafe) expected = pd.date_range("1582", periods=5, unit="us") assert result.equals(expected) assert result.dtype == expected.dtype @requires_cftime @pytest.mark.parametrize("calendar", ["all_leap", "360_day"]) def test_to_datetimeindex_feb_29(calendar) -> None: index = xr.date_range("2001-02-28", periods=2, calendar=calendar, use_cftime=True) with pytest.raises(ValueError, match="29"): index.to_datetimeindex(time_unit="ns") @pytest.mark.xfail(reason="fails on pandas main branch") @requires_cftime def test_multiindex(): index = xr.date_range( "2001-01-01", periods=100, calendar="360_day", use_cftime=True ) mindex = pd.MultiIndex.from_arrays([index]) assert mindex.get_loc("2001-01") == slice(0, 30) @requires_cftime @pytest.mark.parametrize("freq", ["3663s", "33min", "2h"]) @pytest.mark.parametrize("method", ["floor", "ceil", "round"]) def test_rounding_methods_against_datetimeindex(freq, method) -> None: # for now unit="us" seems good enough expected = pd.date_range("2000-01-02T01:03:51", 
periods=10, freq="1777s", unit="ns") expected = getattr(expected, method)(freq) result = xr.date_range( "2000-01-02T01:03:51", periods=10, freq="1777s", use_cftime=True ) result = getattr(result, method)(freq).to_datetimeindex(time_unit="ns") assert result.equals(expected) @requires_cftime @pytest.mark.parametrize("method", ["floor", "ceil", "round"]) def test_rounding_methods_empty_cftimindex(method): index = CFTimeIndex([]) result = getattr(index, method)("2s") expected = CFTimeIndex([]) assert result.equals(expected) assert result is not index @requires_cftime @pytest.mark.parametrize("method", ["floor", "ceil", "round"]) def test_rounding_methods_invalid_freq(method): index = xr.date_range( "2000-01-02T01:03:51", periods=10, freq="1777s", use_cftime=True ) with pytest.raises(ValueError, match="fixed"): getattr(index, method)("MS") @pytest.fixture def rounding_index(date_type): return xr.CFTimeIndex( [ date_type(1, 1, 1, 1, 59, 59, 999512), date_type(1, 1, 1, 3, 0, 1, 500001), date_type(1, 1, 1, 7, 0, 6, 499999), ] ) @requires_cftime def test_ceil(rounding_index, date_type): result = rounding_index.ceil("s") expected = xr.CFTimeIndex( [ date_type(1, 1, 1, 2, 0, 0, 0), date_type(1, 1, 1, 3, 0, 2, 0), date_type(1, 1, 1, 7, 0, 7, 0), ] ) assert result.equals(expected) @requires_cftime def test_floor(rounding_index, date_type): result = rounding_index.floor("s") expected = xr.CFTimeIndex( [ date_type(1, 1, 1, 1, 59, 59, 0), date_type(1, 1, 1, 3, 0, 1, 0), date_type(1, 1, 1, 7, 0, 6, 0), ] ) assert result.equals(expected) @requires_cftime def test_round(rounding_index, date_type): result = rounding_index.round("s") expected = xr.CFTimeIndex( [ date_type(1, 1, 1, 2, 0, 0, 0), date_type(1, 1, 1, 3, 0, 2, 0), date_type(1, 1, 1, 7, 0, 6, 0), ] ) assert result.equals(expected) @requires_cftime def test_asi8(date_type): index = xr.CFTimeIndex([date_type(1970, 1, 1), date_type(1970, 1, 2)]) result = index.asi8 expected = 1000000 * 86400 * np.array([0, 1]) np.testing.assert_array_equal(result, expected) @requires_cftime def test_asi8_distant_date(): """Test that asi8 conversion is truly exact.""" import cftime date_type = cftime.DatetimeProlepticGregorian index = xr.CFTimeIndex([date_type(10731, 4, 22, 3, 25, 45, 123456)]) result = index.asi8 expected = np.array([1000000 * 86400 * 400 * 8000 + 12345 * 1000000 + 123456]) np.testing.assert_array_equal(result, expected) @requires_cftime def test_asi8_empty_cftimeindex(): index = xr.CFTimeIndex([]) result = index.asi8 expected = np.array([], dtype=np.int64) np.testing.assert_array_equal(result, expected) @requires_cftime def test_infer_freq_valid_types(time_unit: PDDatetimeUnitOptions) -> None: cf_indx = xr.date_range("2000-01-01", periods=3, freq="D", use_cftime=True) assert xr.infer_freq(cf_indx) == "D" assert xr.infer_freq(xr.DataArray(cf_indx)) == "D" pd_indx = pd.date_range("2000-01-01", periods=3, freq="D").as_unit(time_unit) assert xr.infer_freq(pd_indx) == "D" assert xr.infer_freq(xr.DataArray(pd_indx)) == "D" pd_td_indx = pd.timedelta_range(start="1D", periods=3, freq="D").as_unit(time_unit) assert xr.infer_freq(pd_td_indx) == "D" assert xr.infer_freq(xr.DataArray(pd_td_indx)) == "D" @requires_cftime def test_infer_freq_invalid_inputs(): # Non-datetime DataArray with pytest.raises(ValueError, match="must contain datetime-like objects"): xr.infer_freq(xr.DataArray([0, 1, 2])) indx = xr.date_range("1990-02-03", periods=4, freq="MS", use_cftime=True) # 2D DataArray with pytest.raises(ValueError, match="must be 1D"): 
xr.infer_freq(xr.DataArray([indx, indx])) # CFTimeIndex too short with pytest.raises(ValueError, match="Need at least 3 dates to infer frequency"): xr.infer_freq(indx[:2]) # Non-monotonic input assert xr.infer_freq(indx[np.array([0, 2, 1, 3])]) is None # Non-unique input assert xr.infer_freq(indx[np.array([0, 1, 1, 2])]) is None # No unique frequency (here 1st step is MS, second is 2MS) assert xr.infer_freq(indx[np.array([0, 1, 3])]) is None # Same, but for QS indx = xr.date_range("1990-02-03", periods=4, freq="QS", use_cftime=True) assert xr.infer_freq(indx[np.array([0, 1, 3])]) is None @requires_cftime @pytest.mark.parametrize( "freq", [ "300YS-JAN", "YE-DEC", "YS-JUL", "2YS-FEB", "QE-NOV", "3QS-DEC", "MS", "4ME", "7D", "D", "30h", "5min", "40s", ], ) @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_infer_freq(freq, calendar): indx = xr.date_range( "2000-01-01", periods=3, freq=freq, calendar=calendar, use_cftime=True ) out = xr.infer_freq(indx) assert out == freq @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_pickle_cftimeindex(calendar): idx = xr.date_range( "2000-01-01", periods=3, freq="D", calendar=calendar, use_cftime=True ) idx_pkl = pickle.loads(pickle.dumps(idx)) assert (idx == idx_pkl).all() xarray-2025.09.0/xarray/tests/test_cftimeindex_resample.py000066400000000000000000000220021505620616400236100ustar00rootroot00000000000000from __future__ import annotations import datetime from typing import TypedDict import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding.cftime_offsets import ( CFTIME_TICKS, Day, _new_to_legacy_freq, to_offset, ) from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.resample_cftime import CFTimeGrouper from xarray.tests import has_pandas_3 cftime = pytest.importorskip("cftime") # Create a list of pairs of similar-length initial and resample frequencies # that cover: # - Resampling from shorter to longer frequencies # - Resampling from longer to shorter frequencies # - Resampling from one initial frequency to another. # These are used to test the cftime version of resample against pandas # with a standard calendar. 
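# Illustrative note (a sketch, not an exhaustive rule): a "tick" resample
# frequency is one whose offset has a fixed length (hours, minutes, seconds,
# ...), which is what the CFTIME_TICKS classes imported above capture, e.g.
#     isinstance(to_offset("6h"), CFTIME_TICKS)  # True  -> tick frequency
#     isinstance(to_offset("MS"), CFTIME_TICKS)  # False -> non-tick frequency
# Only the pairs with a tick resample frequency are exercised with the
# ``offset``/``origin`` resampling options further below, since those options
# have no effect for non-tick frequencies.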
FREQS = [ ("8003D", "4001D"), ("8003D", "16006D"), ("8003D", "21YS"), ("6h", "3h"), ("6h", "12h"), ("6h", "400min"), ("3D", "D"), ("3D", "6D"), ("11D", "MS"), ("3MS", "MS"), ("3MS", "6MS"), ("3MS", "85D"), ("7ME", "3ME"), ("7ME", "14ME"), ("7ME", "2QS-APR"), ("43QS-AUG", "21QS-AUG"), ("43QS-AUG", "86QS-AUG"), ("43QS-AUG", "11YE-JUN"), ("11QE-JUN", "5QE-JUN"), ("11QE-JUN", "22QE-JUN"), ("11QE-JUN", "51MS"), ("3YS-MAR", "YS-MAR"), ("3YS-MAR", "6YS-MAR"), ("3YS-MAR", "14QE-FEB"), ("7YE-MAY", "3YE-MAY"), ("7YE-MAY", "14YE-MAY"), ("7YE-MAY", "85ME"), ] def has_tick_resample_freq(freqs): resample_freq, _ = freqs resample_freq_as_offset = to_offset(resample_freq) return isinstance(resample_freq_as_offset, CFTIME_TICKS) def has_non_tick_resample_freq(freqs): return not has_tick_resample_freq(freqs) FREQS_WITH_TICK_RESAMPLE_FREQ = list(filter(has_tick_resample_freq, FREQS)) FREQS_WITH_NON_TICK_RESAMPLE_FREQ = list(filter(has_non_tick_resample_freq, FREQS)) def compare_against_pandas( da_datetimeindex, da_cftimeindex, freq, closed=None, label=None, offset=None, origin=None, ) -> None: if isinstance(origin, tuple): origin_pandas = pd.Timestamp(datetime.datetime(*origin)) origin_cftime = cftime.DatetimeGregorian(*origin) else: origin_pandas = origin origin_cftime = origin try: result_datetimeindex = da_datetimeindex.resample( time=freq, closed=closed, label=label, offset=offset, origin=origin_pandas, ).mean() except ValueError: with pytest.raises(ValueError): da_cftimeindex.resample( time=freq, closed=closed, label=label, origin=origin_cftime, offset=offset, ).mean() else: result_cftimeindex = da_cftimeindex.resample( time=freq, closed=closed, label=label, origin=origin_cftime, offset=offset, ).mean() # TODO (benbovy - flexible indexes): update when CFTimeIndex is a xarray Index subclass result_cftimeindex["time"] = ( result_cftimeindex.xindexes["time"] .to_pandas_index() .to_datetimeindex(time_unit="ns") ) xr.testing.assert_identical(result_cftimeindex, result_datetimeindex) def da(index) -> xr.DataArray: return xr.DataArray( np.arange(100.0, 100.0 + index.size), coords=[index], dims=["time"] ) @pytest.mark.parametrize( "freqs", FREQS_WITH_TICK_RESAMPLE_FREQ, ids=lambda x: "{}->{}".format(*x) ) @pytest.mark.parametrize("closed", [None, "left", "right"]) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("offset", [None, "5s"], ids=lambda x: f"{x}") def test_resample_with_tick_resample_freq(freqs, closed, label, offset) -> None: initial_freq, resample_freq = freqs start = "2000-01-01T12:07:01" origin = "start" datetime_index = pd.date_range( start=start, periods=5, freq=_new_to_legacy_freq(initial_freq) ) cftime_index = xr.date_range( start=start, periods=5, freq=initial_freq, use_cftime=True ) da_datetimeindex = da(datetime_index) da_cftimeindex = da(cftime_index) compare_against_pandas( da_datetimeindex, da_cftimeindex, resample_freq, closed=closed, label=label, offset=offset, origin=origin, ) @pytest.mark.parametrize( "freqs", FREQS_WITH_NON_TICK_RESAMPLE_FREQ, ids=lambda x: "{}->{}".format(*x) ) @pytest.mark.parametrize("closed", [None, "left", "right"]) @pytest.mark.parametrize("label", [None, "left", "right"]) def test_resample_with_non_tick_resample_freq(freqs, closed, label) -> None: initial_freq, resample_freq = freqs resample_freq_as_offset = to_offset(resample_freq) if isinstance(resample_freq_as_offset, Day) and not has_pandas_3: pytest.skip("Only valid for pandas >= 3.0") start = "2000-01-01T12:07:01" # Set offset and origin to their default values since 
they have no effect # on resampling data with a non-tick resample frequency. offset = None origin = "start_day" datetime_index = pd.date_range( start=start, periods=5, freq=_new_to_legacy_freq(initial_freq) ) cftime_index = xr.date_range( start=start, periods=5, freq=initial_freq, use_cftime=True ) da_datetimeindex = da(datetime_index) da_cftimeindex = da(cftime_index) compare_against_pandas( da_datetimeindex, da_cftimeindex, resample_freq, closed=closed, label=label, offset=offset, origin=origin, ) @pytest.mark.parametrize( ("freq", "expected"), [ ("s", "left"), ("min", "left"), ("h", "left"), ("D", "left"), ("ME", "right"), ("MS", "left"), ("QE", "right"), ("QS", "left"), ("YE", "right"), ("YS", "left"), ], ) def test_closed_label_defaults(freq, expected) -> None: assert CFTimeGrouper(freq=freq).closed == expected assert CFTimeGrouper(freq=freq).label == expected @pytest.mark.filterwarnings("ignore:Converting a CFTimeIndex") @pytest.mark.parametrize( "calendar", ["gregorian", "noleap", "all_leap", "360_day", "julian"] ) def test_calendars(calendar: str) -> None: # Limited testing for non-standard calendars freq, closed, label = "8001min", None, None xr_index = xr.date_range( start="2004-01-01T12:07:01", periods=7, freq="3D", calendar=calendar, use_cftime=True, ) pd_index = pd.date_range(start="2004-01-01T12:07:01", periods=7, freq="3D") da_cftime = da(xr_index).resample(time=freq, closed=closed, label=label).mean() da_datetime = da(pd_index).resample(time=freq, closed=closed, label=label).mean() # TODO (benbovy - flexible indexes): update when CFTimeIndex is a xarray Index subclass new_pd_index = da_cftime.xindexes["time"].to_pandas_index() assert isinstance(new_pd_index, CFTimeIndex) # shouldn't that be a pd.Index? da_cftime["time"] = new_pd_index.to_datetimeindex(time_unit="ns") xr.testing.assert_identical(da_cftime, da_datetime) class DateRangeKwargs(TypedDict): start: str periods: int freq: str @pytest.mark.parametrize("closed", ["left", "right"]) @pytest.mark.parametrize( "origin", ["start_day", "start", "end", "end_day", "epoch", (1970, 1, 1, 3, 2)], ids=lambda x: f"{x}", ) def test_origin(closed, origin) -> None: initial_freq, resample_freq = ("3h", "9h") start = "1969-12-31T12:07:01" index_kwargs: DateRangeKwargs = dict(start=start, periods=12, freq=initial_freq) datetime_index = pd.date_range(**index_kwargs) cftime_index = xr.date_range(**index_kwargs, use_cftime=True) da_datetimeindex = da(datetime_index) da_cftimeindex = da(cftime_index) compare_against_pandas( da_datetimeindex, da_cftimeindex, resample_freq, closed=closed, origin=origin, ) @pytest.mark.parametrize("offset", ["foo", "5MS", 10]) def test_invalid_offset_error(offset: str | int) -> None: cftime_index = xr.date_range("2000", periods=5, use_cftime=True) da_cftime = da(cftime_index) with pytest.raises(ValueError, match="offset must be"): da_cftime.resample(time="2h", offset=offset) # type: ignore[arg-type] def test_timedelta_offset() -> None: timedelta = datetime.timedelta(seconds=5) string = "5s" cftime_index = xr.date_range("2000", periods=5, use_cftime=True) da_cftime = da(cftime_index) timedelta_result = da_cftime.resample(time="2h", offset=timedelta).mean() string_result = da_cftime.resample(time="2h", offset=string).mean() xr.testing.assert_identical(timedelta_result, string_result) @pytest.mark.parametrize(("option", "value"), [("offset", "5s"), ("origin", "start")]) def test_non_tick_option_warning(option, value) -> None: cftime_index = xr.date_range("2000", periods=5, use_cftime=True) da_cftime = 
da(cftime_index) kwargs = {option: value} with pytest.warns(RuntimeWarning, match=option): da_cftime.resample(time="ME", **kwargs) xarray-2025.09.0/xarray/tests/test_coarsen.py000066400000000000000000000270551505620616400210700ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest import xarray as xr from xarray import DataArray, Dataset, set_options from xarray.core import duck_array_ops from xarray.tests import ( assert_allclose, assert_equal, assert_identical, has_dask, raise_if_dask_computes, requires_cftime, ) def test_coarsen_absent_dims_error(ds: Dataset) -> None: with pytest.raises( ValueError, match=r"Window dimensions \('foo',\) not found in Dataset dimensions", ): ds.coarsen(foo=2) @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize(("boundary", "side"), [("trim", "left"), ("pad", "right")]) def test_coarsen_dataset(ds, dask, boundary, side): if dask and has_dask: ds = ds.chunk({"x": 4}) actual = ds.coarsen(time=2, x=3, boundary=boundary, side=side).max() assert_equal( actual["z1"], ds["z1"].coarsen(x=3, boundary=boundary, side=side).max() ) # coordinate should be mean by default assert_equal( actual["time"], ds["time"].coarsen(time=2, boundary=boundary, side=side).mean() ) @pytest.mark.parametrize("dask", [True, False]) def test_coarsen_coords(ds, dask): if dask and has_dask: ds = ds.chunk({"x": 4}) # check if coord_func works actual = ds.coarsen(time=2, x=3, boundary="trim", coord_func={"time": "max"}).max() assert_equal(actual["z1"], ds["z1"].coarsen(x=3, boundary="trim").max()) assert_equal(actual["time"], ds["time"].coarsen(time=2, boundary="trim").max()) # raise if exact with pytest.raises(ValueError): ds.coarsen(x=3).mean() # should be no error ds.isel(x=slice(0, 3 * (len(ds["x"]) // 3))).coarsen(x=3).mean() # working test with pd.time da = xr.DataArray( np.linspace(0, 365, num=364), dims="time", coords={"time": pd.date_range("1999-12-15", periods=364)}, ) actual = da.coarsen(time=2).mean() @requires_cftime def test_coarsen_coords_cftime(): times = xr.date_range("2000", periods=6, use_cftime=True) da = xr.DataArray(range(6), [("time", times)]) actual = da.coarsen(time=3).mean() expected_times = xr.date_range("2000-01-02", freq="3D", periods=2, use_cftime=True) np.testing.assert_array_equal(actual.time, expected_times) @pytest.mark.parametrize( "funcname, argument", [ ("reduce", (np.mean,)), ("mean", ()), ], ) def test_coarsen_keep_attrs(funcname, argument) -> None: global_attrs = {"units": "test", "long_name": "testing"} da_attrs = {"da_attr": "test"} attrs_coords = {"attrs_coords": "test"} da_not_coarsend_attrs = {"da_not_coarsend_attr": "test"} data = np.linspace(10, 15, 100) coords = np.linspace(1, 10, 100) ds = Dataset( data_vars={ "da": ("coord", data, da_attrs), "da_not_coarsend": ("no_coord", data, da_not_coarsend_attrs), }, coords={"coord": ("coord", coords, attrs_coords)}, attrs=global_attrs, ) # attrs are now kept per default func = getattr(ds.coarsen(dim={"coord": 5}), funcname) result = func(*argument) assert result.attrs == global_attrs assert result.da.attrs == da_attrs assert result.da_not_coarsend.attrs == da_not_coarsend_attrs assert result.coord.attrs == attrs_coords assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" # discard attrs func = getattr(ds.coarsen(dim={"coord": 5}), funcname) result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_coarsend.attrs == {} assert 
result.coord.attrs == {} assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" # test discard attrs using global option func = getattr(ds.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_coarsend.attrs == {} assert result.coord.attrs == {} assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" # keyword takes precedence over global option func = getattr(ds.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument, keep_attrs=True) assert result.attrs == global_attrs assert result.da.attrs == da_attrs assert result.da_not_coarsend.attrs == da_not_coarsend_attrs assert result.coord.attrs == attrs_coords assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" func = getattr(ds.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=True): result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_coarsend.attrs == {} assert result.coord.attrs == {} assert result.da.name == "da" assert result.da_not_coarsend.name == "da_not_coarsend" @pytest.mark.slow @pytest.mark.parametrize("ds", (1, 2), indirect=True) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median")) def test_coarsen_reduce(ds: Dataset, window, name) -> None: # Use boundary="trim" to accommodate all window sizes used in tests coarsen_obj = ds.coarsen(time=window, boundary="trim") # add nan prefix to numpy methods to get similar behavior as bottleneck actual = coarsen_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(coarsen_obj, name)() assert_allclose(actual, expected) # make sure the order of data_var are not changed. assert list(ds.data_vars.keys()) == list(actual.data_vars.keys()) # Make sure the dimension order is restored for key, src_var in ds.data_vars.items(): assert src_var.dims == actual[key].dims @pytest.mark.parametrize( "funcname, argument", [ ("reduce", (np.mean,)), ("mean", ()), ], ) def test_coarsen_da_keep_attrs(funcname, argument) -> None: attrs_da = {"da_attr": "test"} attrs_coords = {"attrs_coords": "test"} data = np.linspace(10, 15, 100) coords = np.linspace(1, 10, 100) da = DataArray( data, dims=("coord"), coords={"coord": ("coord", coords, attrs_coords)}, attrs=attrs_da, name="name", ) # attrs are now kept per default func = getattr(da.coarsen(dim={"coord": 5}), funcname) result = func(*argument) assert result.attrs == attrs_da assert da.coord.attrs == attrs_coords assert result.name == "name" # discard attrs func = getattr(da.coarsen(dim={"coord": 5}), funcname) result = func(*argument, keep_attrs=False) assert result.attrs == {} # XXX: no assert? _ = da.coord.attrs == {} assert result.name == "name" # test discard attrs using global option func = getattr(da.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument) assert result.attrs == {} # XXX: no assert? _ = da.coord.attrs == {} assert result.name == "name" # keyword takes precedence over global option func = getattr(da.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument, keep_attrs=True) assert result.attrs == attrs_da # XXX: no assert? 
_ = da.coord.attrs == {} assert result.name == "name" func = getattr(da.coarsen(dim={"coord": 5}), funcname) with set_options(keep_attrs=True): result = func(*argument, keep_attrs=False) assert result.attrs == {} # XXX: no assert? _ = da.coord.attrs == {} assert result.name == "name" @pytest.mark.parametrize("da", (1, 2), indirect=True) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "max")) def test_coarsen_da_reduce(da, window, name) -> None: if da.isnull().sum() > 1 and window == 1: pytest.skip("These parameters lead to all-NaN slices") # Use boundary="trim" to accommodate all window sizes used in tests coarsen_obj = da.coarsen(time=window, boundary="trim") # add nan prefix to numpy methods to get similar # behavior as bottleneck actual = coarsen_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(coarsen_obj, name)() assert_allclose(actual, expected) class TestCoarsenConstruct: @pytest.mark.parametrize("dask", [True, False]) def test_coarsen_construct(self, dask: bool) -> None: ds = Dataset( { "vart": ("time", np.arange(48), {"a": "b"}), "varx": ("x", np.arange(10), {"a": "b"}), "vartx": (("x", "time"), np.arange(480).reshape(10, 48), {"a": "b"}), "vary": ("y", np.arange(12)), }, coords={"time": np.arange(48), "y": np.arange(12)}, attrs={"foo": "bar"}, ) if dask and has_dask: ds = ds.chunk({"x": 4, "time": 10}) expected = xr.Dataset(attrs={"foo": "bar"}) expected["vart"] = ( ("year", "month"), duck_array_ops.reshape(ds.vart.data, (-1, 12)), {"a": "b"}, ) expected["varx"] = ( ("x", "x_reshaped"), duck_array_ops.reshape(ds.varx.data, (-1, 5)), {"a": "b"}, ) expected["vartx"] = ( ("x", "x_reshaped", "year", "month"), duck_array_ops.reshape(ds.vartx.data, (2, 5, 4, 12)), {"a": "b"}, ) expected["vary"] = ds.vary expected.coords["time"] = ( ("year", "month"), duck_array_ops.reshape(ds.time.data, (-1, 12)), ) with raise_if_dask_computes(): actual = ds.coarsen(time=12, x=5).construct( {"time": ("year", "month"), "x": ("x", "x_reshaped")} ) assert_identical(actual, expected) with raise_if_dask_computes(): actual = ds.coarsen(time=12, x=5).construct( time=("year", "month"), x=("x", "x_reshaped") ) assert_identical(actual, expected) with raise_if_dask_computes(): actual = ds.coarsen(time=12, x=5).construct( {"time": ("year", "month"), "x": ("x", "x_reshaped")}, keep_attrs=False ) for var in actual: assert actual[var].attrs == {} assert actual.attrs == {} with raise_if_dask_computes(): actual = ds.vartx.coarsen(time=12, x=5).construct( {"time": ("year", "month"), "x": ("x", "x_reshaped")} ) assert_identical(actual, expected["vartx"]) with pytest.raises(ValueError): ds.coarsen(time=12).construct(foo="bar") with pytest.raises(ValueError): ds.coarsen(time=12, x=2).construct(time=("year", "month")) with pytest.raises(ValueError): ds.coarsen(time=12).construct() with pytest.raises(ValueError): ds.coarsen(time=12).construct(time="bar") with pytest.raises(ValueError): ds.coarsen(time=12).construct(time=("bar",)) def test_coarsen_construct_keeps_all_coords(self): da = xr.DataArray(np.arange(24), dims=["time"]) da = da.assign_coords(day=365 * da) result = da.coarsen(time=12).construct(time=("year", "month")) assert list(da.coords) == list(result.coords) ds = da.to_dataset(name="T") result = ds.coarsen(time=12).construct(time=("year", "month")) assert list(da.coords) == list(result.coords) xarray-2025.09.0/xarray/tests/test_coding.py000066400000000000000000000120661505620616400206750ustar00rootroot00000000000000from __future__ import annotations 
from contextlib import suppress import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding import variables from xarray.conventions import decode_cf_variable, encode_cf_variable from xarray.tests import assert_allclose, assert_equal, assert_identical, requires_dask with suppress(ImportError): import dask.array as da def test_CFMaskCoder_decode() -> None: original = xr.Variable(("x",), [0, -1, 1], {"_FillValue": -1}) expected = xr.Variable(("x",), [0, np.nan, 1]) coder = variables.CFMaskCoder() encoded = coder.decode(original) assert_identical(expected, encoded) encoding_with_dtype = { "dtype": np.dtype("float64"), "_FillValue": np.float32(1e20), "missing_value": np.float64(1e20), } encoding_without_dtype = { "_FillValue": np.float32(1e20), "missing_value": np.float64(1e20), } CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS = { "numeric-with-dtype": ([0.0, -1.0, 1.0], encoding_with_dtype), "numeric-without-dtype": ([0.0, -1.0, 1.0], encoding_without_dtype), "times-with-dtype": (pd.date_range("2000", periods=3), encoding_with_dtype), } @pytest.mark.parametrize( ("data", "encoding"), CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS.values(), ids=list(CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS.keys()), ) def test_CFMaskCoder_encode_missing_fill_values_conflict(data, encoding) -> None: original = xr.Variable(("x",), data, encoding=encoding) encoded = encode_cf_variable(original) assert encoded.dtype == encoded.attrs["missing_value"].dtype assert encoded.dtype == encoded.attrs["_FillValue"].dtype roundtripped = decode_cf_variable("foo", encoded) assert_identical(roundtripped, original) def test_CFMaskCoder_missing_value() -> None: expected = xr.DataArray( np.array([[26915, 27755, -9999, 27705], [25595, -9999, 28315, -9999]]), dims=["npts", "ntimes"], name="tmpk", ) expected.attrs["missing_value"] = -9999 decoded = xr.decode_cf(expected.to_dataset()) encoded, _ = xr.conventions.cf_encoder(decoded.variables, decoded.attrs) assert_equal(encoded["tmpk"], expected.variable) decoded.tmpk.encoding["_FillValue"] = -9940 with pytest.raises(ValueError): encoded, _ = xr.conventions.cf_encoder(decoded.variables, decoded.attrs) @requires_dask def test_CFMaskCoder_decode_dask() -> None: original = xr.Variable(("x",), [0, -1, 1], {"_FillValue": -1}).chunk() expected = xr.Variable(("x",), [0, np.nan, 1]) coder = variables.CFMaskCoder() encoded = coder.decode(original) assert isinstance(encoded.data, da.Array) assert_identical(expected, encoded) # TODO(shoyer): port other fill-value tests # TODO(shoyer): parameterize when we have more coders def test_coder_roundtrip() -> None: original = xr.Variable(("x",), [0.0, np.nan, 1.0]) coder = variables.CFMaskCoder() roundtripped = coder.decode(coder.encode(original)) assert_identical(original, roundtripped) @pytest.mark.parametrize("dtype", ["u1", "u2", "i1", "i2", "f2", "f4"]) @pytest.mark.parametrize("dtype2", ["f4", "f8"]) def test_scaling_converts_to_float(dtype: str, dtype2: str) -> None: dt = np.dtype(dtype2) original = xr.Variable( ("x",), np.arange(10, dtype=dtype), encoding=dict(scale_factor=dt.type(10)) ) coder = variables.CFScaleOffsetCoder() encoded = coder.encode(original) assert encoded.dtype == dt roundtripped = coder.decode(encoded) assert_identical(original, roundtripped) assert roundtripped.dtype == dt @pytest.mark.parametrize("scale_factor", (10, [10])) @pytest.mark.parametrize("add_offset", (0.1, [0.1])) def test_scaling_offset_as_list(scale_factor, add_offset) -> None: # test for #4631 encoding = dict(scale_factor=scale_factor, 
add_offset=add_offset) original = xr.Variable(("x",), np.arange(10.0), encoding=encoding) coder = variables.CFScaleOffsetCoder() encoded = coder.encode(original) roundtripped = coder.decode(encoded) assert_allclose(original, roundtripped) @pytest.mark.parametrize("bits", [1, 2, 4, 8]) def test_decode_unsigned_from_signed(bits) -> None: unsigned_dtype = np.dtype(f"u{bits}") signed_dtype = np.dtype(f"i{bits}") original_values = np.array([np.iinfo(unsigned_dtype).max], dtype=unsigned_dtype) encoded = xr.Variable( ("x",), original_values.astype(signed_dtype), attrs={"_Unsigned": "true"} ) coder = variables.CFMaskCoder() decoded = coder.decode(encoded) assert decoded.dtype == unsigned_dtype assert decoded.values == original_values @pytest.mark.parametrize("bits", [1, 2, 4, 8]) def test_decode_signed_from_unsigned(bits) -> None: unsigned_dtype = np.dtype(f"u{bits}") signed_dtype = np.dtype(f"i{bits}") original_values = np.array([-1], dtype=signed_dtype) encoded = xr.Variable( ("x",), original_values.astype(unsigned_dtype), attrs={"_Unsigned": "false"} ) coder = variables.CFMaskCoder() decoded = coder.decode(encoded) assert decoded.dtype == signed_dtype assert decoded.values == original_values xarray-2025.09.0/xarray/tests/test_coding_strings.py000066400000000000000000000224101505620616400224400ustar00rootroot00000000000000from __future__ import annotations from contextlib import suppress import numpy as np import pytest from xarray import Variable from xarray.coding import strings from xarray.core import indexing from xarray.tests import ( IndexerMaker, assert_array_equal, assert_identical, requires_dask, ) with suppress(ImportError): import dask.array as da def test_vlen_dtype() -> None: dtype = strings.create_vlen_dtype(str) assert dtype.metadata["element_type"] is str assert strings.is_unicode_dtype(dtype) assert not strings.is_bytes_dtype(dtype) assert strings.check_vlen_dtype(dtype) is str dtype = strings.create_vlen_dtype(bytes) assert dtype.metadata["element_type"] is bytes assert not strings.is_unicode_dtype(dtype) assert strings.is_bytes_dtype(dtype) assert strings.check_vlen_dtype(dtype) is bytes # check h5py variant ("vlen") dtype = np.dtype("O", metadata={"vlen": str}) # type: ignore[call-overload,unused-ignore] assert strings.check_vlen_dtype(dtype) is str assert strings.check_vlen_dtype(np.dtype(object)) is None @pytest.mark.parametrize("numpy_str_type", (np.str_, np.bytes_)) def test_numpy_subclass_handling(numpy_str_type) -> None: with pytest.raises(TypeError, match="unsupported type for vlen_dtype"): strings.create_vlen_dtype(numpy_str_type) def test_EncodedStringCoder_decode() -> None: coder = strings.EncodedStringCoder() raw_data = np.array([b"abc", "ΓŸβˆ‚Β΅βˆ†".encode()]) raw = Variable(("x",), raw_data, {"_Encoding": "utf-8"}) actual = coder.decode(raw) expected = Variable(("x",), np.array(["abc", "ΓŸβˆ‚Β΅βˆ†"], dtype=object)) assert_identical(actual, expected) assert_identical(coder.decode(actual[0]), expected[0]) @requires_dask def test_EncodedStringCoder_decode_dask() -> None: coder = strings.EncodedStringCoder() raw_data = np.array([b"abc", "ΓŸβˆ‚Β΅βˆ†".encode()]) raw = Variable(("x",), raw_data, {"_Encoding": "utf-8"}).chunk() actual = coder.decode(raw) assert isinstance(actual.data, da.Array) expected = Variable(("x",), np.array(["abc", "ΓŸβˆ‚Β΅βˆ†"], dtype=object)) assert_identical(actual, expected) actual_indexed = coder.decode(actual[0]) assert isinstance(actual_indexed.data, da.Array) assert_identical(actual_indexed, expected[0]) def 
test_EncodedStringCoder_encode() -> None: dtype = strings.create_vlen_dtype(str) raw_data = np.array(["abc", "ΓŸβˆ‚Β΅βˆ†"], dtype=dtype) expected_data = np.array([r.encode("utf-8") for r in raw_data], dtype=object) coder = strings.EncodedStringCoder(allows_unicode=True) raw = Variable(("x",), raw_data, encoding={"dtype": "S1"}) actual = coder.encode(raw) expected = Variable(("x",), expected_data, attrs={"_Encoding": "utf-8"}) assert_identical(actual, expected) raw = Variable(("x",), raw_data) assert_identical(coder.encode(raw), raw) coder = strings.EncodedStringCoder(allows_unicode=False) assert_identical(coder.encode(raw), expected) @pytest.mark.parametrize( "original", [ Variable(("x",), [b"ab", b"cdef"]), Variable((), b"ab"), Variable(("x",), [b"a", b"b"]), Variable((), b"a"), ], ) def test_CharacterArrayCoder_roundtrip(original) -> None: coder = strings.CharacterArrayCoder() roundtripped = coder.decode(coder.encode(original)) assert_identical(original, roundtripped) @pytest.mark.parametrize( "data", [ np.array([b"a", b"bc"]), np.array([b"a", b"bc"], dtype=strings.create_vlen_dtype(bytes)), ], ) def test_CharacterArrayCoder_encode(data) -> None: coder = strings.CharacterArrayCoder() raw = Variable(("x",), data) actual = coder.encode(raw) expected = Variable(("x", "string2"), np.array([[b"a", b""], [b"b", b"c"]])) assert_identical(actual, expected) @pytest.mark.parametrize( ["original", "expected_char_dim_name"], [ (Variable(("x",), [b"ab", b"cdef"]), "string4"), (Variable(("x",), [b"ab", b"cdef"], encoding={"char_dim_name": "foo"}), "foo"), ], ) def test_CharacterArrayCoder_char_dim_name(original, expected_char_dim_name) -> None: coder = strings.CharacterArrayCoder() encoded = coder.encode(original) roundtripped = coder.decode(encoded) assert encoded.dims[-1] == expected_char_dim_name assert roundtripped.encoding["char_dim_name"] == expected_char_dim_name assert roundtripped.dims[-1] == original.dims[-1] @pytest.mark.parametrize( [ "original", "expected_char_dim_name", "expected_char_dim_length", "warning_message", ], [ ( Variable(("x",), [b"ab", b"cde"], encoding={"char_dim_name": "foo4"}), "foo3", 3, "String dimension naming mismatch", ), ( Variable( ("x",), [b"ab", b"cde"], encoding={"original_shape": (2, 4), "char_dim_name": "foo"}, ), "foo3", 3, "String dimension length mismatch", ), ], ) def test_CharacterArrayCoder_dim_mismatch_warnings( original, expected_char_dim_name, expected_char_dim_length, warning_message ) -> None: coder = strings.CharacterArrayCoder() with pytest.warns(UserWarning, match=warning_message): encoded = coder.encode(original) roundtripped = coder.decode(encoded) assert encoded.dims[-1] == expected_char_dim_name assert encoded.sizes[expected_char_dim_name] == expected_char_dim_length assert roundtripped.encoding["char_dim_name"] == expected_char_dim_name assert roundtripped.dims[-1] == original.dims[-1] def test_StackedBytesArray() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S") actual = strings.StackedBytesArray(array) expected = np.array([b"abc", b"def"], dtype="S") assert actual.dtype == expected.dtype assert actual.shape == expected.shape assert actual.size == expected.size assert actual.ndim == expected.ndim assert len(actual) == len(expected) assert_array_equal(expected, actual) B = IndexerMaker(indexing.BasicIndexer) assert_array_equal(expected[:1], actual[B[:1]]) with pytest.raises(IndexError): actual[B[:, :2]] def test_StackedBytesArray_scalar() -> None: array = np.array([b"a", b"b", b"c"], dtype="S") actual = 
strings.StackedBytesArray(array) expected = np.array(b"abc") assert actual.dtype == expected.dtype assert actual.shape == expected.shape assert actual.size == expected.size assert actual.ndim == expected.ndim with pytest.raises(TypeError): len(actual) np.testing.assert_array_equal(expected, actual) B = IndexerMaker(indexing.BasicIndexer) with pytest.raises(IndexError): actual[B[:2]] def test_StackedBytesArray_vectorized_indexing() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S") stacked = strings.StackedBytesArray(array) expected = np.array([[b"abc", b"def"], [b"def", b"abc"]]) V = IndexerMaker(indexing.VectorizedIndexer) indexer = V[np.array([[0, 1], [1, 0]])] actual = stacked.vindex[indexer] assert_array_equal(actual, expected) def test_char_to_bytes() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]]) expected = np.array([b"abc", b"def"]) actual = strings.char_to_bytes(array) assert_array_equal(actual, expected) expected = np.array([b"ad", b"be", b"cf"]) actual = strings.char_to_bytes(array.T) # non-contiguous assert_array_equal(actual, expected) def test_char_to_bytes_ndim_zero() -> None: expected = np.array(b"a") actual = strings.char_to_bytes(expected) assert_array_equal(actual, expected) def test_char_to_bytes_size_zero() -> None: array = np.zeros((3, 0), dtype="S1") expected = np.array([b"", b"", b""]) actual = strings.char_to_bytes(array) assert_array_equal(actual, expected) @requires_dask def test_char_to_bytes_dask() -> None: numpy_array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]]) array = da.from_array(numpy_array, ((2,), (3,))) expected = np.array([b"abc", b"def"]) actual = strings.char_to_bytes(array) assert isinstance(actual, da.Array) assert actual.chunks == ((2,),) assert actual.dtype == "S3" assert_array_equal(np.array(actual), expected) with pytest.raises(ValueError, match=r"stacked dask character array"): strings.char_to_bytes(array.rechunk(1)) def test_bytes_to_char() -> None: array = np.array([[b"ab", b"cd"], [b"ef", b"gh"]]) expected = np.array([[[b"a", b"b"], [b"c", b"d"]], [[b"e", b"f"], [b"g", b"h"]]]) actual = strings.bytes_to_char(array) assert_array_equal(actual, expected) expected = np.array([[[b"a", b"b"], [b"e", b"f"]], [[b"c", b"d"], [b"g", b"h"]]]) actual = strings.bytes_to_char(array.T) # non-contiguous assert_array_equal(actual, expected) @requires_dask def test_bytes_to_char_dask() -> None: numpy_array = np.array([b"ab", b"cd"]) array = da.from_array(numpy_array, ((1, 1),)) expected = np.array([[b"a", b"b"], [b"c", b"d"]]) actual = strings.bytes_to_char(array) assert isinstance(actual, da.Array) assert actual.chunks == ((1, 1), ((2,))) assert actual.dtype == "S1" assert_array_equal(np.array(actual), expected) xarray-2025.09.0/xarray/tests/test_coding_times.py000066400000000000000000002353241505620616400221020ustar00rootroot00000000000000from __future__ import annotations import warnings from datetime import datetime, timedelta from itertools import product, starmap from typing import Literal import numpy as np import pandas as pd import pytest from pandas.errors import OutOfBoundsDatetime, OutOfBoundsTimedelta from xarray import ( DataArray, Dataset, Variable, conventions, date_range, decode_cf, ) from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.coding.times import ( _encode_datetime_with_cftime, _netcdf_to_numpy_timeunit, _numpy_to_netcdf_timeunit, _should_cftime_be_used, cftime_to_nptime, decode_cf_datetime, decode_cf_timedelta, encode_cf_datetime, encode_cf_timedelta, 
format_cftime_datetime, infer_datetime_units, infer_timedelta_units, ) from xarray.coding.variables import SerializationWarning from xarray.conventions import _update_bounds_attributes, cf_encoder from xarray.core.common import contains_cftime_datetimes from xarray.core.types import PDDatetimeUnitOptions from xarray.core.utils import is_duck_dask_array from xarray.testing import assert_equal, assert_identical from xarray.tests import ( _ALL_CALENDARS, _NON_STANDARD_CALENDARS, _STANDARD_CALENDAR_NAMES, _STANDARD_CALENDARS, DuckArrayWrapper, FirstElementAccessibleArray, _all_cftime_date_types, arm_xfail, assert_array_equal, assert_duckarray_allclose, assert_duckarray_equal, assert_no_warnings, has_cftime, requires_cftime, requires_dask, ) _CF_DATETIME_NUM_DATES_UNITS = [ (np.arange(10), "days since 2000-01-01", "s"), (np.arange(10).astype("float64"), "days since 2000-01-01", "s"), (np.arange(10).astype("float32"), "days since 2000-01-01", "s"), (np.arange(10).reshape(2, 5), "days since 2000-01-01", "s"), (12300 + np.arange(5), "hours since 1680-01-01 00:00:00", "s"), # here we add a couple minor formatting errors to test # the robustness of the parsing algorithm. (12300 + np.arange(5), "hour since 1680-01-01 00:00:00", "s"), (12300 + np.arange(5), "Hour since 1680-01-01 00:00:00", "s"), (12300 + np.arange(5), " Hour since 1680-01-01 00:00:00 ", "s"), (10, "days since 2000-01-01", "s"), ([10], "daYs since 2000-01-01", "s"), ([[10]], "days since 2000-01-01", "s"), ([10, 10], "days since 2000-01-01", "s"), (np.array(10), "days since 2000-01-01", "s"), (0, "days since 1000-01-01", "s"), ([0], "days since 1000-01-01", "s"), ([[0]], "days since 1000-01-01", "s"), (np.arange(2), "days since 1000-01-01", "s"), (np.arange(0, 100000, 20000), "days since 1900-01-01", "s"), (np.arange(0, 100000, 20000), "days since 1-01-01", "s"), (17093352.0, "hours since 1-1-1 00:00:0.0", "s"), ([0.5, 1.5], "hours since 1900-01-01T00:00:00", "s"), (0, "milliseconds since 2000-01-01T00:00:00", "s"), (0, "microseconds since 2000-01-01T00:00:00", "s"), (np.int32(788961600), "seconds since 1981-01-01", "s"), # GH2002 (12300 + np.arange(5), "hour since 1680-01-01 00:00:00.500000", "us"), (164375, "days since 1850-01-01 00:00:00", "s"), (164374.5, "days since 1850-01-01 00:00:00", "s"), ([164374.5, 168360.5], "days since 1850-01-01 00:00:00", "s"), ] _CF_DATETIME_TESTS = [ num_dates_units + (calendar,) for num_dates_units, calendar in product( _CF_DATETIME_NUM_DATES_UNITS, _STANDARD_CALENDAR_NAMES ) ] @requires_cftime @pytest.mark.filterwarnings("ignore:Ambiguous reference date string") @pytest.mark.filterwarnings("ignore:Times can't be serialized faithfully") @pytest.mark.parametrize( ["num_dates", "units", "minimum_resolution", "calendar"], _CF_DATETIME_TESTS ) def test_cf_datetime( num_dates, units: str, minimum_resolution: PDDatetimeUnitOptions, calendar: str, time_unit: PDDatetimeUnitOptions, ) -> None: import cftime expected = cftime.num2date( num_dates, units, calendar, only_use_cftime_datetimes=True ) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(num_dates, units, calendar, time_unit=time_unit) if actual.dtype.kind != "O": if np.timedelta64(1, time_unit) > np.timedelta64(1, minimum_resolution): expected_unit = minimum_resolution else: expected_unit = time_unit expected = cftime_to_nptime(expected, time_unit=expected_unit) assert_array_equal(actual, expected) encoded1, _, _ = encode_cf_datetime(actual, units, calendar) 
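    # Re-encoding the decoded dates with the original units and calendar
    # should reproduce the input numeric values exactly.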
assert_array_equal(num_dates, encoded1) if hasattr(num_dates, "ndim") and num_dates.ndim == 1 and "1000" not in units: # verify that wrapping with a pandas.Index works # note that it *does not* currently work to put # non-datetime64 compatible dates into a pandas.Index encoded2, _, _ = encode_cf_datetime(pd.Index(actual), units, calendar) assert_array_equal(num_dates, encoded2) @requires_cftime def test_decode_cf_datetime_overflow(time_unit: PDDatetimeUnitOptions) -> None: # checks for # https://github.com/pydata/pandas/issues/14068 # https://github.com/pydata/xarray/issues/975 from cftime import DatetimeGregorian datetime = DatetimeGregorian units = "days since 2000-01-01 00:00:00" # date after 2262 and before 1678 days = (-117710, 95795) expected = (datetime(1677, 9, 20), datetime(2262, 4, 12)) for i, day in enumerate(days): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") result = decode_cf_datetime( day, units, calendar="standard", time_unit=time_unit ) assert result == expected[i] # additional check to see if type/dtypes are correct if time_unit == "ns": assert isinstance(result.item(), datetime) else: assert result.dtype == np.dtype(f"=M8[{time_unit}]") def test_decode_cf_datetime_non_standard_units() -> None: expected = pd.date_range(periods=100, start="1970-01-01", freq="h") # netCDFs from madis.noaa.gov use this format for their time units # they cannot be parsed by cftime, but pd.Timestamp works units = "hours since 1-1-1970" actual = decode_cf_datetime(np.arange(100), units) assert_array_equal(actual, expected) @requires_cftime def test_decode_cf_datetime_non_iso_strings() -> None: # datetime strings that are _almost_ ISO compliant but not quite, # but which cftime.num2date can still parse correctly expected = pd.date_range(periods=100, start="2000-01-01", freq="h") cases = [ (np.arange(100), "hours since 2000-01-01 0"), (np.arange(100), "hours since 2000-1-1 0"), (np.arange(100), "hours since 2000-01-01 0:00"), ] for num_dates, units in cases: actual = decode_cf_datetime(num_dates, units) assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_decode_standard_calendar_inside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions ) -> None: import cftime units = "hours since 0001-01-01" times = pd.date_range( "2001-04-01-00", end="2001-04-30-23", unit=time_unit, freq="h" ) # to_pydatetime() will return microsecond time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar) expected = times.values # for cftime we get "us" resolution # ns resolution is handled by cftime due to the reference date # being out of bounds, but the times themselves are # representable with nanosecond resolution. 
actual = decode_cf_datetime(time, units, calendar=calendar, time_unit=time_unit) assert actual.dtype == np.dtype(f"=M8[{time_unit}]") assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_non_standard_calendar_inside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" times = pd.date_range("2001-04-01-00", end="2001-04-30-23", freq="h") non_standard_time = cftime.date2num(times.to_pydatetime(), units, calendar=calendar) expected = cftime.num2date( non_standard_time, units, calendar=calendar, only_use_cftime_datetimes=True ) expected_dtype = np.dtype("O") actual = decode_cf_datetime(non_standard_time, units, calendar=calendar) assert actual.dtype == expected_dtype assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_decode_dates_outside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions ) -> None: import cftime units = "days since 0001-01-01" times = [datetime(1, 4, 1, h) for h in range(1, 5)] time = cftime.date2num(times, units, calendar=calendar) expected = cftime.num2date( time, units, calendar=calendar, only_use_cftime_datetimes=True ) if calendar == "proleptic_gregorian" and time_unit != "ns": expected = cftime_to_nptime(expected, time_unit=time_unit) expected_date_type = type(expected[0]) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(time, units, calendar=calendar, time_unit=time_unit) assert all(isinstance(value, expected_date_type) for value in actual) assert_array_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) @pytest.mark.parametrize("num_time", [735368, [735368], [[735368]]]) def test_decode_standard_calendar_single_element_inside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions, num_time, ) -> None: units = "days since 0001-01-01" with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime( num_time, units, calendar=calendar, time_unit=time_unit ) assert actual.dtype == np.dtype(f"=M8[{time_unit}]") @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_non_standard_calendar_single_element_inside_timestamp_range( calendar, ) -> None: units = "days since 0001-01-01" for num_time in [735368, [735368], [[735368]]]: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(num_time, units, calendar=calendar) assert actual.dtype == np.dtype("O") @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_single_element_outside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" for days in [1, 1470376]: for num_time in [days, [days], [[days]]]: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime(num_time, units, calendar=calendar) expected = cftime.num2date( days, units, calendar, only_use_cftime_datetimes=True ) assert isinstance(actual.item(), type(expected)) @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_decode_standard_calendar_multidim_time_inside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions, ) -> None: import cftime units = "days since 0001-01-01" times1 = pd.date_range("2001-04-01", end="2001-04-05", freq="D") 
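    # A second date range is encoded alongside the first; the two encoded
    # series are stacked column-wise below to exercise decoding of a 2D
    # array of numeric times.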
times2 = pd.date_range("2001-05-01", end="2001-05-05", freq="D") time1 = cftime.date2num(times1.to_pydatetime(), units, calendar=calendar) time2 = cftime.date2num(times2.to_pydatetime(), units, calendar=calendar) mdim_time = np.empty((len(time1), 2)) mdim_time[:, 0] = time1 mdim_time[:, 1] = time2 expected1 = times1.values expected2 = times2.values actual = decode_cf_datetime( mdim_time, units, calendar=calendar, time_unit=time_unit ) assert actual.dtype == np.dtype(f"=M8[{time_unit}]") assert_array_equal(actual[:, 0], expected1) assert_array_equal(actual[:, 1], expected2) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range( calendar, ) -> None: import cftime units = "days since 0001-01-01" times1 = pd.date_range("2001-04-01", end="2001-04-05", freq="D") times2 = pd.date_range("2001-05-01", end="2001-05-05", freq="D") time1 = cftime.date2num(times1.to_pydatetime(), units, calendar=calendar) time2 = cftime.date2num(times2.to_pydatetime(), units, calendar=calendar) mdim_time = np.empty((len(time1), 2)) mdim_time[:, 0] = time1 mdim_time[:, 1] = time2 if cftime.__name__ == "cftime": expected1 = cftime.num2date( time1, units, calendar, only_use_cftime_datetimes=True ) expected2 = cftime.num2date( time2, units, calendar, only_use_cftime_datetimes=True ) else: expected1 = cftime.num2date(time1, units, calendar) expected2 = cftime.num2date(time2, units, calendar) expected_dtype = np.dtype("O") actual = decode_cf_datetime(mdim_time, units, calendar=calendar) assert actual.dtype == expected_dtype assert_array_equal(actual[:, 0], expected1) assert_array_equal(actual[:, 1], expected2) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_decode_multidim_time_outside_timestamp_range( calendar, time_unit: PDDatetimeUnitOptions ) -> None: import cftime units = "days since 0001-01-01" times1 = [datetime(1, 4, day) for day in range(1, 6)] times2 = [datetime(1, 5, day) for day in range(1, 6)] time1 = cftime.date2num(times1, units, calendar=calendar) time2 = cftime.date2num(times2, units, calendar=calendar) mdim_time = np.empty((len(time1), 2)) mdim_time[:, 0] = time1 mdim_time[:, 1] = time2 expected1 = cftime.num2date(time1, units, calendar, only_use_cftime_datetimes=True) expected2 = cftime.num2date(time2, units, calendar, only_use_cftime_datetimes=True) if calendar == "proleptic_gregorian" and time_unit != "ns": expected1 = cftime_to_nptime(expected1, time_unit=time_unit) expected2 = cftime_to_nptime(expected2, time_unit=time_unit) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Unable to decode time axis") actual = decode_cf_datetime( mdim_time, units, calendar=calendar, time_unit=time_unit ) dtype: np.dtype dtype = np.dtype("O") if calendar == "proleptic_gregorian" and time_unit != "ns": dtype = np.dtype(f"=M8[{time_unit}]") assert actual.dtype == dtype assert_array_equal(actual[:, 0], expected1) assert_array_equal(actual[:, 1], expected2) @requires_cftime @pytest.mark.parametrize( ("calendar", "num_time"), [("360_day", 720058.0), ("all_leap", 732059.0), ("366_day", 732059.0)], ) def test_decode_non_standard_calendar_single_element(calendar, num_time) -> None: import cftime units = "days since 0001-01-01" actual = decode_cf_datetime(num_time, units, calendar=calendar) expected = np.asarray( cftime.num2date(num_time, units, calendar, only_use_cftime_datetimes=True) ) assert actual.dtype == np.dtype("O") assert expected == actual @requires_cftime def 
test_decode_360_day_calendar() -> None: import cftime calendar = "360_day" # ensure leap year doesn't matter for year in [2010, 2011, 2012, 2013, 2014]: units = f"days since {year}-01-01" num_times = np.arange(100) expected = cftime.num2date( num_times, units, calendar, only_use_cftime_datetimes=True ) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") actual = decode_cf_datetime(num_times, units, calendar=calendar) assert len(w) == 0 assert actual.dtype == np.dtype("O") assert_array_equal(actual, expected) @requires_cftime def test_decode_abbreviation() -> None: """Test making sure we properly fall back to cftime on abbreviated units.""" import cftime val = np.array([1586628000000.0]) units = "msecs since 1970-01-01T00:00:00Z" actual = decode_cf_datetime(val, units) expected = cftime_to_nptime(cftime.num2date(val, units)) assert_array_equal(actual, expected) @arm_xfail @requires_cftime @pytest.mark.parametrize( ["num_dates", "units", "expected_list"], [ ([np.nan], "days since 2000-01-01", ["NaT"]), ([np.nan, 0], "days since 2000-01-01", ["NaT", "2000-01-01T00:00:00Z"]), ( [np.nan, 0, 1], "days since 2000-01-01", ["NaT", "2000-01-01T00:00:00Z", "2000-01-02T00:00:00Z"], ), ], ) def test_cf_datetime_nan(num_dates, units, expected_list) -> None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN") actual = decode_cf_datetime(num_dates, units) # use pandas because numpy will deprecate timezone-aware conversions expected = pd.to_datetime(expected_list).to_numpy(dtype="datetime64[ns]") assert_array_equal(expected, actual) @requires_cftime def test_decoded_cf_datetime_array_2d(time_unit: PDDatetimeUnitOptions) -> None: # regression test for GH1229 variable = Variable( ("x", "y"), np.array([[0, 1], [2, 3]]), {"units": "days since 2000-01-01"} ) result = CFDatetimeCoder(time_unit=time_unit).decode(variable) assert result.dtype == f"datetime64[{time_unit}]" expected = pd.date_range("2000-01-01", periods=4).values.reshape(2, 2) assert_array_equal(np.asarray(result), expected) @pytest.mark.parametrize("decode_times", [True, False]) @pytest.mark.parametrize("mask_and_scale", [True, False]) def test_decode_datetime_mask_and_scale( decode_times: bool, mask_and_scale: bool ) -> None: attrs = { "units": "nanoseconds since 1970-01-01", "calendar": "proleptic_gregorian", "_FillValue": np.int16(-1), "add_offset": 100000.0, } encoded = Variable(["time"], np.array([0, -1, 1], "int16"), attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, mask_and_scale=mask_and_scale, decode_times=decode_times ) result = conventions.encode_cf_variable(decoded, name="foo") assert_identical(encoded, result) assert encoded.dtype == result.dtype FREQUENCIES_TO_ENCODING_UNITS = { "ns": "nanoseconds", "us": "microseconds", "ms": "milliseconds", "s": "seconds", "min": "minutes", "h": "hours", "D": "days", } @pytest.mark.parametrize(("freq", "units"), FREQUENCIES_TO_ENCODING_UNITS.items()) def test_infer_datetime_units(freq, units) -> None: dates = pd.date_range("2000", periods=2, freq=freq) expected = f"{units} since 2000-01-01 00:00:00" assert expected == infer_datetime_units(dates) @pytest.mark.parametrize( ["dates", "expected"], [ ( pd.to_datetime(["1900-01-01", "1900-01-02", "NaT"], unit="ns"), "days since 1900-01-01 00:00:00", ), ( pd.to_datetime(["NaT", "1900-01-01"], unit="ns"), "days since 1900-01-01 00:00:00", ), (pd.to_datetime(["NaT"], unit="ns"), "days since 1970-01-01 00:00:00"), ], ) def test_infer_datetime_units_with_NaT(dates, expected) -> None: 
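    """NaT entries are ignored when inferring the encoding units; an all-NaT
    input falls back to the 1970-01-01 epoch."""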
assert expected == infer_datetime_units(dates) _CFTIME_DATETIME_UNITS_TESTS = [ ([(1900, 1, 1), (1900, 1, 1)], "days since 1900-01-01 00:00:00.000000"), ( [(1900, 1, 1), (1900, 1, 2), (1900, 1, 2, 0, 0, 1)], "seconds since 1900-01-01 00:00:00.000000", ), ( [(1900, 1, 1), (1900, 1, 8), (1900, 1, 16)], "days since 1900-01-01 00:00:00.000000", ), ] @requires_cftime @pytest.mark.parametrize( "calendar", _NON_STANDARD_CALENDARS + ["gregorian", "proleptic_gregorian"] ) @pytest.mark.parametrize(("date_args", "expected"), _CFTIME_DATETIME_UNITS_TESTS) def test_infer_cftime_datetime_units(calendar, date_args, expected) -> None: date_type = _all_cftime_date_types()[calendar] dates = list(starmap(date_type, date_args)) assert expected == infer_datetime_units(dates) @pytest.mark.filterwarnings("ignore:Timedeltas can't be serialized faithfully") @pytest.mark.parametrize( ["timedeltas", "units", "numbers"], [ ("1D", "days", np.int64(1)), (["1D", "2D", "3D"], "days", np.array([1, 2, 3], "int64")), ("1h", "hours", np.int64(1)), ("1ms", "milliseconds", np.int64(1)), ("1us", "microseconds", np.int64(1)), ("1ns", "nanoseconds", np.int64(1)), (["NaT", "0s", "1s"], None, [np.iinfo(np.int64).min, 0, 1]), (["30m", "60m"], "hours", [0.5, 1.0]), ("NaT", "days", np.iinfo(np.int64).min), (["NaT", "NaT"], "days", [np.iinfo(np.int64).min, np.iinfo(np.int64).min]), ], ) def test_cf_timedelta(timedeltas, units, numbers) -> None: if timedeltas == "NaT": timedeltas = np.timedelta64("NaT", "ns") else: timedeltas = pd.to_timedelta(timedeltas).to_numpy() numbers = np.array(numbers) expected = numbers actual, _ = encode_cf_timedelta(timedeltas, units) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype if units is not None: expected = timedeltas actual = decode_cf_timedelta(numbers, units) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype expected = np.timedelta64("NaT", "ns") actual = decode_cf_timedelta(np.array(np.nan), "days") assert_array_equal(expected, actual) assert expected.dtype == actual.dtype def test_cf_timedelta_2d() -> None: units = "days" numbers = np.atleast_2d([1, 2, 3]) timedeltas = np.atleast_2d(pd.to_timedelta(["1D", "2D", "3D"]).to_numpy()) expected = timedeltas actual = decode_cf_timedelta(numbers, units) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype @pytest.mark.parametrize("encoding_unit", FREQUENCIES_TO_ENCODING_UNITS.values()) def test_decode_cf_timedelta_time_unit( time_unit: PDDatetimeUnitOptions, encoding_unit ) -> None: encoded = 1 encoding_unit_as_numpy = _netcdf_to_numpy_timeunit(encoding_unit) if np.timedelta64(1, time_unit) > np.timedelta64(1, encoding_unit_as_numpy): expected = np.timedelta64(encoded, encoding_unit_as_numpy) else: expected = np.timedelta64(encoded, encoding_unit_as_numpy).astype( f"timedelta64[{time_unit}]" ) result = decode_cf_timedelta(encoded, encoding_unit, time_unit) assert result == expected assert result.dtype == expected.dtype def test_decode_cf_timedelta_time_unit_out_of_bounds( time_unit: PDDatetimeUnitOptions, ) -> None: # Define a scale factor that will guarantee overflow with the given # time_unit. 
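    # Multiplying it by 300 * 365 days below yields a duration that cannot be
    # represented as an int64 number of ``time_unit`` ticks.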
scale_factor = np.timedelta64(1, time_unit) // np.timedelta64(1, "ns") encoded = scale_factor * 300 * 365 with pytest.raises(OutOfBoundsTimedelta): decode_cf_timedelta(encoded, "days", time_unit) def test_cf_timedelta_roundtrip_large_value(time_unit: PDDatetimeUnitOptions) -> None: value = np.timedelta64(np.iinfo(np.int64).max, time_unit) encoded, units = encode_cf_timedelta(value) decoded = decode_cf_timedelta(encoded, units, time_unit=time_unit) assert value == decoded assert value.dtype == decoded.dtype @pytest.mark.parametrize( ["deltas", "expected"], [ (pd.to_timedelta(["1 day", "2 days"]), "days"), (pd.to_timedelta(["1h", "1 day 1 hour"]), "hours"), (pd.to_timedelta(["1m", "2m", np.nan]), "minutes"), (pd.to_timedelta(["1m3s", "1m4s"]), "seconds"), ], ) def test_infer_timedelta_units(deltas, expected) -> None: assert expected == infer_timedelta_units(deltas) @requires_cftime @pytest.mark.parametrize( ["date_args", "expected"], [ ((1, 2, 3, 4, 5, 6), "0001-02-03 04:05:06.000000"), ((10, 2, 3, 4, 5, 6), "0010-02-03 04:05:06.000000"), ((100, 2, 3, 4, 5, 6), "0100-02-03 04:05:06.000000"), ((1000, 2, 3, 4, 5, 6), "1000-02-03 04:05:06.000000"), ], ) def test_format_cftime_datetime(date_args, expected) -> None: date_types = _all_cftime_date_types() for date_type in date_types.values(): result = format_cftime_datetime(date_type(*date_args)) assert result == expected @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_decode_cf(calendar, time_unit: PDDatetimeUnitOptions) -> None: days = [1.0, 2.0, 3.0] # TODO: GH5690 β€”Β do we want to allow this type for `coords`? da = DataArray(days, coords=[days], dims=["time"], name="test") ds = da.to_dataset() for v in ["test", "time"]: ds[v].attrs["units"] = "days since 2001-01-01" ds[v].attrs["calendar"] = calendar if not has_cftime and calendar not in _STANDARD_CALENDAR_NAMES: with pytest.raises(ValueError): ds = decode_cf(ds) else: ds = decode_cf(ds, decode_times=CFDatetimeCoder(time_unit=time_unit)) if calendar not in _STANDARD_CALENDAR_NAMES: assert ds.test.dtype == np.dtype("O") else: assert ds.test.dtype == np.dtype(f"=M8[{time_unit}]") def test_decode_cf_time_bounds(time_unit: PDDatetimeUnitOptions) -> None: da = DataArray( np.arange(6, dtype="int64").reshape((3, 2)), coords={"time": [1, 2, 3]}, dims=("time", "nbnd"), name="time_bnds", ) attrs = { "units": "days since 2001-01", "calendar": "standard", "bounds": "time_bnds", } ds = da.to_dataset() ds["time"].attrs.update(attrs) _update_bounds_attributes(ds.variables) assert ds.variables["time_bnds"].attrs == { "units": "days since 2001-01", "calendar": "standard", } dsc = decode_cf(ds, decode_times=CFDatetimeCoder(time_unit=time_unit)) assert dsc.time_bnds.dtype == np.dtype(f"=M8[{time_unit}]") dsc = decode_cf(ds, decode_times=False) assert dsc.time_bnds.dtype == np.dtype("int64") # Do not overwrite existing attrs ds = da.to_dataset() ds["time"].attrs.update(attrs) bnd_attr = {"units": "hours since 2001-01", "calendar": "noleap"} ds["time_bnds"].attrs.update(bnd_attr) _update_bounds_attributes(ds.variables) assert ds.variables["time_bnds"].attrs == bnd_attr # If bounds variable not available do not complain ds = da.to_dataset() ds["time"].attrs.update(attrs) ds["time"].attrs["bounds"] = "fake_var" _update_bounds_attributes(ds.variables) @requires_cftime def test_encode_time_bounds() -> None: time = pd.date_range("2000-01-16", periods=1) time_bounds = pd.date_range("2000-01-01", periods=2, freq="MS") ds = Dataset(dict(time=time, time_bounds=time_bounds)) ds.time.attrs = {"bounds": 
"time_bounds"} ds.time.encoding = {"calendar": "noleap", "units": "days since 2000-01-01"} expected = {} # expected['time'] = Variable(data=np.array([15]), dims=['time']) expected["time_bounds"] = Variable(data=np.array([0, 31]), dims=["time_bounds"]) encoded, _ = cf_encoder(ds.variables, ds.attrs) assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs assert "units" not in encoded["time_bounds"].attrs # if time_bounds attrs are same as time attrs, it doesn't matter ds.time_bounds.encoding = {"calendar": "noleap", "units": "days since 2000-01-01"} encoded, _ = cf_encoder(dict(ds.variables.items()), ds.attrs) assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs assert "units" not in encoded["time_bounds"].attrs # for CF-noncompliant case of time_bounds attrs being different from # time attrs; preserve them for faithful roundtrip ds.time_bounds.encoding = {"calendar": "noleap", "units": "days since 1849-01-01"} encoded, _ = cf_encoder(dict(ds.variables.items()), ds.attrs) with pytest.raises(AssertionError): assert_equal(encoded["time_bounds"], expected["time_bounds"]) assert "calendar" not in encoded["time_bounds"].attrs assert encoded["time_bounds"].attrs["units"] == ds.time_bounds.encoding["units"] ds.time.encoding = {} with pytest.warns(UserWarning): cf_encoder(ds.variables, ds.attrs) @pytest.fixture(params=_ALL_CALENDARS) def calendar(request): return request.param @pytest.fixture def times(calendar): import cftime return cftime.num2date( np.arange(4), units="hours since 2000-01-01", calendar=calendar, only_use_cftime_datetimes=True, ) @pytest.fixture def data(times): data = np.random.rand(2, 2, 4) lons = np.linspace(0, 11, 2) lats = np.linspace(0, 20, 2) return DataArray( data, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @pytest.fixture def times_3d(times): lons = np.linspace(0, 11, 2) lats = np.linspace(0, 20, 2) times_arr = np.random.choice(times, size=(2, 2, 4)) return DataArray( times_arr, coords=[lons, lats, times], dims=["lon", "lat", "time"], name="data" ) @requires_cftime def test_contains_cftime_datetimes_1d(data) -> None: assert contains_cftime_datetimes(data.time.variable) @requires_cftime @requires_dask def test_contains_cftime_datetimes_dask_1d(data) -> None: assert contains_cftime_datetimes(data.time.variable.chunk()) @requires_cftime def test_contains_cftime_datetimes_3d(times_3d) -> None: assert contains_cftime_datetimes(times_3d.variable) @requires_cftime @requires_dask def test_contains_cftime_datetimes_dask_3d(times_3d) -> None: assert contains_cftime_datetimes(times_3d.variable.chunk()) @pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])]) def test_contains_cftime_datetimes_non_cftimes(non_cftime_data) -> None: assert not contains_cftime_datetimes(non_cftime_data.variable) @requires_dask @pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])]) def test_contains_cftime_datetimes_non_cftimes_dask(non_cftime_data) -> None: assert not contains_cftime_datetimes(non_cftime_data.variable.chunk()) @requires_cftime @pytest.mark.parametrize("shape", [(24,), (8, 3), (2, 4, 3)]) def test_encode_cf_datetime_overflow(shape) -> None: # Test for fix to GH 2272 dates = pd.date_range("2100", periods=24).values.reshape(shape) units = "days since 1800-01-01" calendar = "standard" num, _, _ = encode_cf_datetime(dates, units, calendar) roundtrip = decode_cf_datetime(num, units, calendar) 
np.testing.assert_array_equal(dates, roundtrip) def test_encode_expected_failures() -> None: dates = pd.date_range("2000", periods=3) with pytest.raises(ValueError, match="invalid time units"): encode_cf_datetime(dates, units="days after 2000-01-01") with pytest.raises(ValueError, match="invalid reference date"): encode_cf_datetime(dates, units="days since NO_YEAR") def test_encode_cf_datetime_pandas_min() -> None: # GH 2623 dates = pd.date_range("2000", periods=3) num, units, calendar = encode_cf_datetime(dates) expected_num = np.array([0.0, 1.0, 2.0]) expected_units = "days since 2000-01-01 00:00:00" expected_calendar = "proleptic_gregorian" np.testing.assert_array_equal(num, expected_num) assert units == expected_units assert calendar == expected_calendar @requires_cftime def test_encode_cf_datetime_invalid_pandas_valid_cftime() -> None: num, units, calendar = encode_cf_datetime( pd.date_range("2000", periods=3), # Pandas fails to parse this unit, but cftime is quite happy with it "days since 1970-01-01 00:00:00 00", "standard", ) expected_num = [10957, 10958, 10959] expected_units = "days since 1970-01-01 00:00:00 00" expected_calendar = "standard" assert_array_equal(num, expected_num) assert units == expected_units assert calendar == expected_calendar @requires_cftime def test_time_units_with_timezone_roundtrip(calendar) -> None: # Regression test for GH 2649 expected_units = "days since 2000-01-01T00:00:00-05:00" expected_num_dates = np.array([1, 2, 3]) dates = decode_cf_datetime(expected_num_dates, expected_units, calendar) # Check that dates were decoded to UTC; here the hours should all # equal 5. result_hours = DataArray(dates).dt.hour expected_hours = DataArray([5, 5, 5]) assert_equal(result_hours, expected_hours) # Check that the encoded values are accurately roundtripped. result_num_dates, result_units, result_calendar = encode_cf_datetime( dates, expected_units, calendar ) if calendar in _STANDARD_CALENDARS: assert_duckarray_equal(result_num_dates, expected_num_dates) else: # cftime datetime arithmetic is not quite exact. 
assert_duckarray_allclose(result_num_dates, expected_num_dates) assert result_units == expected_units assert result_calendar == calendar @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_use_cftime_default_standard_calendar_in_range(calendar) -> None: numerical_dates = [0, 1] units = "days since 2000-01-01" expected = pd.date_range("2000", periods=2) with assert_no_warnings(): result = decode_cf_datetime(numerical_dates, units, calendar) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) @pytest.mark.parametrize("units_year", [1500, 1580]) def test_use_cftime_default_standard_calendar_out_of_range( calendar, units_year ) -> None: from cftime import num2date numerical_dates = [0, 1] units = f"days since {units_year}-01-01" expected = num2date( numerical_dates, units, calendar, only_use_cftime_datetimes=True ) with pytest.warns(SerializationWarning): result = decode_cf_datetime(numerical_dates, units, calendar) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_default_non_standard_calendar( calendar, units_year, time_unit: PDDatetimeUnitOptions ) -> None: from cftime import num2date numerical_dates = [0, 1] units = f"days since {units_year}-01-01" expected = num2date( numerical_dates, units, calendar, only_use_cftime_datetimes=True ) if time_unit == "ns" and units_year == 2500: with pytest.warns(SerializationWarning, match="Unable to decode time axis"): result = decode_cf_datetime( numerical_dates, units, calendar, time_unit=time_unit ) else: with assert_no_warnings(): result = decode_cf_datetime( numerical_dates, units, calendar, time_unit=time_unit ) np.testing.assert_array_equal(result, expected) @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_true(calendar, units_year) -> None: from cftime import num2date numerical_dates = [0, 1] units = f"days since {units_year}-01-01" expected = num2date( numerical_dates, units, calendar, only_use_cftime_datetimes=True ) with assert_no_warnings(): result = decode_cf_datetime(numerical_dates, units, calendar, use_cftime=True) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) def test_use_cftime_false_standard_calendar_in_range(calendar) -> None: numerical_dates = [0, 1] units = "days since 2000-01-01" expected = pd.date_range("2000", periods=2) with assert_no_warnings(): result = decode_cf_datetime(numerical_dates, units, calendar, use_cftime=False) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) @pytest.mark.parametrize("units_year", [1500, 1582]) def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year) -> None: numerical_dates = [0, 1] units = f"days since {units_year}-01-01" with pytest.raises(OutOfBoundsDatetime): decode_cf_datetime(numerical_dates, units, calendar, use_cftime=False) @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) def test_use_cftime_false_non_standard_calendar(calendar, units_year) -> None: numerical_dates = [0, 1] units = f"days since {units_year}-01-01" with pytest.raises(OutOfBoundsDatetime): decode_cf_datetime(numerical_dates, units, calendar, use_cftime=False) 
@requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_decode_ambiguous_time_warns(calendar) -> None: # GH 4422, 4506 from cftime import num2date # we don't decode non-standard calendars with # pandas so we expect no warning to be emitted is_standard_calendar = calendar in _STANDARD_CALENDAR_NAMES dates = [1, 2, 3] units = "days since 1-1-1" expected = num2date(dates, units, calendar=calendar, only_use_cftime_datetimes=True) if is_standard_calendar: with pytest.warns(SerializationWarning) as record: result = decode_cf_datetime(dates, units, calendar=calendar) relevant_warnings = [ r for r in record.list if str(r.message).startswith("Ambiguous reference date string: 1-1-1") ] assert len(relevant_warnings) == 1 else: with assert_no_warnings(): result = decode_cf_datetime(dates, units, calendar=calendar) np.testing.assert_array_equal(result, expected) @pytest.mark.filterwarnings("ignore:Times can't be serialized faithfully") @pytest.mark.parametrize("encoding_units", FREQUENCIES_TO_ENCODING_UNITS.values()) @pytest.mark.parametrize("freq", FREQUENCIES_TO_ENCODING_UNITS.keys()) @pytest.mark.parametrize("use_cftime", [True, False]) def test_encode_cf_datetime_defaults_to_correct_dtype( encoding_units, freq, use_cftime ) -> None: if not has_cftime and use_cftime: pytest.skip("Test requires cftime") if (freq == "ns" or encoding_units == "nanoseconds") and use_cftime: pytest.skip("Nanosecond frequency is not valid for cftime dates.") times = date_range("2000", periods=3, freq=freq, use_cftime=use_cftime) units = f"{encoding_units} since 2000-01-01" encoded, _units, _ = encode_cf_datetime(times, units) numpy_timeunit = _netcdf_to_numpy_timeunit(encoding_units) encoding_units_as_timedelta = np.timedelta64(1, numpy_timeunit) if pd.to_timedelta(1, freq) >= encoding_units_as_timedelta: assert encoded.dtype == np.int64 else: assert encoded.dtype == np.float64 @pytest.mark.parametrize("freq", FREQUENCIES_TO_ENCODING_UNITS.keys()) def test_encode_decode_roundtrip_datetime64( freq, time_unit: PDDatetimeUnitOptions ) -> None: # See GH 4045. Prior to GH 4684 this test would fail for frequencies of # "s", "ms", "us", and "ns". initial_time = pd.date_range("1678-01-01", periods=1) times = initial_time.append(pd.date_range("1968", periods=2, freq=freq)) variable = Variable(["time"], times) encoded = conventions.encode_cf_variable(variable) decoded = conventions.decode_cf_variable( "time", encoded, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_equal(variable, decoded) @requires_cftime @pytest.mark.parametrize("freq", ["us", "ms", "s", "min", "h", "D"]) def test_encode_decode_roundtrip_cftime(freq) -> None: initial_time = date_range("0001", periods=1, use_cftime=True) times = initial_time.append( date_range("0001", periods=2, freq=freq, use_cftime=True) + timedelta(days=291000 * 365) ) variable = Variable(["time"], times) encoded = conventions.encode_cf_variable(variable) decoder = CFDatetimeCoder(use_cftime=True) decoded = conventions.decode_cf_variable("time", encoded, decode_times=decoder) assert_equal(variable, decoded) @requires_cftime def test__encode_datetime_with_cftime() -> None: # See GH 4870. cftime versions > 1.4.0 required us to adapt the # way _encode_datetime_with_cftime was written. 
import cftime calendar = "gregorian" times = cftime.num2date([0, 1], "hours since 2000-01-01", calendar) encoding_units = "days since 2000-01-01" # Since netCDF files do not support storing float128 values, we ensure that # float64 values are used by setting longdouble=False in num2date. This try # except logic can be removed when xarray's minimum version of cftime is at # least 1.6.2. try: expected = cftime.date2num(times, encoding_units, calendar, longdouble=False) except TypeError: expected = cftime.date2num(times, encoding_units, calendar) result = _encode_datetime_with_cftime(times, encoding_units, calendar) np.testing.assert_equal(result, expected) @requires_cftime def test_round_trip_standard_calendar_cftime_datetimes_pre_reform() -> None: from cftime import DatetimeGregorian dates = np.array([DatetimeGregorian(1, 1, 1), DatetimeGregorian(2000, 1, 1)]) encoded = encode_cf_datetime(dates, "seconds since 2000-01-01", "standard") with pytest.warns(SerializationWarning, match="Unable to decode time axis"): decoded = decode_cf_datetime(*encoded) np.testing.assert_equal(decoded, dates) @pytest.mark.parametrize("calendar", ["standard", "gregorian"]) def test_encode_cf_datetime_gregorian_proleptic_gregorian_mismatch_error( calendar: str, time_unit: PDDatetimeUnitOptions, ) -> None: if time_unit == "ns": pytest.skip("datetime64[ns] values can only be defined post reform") dates = np.array(["0001-01-01", "2001-01-01"], dtype=f"datetime64[{time_unit}]") with pytest.raises(ValueError, match="proleptic_gregorian"): encode_cf_datetime(dates, "seconds since 2000-01-01", calendar) @pytest.mark.parametrize("calendar", ["gregorian", "Gregorian", "GREGORIAN"]) def test_decode_encode_roundtrip_with_non_lowercase_letters( calendar, time_unit: PDDatetimeUnitOptions ) -> None: # See GH 5093. times = [0, 1] units = "days since 2000-01-01" attrs = {"calendar": calendar, "units": units} variable = Variable(["time"], times, attrs) decoded = conventions.decode_cf_variable( "time", variable, decode_times=CFDatetimeCoder(time_unit=time_unit) ) encoded = conventions.encode_cf_variable(decoded) # Previously this would erroneously be an array of cftime.datetime # objects. We check here that it is decoded properly to np.datetime64. assert np.issubdtype(decoded.dtype, np.datetime64) # Use assert_identical to ensure that the calendar attribute maintained its # original form throughout the roundtripping process, uppercase letters and # all. assert_identical(variable, encoded) @requires_cftime def test_should_cftime_be_used_source_outside_range(): src = date_range( "1000-01-01", periods=100, freq="MS", calendar="noleap", use_cftime=True ) with pytest.raises( ValueError, match="Source time range is not valid for numpy datetimes." ): _should_cftime_be_used(src, "standard", False) @requires_cftime def test_should_cftime_be_used_target_not_npable(): src = date_range( "2000-01-01", periods=100, freq="MS", calendar="noleap", use_cftime=True ) with pytest.raises( ValueError, match="Calendar 'noleap' is only valid with cftime." ): _should_cftime_be_used(src, "noleap", False) @pytest.mark.parametrize( "dtype", [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64], ) def test_decode_cf_datetime_varied_integer_dtypes(dtype): units = "seconds since 2018-08-22T03:23:03Z" num_dates = dtype(50) # Set use_cftime=False to ensure we cannot mask a failure by falling back # to cftime. 
result = decode_cf_datetime(num_dates, units, use_cftime=False) expected = np.asarray(np.datetime64("2018-08-22T03:23:53", "ns")) np.testing.assert_equal(result, expected) @requires_cftime def test_decode_cf_datetime_uint64_with_cftime(): units = "days since 1700-01-01" num_dates = np.uint64(182621) result = decode_cf_datetime(num_dates, units) expected = np.asarray(np.datetime64("2200-01-01", "ns")) np.testing.assert_equal(result, expected) def test_decode_cf_datetime_uint64_with_pandas_overflow_error(): units = "nanoseconds since 1970-01-01" calendar = "standard" num_dates = np.uint64(1_000_000 * 86_400 * 360 * 500_000) with pytest.raises(OutOfBoundsTimedelta): decode_cf_datetime(num_dates, units, calendar, use_cftime=False) @requires_cftime def test_decode_cf_datetime_uint64_with_cftime_overflow_error(): units = "microseconds since 1700-01-01" calendar = "360_day" num_dates = np.uint64(1_000_000 * 86_400 * 360 * 500_000) with pytest.raises(OverflowError): decode_cf_datetime(num_dates, units, calendar) @pytest.mark.parametrize("use_cftime", [True, False]) def test_decode_0size_datetime(use_cftime): # GH1329 if use_cftime and not has_cftime: pytest.skip() dtype = object if use_cftime else "=M8[ns]" expected = np.array([], dtype=dtype) actual = decode_cf_datetime( np.zeros(shape=0, dtype=np.int64), units="days since 1970-01-01 00:00:00", calendar="proleptic_gregorian", use_cftime=use_cftime, ) np.testing.assert_equal(expected, actual) def test_decode_float_datetime(): num_dates = np.array([1867128, 1867134, 1867140], dtype="float32") units = "hours since 1800-01-01" calendar = "standard" expected = np.array( ["2013-01-01T00:00:00", "2013-01-01T06:00:00", "2013-01-01T12:00:00"], dtype="datetime64[ns]", ) actual = decode_cf_datetime( num_dates, units=units, calendar=calendar, use_cftime=False ) np.testing.assert_equal(actual, expected) @pytest.mark.parametrize("time_unit", ["ms", "us", "ns"]) def test_decode_float_datetime_with_decimals( time_unit: PDDatetimeUnitOptions, ) -> None: # test resolution enhancement for floats values = np.array([0, 0.125, 0.25, 0.375, 0.75, 1.0], dtype="float32") expected = np.array( [ "2000-01-01T00:00:00.000", "2000-01-01T00:00:00.125", "2000-01-01T00:00:00.250", "2000-01-01T00:00:00.375", "2000-01-01T00:00:00.750", "2000-01-01T00:00:01.000", ], dtype=f"=M8[{time_unit}]", ) units = "seconds since 2000-01-01" calendar = "standard" actual = decode_cf_datetime(values, units, calendar, time_unit=time_unit) assert actual.dtype == expected.dtype np.testing.assert_equal(actual, expected) @pytest.mark.parametrize( "time_unit, num", [("s", 0.123), ("ms", 0.1234), ("us", 0.1234567)] ) def test_coding_float_datetime_warning( time_unit: PDDatetimeUnitOptions, num: float ) -> None: units = "seconds since 2000-01-01" calendar = "standard" values = np.array([num], dtype="float32") with pytest.warns( SerializationWarning, match=f"Can't decode floating point datetimes to {time_unit!r}", ): decode_cf_datetime(values, units, calendar, time_unit=time_unit) @requires_cftime def test_scalar_unit() -> None: # test that a scalar units (often NaN when using to_netcdf) does not raise an error variable = Variable(("x", "y"), np.array([[0, 1], [2, 3]]), {"units": np.nan}) result = CFDatetimeCoder().decode(variable) assert np.isnan(result.attrs["units"]) @requires_cftime def test_contains_cftime_lazy() -> None: import cftime from xarray.core.common import _contains_cftime_datetimes times = np.array( [cftime.DatetimeGregorian(1, 1, 2, 0), cftime.DatetimeGregorian(1, 1, 2, 0)], dtype=object, 
) array = FirstElementAccessibleArray(times) assert _contains_cftime_datetimes(array) @pytest.mark.parametrize( "timestr, format, dtype, fill_value, use_encoding", [ ("1677-09-21T00:12:43.145224193", "ns", np.int64, 20, True), ("1970-09-21T00:12:44.145224808", "ns", np.float64, 1e30, True), ( "1677-09-21T00:12:43.145225216", "ns", np.float64, -9.223372036854776e18, True, ), ("1677-09-21T00:12:43.145224193", "ns", np.int64, None, False), ("1677-09-21T00:12:43.145225", "us", np.int64, None, False), ("1970-01-01T00:00:01.000001", "us", np.int64, None, False), ("1677-09-21T00:21:52.901038080", "ns", np.float32, 20.0, True), ], ) def test_roundtrip_datetime64_nanosecond_precision( timestr: str, format: Literal["ns", "us"], dtype: np.typing.DTypeLike, fill_value: int | float | None, use_encoding: bool, time_unit: PDDatetimeUnitOptions, ) -> None: # test for GH7817 time = np.datetime64(timestr, format) times = [np.datetime64("1970-01-01T00:00:00", format), np.datetime64("NaT"), time] if use_encoding: encoding = dict(dtype=dtype, _FillValue=fill_value) else: encoding = {} var = Variable(["time"], times, encoding=encoding) assert var.dtype == np.dtype(f"=M8[{format}]") encoded_var = conventions.encode_cf_variable(var) assert ( encoded_var.attrs["units"] == f"{_numpy_to_netcdf_timeunit(format)} since 1970-01-01 00:00:00" ) assert encoded_var.attrs["calendar"] == "proleptic_gregorian" assert encoded_var.data.dtype == dtype decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) result_unit = ( format if np.timedelta64(1, format) <= np.timedelta64(1, time_unit) else time_unit ) assert decoded_var.dtype == np.dtype(f"=M8[{result_unit}]") assert ( decoded_var.encoding["units"] == f"{_numpy_to_netcdf_timeunit(format)} since 1970-01-01 00:00:00" ) assert decoded_var.encoding["dtype"] == dtype assert decoded_var.encoding["calendar"] == "proleptic_gregorian" assert_identical(var, decoded_var) def test_roundtrip_datetime64_nanosecond_precision_warning( time_unit: PDDatetimeUnitOptions, ) -> None: # test warning if times can't be serialized faithfully times = [ np.datetime64("1970-01-01T00:01:00", time_unit), np.datetime64("NaT", time_unit), np.datetime64("1970-01-02T00:01:00", time_unit), ] units = "days since 1970-01-10T01:01:00" needed_units = "hours" new_units = f"{needed_units} since 1970-01-10T01:01:00" encoding = dict(dtype=None, _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) with pytest.warns(UserWarning, match=f"Resolution of {needed_units!r} needed."): encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.float64 assert encoded_var.attrs["units"] == units assert encoded_var.attrs["_FillValue"] == 20.0 decoded_var = conventions.decode_cf_variable("foo", encoded_var) assert_identical(var, decoded_var) encoding = dict(dtype="int64", _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) with pytest.warns( UserWarning, match=f"Serializing with units {new_units!r} instead." 
): encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.int64 assert encoded_var.attrs["units"] == new_units assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_identical(var, decoded_var) encoding = dict(dtype="float64", _FillValue=20, units=units) var = Variable(["time"], times, encoding=encoding) with warnings.catch_warnings(): warnings.simplefilter("error") encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.float64 assert encoded_var.attrs["units"] == units assert encoded_var.attrs["_FillValue"] == 20.0 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_identical(var, decoded_var) encoding = dict(dtype="int64", _FillValue=20, units=new_units) var = Variable(["time"], times, encoding=encoding) with warnings.catch_warnings(): warnings.simplefilter("error") encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.int64 assert encoded_var.attrs["units"] == new_units assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit) ) assert_identical(var, decoded_var) @pytest.mark.parametrize( "dtype, fill_value", [(np.int64, 20), (np.int64, np.iinfo(np.int64).min), (np.float64, 1e30)], ) def test_roundtrip_timedelta64_nanosecond_precision( dtype: np.typing.DTypeLike, fill_value: int | float, time_unit: PDDatetimeUnitOptions, ) -> None: # test for GH7942 one_day = np.timedelta64(1, "ns") nat = np.timedelta64("nat", "ns") timedelta_values = (np.arange(5) * one_day).astype("timedelta64[ns]") timedelta_values[2] = nat timedelta_values[4] = nat encoding = dict(dtype=dtype, _FillValue=fill_value, units="nanoseconds") var = Variable(["time"], timedelta_values, encoding=encoding) encoded_var = conventions.encode_cf_variable(var) decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_times=CFDatetimeCoder(time_unit=time_unit), decode_timedelta=CFTimedeltaCoder(time_unit=time_unit), ) assert_identical(var, decoded_var) def test_roundtrip_timedelta64_nanosecond_precision_warning() -> None: # test warning if timedeltas can't be serialized faithfully one_day = np.timedelta64(1, "D") nat = np.timedelta64("nat", "ns") timedelta_values = (np.arange(5) * one_day).astype("timedelta64[ns]") timedelta_values[2] = nat timedelta_values[4] = np.timedelta64(12, "h").astype("timedelta64[ns]") units = "days" needed_units = "hours" wmsg = ( f"Timedeltas can't be serialized faithfully with requested units {units!r}. " f"Serializing with units {needed_units!r} instead." 
) encoding = dict(dtype=np.int64, _FillValue=20, units=units) var = Variable(["time"], timedelta_values, encoding=encoding) with pytest.warns(UserWarning, match=wmsg): encoded_var = conventions.encode_cf_variable(var) assert encoded_var.dtype == np.int64 assert encoded_var.attrs["units"] == needed_units assert encoded_var.attrs["_FillValue"] == 20 decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_timedelta=CFTimedeltaCoder(time_unit="ns") ) assert_identical(var, decoded_var) assert decoded_var.encoding["dtype"] == np.int64 _TEST_ROUNDTRIP_FLOAT_TIMES_TESTS = { "GH-8271": ( 20.0, np.array( ["1970-01-01 00:00:00", "1970-01-01 06:00:00", "NaT"], dtype="datetime64[ns]", ), "days since 1960-01-01", np.array([3653, 3653.25, 20.0]), ), "GH-9488-datetime64[ns]": ( 1.0e20, np.array(["2010-01-01 12:00:00", "NaT"], dtype="datetime64[ns]"), "seconds since 2010-01-01", np.array([43200, 1.0e20]), ), "GH-9488-timedelta64[ns]": ( 1.0e20, np.array([1_000_000_000, "NaT"], dtype="timedelta64[ns]"), "seconds", np.array([1.0, 1.0e20]), ), } @pytest.mark.parametrize( ("fill_value", "times", "units", "encoded_values"), _TEST_ROUNDTRIP_FLOAT_TIMES_TESTS.values(), ids=_TEST_ROUNDTRIP_FLOAT_TIMES_TESTS.keys(), ) def test_roundtrip_float_times(fill_value, times, units, encoded_values) -> None: # Regression test for GitHub issues #8271 and #9488 var = Variable( ["time"], times, encoding=dict(dtype=np.float64, _FillValue=fill_value, units=units), ) encoded_var = conventions.encode_cf_variable(var) np.testing.assert_array_equal(encoded_var, encoded_values) assert encoded_var.attrs["units"] == units assert encoded_var.attrs["_FillValue"] == fill_value decoded_var = conventions.decode_cf_variable( "foo", encoded_var, decode_timedelta=CFTimedeltaCoder(time_unit="ns") ) assert_identical(var, decoded_var) assert decoded_var.encoding["units"] == units assert decoded_var.encoding["_FillValue"] == fill_value _ENCODE_DATETIME64_VIA_DASK_TESTS = { "pandas-encoding-with-prescribed-units-and-dtype": ( "D", "days since 1700-01-01", np.dtype("int32"), ), "mixed-cftime-pandas-encoding-with-prescribed-units-and-dtype": pytest.param( "250YS", "days since 1700-01-01", np.dtype("int32"), marks=requires_cftime ), "pandas-encoding-with-default-units-and-dtype": ("250YS", None, None), } @requires_dask @pytest.mark.parametrize( ("freq", "units", "dtype"), _ENCODE_DATETIME64_VIA_DASK_TESTS.values(), ids=_ENCODE_DATETIME64_VIA_DASK_TESTS.keys(), ) def test_encode_cf_datetime_datetime64_via_dask( freq, units, dtype, time_unit: PDDatetimeUnitOptions ) -> None: import dask.array times_pd = pd.date_range(start="1700", freq=freq, periods=3, unit=time_unit) times = dask.array.from_array(times_pd, chunks=1) encoded_times, encoding_units, encoding_calendar = encode_cf_datetime( times, units, None, dtype ) assert is_duck_dask_array(encoded_times) assert encoded_times.chunks == times.chunks if units is not None and dtype is not None: assert encoding_units == units assert encoded_times.dtype == dtype else: expected_netcdf_time_unit = _numpy_to_netcdf_timeunit(time_unit) assert encoding_units == f"{expected_netcdf_time_unit} since 1970-01-01" assert encoded_times.dtype == np.dtype("int64") assert encoding_calendar == "proleptic_gregorian" decoded_times = decode_cf_datetime( encoded_times, encoding_units, encoding_calendar, time_unit=time_unit ) np.testing.assert_equal(decoded_times, times) assert decoded_times.dtype == times.dtype @requires_dask @pytest.mark.parametrize( ("range_function", "start", "units", "dtype"), [ 
(pd.date_range, "2000", None, np.dtype("int32")), (pd.date_range, "2000", "days since 2000-01-01", None), (pd.timedelta_range, "0D", None, np.dtype("int32")), (pd.timedelta_range, "0D", "days", None), ], ) def test_encode_via_dask_cannot_infer_error( range_function, start, units, dtype ) -> None: values = range_function(start=start, freq="D", periods=3) encoding = dict(units=units, dtype=dtype) variable = Variable(["time"], values, encoding=encoding).chunk({"time": 1}) with pytest.raises(ValueError, match="When encoding chunked arrays"): conventions.encode_cf_variable(variable) @requires_cftime @requires_dask @pytest.mark.parametrize( ("units", "dtype"), [("days since 1700-01-01", np.dtype("int32")), (None, None)] ) def test_encode_cf_datetime_cftime_datetime_via_dask(units, dtype) -> None: import dask.array calendar = "standard" times_idx = date_range( start="1700", freq="D", periods=3, calendar=calendar, use_cftime=True ) times = dask.array.from_array(times_idx, chunks=1) encoded_times, encoding_units, encoding_calendar = encode_cf_datetime( times, units, None, dtype ) assert is_duck_dask_array(encoded_times) assert encoded_times.chunks == times.chunks if units is not None and dtype is not None: assert encoding_units == units assert encoded_times.dtype == dtype else: assert encoding_units == "microseconds since 1970-01-01" assert encoded_times.dtype == np.int64 assert encoding_calendar == calendar decoded_times = decode_cf_datetime( encoded_times, encoding_units, encoding_calendar, use_cftime=True ) np.testing.assert_equal(decoded_times, times) @pytest.mark.parametrize( "use_cftime", [False, pytest.param(True, marks=requires_cftime)] ) @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_datetime_units_change(use_cftime, use_dask) -> None: times = date_range(start="2000", freq="12h", periods=3, use_cftime=use_cftime) encoding = dict(units="days since 2000-01-01", dtype=np.dtype("int64")) variable = Variable(["time"], times, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) with pytest.raises(ValueError, match="Times can't be serialized"): conventions.encode_cf_variable(variable).compute() else: with pytest.warns(UserWarning, match="Times can't be serialized"): encoded = conventions.encode_cf_variable(variable) if use_cftime: expected_units = "hours since 2000-01-01 00:00:00.000000" else: expected_units = "hours since 2000-01-01" assert encoded.attrs["units"] == expected_units decoded = conventions.decode_cf_variable( "name", encoded, decode_times=CFDatetimeCoder(use_cftime=use_cftime) ) assert_equal(variable, decoded) @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_datetime_precision_loss_regression_test(use_dask) -> None: # Regression test for # https://github.com/pydata/xarray/issues/9134#issuecomment-2191446463 times = date_range("2000", periods=5, freq="ns") encoding = dict(units="seconds since 1970-01-01", dtype=np.dtype("int64")) variable = Variable(["time"], times, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) with pytest.raises(ValueError, match="Times can't be serialized"): conventions.encode_cf_variable(variable).compute() else: with pytest.warns(UserWarning, match="Times can't be serialized"): encoded = conventions.encode_cf_variable(variable) decoded = conventions.decode_cf_variable("name", encoded) assert_equal(variable, decoded) @requires_dask @pytest.mark.parametrize( ("units", "dtype"), [("days", np.dtype("int32")), 
(None, None)] ) def test_encode_cf_timedelta_via_dask( units: str | None, dtype: np.dtype | None, time_unit: PDDatetimeUnitOptions ) -> None: import dask.array times_pd = pd.timedelta_range(start="0D", freq="D", periods=3, unit=time_unit) # type: ignore[call-arg,unused-ignore] times = dask.array.from_array(times_pd, chunks=1) encoded_times, encoding_units = encode_cf_timedelta(times, units, dtype) assert is_duck_dask_array(encoded_times) assert encoded_times.chunks == times.chunks if units is not None and dtype is not None: assert encoding_units == units assert encoded_times.dtype == dtype else: assert encoding_units == _numpy_to_netcdf_timeunit(time_unit) assert encoded_times.dtype == np.dtype("int64") decoded_times = decode_cf_timedelta( encoded_times, encoding_units, time_unit=time_unit ) np.testing.assert_equal(decoded_times, times) assert decoded_times.dtype == times.dtype @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_timedelta_units_change(use_dask) -> None: timedeltas = pd.timedelta_range(start="0h", freq="12h", periods=3) encoding = dict(units="days", dtype=np.dtype("int64")) variable = Variable(["time"], timedeltas, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) with pytest.raises(ValueError, match="Timedeltas can't be serialized"): conventions.encode_cf_variable(variable).compute() else: # In this case we automatically modify the encoding units to continue # encoding with integer values. with pytest.warns(UserWarning, match="Timedeltas can't be serialized"): encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["units"] == "hours" decoded = conventions.decode_cf_variable( "name", encoded, decode_timedelta=CFTimedeltaCoder(time_unit="ns") ) assert_equal(variable, decoded) @pytest.mark.parametrize("use_dask", [False, pytest.param(True, marks=requires_dask)]) def test_encode_cf_timedelta_small_dtype_missing_value(use_dask) -> None: # Regression test for GitHub issue #9134 timedeltas = np.array([1, 2, "NaT", 4], dtype="timedelta64[D]").astype( "timedelta64[ns]" ) encoding = dict(units="days", dtype=np.dtype("int16"), _FillValue=np.int16(-1)) variable = Variable(["time"], timedeltas, encoding=encoding) if use_dask: variable = variable.chunk({"time": 1}) encoded = conventions.encode_cf_variable(variable) decoded = conventions.decode_cf_variable("name", encoded, decode_timedelta=True) assert_equal(variable, decoded) _DECODE_TIMEDELTA_VIA_UNITS_TESTS = { "default": (True, None, np.dtype("timedelta64[ns]"), True), "decode_timedelta=True": (True, True, np.dtype("timedelta64[ns]"), False), "decode_timedelta=False": (True, False, np.dtype("int64"), False), "inherit-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="s"), None, np.dtype("timedelta64[s]"), True, ), "set-time_unit-via-CFTimedeltaCoder-decode_times=True": ( True, CFTimedeltaCoder(time_unit="s"), np.dtype("timedelta64[s]"), False, ), "set-time_unit-via-CFTimedeltaCoder-decode_times=False": ( False, CFTimedeltaCoder(time_unit="s"), np.dtype("timedelta64[s]"), False, ), "override-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="ns"), CFTimedeltaCoder(time_unit="s"), np.dtype("timedelta64[s]"), False, ), } @pytest.mark.parametrize( ("decode_times", "decode_timedelta", "expected_dtype", "warns"), list(_DECODE_TIMEDELTA_VIA_UNITS_TESTS.values()), ids=list(_DECODE_TIMEDELTA_VIA_UNITS_TESTS.keys()), ) def test_decode_timedelta_via_units( decode_times, decode_timedelta, expected_dtype, warns ) -> None: timedeltas = 
pd.timedelta_range(0, freq="D", periods=3) attrs = {"units": "days"} var = Variable(["time"], timedeltas, encoding=attrs) encoded = Variable(["time"], np.array([0, 1, 2]), attrs=attrs) if warns: with pytest.warns( FutureWarning, match="xarray will not decode the variable 'foo' into a timedelta64 dtype", ): decoded = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta, ) else: decoded = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta ) if decode_timedelta is False: assert_equal(encoded, decoded) else: assert_equal(var, decoded) assert decoded.dtype == expected_dtype _DECODE_TIMEDELTA_VIA_DTYPE_TESTS = { "default": (True, None, "ns", np.dtype("timedelta64[ns]")), "decode_timedelta=False": (True, False, "ns", np.dtype("int64")), "decode_timedelta=True": (True, True, "ns", np.dtype("timedelta64[ns]")), "use-original-units": (True, True, "s", np.dtype("timedelta64[s]")), "inherit-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="s"), None, "ns", np.dtype("timedelta64[s]"), ), "set-time_unit-via-CFTimedeltaCoder-decode_times=True": ( True, CFTimedeltaCoder(time_unit="s"), "ns", np.dtype("timedelta64[s]"), ), "set-time_unit-via-CFTimedeltaCoder-decode_times=False": ( False, CFTimedeltaCoder(time_unit="s"), "ns", np.dtype("timedelta64[s]"), ), "override-time_unit-from-decode_times": ( CFDatetimeCoder(time_unit="ns"), CFTimedeltaCoder(time_unit="s"), "ns", np.dtype("timedelta64[s]"), ), "decode-different-units": ( True, CFTimedeltaCoder(time_unit="us"), "s", np.dtype("timedelta64[us]"), ), } @pytest.mark.parametrize( ("decode_times", "decode_timedelta", "original_unit", "expected_dtype"), list(_DECODE_TIMEDELTA_VIA_DTYPE_TESTS.values()), ids=list(_DECODE_TIMEDELTA_VIA_DTYPE_TESTS.keys()), ) def test_decode_timedelta_via_dtype( decode_times, decode_timedelta, original_unit, expected_dtype ) -> None: timedeltas = pd.timedelta_range(0, freq="D", periods=3, unit=original_unit) # type: ignore[call-arg,unused-ignore] encoding = {"units": "days"} var = Variable(["time"], timedeltas, encoding=encoding) encoded = conventions.encode_cf_variable(var) assert encoded.attrs["dtype"] == f"timedelta64[{original_unit}]" assert encoded.attrs["units"] == encoding["units"] decoded = conventions.decode_cf_variable( "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta ) if decode_timedelta is False: assert_equal(encoded, decoded) else: assert_equal(var, decoded) assert decoded.dtype == expected_dtype def test_lazy_decode_timedelta_unexpected_dtype() -> None: attrs = {"units": "seconds"} encoded = Variable(["time"], [0, 0.5, 1], attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, decode_timedelta=CFTimedeltaCoder(time_unit="s") ) expected_dtype_upon_lazy_decoding = np.dtype("timedelta64[s]") assert decoded.dtype == expected_dtype_upon_lazy_decoding expected_dtype_upon_loading = np.dtype("timedelta64[ms]") with pytest.warns(SerializationWarning, match="Can't decode floating"): assert decoded.load().dtype == expected_dtype_upon_loading def test_lazy_decode_timedelta_error() -> None: attrs = {"units": "seconds"} encoded = Variable(["time"], [0, np.iinfo(np.int64).max, 1], attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, decode_timedelta=CFTimedeltaCoder(time_unit="ms") ) with pytest.raises(OutOfBoundsTimedelta, match="overflow"): decoded.load() @pytest.mark.parametrize( "calendar", [ "standard", pytest.param( "360_day", 
marks=pytest.mark.skipif(not has_cftime, reason="no cftime") ), ], ) def test_duck_array_decode_times(calendar) -> None: from xarray.core.indexing import LazilyIndexedArray days = LazilyIndexedArray(DuckArrayWrapper(np.array([1.0, 2.0, 3.0]))) var = Variable( ["time"], days, {"units": "days since 2001-01-01", "calendar": calendar} ) decoded = conventions.decode_cf_variable( "foo", var, decode_times=CFDatetimeCoder(use_cftime=None) ) if calendar not in _STANDARD_CALENDAR_NAMES: assert decoded.dtype == np.dtype("O") else: assert decoded.dtype == np.dtype("=M8[ns]") @pytest.mark.parametrize("decode_timedelta", [True, False]) @pytest.mark.parametrize("mask_and_scale", [True, False]) def test_decode_timedelta_mask_and_scale( decode_timedelta: bool, mask_and_scale: bool ) -> None: attrs = { "dtype": "timedelta64[ns]", "units": "nanoseconds", "_FillValue": np.int16(-1), "add_offset": 100000.0, } encoded = Variable(["time"], np.array([0, -1, 1], "int16"), attrs=attrs) decoded = conventions.decode_cf_variable( "foo", encoded, mask_and_scale=mask_and_scale, decode_timedelta=decode_timedelta ) result = conventions.encode_cf_variable(decoded, name="foo") assert_identical(encoded, result) assert encoded.dtype == result.dtype def test_decode_floating_point_timedelta_no_serialization_warning() -> None: attrs = {"units": "seconds"} encoded = Variable(["time"], [0, 0.1, 0.2], attrs=attrs) decoded = conventions.decode_cf_variable("foo", encoded, decode_timedelta=True) with assert_no_warnings(): decoded.load() def test_timedelta64_coding_via_dtype(time_unit: PDDatetimeUnitOptions) -> None: timedeltas = np.array([0, 1, "NaT"], dtype=f"timedelta64[{time_unit}]") variable = Variable(["time"], timedeltas) expected_units = _numpy_to_netcdf_timeunit(time_unit) encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["dtype"] == f"timedelta64[{time_unit}]" assert encoded.attrs["units"] == expected_units decoded = conventions.decode_cf_variable("timedeltas", encoded) assert decoded.encoding["dtype"] == np.dtype("int64") assert decoded.encoding["units"] == expected_units assert_identical(decoded, variable) assert decoded.dtype == variable.dtype reencoded = conventions.encode_cf_variable(decoded) assert_identical(reencoded, encoded) assert reencoded.dtype == encoded.dtype def test_timedelta_coding_via_dtype_non_pandas_coarse_resolution_warning() -> None: attrs = {"dtype": "timedelta64[D]", "units": "days"} encoded = Variable(["time"], [0, 1, 2], attrs=attrs) with pytest.warns(UserWarning, match="xarray only supports"): decoded = conventions.decode_cf_variable("timedeltas", encoded) expected_array = np.array([0, 1, 2], dtype="timedelta64[D]") expected_array = expected_array.astype("timedelta64[s]") expected = Variable(["time"], expected_array) assert_identical(decoded, expected) assert decoded.dtype == np.dtype("timedelta64[s]") @pytest.mark.xfail(reason="xarray does not recognize picoseconds as time-like") def test_timedelta_coding_via_dtype_non_pandas_fine_resolution_warning() -> None: attrs = {"dtype": "timedelta64[ps]", "units": "picoseconds"} encoded = Variable(["time"], [0, 1000, 2000], attrs=attrs) with pytest.warns(UserWarning, match="xarray only supports"): decoded = conventions.decode_cf_variable("timedeltas", encoded) expected_array = np.array([0, 1000, 2000], dtype="timedelta64[ps]") expected_array = expected_array.astype("timedelta64[ns]") expected = Variable(["time"], expected_array) assert_identical(decoded, expected) assert decoded.dtype == np.dtype("timedelta64[ns]") def 
test_timedelta_decode_via_dtype_invalid_encoding() -> None: attrs = {"dtype": "timedelta64[s]", "units": "seconds"} encoding = {"units": "foo"} encoded = Variable(["time"], [0, 1, 2], attrs=attrs, encoding=encoding) with pytest.raises(ValueError, match="failed to prevent"): conventions.decode_cf_variable("timedeltas", encoded) @pytest.mark.parametrize("attribute", ["dtype", "units"]) def test_timedelta_encode_via_dtype_invalid_attribute(attribute) -> None: timedeltas = pd.timedelta_range(0, freq="D", periods=3) attrs = {attribute: "foo"} variable = Variable(["time"], timedeltas, attrs=attrs) with pytest.raises(ValueError, match="failed to prevent"): conventions.encode_cf_variable(variable) @pytest.mark.parametrize( ("decode_via_units", "decode_via_dtype", "attrs", "expect_timedelta64"), [ (True, True, {"units": "seconds"}, True), (True, False, {"units": "seconds"}, True), (False, True, {"units": "seconds"}, False), (False, False, {"units": "seconds"}, False), (True, True, {"dtype": "timedelta64[s]", "units": "seconds"}, True), (True, False, {"dtype": "timedelta64[s]", "units": "seconds"}, True), (False, True, {"dtype": "timedelta64[s]", "units": "seconds"}, True), (False, False, {"dtype": "timedelta64[s]", "units": "seconds"}, False), ], ids=lambda x: f"{x!r}", ) def test_timedelta_decoding_options( decode_via_units, decode_via_dtype, attrs, expect_timedelta64 ) -> None: array = np.array([0, 1, 2], dtype=np.dtype("int64")) encoded = Variable(["time"], array, attrs=attrs) # Confirm we decode to the expected dtype. decode_timedelta = CFTimedeltaCoder( time_unit="s", decode_via_units=decode_via_units, decode_via_dtype=decode_via_dtype, ) decoded = conventions.decode_cf_variable( "foo", encoded, decode_timedelta=decode_timedelta ) if expect_timedelta64: assert decoded.dtype == np.dtype("timedelta64[s]") else: assert decoded.dtype == np.dtype("int64") # Confirm we exactly roundtrip. 
reencoded = conventions.encode_cf_variable(decoded) expected = encoded.copy() if "dtype" not in attrs and decode_via_units: expected.attrs["dtype"] = "timedelta64[s]" assert_identical(reencoded, expected) def test_timedelta_encoding_explicit_non_timedelta64_dtype() -> None: encoding = {"dtype": np.dtype("int32")} timedeltas = pd.timedelta_range(0, freq="D", periods=3) variable = Variable(["time"], timedeltas, encoding=encoding) encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["units"] == "days" assert encoded.attrs["dtype"] == "timedelta64[ns]" assert encoded.dtype == np.dtype("int32") decoded = conventions.decode_cf_variable("foo", encoded) assert_identical(decoded, variable) reencoded = conventions.encode_cf_variable(decoded) assert_identical(reencoded, encoded) assert encoded.attrs["units"] == "days" assert encoded.attrs["dtype"] == "timedelta64[ns]" assert encoded.dtype == np.dtype("int32") @pytest.mark.parametrize("mask_attribute", ["_FillValue", "missing_value"]) def test_timedelta64_coding_via_dtype_with_mask( time_unit: PDDatetimeUnitOptions, mask_attribute: str ) -> None: timedeltas = np.array([0, 1, "NaT"], dtype=f"timedelta64[{time_unit}]") mask = 10 variable = Variable(["time"], timedeltas, encoding={mask_attribute: mask}) expected_dtype = f"timedelta64[{time_unit}]" expected_units = _numpy_to_netcdf_timeunit(time_unit) encoded = conventions.encode_cf_variable(variable) assert encoded.attrs["dtype"] == expected_dtype assert encoded.attrs["units"] == expected_units assert encoded.attrs[mask_attribute] == mask assert encoded[-1] == mask decoded = conventions.decode_cf_variable("timedeltas", encoded) assert decoded.encoding["dtype"] == np.dtype("int64") assert decoded.encoding["units"] == expected_units assert decoded.encoding[mask_attribute] == mask assert np.isnat(decoded[-1]) assert_identical(decoded, variable) assert decoded.dtype == variable.dtype reencoded = conventions.encode_cf_variable(decoded) assert_identical(reencoded, encoded) assert reencoded.dtype == encoded.dtype def test_roundtrip_0size_timedelta(time_unit: PDDatetimeUnitOptions) -> None: # regression test for GitHub issue #10310 encoding = {"units": "days", "dtype": np.dtype("int64")} data = np.array([], dtype=f"=m8[{time_unit}]") decoded = Variable(["time"], data, encoding=encoding) encoded = conventions.encode_cf_variable(decoded, name="foo") assert encoded.dtype == encoding["dtype"] assert encoded.attrs["units"] == encoding["units"] decoded = conventions.decode_cf_variable("foo", encoded, decode_timedelta=True) assert decoded.dtype == np.dtype(f"=m8[{time_unit}]") with assert_no_warnings(): decoded.load() assert decoded.dtype == np.dtype("=m8[s]") assert decoded.encoding == encoding xarray-2025.09.0/xarray/tests/test_combine.py000066400000000000000000001470061505620616400210510ustar00rootroot00000000000000from __future__ import annotations from itertools import product import numpy as np import pytest from xarray import ( DataArray, Dataset, MergeError, combine_by_coords, combine_nested, concat, merge, set_options, ) from xarray.core import dtypes from xarray.structure.combine import ( _check_shape_tile_ids, _combine_all_along_first_dim, _combine_nd, _infer_concat_order_from_coords, _infer_concat_order_from_positions, _new_tile_id, ) from xarray.tests import assert_equal, assert_identical, requires_cftime from xarray.tests.test_dataset import create_test_data def assert_combined_tile_ids_equal(dict1, dict2): assert len(dict1) == len(dict2) for k in dict1.keys(): assert k in dict2.keys() 
assert_equal(dict1[k], dict2[k]) class TestTileIDsFromNestedList: def test_1d(self): ds = create_test_data input = [ds(0), ds(1)] expected = {(0,): ds(0), (1,): ds(1)} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_2d(self): ds = create_test_data input = [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]] expected = { (0, 0): ds(0), (0, 1): ds(1), (1, 0): ds(2), (1, 1): ds(3), (2, 0): ds(4), (2, 1): ds(5), } actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_3d(self): ds = create_test_data input = [ [[ds(0), ds(1)], [ds(2), ds(3)], [ds(4), ds(5)]], [[ds(6), ds(7)], [ds(8), ds(9)], [ds(10), ds(11)]], ] expected = { (0, 0, 0): ds(0), (0, 0, 1): ds(1), (0, 1, 0): ds(2), (0, 1, 1): ds(3), (0, 2, 0): ds(4), (0, 2, 1): ds(5), (1, 0, 0): ds(6), (1, 0, 1): ds(7), (1, 1, 0): ds(8), (1, 1, 1): ds(9), (1, 2, 0): ds(10), (1, 2, 1): ds(11), } actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_single_dataset(self): ds = create_test_data(0) input = [ds] expected = {(0,): ds} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_redundant_nesting(self): ds = create_test_data input = [[ds(0)], [ds(1)]] expected = {(0, 0): ds(0), (1, 0): ds(1)} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_ignore_empty_list(self): ds = create_test_data(0) input = [ds, []] expected = {(0,): ds} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_uneven_depth_input(self): # Auto_combine won't work on ragged input # but this is just to increase test coverage ds = create_test_data input = [ds(0), [ds(1), ds(2)]] expected = {(0,): ds(0), (1, 0): ds(1), (1, 1): ds(2)} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_uneven_length_input(self): # Auto_combine won't work on ragged input # but this is just to increase test coverage ds = create_test_data input = [[ds(0)], [ds(1), ds(2)]] expected = {(0, 0): ds(0), (1, 0): ds(1), (1, 1): ds(2)} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) def test_infer_from_datasets(self): ds = create_test_data input = [ds(0), ds(1)] expected = {(0,): ds(0), (1,): ds(1)} actual = _infer_concat_order_from_positions(input) assert_combined_tile_ids_equal(expected, actual) class TestTileIDsFromCoords: def test_1d(self): ds0 = Dataset({"x": [0, 1]}) ds1 = Dataset({"x": [2, 3]}) expected = {(0,): ds0, (1,): ds1} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x"] def test_2d(self): ds0 = Dataset({"x": [0, 1], "y": [10, 20, 30]}) ds1 = Dataset({"x": [2, 3], "y": [10, 20, 30]}) ds2 = Dataset({"x": [0, 1], "y": [40, 50, 60]}) ds3 = Dataset({"x": [2, 3], "y": [40, 50, 60]}) ds4 = Dataset({"x": [0, 1], "y": [70, 80, 90]}) ds5 = Dataset({"x": [2, 3], "y": [70, 80, 90]}) expected = { (0, 0): ds0, (1, 0): ds1, (0, 1): ds2, (1, 1): ds3, (0, 2): ds4, (1, 2): ds5, } actual, concat_dims = _infer_concat_order_from_coords( [ds1, ds0, ds3, ds5, ds2, ds4] ) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x", "y"] def test_no_dimension_coords(self): ds0 = Dataset({"foo": ("x", [0, 1])}) ds1 = Dataset({"foo": ("x", [2, 3])}) with pytest.raises(ValueError, 
match=r"Could not find any dimension"): _infer_concat_order_from_coords([ds1, ds0]) def test_coord_not_monotonic(self): ds0 = Dataset({"x": [0, 1]}) ds1 = Dataset({"x": [3, 2]}) with pytest.raises( ValueError, match=r"Coordinate variable x is neither monotonically increasing nor", ): _infer_concat_order_from_coords([ds1, ds0]) def test_coord_monotonically_decreasing(self): ds0 = Dataset({"x": [3, 2]}) ds1 = Dataset({"x": [1, 0]}) expected = {(0,): ds0, (1,): ds1} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x"] def test_no_concatenation_needed(self): ds = Dataset({"foo": ("x", [0, 1])}) expected = {(): ds} actual, concat_dims = _infer_concat_order_from_coords([ds]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == [] def test_2d_plus_bystander_dim(self): ds0 = Dataset({"x": [0, 1], "y": [10, 20, 30], "t": [0.1, 0.2]}) ds1 = Dataset({"x": [2, 3], "y": [10, 20, 30], "t": [0.1, 0.2]}) ds2 = Dataset({"x": [0, 1], "y": [40, 50, 60], "t": [0.1, 0.2]}) ds3 = Dataset({"x": [2, 3], "y": [40, 50, 60], "t": [0.1, 0.2]}) expected = {(0, 0): ds0, (1, 0): ds1, (0, 1): ds2, (1, 1): ds3} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0, ds3, ds2]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["x", "y"] def test_string_coords(self): ds0 = Dataset({"person": ["Alice", "Bob"]}) ds1 = Dataset({"person": ["Caroline", "Daniel"]}) expected = {(0,): ds0, (1,): ds1} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["person"] # Decided against natural sorting of string coords GH #2616 def test_lexicographic_sort_string_coords(self): ds0 = Dataset({"simulation": ["run8", "run9"]}) ds1 = Dataset({"simulation": ["run10", "run11"]}) expected = {(0,): ds1, (1,): ds0} actual, concat_dims = _infer_concat_order_from_coords([ds1, ds0]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["simulation"] def test_datetime_coords(self): ds0 = Dataset( {"time": np.array(["2000-03-06", "2000-03-07"], dtype="datetime64[ns]")} ) ds1 = Dataset( {"time": np.array(["1999-01-01", "1999-02-04"], dtype="datetime64[ns]")} ) expected = {(0,): ds1, (1,): ds0} actual, concat_dims = _infer_concat_order_from_coords([ds0, ds1]) assert_combined_tile_ids_equal(expected, actual) assert concat_dims == ["time"] @pytest.fixture(scope="module") def create_combined_ids(): return _create_combined_ids def _create_combined_ids(shape): tile_ids = _create_tile_ids(shape) nums = range(len(tile_ids)) return { tile_id: create_test_data(num) for tile_id, num in zip(tile_ids, nums, strict=True) } def _create_tile_ids(shape): tile_ids = product(*(range(i) for i in shape)) return list(tile_ids) class TestNewTileIDs: @pytest.mark.parametrize( "old_id, new_id", [((3, 0, 1), (0, 1)), ((0, 0), (0,)), ((1,), ()), ((0,), ()), ((1, 0), (0,))], ) def test_new_tile_id(self, old_id, new_id): ds = create_test_data assert _new_tile_id((old_id, ds)) == new_id def test_get_new_tile_ids(self, create_combined_ids): shape = (1, 2, 3) combined_ids = create_combined_ids(shape) expected_tile_ids = sorted(combined_ids.keys()) actual_tile_ids = _create_tile_ids(shape) assert expected_tile_ids == actual_tile_ids class TestCombineND: @pytest.mark.parametrize( "concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})] ) def test_concat_once(self, create_combined_ids, concat_dim, kwargs): shape = (2,) combined_ids = 
create_combined_ids(shape) ds = create_test_data result = _combine_all_along_first_dim( combined_ids, dim=concat_dim, data_vars="all", coords="different", compat="no_conflicts", fill_value=dtypes.NA, join="outer", combine_attrs="drop", ) expected_ds = concat([ds(0), ds(1)], dim=concat_dim, **kwargs) assert_combined_tile_ids_equal(result, {(): expected_ds}) def test_concat_only_first_dim(self, create_combined_ids): shape = (2, 3) combined_ids = create_combined_ids(shape) result = _combine_all_along_first_dim( combined_ids, dim="dim1", data_vars="all", coords="different", compat="no_conflicts", fill_value=dtypes.NA, join="outer", combine_attrs="drop", ) ds = create_test_data partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected_datasets = [partway1, partway2, partway3] expected = {(i,): ds for i, ds in enumerate(expected_datasets)} assert_combined_tile_ids_equal(result, expected) @pytest.mark.parametrize( "concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})] ) def test_concat_twice(self, create_combined_ids, concat_dim, kwargs): shape = (2, 3) combined_ids = create_combined_ids(shape) result = _combine_nd( combined_ids, concat_dims=["dim1", concat_dim], data_vars="all", coords="different", compat="no_conflicts", fill_value=dtypes.NA, join="outer", combine_attrs="drop", ) ds = create_test_data partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected = concat([partway1, partway2, partway3], **kwargs, dim=concat_dim) assert_equal(result, expected) class TestCheckShapeTileIDs: def test_check_depths(self): ds = create_test_data(0) combined_tile_ids = {(0,): ds, (0, 1): ds} with pytest.raises( ValueError, match=r"sub-lists do not have consistent depths" ): _check_shape_tile_ids(combined_tile_ids) def test_check_lengths(self): ds = create_test_data(0) combined_tile_ids = {(0, 0): ds, (0, 1): ds, (0, 2): ds, (1, 0): ds, (1, 1): ds} with pytest.raises( ValueError, match=r"sub-lists do not have consistent lengths" ): _check_shape_tile_ids(combined_tile_ids) class TestNestedCombine: def test_nested_concat(self): objs = [Dataset({"x": [0]}), Dataset({"x": [1]})] expected = Dataset({"x": [0, 1]}) actual = combine_nested(objs, concat_dim="x") assert_identical(expected, actual) actual = combine_nested(objs, concat_dim=["x"]) assert_identical(expected, actual) actual = combine_nested([actual], concat_dim=None) assert_identical(expected, actual) actual = combine_nested([actual], concat_dim="x") assert_identical(expected, actual) objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})] actual = combine_nested(objs, concat_dim="x") expected = Dataset({"x": [0, 1, 2]}) assert_identical(expected, actual) # ensure combine_nested handles non-sorted variables objs = [ Dataset({"x": ("a", [0]), "y": ("a", [0])}), Dataset({"y": ("a", [1]), "x": ("a", [1])}), ] actual = combine_nested(objs, concat_dim="a") expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1])}) assert_identical(expected, actual) objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1]})] actual = combine_nested(objs, concat_dim="x") expected = Dataset({"x": [0, 1], "y": [0]}) assert_identical(expected, actual) @pytest.mark.parametrize( "join, expected", [ ("outer", Dataset({"x": [0, 1], "y": [0, 1]})), ("inner", Dataset({"x": [0, 1], "y": []})), ("left", Dataset({"x": [0, 1], "y": [0]})), ("right", Dataset({"x": [0, 1], "y": [1]})), ], ) def 
test_combine_nested_join(self, join, expected): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] actual = combine_nested(objs, concat_dim="x", join=join) assert_identical(expected, actual) def test_combine_nested_join_exact(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] with pytest.raises(ValueError, match=r"cannot align.*join.*exact"): combine_nested(objs, concat_dim="x", join="exact") def test_empty_input(self): assert_identical(Dataset(), combine_nested([], concat_dim="x")) # Fails because of concat's weird treatment of dimension coords, see #2975 @pytest.mark.xfail def test_nested_concat_too_many_dims_at_once(self): objs = [Dataset({"x": [0], "y": [1]}), Dataset({"y": [0], "x": [1]})] with pytest.raises(ValueError, match="not equal across datasets"): combine_nested(objs, concat_dim="x", coords="minimal") def test_nested_concat_along_new_dim(self): objs = [ Dataset({"a": ("x", [10]), "x": [0]}), Dataset({"a": ("x", [20]), "x": [0]}), ] expected = Dataset({"a": (("t", "x"), [[10], [20]]), "x": [0]}) actual = combine_nested(objs, data_vars="all", concat_dim="t") assert_identical(expected, actual) # Same but with a DataArray as new dim, see GH #1988 and #2647 dim = DataArray([100, 150], name="baz", dims="baz") expected = Dataset( {"a": (("baz", "x"), [[10], [20]]), "x": [0], "baz": [100, 150]} ) actual = combine_nested(objs, data_vars="all", concat_dim=dim) assert_identical(expected, actual) def test_nested_merge_with_self(self): data = Dataset({"x": 0}) actual = combine_nested([data, data, data], concat_dim=None) assert_identical(data, actual) def test_nested_merge_with_overlapping_values(self): ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]}) expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]}) with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): actual = combine_nested([ds1, ds2], join="outer", concat_dim=None) assert_identical(expected, actual) actual = combine_nested( [ds1, ds2], join="outer", compat="no_conflicts", concat_dim=None ) assert_identical(expected, actual) actual = combine_nested( [ds1, ds2], join="outer", compat="no_conflicts", concat_dim=[None] ) assert_identical(expected, actual) def test_nested_merge_with_nan_no_conflicts(self): tmp1 = Dataset({"x": 0}) tmp2 = Dataset({"x": np.nan}) actual = combine_nested([tmp1, tmp2], compat="no_conflicts", concat_dim=None) assert_identical(tmp1, actual) with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): combine_nested([tmp1, tmp2], concat_dim=None) actual = combine_nested([tmp1, tmp2], compat="no_conflicts", concat_dim=[None]) assert_identical(tmp1, actual) def test_nested_merge_with_concat_dim_explicitly_provided(self): # Test the issue reported in GH #1988 objs = [Dataset({"x": 0, "y": 1})] dim = DataArray([100], name="baz", dims="baz") actual = combine_nested(objs, concat_dim=[dim], data_vars="all") expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]}) assert_identical(expected, actual) def test_nested_merge_with_non_scalars(self): # Just making sure that auto_combine is doing what is # expected for non-scalar values, too. 
objs = [Dataset({"x": ("z", [0, 1]), "y": ("z", [1, 2])})] dim = DataArray([100], name="baz", dims="baz") actual = combine_nested(objs, concat_dim=[dim], data_vars="all") expected = Dataset( {"x": (("baz", "z"), [[0, 1]]), "y": (("baz", "z"), [[1, 2]])}, {"baz": [100]}, ) assert_identical(expected, actual) def test_concat_multiple_dims(self): objs = [ [Dataset({"a": (("x", "y"), [[0]])}), Dataset({"a": (("x", "y"), [[1]])})], [Dataset({"a": (("x", "y"), [[2]])}), Dataset({"a": (("x", "y"), [[3]])})], ] actual = combine_nested(objs, concat_dim=["x", "y"]) expected = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])}) assert_identical(expected, actual) def test_concat_name_symmetry(self): """Inspired by the discussion on GH issue #2777""" da1 = DataArray(name="a", data=[[0]], dims=["x", "y"]) da2 = DataArray(name="b", data=[[1]], dims=["x", "y"]) da3 = DataArray(name="a", data=[[2]], dims=["x", "y"]) da4 = DataArray(name="b", data=[[3]], dims=["x", "y"]) x_first = combine_nested([[da1, da2], [da3, da4]], concat_dim=["x", "y"]) y_first = combine_nested([[da1, da3], [da2, da4]], concat_dim=["y", "x"]) assert_identical(x_first, y_first) def test_concat_one_dim_merge_another(self): data = create_test_data(add_attrs=False) data1 = data.copy(deep=True) data2 = data.copy(deep=True) objs = [ [data1.var1.isel(dim2=slice(4)), data2.var1.isel(dim2=slice(4, 9))], [data1.var2.isel(dim2=slice(4)), data2.var2.isel(dim2=slice(4, 9))], ] expected = data[["var1", "var2"]] actual = combine_nested(objs, concat_dim=[None, "dim2"]) assert_identical(expected, actual) def test_auto_combine_2d(self): ds = create_test_data partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected = concat([partway1, partway2, partway3], data_vars="all", dim="dim2") datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]] result = combine_nested( datasets, data_vars="all", concat_dim=["dim1", "dim2"], ) assert_equal(result, expected) def test_auto_combine_2d_combine_attrs_kwarg(self): ds = lambda x: create_test_data(x, add_attrs=False) partway1 = concat([ds(0), ds(3)], dim="dim1") partway2 = concat([ds(1), ds(4)], dim="dim1") partway3 = concat([ds(2), ds(5)], dim="dim1") expected = concat([partway1, partway2, partway3], data_vars="all", dim="dim2") expected_dict = {} expected_dict["drop"] = expected.copy(deep=True) expected_dict["drop"].attrs = {} expected_dict["no_conflicts"] = expected.copy(deep=True) expected_dict["no_conflicts"].attrs = { "a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, } expected_dict["override"] = expected.copy(deep=True) expected_dict["override"].attrs = {"a": 1} f = lambda attrs, context: attrs[0] expected_dict[f] = expected.copy(deep=True) expected_dict[f].attrs = f([{"a": 1}], None) datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]] datasets[0][0].attrs = {"a": 1} datasets[0][1].attrs = {"a": 1, "b": 2} datasets[0][2].attrs = {"a": 1, "c": 3} datasets[1][0].attrs = {"a": 1, "d": 4} datasets[1][1].attrs = {"a": 1, "e": 5} datasets[1][2].attrs = {"a": 1, "f": 6} with pytest.raises(ValueError, match=r"combine_attrs='identical'"): result = combine_nested( datasets, concat_dim=["dim1", "dim2"], data_vars="all", combine_attrs="identical", ) for combine_attrs, expected in expected_dict.items(): result = combine_nested( datasets, concat_dim=["dim1", "dim2"], data_vars="all", combine_attrs=combine_attrs, ) assert_identical(result, expected) def test_combine_nested_missing_data_new_dim(self): # Your data includes "time" and 
"station" dimensions, and each year's # data has a different set of stations. datasets = [ Dataset({"a": ("x", [2, 3]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "x": [0, 1]}), ] expected = Dataset( {"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]} ) actual = combine_nested(datasets, data_vars="all", join="outer", concat_dim="t") assert_identical(expected, actual) def test_invalid_hypercube_input(self): ds = create_test_data datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4)]] with pytest.raises( ValueError, match=r"sub-lists do not have consistent lengths" ): combine_nested(datasets, concat_dim=["dim1", "dim2"]) datasets = [[ds(0), ds(1)], [[ds(3), ds(4)]]] with pytest.raises( ValueError, match=r"sub-lists do not have consistent depths" ): combine_nested(datasets, concat_dim=["dim1", "dim2"]) datasets = [[ds(0), ds(1)], [ds(3), ds(4)]] with pytest.raises(ValueError, match=r"concat_dims has length"): combine_nested(datasets, concat_dim=["dim1"]) def test_merge_one_dim_concat_another(self): objs = [ [Dataset({"foo": ("x", [0, 1])}), Dataset({"bar": ("x", [10, 20])})], [Dataset({"foo": ("x", [2, 3])}), Dataset({"bar": ("x", [30, 40])})], ] expected = Dataset({"foo": ("x", [0, 1, 2, 3]), "bar": ("x", [10, 20, 30, 40])}) actual = combine_nested(objs, concat_dim=["x", None], compat="equals") assert_identical(expected, actual) # Proving it works symmetrically objs = [ [Dataset({"foo": ("x", [0, 1])}), Dataset({"foo": ("x", [2, 3])})], [Dataset({"bar": ("x", [10, 20])}), Dataset({"bar": ("x", [30, 40])})], ] actual = combine_nested(objs, concat_dim=[None, "x"], compat="equals") assert_identical(expected, actual) def test_combine_concat_over_redundant_nesting(self): objs = [[Dataset({"x": [0]}), Dataset({"x": [1]})]] actual = combine_nested(objs, concat_dim=[None, "x"]) expected = Dataset({"x": [0, 1]}) assert_identical(expected, actual) objs = [[Dataset({"x": [0]})], [Dataset({"x": [1]})]] actual = combine_nested(objs, concat_dim=["x", None]) expected = Dataset({"x": [0, 1]}) assert_identical(expected, actual) objs = [[Dataset({"x": [0]})]] actual = combine_nested(objs, concat_dim=[None, None]) expected = Dataset({"x": [0]}) assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}]) def test_combine_nested_fill_value(self, fill_value): datasets = [ Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1]), "x": [0, 1]}), ] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_a = fill_value_b = np.nan elif isinstance(fill_value, dict): fill_value_a = fill_value["a"] fill_value_b = fill_value["b"] else: fill_value_a = fill_value_b = fill_value expected = Dataset( { "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]), "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]), }, {"x": [0, 1, 2]}, ) actual = combine_nested( datasets, concat_dim="t", data_vars="all", join="outer", fill_value=fill_value, ) assert_identical(expected, actual) def test_combine_nested_unnamed_data_arrays(self): unnamed_array = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") actual = combine_nested([unnamed_array], concat_dim="x") expected = unnamed_array assert_identical(expected, actual) unnamed_array1 = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") unnamed_array2 = DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_nested([unnamed_array1, unnamed_array2], 
concat_dim="x") expected = DataArray( data=[1.0, 2.0, 3.0, 4.0], coords={"x": [0, 1, 2, 3]}, dims="x" ) assert_identical(expected, actual) da1 = DataArray(data=[[0.0]], coords={"x": [0], "y": [0]}, dims=["x", "y"]) da2 = DataArray(data=[[1.0]], coords={"x": [0], "y": [1]}, dims=["x", "y"]) da3 = DataArray(data=[[2.0]], coords={"x": [1], "y": [0]}, dims=["x", "y"]) da4 = DataArray(data=[[3.0]], coords={"x": [1], "y": [1]}, dims=["x", "y"]) objs = [[da1, da2], [da3, da4]] expected = DataArray( data=[[0.0, 1.0], [2.0, 3.0]], coords={"x": [0, 1], "y": [0, 1]}, dims=["x", "y"], ) actual = combine_nested(objs, concat_dim=["x", "y"]) assert_identical(expected, actual) # TODO aijams - Determine if this test is appropriate. def test_nested_combine_mixed_datasets_arrays(self): objs = [ DataArray([0, 1], dims=("x"), coords=({"x": [0, 1]})), Dataset({"x": [2, 3]}), ] with pytest.raises( ValueError, match=r"Can't combine datasets with unnamed arrays." ): combine_nested(objs, "x") class TestCombineDatasetsbyCoords: def test_combine_by_coords(self): objs = [Dataset({"x": [0]}), Dataset({"x": [1]})] actual = combine_by_coords(objs) expected = Dataset({"x": [0, 1]}) assert_identical(expected, actual) actual = combine_by_coords([actual]) assert_identical(expected, actual) objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})] actual = combine_by_coords(objs) expected = Dataset({"x": [0, 1, 2]}) assert_identical(expected, actual) def test_combine_by_coords_handles_non_sorted_variables(self): # ensure auto_combine handles non-sorted variables objs = [ Dataset({"x": ("a", [0]), "y": ("a", [0]), "a": [0]}), Dataset({"x": ("a", [1]), "y": ("a", [1]), "a": [1]}), ] actual = combine_by_coords(objs, join="outer") expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1]), "a": [0, 1]}) assert_identical(expected, actual) def test_combine_by_coords_multiple_variables(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})] actual = combine_by_coords(objs, join="outer") expected = Dataset({"x": [0, 1], "y": [0, 1]}) assert_equal(actual, expected) def test_combine_by_coords_for_scalar_variables(self): objs = [Dataset({"x": 0}), Dataset({"x": 1})] with pytest.raises( ValueError, match=r"Could not find any dimension coordinates" ): combine_by_coords(objs) def test_combine_by_coords_requires_coord_or_index(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [0]})] with pytest.raises( ValueError, match=r"Every dimension requires a corresponding 1D coordinate and index", ): combine_by_coords(objs) def test_empty_input(self): assert_identical(Dataset(), combine_by_coords([])) @pytest.mark.parametrize( "join, expected", [ ("outer", Dataset({"x": [0, 1], "y": [0, 1]})), ("inner", Dataset({"x": [0, 1], "y": []})), ("left", Dataset({"x": [0, 1], "y": [0]})), ("right", Dataset({"x": [0, 1], "y": [1]})), ], ) def test_combine_coords_join(self, join, expected): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] actual = combine_nested(objs, concat_dim="x", join=join) assert_identical(expected, actual) def test_combine_coords_join_exact(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})] with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*"): combine_nested(objs, concat_dim="x", join="exact") @pytest.mark.parametrize( "combine_attrs, expected", [ ("drop", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={})), ( "no_conflicts", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}), ), ("override", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1})), ( 
lambda attrs, context: attrs[1], Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}), ), ], ) def test_combine_coords_combine_attrs(self, combine_attrs, expected): objs = [ Dataset({"x": [0], "y": [0]}, attrs={"a": 1}), Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 2}), ] actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs=combine_attrs ) assert_identical(expected, actual) if combine_attrs == "no_conflicts": objs[1].attrs["a"] = 2 with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"): actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs=combine_attrs ) def test_combine_coords_combine_attrs_identical(self): objs = [ Dataset({"x": [0], "y": [0]}, attrs={"a": 1}), Dataset({"x": [1], "y": [1]}, attrs={"a": 1}), ] expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1}) actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs="identical" ) assert_identical(expected, actual) objs[1].attrs["b"] = 2 with pytest.raises(ValueError, match=r"combine_attrs='identical'"): actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs="identical" ) def test_combine_nested_combine_attrs_drop_conflicts(self): objs = [ Dataset({"x": [0], "y": [0]}, attrs={"a": 1, "b": 2, "c": 3}), Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 0, "d": 3}), ] expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "c": 3, "d": 3}) actual = combine_nested( objs, concat_dim="x", join="outer", combine_attrs="drop_conflicts" ) assert_identical(expected, actual) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 1, "b": 2, "c": 3}, {"b": 1, "c": 3, "d": 4}, {"a": 1, "c": 3, "d": 4}, False, ), ], ) def test_combine_nested_combine_attrs_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" data1 = Dataset( { "a": ("x", [1, 2], attrs1), "b": ("x", [3, -1], attrs1), "x": ("x", [0, 1], attrs1), } ) data2 = Dataset( { "a": ("x", [2, 3], attrs2), "b": ("x", [-2, 1], attrs2), "x": ("x", [2, 3], attrs2), } ) if expect_exception: with pytest.raises(MergeError, match="combine_attrs"): combine_by_coords([data1, data2], combine_attrs=combine_attrs) else: actual = combine_by_coords([data1, data2], combine_attrs=combine_attrs) expected = Dataset( { "a": ("x", [1, 2, 2, 3], expected_attrs), "b": ("x", [3, -1, -2, 1], expected_attrs), }, {"x": ("x", [0, 1, 2, 3], expected_attrs)}, ) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 
3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 1, "b": 2, "c": 3}, {"b": 1, "c": 3, "d": 4}, {"a": 1, "c": 3, "d": 4}, False, ), ], ) def test_combine_by_coords_combine_attrs_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" data1 = Dataset( {"x": ("a", [0], attrs1), "y": ("a", [0], attrs1), "a": ("a", [0], attrs1)} ) data2 = Dataset( {"x": ("a", [1], attrs2), "y": ("a", [1], attrs2), "a": ("a", [1], attrs2)} ) if expect_exception: with pytest.raises(MergeError, match="combine_attrs"): combine_by_coords([data1, data2], combine_attrs=combine_attrs) else: actual = combine_by_coords([data1, data2], combine_attrs=combine_attrs) expected = Dataset( { "x": ("a", [0, 1], expected_attrs), "y": ("a", [0, 1], expected_attrs), "a": ("a", [0, 1], expected_attrs), } ) assert_identical(actual, expected) def test_infer_order_from_coords(self): data = create_test_data() objs = [data.isel(dim2=slice(4, 9)), data.isel(dim2=slice(4))] actual = combine_by_coords(objs, data_vars="all") expected = data assert expected.broadcast_equals(actual) with set_options(use_new_combine_kwarg_defaults=True): actual = combine_by_coords(objs) assert_identical(actual, expected) def test_combine_leaving_bystander_dimensions(self): # Check non-monotonic bystander dimension coord doesn't raise # ValueError on combine (https://github.com/pydata/xarray/issues/3150) ycoord = ["a", "c", "b"] data = np.random.rand(7, 3) ds1 = Dataset( data_vars=dict(data=(["x", "y"], data[:3, :])), coords=dict(x=[1, 2, 3], y=ycoord), ) ds2 = Dataset( data_vars=dict(data=(["x", "y"], data[3:, :])), coords=dict(x=[4, 5, 6, 7], y=ycoord), ) expected = Dataset( data_vars=dict(data=(["x", "y"], data)), coords=dict(x=[1, 2, 3, 4, 5, 6, 7], y=ycoord), ) actual = combine_by_coords((ds1, ds2)) assert_identical(expected, actual) def test_combine_by_coords_previously_failed(self): # In the above scenario, one file is missing, containing the data for # one year's data for one variable. 
datasets = [ Dataset({"a": ("x", [0]), "x": [0]}), Dataset({"b": ("x", [0]), "x": [0]}), Dataset({"a": ("x", [1]), "x": [1]}), ] expected = Dataset({"a": ("x", [0, 1]), "b": ("x", [0, np.nan])}, {"x": [0, 1]}) actual = combine_by_coords(datasets, join="outer") assert_identical(expected, actual) def test_combine_by_coords_still_fails(self): # concat can't handle new variables (yet): # https://github.com/pydata/xarray/issues/508 datasets = [Dataset({"x": 0}, {"y": 0}), Dataset({"x": 1}, {"y": 1, "z": 1})] with pytest.raises(ValueError): combine_by_coords(datasets, "y") def test_combine_by_coords_no_concat(self): objs = [Dataset({"x": 0}), Dataset({"y": 1})] actual = combine_by_coords(objs) expected = Dataset({"x": 0, "y": 1}) assert_identical(expected, actual) objs = [Dataset({"x": 0, "y": 1}), Dataset({"y": np.nan, "z": 2})] actual = combine_by_coords(objs, compat="no_conflicts") expected = Dataset({"x": 0, "y": 1, "z": 2}) assert_identical(expected, actual) def test_check_for_impossible_ordering(self): ds0 = Dataset({"x": [0, 1, 5]}) ds1 = Dataset({"x": [2, 3]}) with pytest.raises( ValueError, match=r"does not have monotonic global indexes along dimension x", ): combine_by_coords([ds1, ds0]) def test_combine_by_coords_incomplete_hypercube(self): # test that this succeeds with default fill_value x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]}) x2 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [1], "x": [0]}) x3 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [1]}) actual = combine_by_coords([x1, x2, x3], join="outer") expected = Dataset( {"a": (("y", "x"), [[1, 1], [1, np.nan]])}, coords={"y": [0, 1], "x": [0, 1]}, ) assert_identical(expected, actual) # test that this fails if fill_value is None with pytest.raises( ValueError, match="supplied objects do not form a hypercube" ): combine_by_coords([x1, x2, x3], join="outer", fill_value=None) def test_combine_by_coords_override_order(self) -> None: # regression test for https://github.com/pydata/xarray/issues/8828 x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]}) x2 = Dataset( {"a": (("y", "x"), [[2]]), "b": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]}, ) actual = combine_by_coords([x1, x2], compat="override") assert_equal(actual["a"], actual["b"]) assert_equal(actual["a"], x1["a"]) actual = combine_by_coords([x2, x1], compat="override") assert_equal(actual["a"], x2["a"]) class TestCombineMixedObjectsbyCoords: def test_combine_by_coords_mixed_unnamed_dataarrays(self): named_da = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") unnamed_da = DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") with pytest.raises( ValueError, match="Can't automatically combine unnamed DataArrays with" ): combine_by_coords([named_da, unnamed_da]) da = DataArray([0, 1], dims="x", coords=({"x": [0, 1]})) ds = Dataset({"x": [2, 3]}) with pytest.raises( ValueError, match="Can't automatically combine unnamed DataArrays with", ): combine_by_coords([da, ds]) def test_combine_coords_mixed_datasets_named_dataarrays(self): da = DataArray(name="a", data=[4, 5], dims="x", coords=({"x": [0, 1]})) ds = Dataset({"b": ("x", [2, 3])}) actual = combine_by_coords([da, ds]) expected = Dataset( {"a": ("x", [4, 5]), "b": ("x", [2, 3])}, coords={"x": ("x", [0, 1])} ) assert_identical(expected, actual) def test_combine_by_coords_all_unnamed_dataarrays(self): unnamed_array = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") actual = combine_by_coords([unnamed_array]) expected = unnamed_array 
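# Illustrative sketch (not one of the tests above): combine_by_coords orders
# the pieces by their coordinate values rather than by the order they are
# passed in, which is why the inputs in these tests can be given "backwards".
# Only the public xarray API is assumed; the names below are made up.
def _sketch_combine_by_coords_input_order_is_irrelevant():
    lo = Dataset({"a": ("x", [0, 1])}, coords={"x": [0, 1]})
    hi = Dataset({"a": ("x", [2, 3])}, coords={"x": [2, 3]})
    # Passing the pieces in either order yields x == [0, 1, 2, 3].
    assert_identical(
        combine_by_coords([hi, lo]),
        combine_by_coords([lo, hi]),
    )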
assert_identical(expected, actual) unnamed_array1 = DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") unnamed_array2 = DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_by_coords([unnamed_array1, unnamed_array2]) expected = DataArray( data=[1.0, 2.0, 3.0, 4.0], coords={"x": [0, 1, 2, 3]}, dims="x" ) assert_identical(expected, actual) def test_combine_by_coords_all_named_dataarrays(self): named_da = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") actual = combine_by_coords([named_da]) expected = named_da.to_dataset() assert_identical(expected, actual) named_da1 = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") named_da2 = DataArray(name="b", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_by_coords([named_da1, named_da2], join="outer") expected = Dataset( { "a": DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"), "b": DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"), } ) assert_identical(expected, actual) def test_combine_by_coords_all_dataarrays_with_the_same_name(self): named_da1 = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x") named_da2 = DataArray(name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x") actual = combine_by_coords([named_da1, named_da2], join="outer") expected = merge([named_da1, named_da2], compat="no_conflicts", join="outer") assert_identical(expected, actual) class TestNewDefaults: def test_concat_along_existing_dim(self): concat_dim = "dim1" ds = create_test_data with set_options(use_new_combine_kwarg_defaults=False): old = concat([ds(0), ds(1)], dim=concat_dim) with set_options(use_new_combine_kwarg_defaults=True): new = concat([ds(0), ds(1)], dim=concat_dim) assert_identical(old, new) def test_concat_along_new_dim(self): concat_dim = "new_dim" ds = create_test_data with set_options(use_new_combine_kwarg_defaults=False): old = concat([ds(0), ds(1)], dim=concat_dim) with set_options(use_new_combine_kwarg_defaults=True): new = concat([ds(0), ds(1)], dim=concat_dim) assert concat_dim in old.dims assert concat_dim in new.dims def test_nested_merge_with_overlapping_values(self): ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]}) expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = combine_nested([ds1, ds2], concat_dim=None) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): combine_nested([ds1, ds2], concat_dim=None) assert_identical(old, expected) def test_nested_merge_with_nan_order_matters(self): ds1 = Dataset({"x": 0}) ds2 = Dataset({"x": np.nan}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = combine_nested([ds1, ds2], concat_dim=None) with set_options(use_new_combine_kwarg_defaults=True): new = combine_nested([ds1, ds2], concat_dim=None) assert_identical(ds1, old) assert_identical(old, new) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = combine_nested([ds2, ds1], concat_dim=None) with 
set_options(use_new_combine_kwarg_defaults=True): new = combine_nested([ds2, ds1], concat_dim=None) assert_identical(ds1, old) with pytest.raises(AssertionError): assert_identical(old, new) def test_nested_merge_with_concat_dim_explicitly_provided(self): # Test the issue reported in GH #1988 objs = [Dataset({"x": 0, "y": 1})] dim = DataArray([100], name="baz", dims="baz") expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]}) with set_options(use_new_combine_kwarg_defaults=False): old = combine_nested(objs, concat_dim=dim) with set_options(use_new_combine_kwarg_defaults=True): new = combine_nested(objs, concat_dim=dim) assert_identical(expected, old) assert_identical(old, new) def test_combine_nested_missing_data_new_dim(self): # Your data includes "time" and "station" dimensions, and each year's # data has a different set of stations. datasets = [ Dataset({"a": ("x", [2, 3]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "x": [0, 1]}), ] expected = Dataset( {"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]} ) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): old = combine_nested(datasets, concat_dim="t") with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): combine_nested(datasets, concat_dim="t") new = combine_nested(datasets, concat_dim="t", join="outer") assert_identical(expected, old) assert_identical(expected, new) def test_combine_by_coords_multiple_variables(self): objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})] expected = Dataset({"x": [0, 1], "y": [0, 1]}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): old = combine_by_coords(objs) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): combine_by_coords(objs) assert_identical(old, expected) @requires_cftime def test_combine_by_coords_distant_cftime_dates(): # Regression test for https://github.com/pydata/xarray/issues/3535 import cftime time_1 = [cftime.DatetimeGregorian(4500, 12, 31)] time_2 = [cftime.DatetimeGregorian(4600, 12, 31)] time_3 = [cftime.DatetimeGregorian(5100, 12, 31)] da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset() da_2 = DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset() da_3 = DataArray([2], dims=["time"], coords=[time_3], name="a").to_dataset() result = combine_by_coords([da_1, da_2, da_3]) expected_time = np.concatenate([time_1, time_2, time_3]) expected = DataArray( [0, 1, 2], dims=["time"], coords=[expected_time], name="a" ).to_dataset() assert_identical(result, expected) @requires_cftime def test_combine_by_coords_raises_for_differing_calendars(): # previously failed with uninformative StopIteration instead of TypeError # https://github.com/pydata/xarray/issues/4495 import cftime time_1 = [cftime.DatetimeGregorian(2000, 1, 1)] time_2 = [cftime.DatetimeProlepticGregorian(2001, 1, 1)] da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset() da_2 = DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset() error_msg = ( "Cannot combine along dimension 'time' with mixed types." " Found:.*" " If importing data directly from a file then setting" " `use_cftime=True` may fix this issue." 
) with pytest.raises(TypeError, match=error_msg): combine_by_coords([da_1, da_2]) def test_combine_by_coords_raises_for_differing_types(): # str and byte cannot be compared da_1 = DataArray([0], dims=["time"], coords=[["a"]], name="a").to_dataset() da_2 = DataArray([1], dims=["time"], coords=[[b"b"]], name="a").to_dataset() with pytest.raises( TypeError, match=r"Cannot combine along dimension 'time' with mixed types." ): combine_by_coords([da_1, da_2]) xarray-2025.09.0/xarray/tests/test_computation.py000066400000000000000000002535401505620616400220000ustar00rootroot00000000000000from __future__ import annotations import functools import operator import pickle import numpy as np import pandas as pd import pytest from numpy.testing import assert_allclose, assert_array_equal import xarray as xr from xarray.computation.apply_ufunc import ( _UFuncSignature, apply_ufunc, broadcast_compat_data, collect_dict_values, join_dict_keys, ordered_set_intersection, ordered_set_union, unified_dim_sizes, ) from xarray.core.utils import result_name from xarray.structure.alignment import broadcast from xarray.tests import ( has_dask, raise_if_dask_computes, requires_cftime, requires_dask, ) def assert_identical(a, b): """A version of this function which accepts numpy arrays""" __tracebackhide__ = True from xarray.testing import assert_identical as assert_identical_ if hasattr(a, "identical"): assert_identical_(a, b) else: assert_array_equal(a, b) def test_signature_properties() -> None: sig = _UFuncSignature([["x"], ["x", "y"]], [["z"]]) assert sig.input_core_dims == (("x",), ("x", "y")) assert sig.output_core_dims == (("z",),) assert sig.all_input_core_dims == frozenset(["x", "y"]) assert sig.all_output_core_dims == frozenset(["z"]) assert sig.num_inputs == 2 assert sig.num_outputs == 1 assert str(sig) == "(x),(x,y)->(z)" assert sig.to_gufunc_string() == "(dim0),(dim0,dim1)->(dim2)" assert ( sig.to_gufunc_string(exclude_dims=set("x")) == "(dim0_0),(dim0_1,dim1)->(dim2)" ) # dimension names matter assert _UFuncSignature([["x"]]) != _UFuncSignature([["y"]]) def test_result_name() -> None: class Named: def __init__(self, name=None): self.name = name assert result_name([1, 2]) is None assert result_name([Named()]) is None assert result_name([Named("foo"), 2]) == "foo" assert result_name([Named("foo"), Named("bar")]) is None assert result_name([Named("foo"), Named()]) is None def test_ordered_set_union() -> None: assert list(ordered_set_union([[1, 2]])) == [1, 2] assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2] assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3] def test_ordered_set_intersection() -> None: assert list(ordered_set_intersection([[1, 2]])) == [1, 2] assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2] assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1] assert list(ordered_set_intersection([[1, 2], [2]])) == [2] def test_join_dict_keys() -> None: dicts = [dict.fromkeys(keys) for keys in [["x", "y"], ["y", "z"]]] assert list(join_dict_keys(dicts, "left")) == ["x", "y"] assert list(join_dict_keys(dicts, "right")) == ["y", "z"] assert list(join_dict_keys(dicts, "inner")) == ["y"] assert list(join_dict_keys(dicts, "outer")) == ["x", "y", "z"] with pytest.raises(ValueError): join_dict_keys(dicts, "exact") with pytest.raises(KeyError): join_dict_keys(dicts, "foobar") def test_collect_dict_values() -> None: dicts = [{"x": 1, "y": 2, "z": 3}, {"z": 4}, 5] expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]] collected = collect_dict_values(dicts, ["x", "y", "z"], 
fill_value=0) assert collected == expected def identity(x): return x def test_apply_identity() -> None: array = np.arange(10) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) apply_identity = functools.partial(apply_ufunc, identity) assert_identical(array, apply_identity(array)) assert_identical(variable, apply_identity(variable)) assert_identical(data_array, apply_identity(data_array)) assert_identical(data_array, apply_identity(data_array.groupby("x"))) assert_identical(dataset, apply_identity(dataset)) assert_identical(dataset, apply_identity(dataset.groupby("x"))) def add(a, b): return apply_ufunc(operator.add, a, b) def test_apply_two_inputs() -> None: array = np.array([1, 2, 3]) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) zero_array = np.zeros_like(array) zero_variable = xr.Variable("x", zero_array) zero_data_array = xr.DataArray(zero_variable, [("x", -array)]) zero_dataset = xr.Dataset({"y": zero_variable}, {"x": -array}) assert_identical(array, add(array, zero_array)) assert_identical(array, add(zero_array, array)) assert_identical(variable, add(variable, zero_array)) assert_identical(variable, add(variable, zero_variable)) assert_identical(variable, add(zero_array, variable)) assert_identical(variable, add(zero_variable, variable)) assert_identical(data_array, add(data_array, zero_array)) assert_identical(data_array, add(data_array, zero_variable)) assert_identical(data_array, add(data_array, zero_data_array)) assert_identical(data_array, add(zero_array, data_array)) assert_identical(data_array, add(zero_variable, data_array)) assert_identical(data_array, add(zero_data_array, data_array)) assert_identical(dataset, add(dataset, zero_array)) assert_identical(dataset, add(dataset, zero_variable)) assert_identical(dataset, add(dataset, zero_data_array)) assert_identical(dataset, add(dataset, zero_dataset)) assert_identical(dataset, add(zero_array, dataset)) assert_identical(dataset, add(zero_variable, dataset)) assert_identical(dataset, add(zero_data_array, dataset)) assert_identical(dataset, add(zero_dataset, dataset)) assert_identical(data_array, add(data_array.groupby("x"), zero_data_array)) assert_identical(data_array, add(zero_data_array, data_array.groupby("x"))) assert_identical(dataset, add(data_array.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_dataset, data_array.groupby("x"))) assert_identical(dataset, add(dataset.groupby("x"), zero_data_array)) assert_identical(dataset, add(dataset.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_data_array, dataset.groupby("x"))) assert_identical(dataset, add(zero_dataset, dataset.groupby("x"))) def test_apply_1d_and_0d() -> None: array = np.array([1, 2, 3]) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) zero_array = 0 zero_variable = xr.Variable((), zero_array) zero_data_array = xr.DataArray(zero_variable) zero_dataset = xr.Dataset({"y": zero_variable}) assert_identical(array, add(array, zero_array)) assert_identical(array, add(zero_array, array)) assert_identical(variable, add(variable, zero_array)) assert_identical(variable, add(variable, zero_variable)) assert_identical(variable, add(zero_array, variable)) assert_identical(variable, add(zero_variable, variable)) assert_identical(data_array, add(data_array, zero_array)) 
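# Illustrative sketch (not one of the tests above): the `add` helper used in
# these tests is just apply_ufunc wrapping operator.add, so broadcasting and
# coordinate handling follow the normal xarray rules. Only the public xarray
# API is assumed; the names below are made up.
def _sketch_apply_ufunc_elementwise_add():
    a = xr.DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
    result = apply_ufunc(operator.add, a, 10)
    # The result keeps a's dims and coords; values become [11, 12, 13].
    assert_identical(result, a + 10)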
assert_identical(data_array, add(data_array, zero_variable)) assert_identical(data_array, add(data_array, zero_data_array)) assert_identical(data_array, add(zero_array, data_array)) assert_identical(data_array, add(zero_variable, data_array)) assert_identical(data_array, add(zero_data_array, data_array)) assert_identical(dataset, add(dataset, zero_array)) assert_identical(dataset, add(dataset, zero_variable)) assert_identical(dataset, add(dataset, zero_data_array)) assert_identical(dataset, add(dataset, zero_dataset)) assert_identical(dataset, add(zero_array, dataset)) assert_identical(dataset, add(zero_variable, dataset)) assert_identical(dataset, add(zero_data_array, dataset)) assert_identical(dataset, add(zero_dataset, dataset)) assert_identical(data_array, add(data_array.groupby("x"), zero_data_array)) assert_identical(data_array, add(zero_data_array, data_array.groupby("x"))) assert_identical(dataset, add(data_array.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_dataset, data_array.groupby("x"))) assert_identical(dataset, add(dataset.groupby("x"), zero_data_array)) assert_identical(dataset, add(dataset.groupby("x"), zero_dataset)) assert_identical(dataset, add(zero_data_array, dataset.groupby("x"))) assert_identical(dataset, add(zero_dataset, dataset.groupby("x"))) def test_apply_two_outputs() -> None: array = np.arange(5) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) def twice(obj): def func(x): return (x, x) return apply_ufunc(func, obj, output_core_dims=[[], []]) out0, out1 = twice(array) assert_identical(out0, array) assert_identical(out1, array) out0, out1 = twice(variable) assert_identical(out0, variable) assert_identical(out1, variable) out0, out1 = twice(data_array) assert_identical(out0, data_array) assert_identical(out1, data_array) out0, out1 = twice(dataset) assert_identical(out0, dataset) assert_identical(out1, dataset) out0, out1 = twice(data_array.groupby("x")) assert_identical(out0, data_array) assert_identical(out1, data_array) out0, out1 = twice(dataset.groupby("x")) assert_identical(out0, dataset) assert_identical(out1, dataset) def test_apply_missing_dims() -> None: ## Single arg def add_one(a, core_dims, on_missing_core_dim): return apply_ufunc( lambda x: x + 1, a, input_core_dims=core_dims, output_core_dims=core_dims, on_missing_core_dim=on_missing_core_dim, ) array = np.arange(6).reshape(2, 3) variable = xr.Variable(["x", "y"], array) variable_no_y = xr.Variable(["x", "z"], array) ds = xr.Dataset({"x_y": variable, "x_z": variable_no_y}) # Check the standard stuff works OK assert_identical( add_one(ds[["x_y"]], core_dims=[["y"]], on_missing_core_dim="raise"), ds[["x_y"]] + 1, ) # `raise` β€”Β should raise on a missing dim with pytest.raises(ValueError): add_one(ds, core_dims=[["y"]], on_missing_core_dim="raise") # `drop` β€” should drop the var with the missing dim assert_identical( add_one(ds, core_dims=[["y"]], on_missing_core_dim="drop"), (ds + 1).drop_vars("x_z"), ) # `copy` β€” should not add one to the missing with `copy` copy_result = add_one(ds, core_dims=[["y"]], on_missing_core_dim="copy") assert_identical(copy_result["x_y"], (ds + 1)["x_y"]) assert_identical(copy_result["x_z"], ds["x_z"]) ## Multiple args def sum_add(a, b, core_dims, on_missing_core_dim): return apply_ufunc( lambda a, b, axis=None: a.sum(axis) + b.sum(axis), a, b, input_core_dims=core_dims, on_missing_core_dim=on_missing_core_dim, ) # Check the standard stuff works OK 
assert_identical( sum_add( ds[["x_y"]], ds[["x_y"]], core_dims=[["x", "y"], ["x", "y"]], on_missing_core_dim="raise", ), ds[["x_y"]].sum() * 2, ) # `raise` β€”Β should raise on a missing dim with pytest.raises( ValueError, match=r".*Missing core dims \{'y'\} from arg number 1 on a variable named `x_z`:\n.* None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) def twice(obj): def func(x): return (x, x) return apply_ufunc(func, obj, output_core_dims=[[], []], dask="parallelized") out0, out1 = twice(data_array.chunk({"x": 1})) assert_identical(data_array, out0) assert_identical(data_array, out1) def test_apply_input_core_dimension() -> None: def first_element(obj, dim): def func(x): return x[..., 0] return apply_ufunc(func, obj, input_core_dims=[[dim]]) array = np.array([[1, 2], [3, 4]]) variable = xr.Variable(["x", "y"], array) data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]}) dataset = xr.Dataset({"data": data_array}) expected_variable_x = xr.Variable(["y"], [1, 2]) expected_data_array_x = xr.DataArray(expected_variable_x, {"y": [-1, -2]}) expected_dataset_x = xr.Dataset({"data": expected_data_array_x}) expected_variable_y = xr.Variable(["x"], [1, 3]) expected_data_array_y = xr.DataArray(expected_variable_y, {"x": ["a", "b"]}) expected_dataset_y = xr.Dataset({"data": expected_data_array_y}) assert_identical(expected_variable_x, first_element(variable, "x")) assert_identical(expected_variable_y, first_element(variable, "y")) assert_identical(expected_data_array_x, first_element(data_array, "x")) assert_identical(expected_data_array_y, first_element(data_array, "y")) assert_identical(expected_dataset_x, first_element(dataset, "x")) assert_identical(expected_dataset_y, first_element(dataset, "y")) assert_identical(expected_data_array_x, first_element(data_array.groupby("y"), "x")) assert_identical(expected_dataset_x, first_element(dataset.groupby("y"), "x")) def multiply(*args): val = args[0] for arg in args[1:]: val = val * arg return val # regression test for GH:2341 with pytest.raises(ValueError): apply_ufunc( multiply, data_array, data_array["y"].values, input_core_dims=[["y"]], output_core_dims=[["y"]], ) expected = xr.DataArray( multiply(data_array, data_array["y"]), dims=["x", "y"], coords=data_array.coords ) actual = apply_ufunc( multiply, data_array, data_array["y"].values, input_core_dims=[["y"], []], output_core_dims=[["y"]], ) assert_identical(expected, actual) def test_apply_output_core_dimension() -> None: def stack_negative(obj): def func(x): return np.stack([x, -x], axis=-1) result = apply_ufunc(func, obj, output_core_dims=[["sign"]]) if isinstance(result, xr.Dataset | xr.DataArray): result.coords["sign"] = [1, -1] return result array = np.array([[1, 2], [3, 4]]) variable = xr.Variable(["x", "y"], array) data_array = xr.DataArray(variable, {"x": ["a", "b"], "y": [-1, -2]}) dataset = xr.Dataset({"data": data_array}) stacked_array = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]]) stacked_variable = xr.Variable(["x", "y", "sign"], stacked_array) stacked_coords = {"x": ["a", "b"], "y": [-1, -2], "sign": [1, -1]} stacked_data_array = xr.DataArray(stacked_variable, stacked_coords) stacked_dataset = xr.Dataset({"data": stacked_data_array}) assert_identical(stacked_array, stack_negative(array)) assert_identical(stacked_variable, stack_negative(variable)) assert_identical(stacked_data_array, stack_negative(data_array)) assert_identical(stacked_dataset, stack_negative(dataset)) assert_identical(stacked_data_array, 
stack_negative(data_array.groupby("x"))) assert_identical(stacked_dataset, stack_negative(dataset.groupby("x"))) def original_and_stack_negative(obj): def func(x): return (x, np.stack([x, -x], axis=-1)) result = apply_ufunc(func, obj, output_core_dims=[[], ["sign"]]) if isinstance(result[1], xr.Dataset | xr.DataArray): result[1].coords["sign"] = [1, -1] return result out0, out1 = original_and_stack_negative(array) assert_identical(array, out0) assert_identical(stacked_array, out1) out0, out1 = original_and_stack_negative(variable) assert_identical(variable, out0) assert_identical(stacked_variable, out1) out0, out1 = original_and_stack_negative(data_array) assert_identical(data_array, out0) assert_identical(stacked_data_array, out1) out0, out1 = original_and_stack_negative(dataset) assert_identical(dataset, out0) assert_identical(stacked_dataset, out1) out0, out1 = original_and_stack_negative(data_array.groupby("x")) assert_identical(data_array, out0) assert_identical(stacked_data_array, out1) out0, out1 = original_and_stack_negative(dataset.groupby("x")) assert_identical(dataset, out0) assert_identical(stacked_dataset, out1) def test_apply_exclude() -> None: def concatenate(objects, dim="x"): def func(*x): return np.concatenate(x, axis=-1) result = apply_ufunc( func, *objects, input_core_dims=[[dim]] * len(objects), output_core_dims=[[dim]], exclude_dims={dim}, ) if isinstance(result, xr.Dataset | xr.DataArray): # note: this will fail if dim is not a coordinate on any input new_coord = np.concatenate([obj.coords[dim] for obj in objects]) result.coords[dim] = new_coord return result arrays = [np.array([1]), np.array([2, 3])] variables = [xr.Variable("x", a) for a in arrays] data_arrays = [ xr.DataArray(v, {"x": c, "y": ("x", range(len(c)))}) for v, c in zip(variables, [["a"], ["b", "c"]], strict=True) ] datasets = [xr.Dataset({"data": data_array}) for data_array in data_arrays] expected_array = np.array([1, 2, 3]) expected_variable = xr.Variable("x", expected_array) expected_data_array = xr.DataArray(expected_variable, [("x", list("abc"))]) expected_dataset = xr.Dataset({"data": expected_data_array}) assert_identical(expected_array, concatenate(arrays)) assert_identical(expected_variable, concatenate(variables)) assert_identical(expected_data_array, concatenate(data_arrays)) assert_identical(expected_dataset, concatenate(datasets)) # must also be a core dimension with pytest.raises(ValueError): apply_ufunc(identity, variables[0], exclude_dims={"x"}) def test_apply_groupby_add() -> None: array = np.arange(5) variable = xr.Variable("x", array) coords = {"x": -array, "y": ("x", [0, 0, 1, 1, 2])} data_array = xr.DataArray(variable, coords, dims="x") dataset = xr.Dataset({"z": variable}, coords) other_variable = xr.Variable("y", [0, 10]) other_data_array = xr.DataArray(other_variable, dims="y") other_dataset = xr.Dataset({"z": other_variable}) expected_variable = xr.Variable("x", [0, 1, 12, 13, np.nan]) expected_data_array = xr.DataArray(expected_variable, coords, dims="x") expected_dataset = xr.Dataset({"z": expected_variable}, coords) assert_identical( expected_data_array, add(data_array.groupby("y"), other_data_array) ) assert_identical(expected_dataset, add(data_array.groupby("y"), other_dataset)) assert_identical(expected_dataset, add(dataset.groupby("y"), other_data_array)) assert_identical(expected_dataset, add(dataset.groupby("y"), other_dataset)) # cannot be performed with xarray.Variable objects that share a dimension with pytest.raises(ValueError): add(data_array.groupby("y"), 
other_variable) # if they are all grouped the same way with pytest.raises(ValueError): add(data_array.groupby("y"), data_array[:4].groupby("y")) with pytest.raises(ValueError): add(data_array.groupby("y"), data_array[1:].groupby("y")) with pytest.raises(ValueError): add(data_array.groupby("y"), other_data_array.groupby("y")) with pytest.raises(ValueError): add(data_array.groupby("y"), data_array.groupby("x")) @pytest.mark.filterwarnings("ignore:Duplicate dimension names present") def test_unified_dim_sizes() -> None: assert unified_dim_sizes([xr.Variable((), 0)]) == {} assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1])]) == {"x": 1} assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("y", [1, 2])]) == { "x": 1, "y": 2, } assert unified_dim_sizes( [xr.Variable(("x", "z"), [[1]]), xr.Variable(("y", "z"), [[1, 2], [3, 4]])], exclude_dims={"z"}, ) == {"x": 1, "y": 2} with pytest.raises(ValueError, match="broadcasting cannot handle"): with pytest.warns(UserWarning, match="Duplicate dimension names"): unified_dim_sizes([xr.Variable(("x", "x"), [[1]])]) # mismatched lengths with pytest.raises(ValueError): unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1, 2])]) def test_broadcast_compat_data_1d() -> None: data = np.arange(5) var = xr.Variable("x", data) assert_identical(data, broadcast_compat_data(var, ("x",), ())) assert_identical(data, broadcast_compat_data(var, (), ("x",))) assert_identical(data[:], broadcast_compat_data(var, ("w",), ("x",))) assert_identical(data[:, None], broadcast_compat_data(var, ("w", "x", "y"), ())) with pytest.raises(ValueError): broadcast_compat_data(var, ("x",), ("w",)) with pytest.raises(ValueError): broadcast_compat_data(var, (), ()) def test_broadcast_compat_data_2d() -> None: data = np.arange(12).reshape(3, 4) var = xr.Variable(["x", "y"], data) assert_identical(data, broadcast_compat_data(var, ("x", "y"), ())) assert_identical(data, broadcast_compat_data(var, ("x",), ("y",))) assert_identical(data, broadcast_compat_data(var, (), ("x", "y"))) assert_identical(data.T, broadcast_compat_data(var, ("y", "x"), ())) assert_identical(data.T, broadcast_compat_data(var, ("y",), ("x",))) assert_identical(data, broadcast_compat_data(var, ("w", "x"), ("y",))) assert_identical(data, broadcast_compat_data(var, ("w",), ("x", "y"))) assert_identical(data.T, broadcast_compat_data(var, ("w",), ("y", "x"))) assert_identical( data[:, :, None], broadcast_compat_data(var, ("w", "x", "y", "z"), ()) ) assert_identical( data[None, :, :].T, broadcast_compat_data(var, ("w", "y", "x", "z"), ()) ) def test_keep_attrs() -> None: def add(a, b, keep_attrs): if keep_attrs: return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs) else: return apply_ufunc(operator.add, a, b) a = xr.DataArray([0, 1], [("x", [0, 1])]) a.attrs["attr"] = "da" a["x"].attrs["attr"] = "da_coord" b = xr.DataArray([1, 2], [("x", [0, 1])]) actual = add(a, b, keep_attrs=False) assert not actual.attrs actual = add(a, b, keep_attrs=True) assert_identical(actual.attrs, a.attrs) assert_identical(actual["x"].attrs, a["x"].attrs) actual = add(a.variable, b.variable, keep_attrs=False) assert not actual.attrs actual = add(a.variable, b.variable, keep_attrs=True) assert_identical(actual.attrs, a.attrs) ds_a = xr.Dataset({"x": [0, 1]}) ds_a.attrs["attr"] = "ds" ds_a.x.attrs["attr"] = "da" ds_b = xr.Dataset({"x": [0, 1]}) actual = add(ds_a, ds_b, keep_attrs=False) assert not actual.attrs actual = add(ds_a, ds_b, keep_attrs=True) assert_identical(actual.attrs, ds_a.attrs) 
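# Illustrative sketch (not one of the tests above): keep_attrs controls
# whether apply_ufunc copies attrs from the first argument onto the result;
# with the default, attrs are dropped. Only the public xarray API is assumed;
# the names below are made up.
def _sketch_apply_ufunc_keep_attrs():
    da = xr.DataArray([1, 2], dims="x", attrs={"units": "m"})
    with_attrs = apply_ufunc(operator.neg, da, keep_attrs=True)
    without_attrs = apply_ufunc(operator.neg, da)
    assert with_attrs.attrs == {"units": "m"}
    assert without_attrs.attrs == {}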
assert_identical(actual.x.attrs, ds_a.x.attrs) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_variable(strategy, attrs, expected, error) -> None: a = xr.Variable("x", [0, 1], attrs=attrs[0]) b = xr.Variable("x", [0, 1], attrs=attrs[1]) c = xr.Variable("x", [0, 1], attrs=attrs[2]) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: expected = xr.Variable("x", [0, 3], attrs=expected) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataarray(strategy, attrs, expected, error) -> None: a = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[0]) b = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[1]) c = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[2]) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: expected = xr.DataArray(dims="x", data=[0, 3], attrs=expected) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize("variant", ("dim", "coord")) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataarray_variables( variant, strategy, attrs, expected, error ): compute_attrs = { "dim": lambda attrs, default: (attrs, default), "coord": lambda attrs, default: (default, attrs), 
}.get(variant) dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}]) a = xr.DataArray( dims="x", data=[0, 1], coords={"x": ("x", [0, 1], dim_attrs[0]), "u": ("x", [0, 1], coord_attrs[0])}, ) b = xr.DataArray( dims="x", data=[0, 1], coords={"x": ("x", [0, 1], dim_attrs[1]), "u": ("x", [0, 1], coord_attrs[1])}, ) c = xr.DataArray( dims="x", data=[0, 1], coords={"x": ("x", [0, 1], dim_attrs[2]), "u": ("x", [0, 1], coord_attrs[2])}, ) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: dim_attrs, coord_attrs = compute_attrs(expected, {}) expected = xr.DataArray( dims="x", data=[0, 3], coords={"x": ("x", [0, 1], dim_attrs), "u": ("x", [0, 1], coord_attrs)}, ) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataset(strategy, attrs, expected, error) -> None: a = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[0]) b = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[1]) c = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[2]) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: expected = xr.Dataset({"a": ("x", [0, 3])}, attrs=expected) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) @pytest.mark.parametrize("variant", ("data", "dim", "coord")) @pytest.mark.parametrize( ["strategy", "attrs", "expected", "error"], ( pytest.param( None, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="default", ), pytest.param( False, [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="False", ), pytest.param( True, [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="True", ), pytest.param( "override", [{"a": 1}, {"a": 2}, {"a": 3}], {"a": 1}, False, id="override", ), pytest.param( "drop", [{"a": 1}, {"a": 2}, {"a": 3}], {}, False, id="drop", ), pytest.param( "drop_conflicts", [{"a": 1, "b": 2}, {"b": 1, "c": 3}, {"c": 3, "d": 4}], {"a": 1, "c": 3, "d": 4}, False, id="drop_conflicts", ), pytest.param( "no_conflicts", [{"a": 1}, {"b": 2}, {"b": 3}], None, True, id="no_conflicts", ), ), ) def test_keep_attrs_strategies_dataset_variables( variant, strategy, attrs, expected, error ): compute_attrs = { "data": lambda attrs, default: (attrs, default, default), "dim": lambda attrs, default: (default, attrs, default), "coord": lambda attrs, default: (default, default, attrs), }.get(variant) data_attrs, dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}]) a = xr.Dataset( {"a": ("x", [], data_attrs[0])}, coords={"x": ("x", [], dim_attrs[0]), "u": ("x", [], coord_attrs[0])}, ) b = xr.Dataset( {"a": ("x", [], data_attrs[1])}, coords={"x": ("x", [], dim_attrs[1]), "u": ("x", [], coord_attrs[1])}, ) c = xr.Dataset( {"a": ("x", [], 
data_attrs[2])}, coords={"x": ("x", [], dim_attrs[2]), "u": ("x", [], coord_attrs[2])}, ) if error: with pytest.raises(xr.MergeError): apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) else: data_attrs, dim_attrs, coord_attrs = compute_attrs(expected, {}) expected = xr.Dataset( {"a": ("x", [], data_attrs)}, coords={"x": ("x", [], dim_attrs), "u": ("x", [], coord_attrs)}, ) actual = apply_ufunc(lambda *args: sum(args), a, b, c, keep_attrs=strategy) assert_identical(actual, expected) def test_dataset_join() -> None: ds0 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds1 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]}) # by default, cannot have different labels with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*"): apply_ufunc(operator.add, ds0, ds1) with pytest.raises(TypeError, match=r"must supply"): apply_ufunc(operator.add, ds0, ds1, dataset_join="outer") def add(a, b, join, dataset_join): return apply_ufunc( operator.add, a, b, join=join, dataset_join=dataset_join, dataset_fill_value=np.nan, ) actual = add(ds0, ds1, "outer", "inner") expected = xr.Dataset({"a": ("x", [np.nan, 101, np.nan]), "x": [0, 1, 2]}) assert_identical(actual, expected) actual = add(ds0, ds1, "outer", "outer") assert_identical(actual, expected) with pytest.raises(ValueError, match=r"data variable names"): apply_ufunc(operator.add, ds0, xr.Dataset({"b": 1})) ds2 = xr.Dataset({"b": ("x", [99, 3]), "x": [1, 2]}) actual = add(ds0, ds2, "outer", "inner") expected = xr.Dataset({"x": [0, 1, 2]}) assert_identical(actual, expected) # we used np.nan as the fill_value in add() above actual = add(ds0, ds2, "outer", "outer") expected = xr.Dataset( { "a": ("x", [np.nan, np.nan, np.nan]), "b": ("x", [np.nan, np.nan, np.nan]), "x": [0, 1, 2], } ) assert_identical(actual, expected) @requires_dask def test_apply_dask() -> None: import dask.array as da array = da.ones((2,), chunks=2) variable = xr.Variable("x", array) coords = xr.DataArray(variable).coords.variables data_array = xr.DataArray(variable, dims=["x"], coords=coords) dataset = xr.Dataset({"y": variable}) # encountered dask array, but did not set dask='allowed' with pytest.raises(ValueError): apply_ufunc(identity, array) with pytest.raises(ValueError): apply_ufunc(identity, variable) with pytest.raises(ValueError): apply_ufunc(identity, data_array) with pytest.raises(ValueError): apply_ufunc(identity, dataset) # unknown setting for dask array handling with pytest.raises(ValueError): apply_ufunc(identity, array, dask="unknown") # type: ignore[arg-type] def dask_safe_identity(x): return apply_ufunc(identity, x, dask="allowed") assert array is dask_safe_identity(array) actual = dask_safe_identity(variable) assert isinstance(actual.data, da.Array) assert_identical(variable, actual) actual = dask_safe_identity(data_array) assert isinstance(actual.data, da.Array) assert_identical(data_array, actual) actual = dask_safe_identity(dataset) assert isinstance(actual["y"].data, da.Array) assert_identical(dataset, actual) @requires_dask def test_apply_dask_parallelized_one_arg() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) data_array = xr.DataArray(array, dims=("x", "y")) def parallel_identity(x): return apply_ufunc(identity, x, dask="parallelized", output_dtypes=[x.dtype]) actual = parallel_identity(data_array) assert isinstance(actual.data, da.Array) assert actual.data.chunks == array.chunks assert_identical(data_array, actual) computed = data_array.compute() actual = parallel_identity(computed) assert_identical(computed, 
actual) @requires_dask def test_apply_dask_parallelized_two_args() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64) data_array = xr.DataArray(array, dims=("x", "y")) data_array.name = None def parallel_add(x, y): return apply_ufunc( operator.add, x, y, dask="parallelized", output_dtypes=[np.int64] ) def check(x, y): actual = parallel_add(x, y) assert isinstance(actual.data, da.Array) assert actual.data.chunks == array.chunks assert_identical(data_array, actual) check(data_array, 0) check(0, data_array) check(data_array, xr.DataArray(0)) check(data_array, 0 * data_array) check(data_array, 0 * data_array[0]) check(data_array[:, 0], 0 * data_array[0]) check(data_array, 0 * data_array.compute()) @requires_dask def test_apply_dask_parallelized_errors() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) data_array = xr.DataArray(array, dims=("x", "y")) # from apply_array_ufunc with pytest.raises(ValueError, match=r"at least one input is an xarray object"): apply_ufunc(identity, array, dask="parallelized") # formerly from _apply_blockwise, now from apply_variable_ufunc with pytest.raises(ValueError, match=r"consists of multiple chunks"): apply_ufunc( identity, data_array, dask="parallelized", output_dtypes=[float], input_core_dims=[("y",)], output_core_dims=[("y",)], ) # it's currently impossible to silence these warnings from inside dask.array: # https://github.com/dask/dask/issues/3245 @requires_dask @pytest.mark.filterwarnings("ignore:Mean of empty slice") def test_apply_dask_multiple_inputs() -> None: import dask.array as da def covariance(x, y): return ( (x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True)) ).mean(axis=-1) rs = np.random.default_rng(42) array1 = da.from_array(rs.random((4, 4)), chunks=(2, 4)) array2 = da.from_array(rs.random((4, 4)), chunks=(2, 4)) data_array_1 = xr.DataArray(array1, dims=("x", "z")) data_array_2 = xr.DataArray(array2, dims=("y", "z")) expected = apply_ufunc( covariance, data_array_1.compute(), data_array_2.compute(), input_core_dims=[["z"], ["z"]], ) allowed = apply_ufunc( covariance, data_array_1, data_array_2, input_core_dims=[["z"], ["z"]], dask="allowed", ) assert isinstance(allowed.data, da.Array) xr.testing.assert_allclose(expected, allowed.compute()) parallelized = apply_ufunc( covariance, data_array_1, data_array_2, input_core_dims=[["z"], ["z"]], dask="parallelized", output_dtypes=[float], ) assert isinstance(parallelized.data, da.Array) xr.testing.assert_allclose(expected, parallelized.compute()) @requires_dask def test_apply_dask_new_output_dimension() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) data_array = xr.DataArray(array, dims=("x", "y")) def stack_negative(obj): def func(x): return np.stack([x, -x], axis=-1) return apply_ufunc( func, obj, output_core_dims=[["sign"]], dask="parallelized", output_dtypes=[obj.dtype], dask_gufunc_kwargs=dict(output_sizes={"sign": 2}), ) expected = stack_negative(data_array.compute()) actual = stack_negative(data_array) assert actual.dims == ("x", "y", "sign") assert actual.shape == (2, 2, 2) assert isinstance(actual.data, da.Array) assert_identical(expected, actual) @requires_dask def test_apply_dask_new_output_sizes() -> None: ds = xr.Dataset({"foo": (["lon", "lat"], np.arange(10 * 10).reshape((10, 10)))}) ds["bar"] = ds["foo"] newdims = {"lon_new": 3, "lat_new": 6} def extract(obj): def func(da): return da[1:4, 1:7] return apply_ufunc( func, obj, dask="parallelized", input_core_dims=[["lon", "lat"]], 
output_core_dims=[["lon_new", "lat_new"]], dask_gufunc_kwargs=dict(output_sizes=newdims), ) expected = extract(ds) actual = extract(ds.chunk()) assert actual.sizes == {"lon_new": 3, "lat_new": 6} assert_identical(expected.chunk(), actual) @requires_dask def test_apply_dask_new_output_sizes_not_supplied_same_dim_names() -> None: # test for missing output_sizes kwarg sneaking through # see GH discussion 7503 data = np.random.randn(4, 4, 3, 2) da = xr.DataArray(data=data, dims=("x", "y", "i", "j")).chunk(x=1, y=1) with pytest.raises(ValueError, match="output_sizes"): xr.apply_ufunc( np.linalg.pinv, da, input_core_dims=[["i", "j"]], output_core_dims=[["i", "j"]], exclude_dims={"i", "j"}, dask="parallelized", ) def pandas_median(x): return pd.Series(x).median() def test_vectorize() -> None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array, input_core_dims=[["y"]], vectorize=True ) assert_identical(expected, actual) @requires_dask def test_vectorize_dask() -> None: # run vectorization in dask.array.gufunc by using `dask='parallelized'` data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array.chunk({"x": 1}), input_core_dims=[["y"]], vectorize=True, dask="parallelized", output_dtypes=[float], ) assert_identical(expected, actual) @requires_dask def test_vectorize_dask_dtype() -> None: # ensure output_dtypes is preserved with vectorize=True # GH4015 # integer data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array.chunk({"x": 1}), input_core_dims=[["y"]], vectorize=True, dask="parallelized", output_dtypes=[int], ) assert_identical(expected, actual) assert expected.dtype == actual.dtype # complex data_array = xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y")) expected = data_array.copy() actual = apply_ufunc( identity, data_array.chunk({"x": 1}), vectorize=True, dask="parallelized", output_dtypes=[complex], ) assert_identical(expected, actual) assert expected.dtype == actual.dtype @requires_dask @pytest.mark.parametrize( "data_array", [ xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")), xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y")), ], ) def test_vectorize_dask_dtype_without_output_dtypes(data_array) -> None: # ensure output_dtypes is preserved with vectorize=True # GH4015 expected = data_array.copy() actual = apply_ufunc( identity, data_array.chunk({"x": 1}), vectorize=True, dask="parallelized", ) assert_identical(expected, actual) assert expected.dtype == actual.dtype @requires_dask def test_vectorize_dask_dtype_meta() -> None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( pandas_median, data_array.chunk({"x": 1}), input_core_dims=[["y"]], vectorize=True, dask="parallelized", dask_gufunc_kwargs=dict(meta=np.ndarray((0, 0), dtype=float)), ) assert_identical(expected, actual) assert float == actual.dtype def pandas_median_add(x, y): # function which can consume input of unequal length return pd.Series(x).median() + pd.Series(y).median() def test_vectorize_exclude_dims() -> None: # GH 3890 data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y")) expected = xr.DataArray([3, 5], dims=["x"]) 
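    # exclude_dims={"y"} lets the two inputs' core dimension "y" differ in length
    # (3 vs. 5 here); with vectorize=True the wrapped function receives the
    # unequal-length 1-D slices and is responsible for combining them itself.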
actual = apply_ufunc( pandas_median_add, data_array_a, data_array_b, input_core_dims=[["y"], ["y"]], vectorize=True, exclude_dims=set("y"), ) assert_identical(expected, actual) @requires_dask def test_vectorize_exclude_dims_dask() -> None: # GH 3890 data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y")) expected = xr.DataArray([3, 5], dims=["x"]) actual = apply_ufunc( pandas_median_add, data_array_a.chunk({"x": 1}), data_array_b.chunk({"x": 1}), input_core_dims=[["y"], ["y"]], exclude_dims=set("y"), vectorize=True, dask="parallelized", output_dtypes=[float], ) assert_identical(expected, actual) def test_corr_only_dataarray() -> None: with pytest.raises(TypeError, match="Only xr.DataArray is supported"): xr.corr(xr.Dataset(), xr.Dataset()) # type: ignore[type-var] @pytest.fixture(scope="module") def arrays(): da = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) return [ da.isel(time=range(18)), da.isel(time=range(2, 20)).rolling(time=3, center=True).mean(), xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]), xr.DataArray([[1, 2], [np.nan, np.nan]], dims=["x", "time"]), xr.DataArray([[1, 2], [2, 1]], dims=["x", "time"]), ] @pytest.fixture(scope="module") def array_tuples(arrays): return [ (arrays[0], arrays[0]), (arrays[0], arrays[1]), (arrays[1], arrays[1]), (arrays[2], arrays[2]), (arrays[2], arrays[3]), (arrays[2], arrays[4]), (arrays[4], arrays[2]), (arrays[3], arrays[3]), (arrays[4], arrays[4]), ] @pytest.mark.parametrize("ddof", [0, 1]) @pytest.mark.parametrize("n", [3, 4, 5, 6, 7, 8]) @pytest.mark.parametrize("dim", [None, "x", "time"]) @requires_dask def test_lazy_corrcov( n: int, dim: str | None, ddof: int, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: # GH 5284 from dask import is_dask_collection da_a, da_b = array_tuples[n] with raise_if_dask_computes(): cov = xr.cov(da_a.chunk(), da_b.chunk(), dim=dim, ddof=ddof) assert is_dask_collection(cov) corr = xr.corr(da_a.chunk(), da_b.chunk(), dim=dim) assert is_dask_collection(corr) @pytest.mark.parametrize("ddof", [0, 1]) @pytest.mark.parametrize("n", [0, 1, 2]) @pytest.mark.parametrize("dim", [None, "time"]) def test_cov( n: int, dim: str | None, ddof: int, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] if dim is not None: def np_cov_ind(ts1, ts2, a, x): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() # While dropping isn't ideal here, numpy will return nan # if any segment contains a NaN. 
ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.cov( np.ma.masked_invalid(ts1.sel(a=a, x=x).data.flatten()), np.ma.masked_invalid(ts2.sel(a=a, x=x).data.flatten()), ddof=ddof, )[0, 1] expected = np.zeros((3, 4)) for a in [0, 1, 2]: for x in [0, 1, 2, 3]: expected[a, x] = np_cov_ind(da_a, da_b, a=a, x=x) actual = xr.cov(da_a, da_b, dim=dim, ddof=ddof) assert_allclose(actual, expected) else: def np_cov(ts1, ts2): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.cov( np.ma.masked_invalid(ts1.data.flatten()), np.ma.masked_invalid(ts2.data.flatten()), ddof=ddof, )[0, 1] expected = np_cov(da_a, da_b) actual = xr.cov(da_a, da_b, dim=dim, ddof=ddof) assert_allclose(actual, expected) @pytest.mark.parametrize("n", [0, 1, 2]) @pytest.mark.parametrize("dim", [None, "time"]) def test_corr( n: int, dim: str | None, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] if dim is not None: def np_corr_ind(ts1, ts2, a, x): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.corrcoef( np.ma.masked_invalid(ts1.sel(a=a, x=x).data.flatten()), np.ma.masked_invalid(ts2.sel(a=a, x=x).data.flatten()), )[0, 1] expected = np.zeros((3, 4)) for a in [0, 1, 2]: for x in [0, 1, 2, 3]: expected[a, x] = np_corr_ind(da_a, da_b, a=a, x=x) actual = xr.corr(da_a, da_b, dim) assert_allclose(actual, expected) else: def np_corr(ts1, ts2): # Ensure the ts are aligned and missing values ignored ts1, ts2 = broadcast(ts1, ts2) valid_values = ts1.notnull() & ts2.notnull() ts1 = ts1.where(valid_values) ts2 = ts2.where(valid_values) return np.ma.corrcoef( np.ma.masked_invalid(ts1.data.flatten()), np.ma.masked_invalid(ts2.data.flatten()), )[0, 1] expected = np_corr(da_a, da_b) actual = xr.corr(da_a, da_b, dim) assert_allclose(actual, expected) @pytest.mark.parametrize("n", range(9)) @pytest.mark.parametrize("dim", [None, "time", "x"]) def test_covcorr_consistency( n: int, dim: str | None, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] # Testing that xr.corr and xr.cov are consistent with each other # 1. Broadcast the two arrays da_a, da_b = broadcast(da_a, da_b) # 2. 
Ignore the nans valid_values = da_a.notnull() & da_b.notnull() da_a = da_a.where(valid_values) da_b = da_b.where(valid_values) expected = xr.cov(da_a, da_b, dim=dim, ddof=0) / ( da_a.std(dim=dim) * da_b.std(dim=dim) ) actual = xr.corr(da_a, da_b, dim=dim) assert_allclose(actual, expected) @requires_dask @pytest.mark.parametrize("n", range(9)) @pytest.mark.parametrize("dim", [None, "time", "x"]) @pytest.mark.filterwarnings("ignore:invalid value encountered in .*divide") def test_corr_lazycorr_consistency( n: int, dim: str | None, array_tuples: tuple[xr.DataArray, xr.DataArray] ) -> None: da_a, da_b = array_tuples[n] da_al = da_a.chunk() da_bl = da_b.chunk() c_abl = xr.corr(da_al, da_bl, dim=dim) c_ab = xr.corr(da_a, da_b, dim=dim) c_ab_mixed = xr.corr(da_a, da_bl, dim=dim) assert_allclose(c_ab, c_abl) assert_allclose(c_ab, c_ab_mixed) @requires_dask def test_corr_dtype_error(): da_a = xr.DataArray([[1, 2], [2, 1]], dims=["x", "time"]) da_b = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"]) xr.testing.assert_equal(xr.corr(da_a, da_b), xr.corr(da_a.chunk(), da_b.chunk())) xr.testing.assert_equal(xr.corr(da_a, da_b), xr.corr(da_a, da_b.chunk())) @pytest.mark.parametrize("n", range(5)) @pytest.mark.parametrize("dim", [None, "time", "x", ["time", "x"]]) def test_autocov(n: int, dim: str | None, arrays) -> None: da = arrays[n] # Testing that the autocovariance*(N-1) is ~=~ to the variance matrix # 1. Ignore the nans valid_values = da.notnull() # Because we're using ddof=1, this requires > 1 value in each sample da = da.where(valid_values.sum(dim=dim) > 1) expected = ((da - da.mean(dim=dim)) ** 2).sum(dim=dim, skipna=True, min_count=1) actual = xr.cov(da, da, dim=dim) * (valid_values.sum(dim) - 1) assert_allclose(actual, expected) def test_complex_cov() -> None: da = xr.DataArray([1j, -1j]) actual = xr.cov(da, da) assert abs(actual.item()) == 2 @pytest.mark.parametrize("weighted", [True, False]) def test_bilinear_cov_corr(weighted: bool) -> None: # Test the bilinear properties of covariance and correlation da = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) db = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) dc = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) if weighted: weights = xr.DataArray( np.abs(np.random.random(4)), dims=("x"), ) else: weights = None k = np.random.random(1)[0] # Test covariance properties assert_allclose( xr.cov(da + k, db, weights=weights), xr.cov(da, db, weights=weights) ) assert_allclose( xr.cov(da, db + k, weights=weights), xr.cov(da, db, weights=weights) ) assert_allclose( xr.cov(da + dc, db, weights=weights), xr.cov(da, db, weights=weights) + xr.cov(dc, db, weights=weights), ) assert_allclose( xr.cov(da, db + dc, weights=weights), xr.cov(da, db, weights=weights) + xr.cov(da, dc, weights=weights), ) assert_allclose( xr.cov(k * da, db, weights=weights), k * xr.cov(da, db, weights=weights) ) assert_allclose( xr.cov(da, k * db, weights=weights), k * xr.cov(da, db, weights=weights) ) # Test correlation properties assert_allclose( xr.corr(da + k, db, weights=weights), xr.corr(da, db, weights=weights) ) assert_allclose( xr.corr(da, db + k, weights=weights), xr.corr(da, db, weights=weights) ) assert_allclose( xr.corr(k * da, db, weights=weights), xr.corr(da, db, weights=weights) ) 
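    # Correlation is normalised by the standard deviations, so rescaling either
    # argument by the positive factor k (da above, db below) leaves it unchanged,
    # unlike covariance, which scales linearly in k.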
assert_allclose( xr.corr(da, k * db, weights=weights), xr.corr(da, db, weights=weights) ) def test_equally_weighted_cov_corr() -> None: # Test that equal weights for all values produces same results as weights=None da = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) db = xr.DataArray( np.random.random((3, 21, 4)), coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)}, dims=("a", "time", "x"), ) assert_allclose( xr.cov(da, db, weights=None), xr.cov(da, db, weights=xr.DataArray(1)) ) assert_allclose( xr.cov(da, db, weights=None), xr.cov(da, db, weights=xr.DataArray(2)) ) assert_allclose( xr.corr(da, db, weights=None), xr.corr(da, db, weights=xr.DataArray(1)) ) assert_allclose( xr.corr(da, db, weights=None), xr.corr(da, db, weights=xr.DataArray(2)) ) @requires_dask def test_vectorize_dask_new_output_dims() -> None: # regression test for GH3574 # run vectorization in dask.array.gufunc by using `dask='parallelized'` data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) func = lambda x: x[np.newaxis, ...] expected = data_array.expand_dims("z") actual = apply_ufunc( func, data_array.chunk({"x": 1}), output_core_dims=[["z"]], vectorize=True, dask="parallelized", output_dtypes=[float], dask_gufunc_kwargs=dict(output_sizes={"z": 1}), ).transpose(*expected.dims) assert_identical(expected, actual) with pytest.raises( ValueError, match=r"dimension 'z1' in 'output_sizes' must correspond" ): apply_ufunc( func, data_array.chunk({"x": 1}), output_core_dims=[["z"]], vectorize=True, dask="parallelized", output_dtypes=[float], dask_gufunc_kwargs=dict(output_sizes={"z1": 1}), ) with pytest.raises( ValueError, match=r"dimension 'z' in 'output_core_dims' needs corresponding" ): apply_ufunc( func, data_array.chunk({"x": 1}), output_core_dims=[["z"]], vectorize=True, dask="parallelized", output_dtypes=[float], ) def test_output_wrong_number() -> None: variable = xr.Variable("x", np.arange(10)) def identity(x): return x def tuple3x(x): return (x, x, x) with pytest.raises( ValueError, match=r"number of outputs.* Received a with 10 elements. 
Expected a tuple of 2 elements:\n\narray\(\[0", ): apply_ufunc(identity, variable, output_core_dims=[(), ()]) with pytest.raises(ValueError, match=r"number of outputs"): apply_ufunc(tuple3x, variable, output_core_dims=[(), ()]) def test_output_wrong_dims() -> None: variable = xr.Variable("x", np.arange(10)) def add_dim(x): return x[..., np.newaxis] def remove_dim(x): return x[..., 0] with pytest.raises( ValueError, match=r"unexpected number of dimensions.*from:\n\n.*array\(\[\[0", ): apply_ufunc(add_dim, variable, output_core_dims=[("y", "z")]) with pytest.raises(ValueError, match=r"unexpected number of dimensions"): apply_ufunc(add_dim, variable) with pytest.raises(ValueError, match=r"unexpected number of dimensions"): apply_ufunc(remove_dim, variable) def test_output_wrong_dim_size() -> None: array = np.arange(10) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) dataset = xr.Dataset({"y": variable}, {"x": -array}) def truncate(array): return array[:5] def apply_truncate_broadcast_invalid(obj): return apply_ufunc(truncate, obj) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_broadcast_invalid(variable) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_broadcast_invalid(data_array) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_broadcast_invalid(dataset) def apply_truncate_x_x_invalid(obj): return apply_ufunc( truncate, obj, input_core_dims=[["x"]], output_core_dims=[["x"]] ) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_x_x_invalid(variable) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_x_x_invalid(data_array) with pytest.raises(ValueError, match=r"size of dimension"): apply_truncate_x_x_invalid(dataset) def apply_truncate_x_z(obj): return apply_ufunc( truncate, obj, input_core_dims=[["x"]], output_core_dims=[["z"]] ) assert_identical(xr.Variable("z", array[:5]), apply_truncate_x_z(variable)) assert_identical( xr.DataArray(array[:5], dims=["z"]), apply_truncate_x_z(data_array) ) assert_identical(xr.Dataset({"y": ("z", array[:5])}), apply_truncate_x_z(dataset)) def apply_truncate_x_x_valid(obj): return apply_ufunc( truncate, obj, input_core_dims=[["x"]], output_core_dims=[["x"]], exclude_dims={"x"}, ) assert_identical(xr.Variable("x", array[:5]), apply_truncate_x_x_valid(variable)) assert_identical( xr.DataArray(array[:5], dims=["x"]), apply_truncate_x_x_valid(data_array) ) assert_identical( xr.Dataset({"y": ("x", array[:5])}), apply_truncate_x_x_valid(dataset) ) @pytest.mark.parametrize("use_dask", [True, False]) def test_dot(use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("test for dask.") a = np.arange(30 * 4).reshape(30, 4) b = np.arange(30 * 4 * 5).reshape(30, 4, 5) c = np.arange(5 * 60).reshape(5, 60) da_a = xr.DataArray(a, dims=["a", "b"], coords={"a": np.linspace(0, 1, 30)}) da_b = xr.DataArray(b, dims=["a", "b", "c"], coords={"a": np.linspace(0, 1, 30)}) da_c = xr.DataArray(c, dims=["c", "e"]) if use_dask: da_a = da_a.chunk({"a": 3}) da_b = da_b.chunk({"a": 3}) da_c = da_c.chunk({"c": 3}) actual = xr.dot(da_a, da_b, dim=["a", "b"]) assert actual.dims == ("c",) assert (actual.data == np.einsum("ij,ijk->k", a, b)).all() assert isinstance(actual.variable.data, type(da_a.variable.data)) actual = xr.dot(da_a, da_b) assert actual.dims == ("c",) assert (actual.data == np.einsum("ij,ijk->k", a, b)).all() assert isinstance(actual.variable.data, type(da_a.variable.data)) # for only a single array 
is passed without dims argument, just return # as is actual = xr.dot(da_a) assert_identical(da_a, actual) # test for variable actual = xr.dot(da_a.variable, da_b.variable) assert actual.dims == ("c",) assert (actual.data == np.einsum("ij,ijk->k", a, b)).all() assert isinstance(actual.data, type(da_a.variable.data)) if use_dask: da_a = da_a.chunk({"a": 3}) da_b = da_b.chunk({"a": 3}) actual = xr.dot(da_a, da_b, dim=["b"]) assert actual.dims == ("a", "c") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() assert isinstance(actual.variable.data, type(da_a.variable.data)) actual = xr.dot(da_a, da_b, dim=["b"]) assert actual.dims == ("a", "c") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() actual = xr.dot(da_a, da_b, dim="b") assert actual.dims == ("a", "c") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() actual = xr.dot(da_a, da_b, dim="a") assert actual.dims == ("b", "c") assert (actual.data == np.einsum("ij,ijk->jk", a, b)).all() actual = xr.dot(da_a, da_b, dim="c") assert actual.dims == ("a", "b") assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all() actual = xr.dot(da_a, da_b, da_c, dim=["a", "b"]) assert actual.dims == ("c", "e") assert (actual.data == np.einsum("ij,ijk,kl->kl ", a, b, c)).all() # should work with tuple actual = xr.dot(da_a, da_b, dim=("c",)) assert actual.dims == ("a", "b") assert (actual.data == np.einsum("ij,ijk->ij", a, b)).all() # default dims actual = xr.dot(da_a, da_b, da_c) assert actual.dims == ("e",) assert (actual.data == np.einsum("ij,ijk,kl->l ", a, b, c)).all() # 1 array summation actual = xr.dot(da_a, dim="a") assert actual.dims == ("b",) assert (actual.data == np.einsum("ij->j ", a)).all() # empty dim actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dim="a") assert actual.dims == ("b",) assert (actual.data == np.zeros(actual.shape)).all() # Ellipsis (...) sums over all dimensions actual = xr.dot(da_a, da_b, dim=...) assert actual.dims == () assert (actual.data == np.einsum("ij,ijk->", a, b)).all() actual = xr.dot(da_a, da_b, da_c, dim=...) assert actual.dims == () assert (actual.data == np.einsum("ij,ijk,kl-> ", a, b, c)).all() actual = xr.dot(da_a, dim=...) assert actual.dims == () assert (actual.data == np.einsum("ij-> ", a)).all() actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dim=...) assert actual.dims == () assert (actual.data == np.zeros(actual.shape)).all() # Invalid cases if not use_dask: with pytest.raises(TypeError): xr.dot(da_a, dim="a", invalid=None) with pytest.raises(TypeError): xr.dot(da_a.to_dataset(name="da"), dim="a") with pytest.raises(TypeError): xr.dot(dim="a") # einsum parameters actual = xr.dot(da_a, da_b, dim=["b"], order="C") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() assert actual.values.flags["C_CONTIGUOUS"] assert not actual.values.flags["F_CONTIGUOUS"] actual = xr.dot(da_a, da_b, dim=["b"], order="F") assert (actual.data == np.einsum("ij,ijk->ik", a, b)).all() # dask converts Fortran arrays to C order when merging the final array if not use_dask: assert not actual.values.flags["C_CONTIGUOUS"] assert actual.values.flags["F_CONTIGUOUS"] # einsum has a constant string as of the first parameter, which makes # it hard to pass to xarray.apply_ufunc. # make sure dot() uses functools.partial(einsum, subscripts), which # can be pickled, and not a lambda, which can't. 
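    # (With dask-backed inputs the applied function ends up embedded in the task
    # graph, so the returned object is only picklable if that function is; a
    # plain pickle cannot serialise a lambda.)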
pickle.loads(pickle.dumps(xr.dot(da_a))) @pytest.mark.parametrize("use_dask", [True, False]) def test_dot_align_coords(use_dask: bool) -> None: # GH 3694 if use_dask and not has_dask: pytest.skip("test for dask.") a = np.arange(30 * 4).reshape(30, 4) b = np.arange(30 * 4 * 5).reshape(30, 4, 5) # use partially overlapping coords coords_a = {"a": np.arange(30), "b": np.arange(4)} coords_b = {"a": np.arange(5, 35), "b": np.arange(1, 5)} da_a = xr.DataArray(a, dims=["a", "b"], coords=coords_a) da_b = xr.DataArray(b, dims=["a", "b", "c"], coords=coords_b) if use_dask: da_a = da_a.chunk({"a": 3}) da_b = da_b.chunk({"a": 3}) # join="inner" is the default actual = xr.dot(da_a, da_b) # `dot` sums over the common dimensions of the arguments expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) actual = xr.dot(da_a, da_b, dim=...) expected = (da_a * da_b).sum() xr.testing.assert_allclose(expected, actual) with xr.set_options(arithmetic_join="exact"): with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"): xr.dot(da_a, da_b) # NOTE: dot always uses `join="inner"` because `(a * b).sum()` yields the same for all # join method (except "exact") with xr.set_options(arithmetic_join="left"): actual = xr.dot(da_a, da_b) expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) with xr.set_options(arithmetic_join="right"): actual = xr.dot(da_a, da_b) expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) with xr.set_options(arithmetic_join="outer"): actual = xr.dot(da_a, da_b) expected = (da_a * da_b).sum(["a", "b"]) xr.testing.assert_allclose(expected, actual) def test_where() -> None: cond = xr.DataArray([True, False], dims="x") actual = xr.where(cond, 1, 0) expected = xr.DataArray([1, 0], dims="x") assert_identical(expected, actual) def test_where_attrs() -> None: cond = xr.DataArray([True, False], coords={"a": [0, 1]}, attrs={"attr": "cond_da"}) cond["a"].attrs = {"attr": "cond_coord"} input_cond = cond.copy() x = xr.DataArray([1, 1], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) x["a"].attrs = {"attr": "x_coord"} y = xr.DataArray([0, 0], coords={"a": [0, 1]}, attrs={"attr": "y_da"}) y["a"].attrs = {"attr": "y_coord"} # 3 DataArrays, takes attrs from x actual = xr.where(cond, x, y, keep_attrs=True) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) expected["a"].attrs = {"attr": "x_coord"} assert_identical(expected, actual) # Check also that input coordinate attributes weren't modified by reference assert x["a"].attrs == {"attr": "x_coord"} assert y["a"].attrs == {"attr": "y_coord"} assert cond["a"].attrs == {"attr": "cond_coord"} assert_identical(cond, input_cond) # 3 DataArrays, drop attrs actual = xr.where(cond, x, y, keep_attrs=False) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}) assert_identical(expected, actual) assert_identical(expected.coords["a"], actual.coords["a"]) # Check also that input coordinate attributes weren't modified by reference assert x["a"].attrs == {"attr": "x_coord"} assert y["a"].attrs == {"attr": "y_coord"} assert cond["a"].attrs == {"attr": "cond_coord"} assert_identical(cond, input_cond) # x as a scalar, takes no attrs actual = xr.where(cond, 0, y, keep_attrs=True) expected = xr.DataArray([0, 0], coords={"a": [0, 1]}) assert_identical(expected, actual) # y as a scalar, takes attrs from x actual = xr.where(cond, x, 0, keep_attrs=True) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) 
expected["a"].attrs = {"attr": "x_coord"} assert_identical(expected, actual) # x and y as a scalar, takes no attrs actual = xr.where(cond, 1, 0, keep_attrs=True) expected = xr.DataArray([1, 0], coords={"a": [0, 1]}) assert_identical(expected, actual) # cond and y as a scalar, takes attrs from x actual = xr.where(True, x, y, keep_attrs=True) expected = xr.DataArray([1, 1], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) expected["a"].attrs = {"attr": "x_coord"} assert_identical(expected, actual) # no xarray objects, handle no attrs actual_np = xr.where(True, 0, 1, keep_attrs=True) expected_np = np.array(0) assert_identical(expected_np, actual_np) # DataArray and 2 Datasets, takes attrs from x ds_x = xr.Dataset(data_vars={"x": x}, attrs={"attr": "x_ds"}) ds_y = xr.Dataset(data_vars={"x": y}, attrs={"attr": "y_ds"}) ds_actual = xr.where(cond, ds_x, ds_y, keep_attrs=True) ds_expected = xr.Dataset( data_vars={ "x": xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) }, attrs={"attr": "x_ds"}, ) ds_expected["a"].attrs = {"attr": "x_coord"} assert_identical(ds_expected, ds_actual) # 2 DataArrays and 1 Dataset, takes attrs from x ds_actual = xr.where(cond, x.rename("x"), ds_y, keep_attrs=True) ds_expected = xr.Dataset( data_vars={ "x": xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"}) }, ) ds_expected["a"].attrs = {"attr": "x_coord"} assert_identical(ds_expected, ds_actual) @pytest.mark.parametrize( "use_dask", [pytest.param(False, id="nodask"), pytest.param(True, id="dask")] ) @pytest.mark.parametrize( ["x", "coeffs", "expected"], [ pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray([2, 3, 4], dims="degree", coords={"degree": [0, 1, 2]}), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="simple", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [[0, 1], [0, 1]], dims=("y", "degree"), coords={"degree": [0, 1]} ), xr.DataArray([[1, 1], [2, 2], [3, 3]], dims=("x", "y")), id="broadcast-x", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [[0, 1], [1, 0], [1, 1]], dims=("x", "degree"), coords={"degree": [0, 1]}, ), xr.DataArray([1, 1, 1 + 3], dims="x"), id="shared-dim", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray([1, 0, 0], dims="degree", coords={"degree": [2, 1, 0]}), xr.DataArray([1, 2**2, 3**2], dims="x"), id="reordered-index", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray([5], dims="degree", coords={"degree": [3]}), xr.DataArray([5, 5 * 2**3, 5 * 3**3], dims="x"), id="sparse-index", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.Dataset( {"a": ("degree", [0, 1]), "b": ("degree", [1, 0])}, coords={"degree": [0, 1]}, ), xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("x", [1, 1, 1])}), id="array-dataset", ), pytest.param( xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("x", [2, 3, 4])}), xr.DataArray([1, 1], dims="degree", coords={"degree": [0, 1]}), xr.Dataset({"a": ("x", [2, 3, 4]), "b": ("x", [3, 4, 5])}), id="dataset-array", ), pytest.param( xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("y", [2, 3, 4])}), xr.Dataset( {"a": ("degree", [0, 1]), "b": ("degree", [1, 1])}, coords={"degree": [0, 1]}, ), xr.Dataset({"a": ("x", [1, 2, 3]), "b": ("y", [3, 4, 5])}), id="dataset-dataset", ), pytest.param( xr.DataArray(pd.date_range("1970-01-01", freq="s", periods=3), dims="x"), xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}), xr.DataArray( [0, 1e9, 2e9], dims="x", coords={"x": pd.date_range("1970-01-01", freq="s", periods=3)}, ), id="datetime", ), pytest.param( # Force a non-ns unit 
for the coordinate, make sure we convert to `ns` # for backwards compatibility at the moment. This can be relaxed in the future. xr.DataArray( pd.date_range("1970-01-01", freq="s", periods=3, unit="s"), dims="x" ), xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}), xr.DataArray( [0, 1e9, 2e9], dims="x", coords={"x": pd.date_range("1970-01-01", freq="s", periods=3)}, ), id="datetime-non-ns", ), pytest.param( xr.DataArray( np.array([1000, 2000, 3000], dtype="timedelta64[ns]"), dims="x" ), xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}), xr.DataArray([1000.0, 2000.0, 3000.0], dims="x"), id="timedelta", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [2, 3, 4], dims="degree", coords={"degree": np.array([0, 1, 2], dtype=np.int64)}, ), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="int64-degree", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [2, 3, 4], dims="degree", coords={"degree": np.array([0, 1, 2], dtype=np.int32)}, ), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="int32-degree", ), pytest.param( xr.DataArray([1, 2, 3], dims="x"), xr.DataArray( [2, 3, 4], dims="degree", coords={"degree": np.array([0, 1, 2], dtype=np.uint8)}, ), xr.DataArray([9, 2 + 6 + 16, 2 + 9 + 36], dims="x"), id="uint8-degree", ), ], ) def test_polyval( use_dask: bool, x: xr.DataArray | xr.Dataset, coeffs: xr.DataArray | xr.Dataset, expected: xr.DataArray | xr.Dataset, ) -> None: if use_dask: if not has_dask: pytest.skip("requires dask") coeffs = coeffs.chunk({"degree": 2}) x = x.chunk({"x": 2}) with raise_if_dask_computes(): actual = xr.polyval(coord=x, coeffs=coeffs) xr.testing.assert_allclose(actual, expected) @requires_cftime @pytest.mark.parametrize( "use_dask", [pytest.param(False, id="nodask"), pytest.param(True, id="dask")] ) @pytest.mark.parametrize("date", ["1970-01-01", "0753-04-21"]) def test_polyval_cftime(use_dask: bool, date: str) -> None: import cftime x = xr.DataArray( xr.date_range(date, freq="1s", periods=3, use_cftime=True), dims="x", ) coeffs = xr.DataArray([0, 1], dims="degree", coords={"degree": [0, 1]}) if use_dask: if not has_dask: pytest.skip("requires dask") coeffs = coeffs.chunk({"degree": 2}) x = x.chunk({"x": 2}) with raise_if_dask_computes(max_computes=1): actual = xr.polyval(coord=x, coeffs=coeffs) t0 = xr.date_range(date, periods=1)[0] offset = (t0 - cftime.DatetimeGregorian(1970, 1, 1)).total_seconds() * 1e9 expected = ( xr.DataArray( [0, 1e9, 2e9], dims="x", coords={"x": xr.date_range(date, freq="1s", periods=3, use_cftime=True)}, ) + offset ) xr.testing.assert_allclose(actual, expected) def test_polyval_degree_dim_checks() -> None: x = xr.DataArray([1, 2, 3], dims="x") coeffs = xr.DataArray([2, 3, 4], dims="degree", coords={"degree": [0, 1, 2]}) with pytest.raises(ValueError): xr.polyval(x, coeffs.drop_vars("degree")) with pytest.raises(ValueError): xr.polyval(x, coeffs.assign_coords(degree=coeffs.degree.astype(float))) @pytest.mark.parametrize( "use_dask", [pytest.param(False, id="nodask"), pytest.param(True, id="dask")] ) @pytest.mark.parametrize( "x", [ pytest.param(xr.DataArray([0, 1, 2], dims="x"), id="simple"), pytest.param( xr.DataArray(pd.date_range("1970-01-01", freq="ns", periods=3), dims="x"), id="datetime", ), # Force a non-ns unit for the coordinate, make sure we convert to `ns` in both polyfit & polval # for backwards compatibility at the moment. This can be relaxed in the future. 
pytest.param( xr.DataArray( pd.date_range("1970-01-01", freq="s", unit="s", periods=3), dims="x" ), id="datetime-non-ns", ), pytest.param( xr.DataArray(np.array([0, 1, 2], dtype="timedelta64[ns]"), dims="x"), id="timedelta", ), ], ) @pytest.mark.parametrize( "y", [ pytest.param(xr.DataArray([1, 6, 17], dims="x"), id="1D"), pytest.param( xr.DataArray([[1, 6, 17], [34, 57, 86]], dims=("y", "x")), id="2D" ), ], ) def test_polyfit_polyval_integration( use_dask: bool, x: xr.DataArray, y: xr.DataArray ) -> None: y.coords["x"] = x if use_dask: if not has_dask: pytest.skip("requires dask") y = y.chunk({"x": 2}) fit = y.polyfit(dim="x", deg=2) evaluated = xr.polyval(y.x, fit.polyfit_coefficients) expected = y.transpose(*evaluated.dims) xr.testing.assert_allclose(evaluated.variable, expected.variable) @pytest.mark.parametrize("use_dask", [False, True]) @pytest.mark.parametrize( "a, b, ae, be, dim, axis", [ [ xr.DataArray([1, 2, 3]), xr.DataArray([4, 5, 6]), np.array([1, 2, 3]), np.array([4, 5, 6]), "dim_0", -1, ], [ xr.DataArray([1, 2]), xr.DataArray([4, 5, 6]), np.array([1, 2, 0]), np.array([4, 5, 6]), "dim_0", -1, ], [ xr.Variable(dims=["dim_0"], data=[1, 2, 3]), xr.Variable(dims=["dim_0"], data=[4, 5, 6]), np.array([1, 2, 3]), np.array([4, 5, 6]), "dim_0", -1, ], [ xr.Variable(dims=["dim_0"], data=[1, 2]), xr.Variable(dims=["dim_0"], data=[4, 5, 6]), np.array([1, 2, 0]), np.array([4, 5, 6]), "dim_0", -1, ], [ # Test dim in the middle: xr.DataArray( np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)), dims=["time", "cartesian", "var"], coords=dict( time=(["time"], np.arange(0, 5)), cartesian=(["cartesian"], ["x", "y", "z"]), var=(["var"], [1, 1.5, 2, 2.5]), ), ), xr.DataArray( np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)) + 1, dims=["time", "cartesian", "var"], coords=dict( time=(["time"], np.arange(0, 5)), cartesian=(["cartesian"], ["x", "y", "z"]), var=(["var"], [1, 1.5, 2, 2.5]), ), ), np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)), np.arange(0, 5 * 3 * 4).reshape((5, 3, 4)) + 1, "cartesian", 1, ], # Test 1 sized arrays with coords: pytest.param( xr.DataArray( np.array([1]), dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["z"])), ), xr.DataArray( np.array([4, 5, 6]), dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])), ), np.array([0, 0, 1]), np.array([4, 5, 6]), "cartesian", -1, marks=(pytest.mark.xfail(),), ), # Test filling in between with coords: pytest.param( xr.DataArray( [1, 2], dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["x", "z"])), ), xr.DataArray( [4, 5, 6], dims=["cartesian"], coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])), ), np.array([1, 0, 2]), np.array([4, 5, 6]), "cartesian", -1, marks=(pytest.mark.xfail(),), ), ], ) def test_cross(a, b, ae, be, dim: str, axis: int, use_dask: bool) -> None: expected = np.cross(ae, be, axis=axis) if use_dask: if not has_dask: pytest.skip("test for dask.") a = a.chunk() b = b.chunk() actual = xr.cross(a, b, dim=dim) xr.testing.assert_duckarray_allclose(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg"], indirect=True) def test_complex_number_reduce(compute_backend): da = xr.DataArray(np.ones((2,), dtype=np.complex64), dims=["x"]) # Check that xarray doesn't call into numbagg, which doesn't compile for complex # numbers at the moment (but will when numba supports dynamic compilation) da.min() def test_fix() -> None: val = 3.0 val_fixed = np.fix(val) da = xr.DataArray([val]) expected = xr.DataArray([val_fixed]) actual = np.fix(da) assert_identical(expected, actual) 
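# A minimal sketch (underscore-prefixed so pytest does not collect it) of the
# apply_ufunc core-dimension pattern exercised throughout this module: the core
# dimension is moved to the last axis, so a plain NumPy reduction over axis=-1
# consumes it.
def _example_apply_ufunc_core_dim() -> None:
    da = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=("x", "y"))
    # "y" is declared as the (only) core dimension and is reduced away by np.sum
    total = apply_ufunc(np.sum, da, input_core_dims=[["y"]], kwargs={"axis": -1})
    assert_identical(xr.DataArray([6, 15], dims="x"), total)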
xarray-2025.09.0/xarray/tests/test_concat.py000066400000000000000000001641051505620616400207030ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable from contextlib import AbstractContextManager, nullcontext from copy import deepcopy from typing import TYPE_CHECKING, Any, Literal import numpy as np import pandas as pd import pytest from xarray import AlignmentError, DataArray, Dataset, Variable, concat, set_options from xarray.core import dtypes, types from xarray.core.coordinates import Coordinates from xarray.core.indexes import PandasIndex from xarray.structure import merge from xarray.tests import ( ConcatenatableArray, InaccessibleArray, UnexpectedDataAccess, assert_array_equal, assert_equal, assert_identical, requires_dask, requires_pyarrow, ) from xarray.tests.indexes import XYIndex from xarray.tests.test_dataset import create_test_data if TYPE_CHECKING: from xarray.core.types import CombineAttrsOptions, JoinOptions # helper method to create multiple tests datasets to concat def create_concat_datasets( num_datasets: int = 2, seed: int | None = None, include_day: bool = True ) -> list[Dataset]: rng = np.random.default_rng(seed) lat = rng.standard_normal(size=(1, 4)) lon = rng.standard_normal(size=(1, 4)) result = [] variables = ["temperature", "pressure", "humidity", "precipitation", "cloud_cover"] for i in range(num_datasets): if include_day: data_tuple = ( ["x", "y", "day"], rng.standard_normal(size=(1, 4, 2)), ) data_vars = dict.fromkeys(variables, data_tuple) result.append( Dataset( data_vars=data_vars, coords={ "lat": (["x", "y"], lat), "lon": (["x", "y"], lon), "day": ["day" + str(i * 2 + 1), "day" + str(i * 2 + 2)], }, ) ) else: data_tuple = ( ["x", "y"], rng.standard_normal(size=(1, 4)), ) data_vars = dict.fromkeys(variables, data_tuple) result.append( Dataset( data_vars=data_vars, coords={"lat": (["x", "y"], lat), "lon": (["x", "y"], lon)}, ) ) return result # helper method to create multiple tests datasets to concat with specific types def create_typed_datasets( num_datasets: int = 2, seed: int | None = None ) -> list[Dataset]: var_strings = ["a", "b", "c", "d", "e", "f", "g", "h"] rng = np.random.default_rng(seed) lat = rng.standard_normal(size=(1, 4)) lon = rng.standard_normal(size=(1, 4)) return [ Dataset( data_vars={ "float": (["x", "y", "day"], rng.standard_normal(size=(1, 4, 2))), "float2": (["x", "y", "day"], rng.standard_normal(size=(1, 4, 2))), "string": ( ["x", "y", "day"], rng.choice(var_strings, size=(1, 4, 2)), ), "int": (["x", "y", "day"], rng.integers(0, 10, size=(1, 4, 2))), "datetime64": ( ["x", "y", "day"], np.arange( np.datetime64("2017-01-01"), np.datetime64("2017-01-09") ).reshape(1, 4, 2), ), "timedelta64": ( ["x", "y", "day"], np.reshape([pd.Timedelta(days=i) for i in range(8)], [1, 4, 2]), ), }, coords={ "lat": (["x", "y"], lat), "lon": (["x", "y"], lon), "day": ["day" + str(i * 2 + 1), "day" + str(i * 2 + 2)], }, ) for i in range(num_datasets) ] def test_concat_compat() -> None: ds1 = Dataset( { "has_x_y": (("y", "x"), [[1, 2]]), "has_x": ("x", [1, 2]), "no_x_y": ("z", [1, 2]), }, coords={"x": [0, 1], "y": [0], "z": [-1, -2]}, ) ds2 = Dataset( { "has_x_y": (("y", "x"), [[3, 4]]), "has_x": ("x", [1, 2]), "no_x_y": (("q", "z"), [[1, 2]]), }, coords={"x": [0, 1], "y": [1], "z": [-1, -2], "q": [0]}, ) result = concat([ds1, ds2], dim="y", data_vars="minimal", compat="broadcast_equals") assert_equal(ds2.no_x_y, result.no_x_y.transpose()) for var in ["has_x", "no_x_y"]: assert "y" not in result[var].dims and 
"y" not in result[var].coords with pytest.raises(ValueError, match=r"'q' not present in all datasets"): concat([ds1, ds2], dim="q", data_vars="all", join="outer") with pytest.raises(ValueError, match=r"'q' not present in all datasets"): concat([ds2, ds1], dim="q", data_vars="all", join="outer") def test_concat_missing_var() -> None: datasets = create_concat_datasets(2, seed=123) expected = concat(datasets, dim="day") vars_to_drop = ["humidity", "precipitation", "cloud_cover"] expected = expected.drop_vars(vars_to_drop) expected["pressure"][..., 2:] = np.nan datasets[0] = datasets[0].drop_vars(vars_to_drop) datasets[1] = datasets[1].drop_vars(vars_to_drop + ["pressure"]) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == ["temperature", "pressure"] assert_identical(actual, expected) @pytest.mark.parametrize("var", ["var4", pytest.param("var5", marks=requires_pyarrow)]) def test_concat_extension_array(var) -> None: data1 = create_test_data(use_extension_array=True) data2 = create_test_data(use_extension_array=True) concatenated = concat([data1, data2], dim="dim1") assert pd.Series( concatenated[var] == type(data2[var].variable.data)._concat_same_type( [ data1[var].variable.data, data2[var].variable.data, ] ) ).all() # need to wrap in series because pyarrow bool does not support `all` def test_concat_missing_multiple_consecutive_var() -> None: datasets = create_concat_datasets(3, seed=123) expected = concat(datasets, dim="day") vars_to_drop = ["humidity", "pressure"] expected["pressure"][..., :4] = np.nan expected["humidity"][..., :4] = np.nan datasets[0] = datasets[0].drop_vars(vars_to_drop) datasets[1] = datasets[1].drop_vars(vars_to_drop) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == [ "temperature", "precipitation", "cloud_cover", "pressure", "humidity", ] assert_identical(actual, expected) def test_concat_all_empty() -> None: ds1 = Dataset() ds2 = Dataset() expected = Dataset() actual = concat([ds1, ds2], dim="new_dim") assert_identical(actual, expected) def test_concat_second_empty() -> None: ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1}) ds2 = Dataset(coords={"x": 0.1}) expected = Dataset(data_vars={"a": ("y", [0.1, np.nan])}, coords={"x": 0.1}) actual = concat([ds1, ds2], dim="y") assert_identical(actual, expected) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan])}, coords={"x": ("y", [0.1, 0.1])} ) actual = concat([ds1, ds2], dim="y", coords="all") assert_identical(actual, expected) def test_concat_second_empty_with_scalar_data_var_only_on_first() -> None: # Check concatenating scalar data_var only present in ds1 ds1 = Dataset(data_vars={"a": ("y", [0.1]), "b": 0.1}, coords={"x": 0.1}) ds2 = Dataset(coords={"x": 0.1}) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan]), "b": ("y", [0.1, np.nan])}, coords={"x": ("y", [0.1, 0.1])}, ) actual = concat([ds1, ds2], dim="y", coords="all", data_vars="all") assert_identical(actual, expected) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan]), "b": 0.1}, coords={"x": 0.1} ) actual = concat( [ds1, ds2], dim="y", coords="different", data_vars="different", compat="equals" ) assert_identical(actual, expected) def test_concat_multiple_missing_variables() -> None: datasets = create_concat_datasets(2, seed=123) expected = concat(datasets, dim="day") vars_to_drop = ["pressure", "cloud_cover"] expected["pressure"][..., 2:] = np.nan expected["cloud_cover"][..., 2:] = np.nan datasets[1] = datasets[1].drop_vars(vars_to_drop) actual = concat(datasets, dim="day") # 
check the variables orders are the same assert list(actual.data_vars.keys()) == [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] assert_identical(actual, expected) @pytest.mark.parametrize("include_day", [True, False]) def test_concat_multiple_datasets_missing_vars(include_day: bool) -> None: vars_to_drop = [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] # must specify if concat_dim='day' is not part of the vars kwargs = {"data_vars": "all"} if not include_day else {} datasets = create_concat_datasets( len(vars_to_drop), seed=123, include_day=include_day ) expected = concat(datasets, dim="day", **kwargs) # type: ignore[call-overload] for i, name in enumerate(vars_to_drop): if include_day: expected[name][..., i * 2 : (i + 1) * 2] = np.nan else: expected[name][i : i + 1, ...] = np.nan # set up the test data datasets = [ ds.drop_vars(varname) for ds, varname in zip(datasets, vars_to_drop, strict=True) ] actual = concat(datasets, dim="day", **kwargs) # type: ignore[call-overload] assert list(actual.data_vars.keys()) == [ "pressure", "humidity", "precipitation", "cloud_cover", "temperature", ] assert_identical(actual, expected) def test_concat_multiple_datasets_with_multiple_missing_variables() -> None: vars_to_drop_in_first = ["temperature", "pressure"] vars_to_drop_in_second = ["humidity", "precipitation", "cloud_cover"] datasets = create_concat_datasets(2, seed=123) expected = concat(datasets, dim="day") for name in vars_to_drop_in_first: expected[name][..., :2] = np.nan for name in vars_to_drop_in_second: expected[name][..., 2:] = np.nan # set up the test data datasets[0] = datasets[0].drop_vars(vars_to_drop_in_first) datasets[1] = datasets[1].drop_vars(vars_to_drop_in_second) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == [ "humidity", "precipitation", "cloud_cover", "temperature", "pressure", ] assert_identical(actual, expected) def test_concat_type_of_missing_fill() -> None: datasets = create_typed_datasets(2, seed=123) expected1 = concat(datasets, dim="day", fill_value=dtypes.NA) expected2 = concat(datasets[::-1], dim="day", fill_value=dtypes.NA) vars = ["float", "float2", "string", "int", "datetime64", "timedelta64"] expected = [expected2, expected1] for i, exp in enumerate(expected): sl = slice(i * 2, (i + 1) * 2) exp["float2"][..., sl] = np.nan exp["datetime64"][..., sl] = np.nan exp["timedelta64"][..., sl] = np.nan var = exp["int"] * 1.0 var[..., sl] = np.nan exp["int"] = var var = exp["string"].astype(object) var[..., sl] = np.nan exp["string"] = var # set up the test data datasets[1] = datasets[1].drop_vars(vars[1:]) actual = concat(datasets, dim="day", fill_value=dtypes.NA) assert_identical(actual, expected[1]) # reversed actual = concat(datasets[::-1], dim="day", fill_value=dtypes.NA) assert_identical(actual, expected[0]) def test_concat_order_when_filling_missing() -> None: vars_to_drop_in_first: list[str] = [] # drop middle vars_to_drop_in_second = ["humidity"] datasets = create_concat_datasets(2, seed=123) expected1 = concat(datasets, dim="day") for name in vars_to_drop_in_second: expected1[name][..., 2:] = np.nan expected2 = concat(datasets[::-1], dim="day") for name in vars_to_drop_in_second: expected2[name][..., :2] = np.nan # set up the test data datasets[0] = datasets[0].drop_vars(vars_to_drop_in_first) datasets[1] = datasets[1].drop_vars(vars_to_drop_in_second) actual = concat(datasets, dim="day") assert list(actual.data_vars.keys()) == [ "temperature", "pressure", "humidity", 
"precipitation", "cloud_cover", ] assert_identical(actual, expected1) actual = concat(datasets[::-1], dim="day") assert list(actual.data_vars.keys()) == [ "temperature", "pressure", "precipitation", "cloud_cover", "humidity", ] assert_identical(actual, expected2) @pytest.fixture def concat_var_names() -> Callable: # create var names list with one missing value def get_varnames(var_cnt: int = 10, list_cnt: int = 10) -> list[list[str]]: orig = [f"d{i:02d}" for i in range(var_cnt)] var_names = [] for _i in range(list_cnt): l1 = orig.copy() var_names.append(l1) return var_names return get_varnames @pytest.fixture def create_concat_ds() -> Callable: def create_ds( var_names: list[list[str]], dim: bool = False, coord: bool = False, drop_idx: list[int] | None = None, ) -> list[Dataset]: out_ds = [] ds = Dataset() ds = ds.assign_coords({"x": np.arange(2)}) ds = ds.assign_coords({"y": np.arange(3)}) ds = ds.assign_coords({"z": np.arange(4)}) for i, dsl in enumerate(var_names): vlist = dsl.copy() if drop_idx is not None: vlist.pop(drop_idx[i]) foo_data = np.arange(48, dtype=float).reshape(2, 2, 3, 4) dsi = ds.copy() if coord: dsi = ds.assign({"time": (["time"], [i * 2, i * 2 + 1])}) for k in vlist: dsi = dsi.assign({k: (["time", "x", "y", "z"], foo_data.copy())}) if not dim: dsi = dsi.isel(time=0) out_ds.append(dsi) return out_ds return create_ds @pytest.mark.parametrize("dim", [True, False]) @pytest.mark.parametrize("coord", [True, False]) def test_concat_fill_missing_variables( concat_var_names, create_concat_ds, dim: bool, coord: bool ) -> None: var_names = concat_var_names() drop_idx = [0, 7, 6, 4, 4, 8, 0, 6, 2, 0] expected = concat( create_concat_ds(var_names, dim=dim, coord=coord), dim="time", data_vars="all" ) for i, idx in enumerate(drop_idx): if dim: expected[var_names[0][idx]][i * 2 : i * 2 + 2] = np.nan else: expected[var_names[0][idx]][i] = np.nan concat_ds = create_concat_ds(var_names, dim=dim, coord=coord, drop_idx=drop_idx) actual = concat(concat_ds, dim="time", data_vars="all") assert list(actual.data_vars.keys()) == [ "d01", "d02", "d03", "d04", "d05", "d06", "d07", "d08", "d09", "d00", ] assert_identical(actual, expected) class TestConcatDataset: @pytest.fixture def data(self, request) -> Dataset: use_extension_array = request.param if hasattr(request, "param") else False return create_test_data(use_extension_array=use_extension_array).drop_dims( "dim3" ) def rectify_dim_order(self, data: Dataset, dataset) -> Dataset: # return a new dataset with all variable dimensions transposed into # the order in which they are found in `data` return Dataset( {k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()}, dataset.coords, attrs=dataset.attrs, ) @pytest.mark.parametrize("coords", ["different", "minimal"]) @pytest.mark.parametrize( "dim,data", [["dim1", True], ["dim2", False]], indirect=["data"] ) def test_concat_simple(self, data: Dataset, dim, coords) -> None: datasets = [g for _, g in data.groupby(dim)] assert_identical(data, concat(datasets, dim, coords=coords, compat="equals")) def test_concat_merge_variables_present_in_some_datasets( self, data: Dataset ) -> None: # coordinates present in some datasets but not others ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1}) ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2}) actual = concat([ds1, ds2], dim="y", coords="minimal") expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2}) assert_identical(expected, actual) # data variables present in some datasets but not others 
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] data0, data1 = deepcopy(split_data) data1["foo"] = ("bar", np.random.randn(10)) actual = concat([data0, data1], "dim1", data_vars="minimal") expected = data.copy().assign(foo=data1.foo) assert_identical(expected, actual) # expand foo actual = concat([data0, data1], "dim1", data_vars="all") foo = np.ones((8, 10), dtype=data1.foo.dtype) * np.nan foo[3:] = data1.foo.values[None, ...] expected = data.copy().assign(foo=(["dim1", "bar"], foo)) assert_identical(expected, actual) @pytest.mark.parametrize("data", [False], indirect=["data"]) def test_concat_2(self, data: Dataset) -> None: dim = "dim2" datasets = [g.squeeze(dim) for _, g in data.groupby(dim, squeeze=False)] concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim] actual = concat(datasets, data[dim], coords=concat_over) assert_identical(data, self.rectify_dim_order(data, actual)) @pytest.mark.parametrize("coords", ["different", "minimal", "all"]) @pytest.mark.parametrize("dim", ["dim1", "dim2"]) def test_concat_coords_kwarg( self, data: Dataset, dim: str, coords: Literal["all", "minimal", "different"] ) -> None: data = data.copy(deep=True) # make sure the coords argument behaves as expected data.coords["extra"] = ("dim4", np.arange(3)) datasets = [g for _, g in data.groupby(dim)] actual = concat( datasets, data[dim], coords=coords, data_vars="all", compat="equals" ) if coords == "all": expected = np.array([data["extra"].values for _ in range(data.sizes[dim])]) assert_array_equal(actual["extra"].values, expected) else: assert_equal(data["extra"], actual["extra"]) def test_concat(self, data: Dataset) -> None: split_data = [ data.isel(dim1=slice(3)), data.isel(dim1=3), data.isel(dim1=slice(4, None)), ] assert_identical(data, concat(split_data, "dim1")) def test_concat_dim_precedence(self, data: Dataset) -> None: # verify that the dim argument takes precedence over # concatenating dataset variables of the same name dim = (2 * data["dim1"]).rename("dim1") datasets = [g for _, g in data.groupby("dim1", squeeze=False)] expected = data.copy() expected["dim1"] = dim assert_identical(expected, concat(datasets, dim)) def test_concat_data_vars_typing(self) -> None: # Testing typing, can be removed if the next function works with annotations. 
data = Dataset({"foo": ("x", np.random.randn(10))}) objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] actual = concat(objs, dim="x", data_vars="minimal") assert_identical(data, actual) @pytest.mark.parametrize("data_vars", ["minimal", "different", "all", [], ["foo"]]) def test_concat_data_vars(self, data_vars) -> None: data = Dataset({"foo": ("x", np.random.randn(10))}) objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] actual = concat(objs, dim="x", data_vars=data_vars, compat="equals") assert_identical(data, actual) @pytest.mark.parametrize("coords", ["different", "all", ["c"]]) def test_concat_coords(self, coords) -> None: data = Dataset({"foo": ("x", np.random.randn(10))}) expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5)) objs = [ data.isel(x=slice(5)).assign_coords(c=0), data.isel(x=slice(5, None)).assign_coords(c=1), ] if coords == "different": actual = concat(objs, dim="x", coords=coords, compat="equals") else: actual = concat(objs, dim="x", coords=coords) assert_identical(expected, actual) @pytest.mark.parametrize("coords", ["minimal", []]) def test_concat_coords_raises_merge_error(self, coords) -> None: data = Dataset({"foo": ("x", np.random.randn(10))}) objs = [ data.isel(x=slice(5)).assign_coords(c=0), data.isel(x=slice(5, None)).assign_coords(c=1), ] with pytest.raises(merge.MergeError, match="conflicting values"): concat(objs, dim="x", coords=coords, compat="equals") @pytest.mark.parametrize("data_vars", ["different", "all", ["foo"]]) def test_concat_constant_index(self, data_vars) -> None: # GH425 ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]}) if data_vars == "different": actual = concat([ds1, ds2], "y", data_vars=data_vars, compat="equals") else: actual = concat([ds1, ds2], "y", data_vars=data_vars) assert_identical(expected, actual) def test_concat_constant_index_None(self) -> None: ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) actual = concat([ds1, ds2], "new_dim", data_vars=None, compat="equals") expected = Dataset( {"foo": ("new_dim", [1.5, 2.5])}, coords={"y": 1}, ) assert_identical(actual, expected) def test_concat_constant_index_minimal(self) -> None: ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.raises(merge.MergeError, match="conflicting values"): concat([ds1, ds2], dim="new_dim", data_vars="minimal") with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises( ValueError, match="data_vars='minimal' and coords='minimal'" ): concat([ds1, ds2], dim="new_dim", data_vars="minimal") def test_concat_size0(self) -> None: data = create_test_data() split_data = [data.isel(dim1=slice(0, 0)), data] actual = concat(split_data, "dim1") assert_identical(data, actual) actual = concat(split_data[::-1], "dim1") assert_identical(data, actual) def test_concat_autoalign(self) -> None: ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])}) ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])}) actual = concat([ds1, ds2], "y", data_vars="all", join="outer") expected = Dataset( { "foo": DataArray( [[1, 2, np.nan], [1, np.nan, 2]], dims=["y", "x"], coords={"x": [1, 2, 3]}, ) } ) assert_identical(expected, actual) def test_concat_errors(self) -> None: data = create_test_data() split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] with pytest.raises(ValueError, 
match=r"must supply at least one"): concat([], "dim1") with pytest.raises(ValueError, match=r"Cannot specify both .*='different'"): concat( [data, data], dim="concat_dim", data_vars="different", compat="override" ) with pytest.raises(ValueError, match=r"must supply at least one"): concat([], "dim1") with pytest.raises(ValueError, match=r"are not found in the coordinates"): concat([data, data], "new_dim", coords=["not_found"]) with pytest.raises(ValueError, match=r"are not found in the data variables"): concat([data, data], "new_dim", data_vars=["not_found"]) with pytest.raises(ValueError, match=r"global attributes not"): # call deepcopy separately to get unique attrs data0 = deepcopy(split_data[0]) data1 = deepcopy(split_data[1]) data1.attrs["foo"] = "bar" concat([data0, data1], "dim1", compat="identical") assert_identical(data, concat([data0, data1], "dim1", compat="equals")) with pytest.raises(ValueError, match=r"compat.* invalid"): concat(split_data, "dim1", compat="foobar") # type: ignore[call-overload] with pytest.raises(ValueError, match=r"compat.* invalid"): concat(split_data, "dim1", compat="minimal") with pytest.raises(ValueError, match=r"unexpected value for"): concat([data, data], "new_dim", coords="foobar") with pytest.raises( ValueError, match=r"coordinate in some datasets but not others" ): concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z") with pytest.raises( ValueError, match=r"coordinate in some datasets but not others" ): concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z") def test_concat_join_kwarg(self) -> None: ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]}) ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]}) expected: dict[JoinOptions, Any] = {} expected["outer"] = Dataset( {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])}, {"x": [0, 1], "y": [0, 0.0001]}, ) expected["inner"] = Dataset( {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []} ) expected["left"] = Dataset( {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) expected["right"] = Dataset( {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0.0001]}, ) expected["override"] = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"): actual = concat([ds1, ds2], join="exact", dim="x") for join, expected_item in expected.items(): actual = concat([ds1, ds2], join=join, dim="x") assert_equal(actual, expected_item) # regression test for #3681 actual = concat( [ds1.drop_vars("x"), ds2.drop_vars("x")], join="override", dim="y" ) expected2 = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]} ) assert_identical(actual, expected2) @pytest.mark.parametrize( "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 41, 
"b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": 41, "c": 43, "d": 44}, False, ), ( lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {}, {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": -1, "b": 0, "c": 1}, False, ), ], ) def test_concat_combine_attrs_kwarg( self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception ): ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs=var1_attrs) ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs=var2_attrs) if expect_exception: with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"): concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) else: actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) expected = Dataset( {"a": ("x", [0, 0])}, {"x": [0, 1]}, attrs=expected_attrs ) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": 41, "c": 43, "d": 44}, False, ), ( lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {}, {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": -1, "b": 0, "c": 1}, False, ), ], ) def test_concat_combine_attrs_kwarg_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" ds1 = Dataset({"a": ("x", [0], attrs1)}, coords={"x": ("x", [0], attrs1)}) ds2 = Dataset({"a": ("x", [0], attrs2)}, coords={"x": ("x", [1], attrs2)}) if expect_exception: with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"): concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) else: actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) expected = Dataset( {"a": ("x", [0, 0], expected_attrs)}, {"x": ("x", [0, 1], expected_attrs)}, ) assert_identical(actual, expected) def test_concat_promote_shape_with_mixed_dims_within_variables(self) -> None: objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1]}) assert_identical(actual, expected) objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})] actual = concat(objs, "x") assert_identical(actual, expected) def test_concat_promote_shape_with_mixed_dims_between_variables(self) -> None: objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})] actual = concat(objs, "x", data_vars="all") expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])}) assert_identical(actual, expected) def test_concat_promote_shape_with_mixed_dims_in_coord_variable(self) -> None: objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])}) assert_identical(actual, expected) def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim( self, ) -> None: # values should repeat 
        objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
        actual = concat(objs, "x", coords="different", compat="equals")
        expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
        assert_identical(actual, expected)

        actual = concat(objs, "x", coords="all")
        assert_identical(actual, expected)

    def test_concat_promote_shape_broadcast_1d_x_1d_goes_to_2d(self) -> None:
        objs = [
            Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}),
            Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}),
        ]
        actual = concat(objs, "x")
        expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]})
        assert_identical(actual, expected)

    def test_concat_promote_shape_with_scalar_coordinates(self) -> None:
        # regression GH6384
        objs = [
            Dataset({}, {"x": pd.Interval(-1, 0, closed="right")}),
            Dataset({"x": [pd.Interval(0, 1, closed="right")]}),
        ]
        actual = concat(objs, "x")
        expected = Dataset(
            {
                "x": [
                    pd.Interval(-1, 0, closed="right"),
                    pd.Interval(0, 1, closed="right"),
                ]
            }
        )
        assert_identical(actual, expected)

    def test_concat_promote_shape_with_coordinates_of_particular_dtypes(self) -> None:
        # regression GH6416 (coord dtype) and GH6434
        time_data1 = np.array(["2022-01-01", "2022-02-01"], dtype="datetime64[ns]")
        time_data2 = np.array("2022-03-01", dtype="datetime64[ns]")
        time_expected = np.array(
            ["2022-01-01", "2022-02-01", "2022-03-01"], dtype="datetime64[ns]"
        )
        objs = [Dataset({}, {"time": time_data1}), Dataset({}, {"time": time_data2})]
        actual = concat(objs, "time")
        expected = Dataset({}, {"time": time_expected})
        assert_identical(actual, expected)
        assert isinstance(actual.indexes["time"], pd.DatetimeIndex)

    def test_concat_do_not_promote(self) -> None:
        # GH438
        objs = [
            Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
            Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}),
        ]
        expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]})
        actual = concat(objs, "t")
        assert_identical(expected, actual)

        objs = [
            Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
            Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}),
        ]
        with pytest.raises(ValueError):
            concat(objs, "t", coords="minimal")

    def test_concat_dim_is_variable(self) -> None:
        objs = [Dataset({"x": 0}), Dataset({"x": 1})]
        coord = Variable("y", [3, 4], attrs={"foo": "bar"})
        expected = Dataset({"x": ("y", [0, 1]), "y": coord})
        actual = concat(objs, coord, data_vars="all")
        assert_identical(actual, expected)

    def test_concat_dim_is_dataarray(self) -> None:
        objs = [Dataset({"x": 0}), Dataset({"x": 1})]
        coord = DataArray([3, 4], dims="y", attrs={"foo": "bar"})
        expected = Dataset({"x": ("y", [0, 1]), "y": coord})
        actual = concat(objs, coord, data_vars="all")
        assert_identical(actual, expected)

    def test_concat_multiindex(self) -> None:
        midx = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]])
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        expected = Dataset(coords=midx_coords)
        actual = concat(
            [expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x"
        )
        assert expected.equals(actual)
        assert isinstance(actual.x.to_index(), pd.MultiIndex)

    def test_concat_along_new_dim_multiindex(self) -> None:
        # see https://github.com/pydata/xarray/issues/6881
        level_names = ["x_level_0", "x_level_1"]
        midx = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]], names=level_names)
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        ds = Dataset(coords=midx_coords)
        concatenated = concat([ds], "new")
        actual = list(concatenated.xindexes.get_all_coords("x"))
        expected = ["x"] + level_names
        assert actual == expected

    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0,
{"a": 2, "b": 1}]) def test_concat_fill_value(self, fill_value) -> None: datasets = [ Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1])}, {"x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1])}, {"x": [0, 1]}), ] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_a = fill_value_b = np.nan elif isinstance(fill_value, dict): fill_value_a = fill_value["a"] fill_value_b = fill_value["b"] else: fill_value_a = fill_value_b = fill_value expected = Dataset( { "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]), "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]), }, {"x": [0, 1, 2]}, ) actual = concat( datasets, dim="t", fill_value=fill_value, data_vars="all", join="outer" ) assert_identical(actual, expected) @pytest.mark.parametrize("dtype", [str, bytes]) @pytest.mark.parametrize("dim", ["x1", "x2"]) def test_concat_str_dtype(self, dtype, dim) -> None: data = np.arange(4).reshape([2, 2]) da1 = Dataset( { "data": (["x1", "x2"], data), "x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype), } ) da2 = Dataset( { "data": (["x1", "x2"], data), "x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype), } ) actual = concat([da1, da2], dim=dim, join="outer") assert np.issubdtype(actual.x2.dtype, dtype) def test_concat_avoids_index_auto_creation(self) -> None: # TODO once passing indexes={} directly to Dataset constructor is allowed then no need to create coords first coords = Coordinates( {"x": ConcatenatableArray(np.array([1, 2, 3]))}, indexes={} ) datasets = [ Dataset( {"a": (["x", "y"], ConcatenatableArray(np.zeros((3, 3))))}, coords=coords, ) for _ in range(2) ] # should not raise on concat combined = concat(datasets, dim="x") assert combined["a"].shape == (6, 3) assert combined["a"].dims == ("x", "y") # nor have auto-created any indexes assert combined.indexes == {} # should not raise on stack combined = concat(datasets, dim="z", data_vars="all") assert combined["a"].shape == (2, 3, 3) assert combined["a"].dims == ("z", "x", "y") # nor have auto-created any indexes assert combined.indexes == {} def test_concat_avoids_index_auto_creation_new_1d_coord(self) -> None: # create 0D coordinates (without indexes) datasets = [ Dataset( coords={"x": ConcatenatableArray(np.array(10))}, ) for _ in range(2) ] with pytest.raises(UnexpectedDataAccess): concat(datasets, dim="x", create_index_for_new_dim=True) # should not raise on concat iff create_index_for_new_dim=False combined = concat(datasets, dim="x", create_index_for_new_dim=False) assert combined["x"].shape == (2,) assert combined["x"].dims == ("x",) # nor have auto-created any indexes assert combined.indexes == {} def test_concat_promote_shape_without_creating_new_index(self) -> None: # different shapes but neither have indexes ds1 = Dataset(coords={"x": 0}) ds2 = Dataset(data_vars={"x": [1]}).drop_indexes("x") actual = concat([ds1, ds2], dim="x", create_index_for_new_dim=False) expected = Dataset(data_vars={"x": [0, 1]}).drop_indexes("x") assert_identical(actual, expected, check_default_indexes=False) assert actual.indexes == {} class TestConcatDataArray: def test_concat(self) -> None: ds = Dataset( { "foo": (["x", "y"], np.random.random((2, 3))), "bar": (["x", "y"], np.random.random((2, 3))), }, {"x": [0, 1]}, ) foo = ds["foo"] bar = ds["bar"] # from dataset array: expected = DataArray( np.array([foo.values, bar.values]), dims=["w", "x", "y"], coords={"x": [0, 1]}, ) actual = concat([foo, bar], "w") assert_equal(expected, actual) # from iteration: 
grouped = [g.squeeze() for _, g in foo.groupby("x", squeeze=False)] stacked = concat(grouped, ds["x"]) assert_identical(foo, stacked) # with an index as the 'dim' argument stacked = concat(grouped, pd.Index(ds["x"], name="x")) assert_identical(foo, stacked) actual2 = concat( [foo.isel(x=0), foo.isel(x=1)], pd.Index([0, 1]), coords="all" ).reset_coords(drop=True) expected = foo[:2].rename({"x": "concat_dim"}) assert_identical(expected, actual2) actual3 = concat( [foo.isel(x=0), foo.isel(x=1)], [0, 1], coords="all" ).reset_coords(drop=True) expected = foo[:2].rename({"x": "concat_dim"}) assert_identical(expected, actual3) with pytest.raises(ValueError, match=r"not identical"): concat([foo, bar], dim="w", compat="identical") with pytest.raises(ValueError, match=r"not a valid argument"): concat([foo, bar], dim="w", data_vars="different") def test_concat_encoding(self) -> None: # Regression test for GH1297 ds = Dataset( { "foo": (["x", "y"], np.random.random((2, 3))), "bar": (["x", "y"], np.random.random((2, 3))), }, {"x": [0, 1]}, ) foo = ds["foo"] foo.encoding = {"complevel": 5} ds.encoding = {"unlimited_dims": "x"} assert concat([foo, foo], dim="x").encoding == foo.encoding assert concat([ds, ds], dim="x").encoding == ds.encoding @requires_dask def test_concat_lazy(self) -> None: import dask.array as da arrays = [ DataArray( da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=["x", "y"] ) for _ in range(2) ] # should not raise combined = concat(arrays, dim="z") assert combined.shape == (2, 3, 3) assert combined.dims == ("z", "x", "y") def test_concat_avoids_index_auto_creation(self) -> None: # TODO once passing indexes={} directly to DataArray constructor is allowed then no need to create coords first coords = Coordinates( {"x": ConcatenatableArray(np.array([1, 2, 3]))}, indexes={} ) arrays = [ DataArray( ConcatenatableArray(np.zeros((3, 3))), dims=["x", "y"], coords=coords, ) for _ in range(2) ] # should not raise on concat combined = concat(arrays, dim="x") assert combined.shape == (6, 3) assert combined.dims == ("x", "y") # nor have auto-created any indexes assert combined.indexes == {} # should not raise on stack combined = concat(arrays, dim="z") assert combined.shape == (2, 3, 3) assert combined.dims == ("z", "x", "y") # nor have auto-created any indexes assert combined.indexes == {} @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0]) def test_concat_fill_value(self, fill_value) -> None: foo = DataArray([1, 2], coords=[("x", [1, 2])]) bar = DataArray([1, 2], coords=[("x", [1, 3])]) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan expected = DataArray( [[1, 2, fill_value], [1, fill_value, 2]], dims=["y", "x"], coords={"x": [1, 2, 3]}, ) actual = concat((foo, bar), dim="y", fill_value=fill_value, join="outer") assert_identical(actual, expected) def test_concat_join_kwarg(self) -> None: ds1 = Dataset( {"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]} ).to_dataarray() ds2 = Dataset( {"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]} ).to_dataarray() expected: dict[JoinOptions, Any] = {} expected["outer"] = Dataset( {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])}, {"x": [0, 1], "y": [0, 0.0001]}, ) expected["inner"] = Dataset( {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []} ) expected["left"] = Dataset( {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) expected["right"] = Dataset( {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)}, 
coords={"x": [0, 1], "y": [0.0001]}, ) expected["override"] = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"): actual = concat([ds1, ds2], join="exact", dim="x") for join, expected_item in expected.items(): actual = concat([ds1, ds2], join=join, dim="x") assert_equal(actual, expected_item.to_dataarray()) def test_concat_combine_attrs_kwarg(self) -> None: da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42}) da2 = DataArray([0], coords=[("x", [1])], attrs={"b": 42, "c": 43}) expected: dict[CombineAttrsOptions, Any] = {} expected["drop"] = DataArray([0, 0], coords=[("x", [0, 1])]) expected["no_conflicts"] = DataArray( [0, 0], coords=[("x", [0, 1])], attrs={"b": 42, "c": 43} ) expected["override"] = DataArray( [0, 0], coords=[("x", [0, 1])], attrs={"b": 42} ) with pytest.raises(ValueError, match=r"combine_attrs='identical'"): actual = concat([da1, da2], dim="x", combine_attrs="identical") with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"): da3 = da2.copy(deep=True) da3.attrs["b"] = 44 actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts") for combine_attrs, expected_item in expected.items(): actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs) assert_identical(actual, expected_item) @pytest.mark.parametrize("dtype", [str, bytes]) @pytest.mark.parametrize("dim", ["x1", "x2"]) def test_concat_str_dtype(self, dtype, dim) -> None: data = np.arange(4).reshape([2, 2]) da1 = DataArray( data=data, dims=["x1", "x2"], coords={"x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype)}, ) da2 = DataArray( data=data, dims=["x1", "x2"], coords={"x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype)}, ) actual = concat([da1, da2], dim=dim, join="outer") assert np.issubdtype(actual.x2.dtype, dtype) def test_concat_coord_name(self) -> None: da = DataArray([0], dims="a") da_concat = concat([da, da], dim=DataArray([0, 1], dims="b")) assert list(da_concat.coords) == ["b"] da_concat_std = concat([da, da], dim=DataArray([0, 1])) assert list(da_concat_std.coords) == ["dim_0"] @pytest.mark.parametrize("attr1", ({"a": {"meta": [10, 20, 30]}}, {"a": [1, 2, 3]}, {})) @pytest.mark.parametrize("attr2", ({"a": [1, 2, 3]}, {})) def test_concat_attrs_first_variable(attr1, attr2) -> None: arrs = [ DataArray([[1], [2]], dims=["x", "y"], attrs=attr1), DataArray([[3], [4]], dims=["x", "y"], attrs=attr2), ] concat_attrs = concat(arrs, "y").attrs assert concat_attrs == attr1 def test_concat_merge_single_non_dim_coord() -> None: da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1}) da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]}) expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1}) actual = concat([da1, da2], "x", coords="minimal", compat="override") assert_identical(actual, expected) actual = concat([da1, da2], "x", coords="different", compat="equals") assert_identical(actual, expected) with pytest.raises(ValueError, match=r"'y' not present in all datasets."): concat([da1, da2], dim="x", coords="all") da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1}) da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]}) da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1}) with pytest.raises(ValueError, match=r"'y' not present in all datasets"): concat([da1, da2, da3], dim="x", coords="all") with pytest.raises(ValueError, match=r"'y' not present in all datasets"): 
concat([da1, da2, da3], dim="x", coords="different", compat="equals") def test_concat_preserve_coordinate_order() -> None: x = np.arange(0, 5) y = np.arange(0, 10) time = np.arange(0, 4) data = np.zeros((4, 10, 5), dtype=bool) ds1 = Dataset( {"data": (["time", "y", "x"], data[0:2])}, coords={"time": time[0:2], "y": y, "x": x}, ) ds2 = Dataset( {"data": (["time", "y", "x"], data[2:4])}, coords={"time": time[2:4], "y": y, "x": x}, ) expected = Dataset( {"data": (["time", "y", "x"], data)}, coords={"time": time, "y": y, "x": x}, ) actual = concat([ds1, ds2], dim="time") # check dimension order for act, exp in zip(actual.dims, expected.dims, strict=True): assert act == exp assert actual.sizes[act] == expected.sizes[exp] # check coordinate order for act, exp in zip(actual.coords, expected.coords, strict=True): assert act == exp assert_identical(actual.coords[act], expected.coords[exp]) def test_concat_typing_check() -> None: ds = Dataset({"foo": 1}, {"bar": 2}) da = Dataset({"foo": 3}, {"bar": 4}).to_dataarray(dim="foo") # concatenate a list of non-homogeneous types must raise TypeError with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): concat([ds, da], dim="foo") # type: ignore[type-var] with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): concat([da, ds], dim="foo") # type: ignore[type-var] def test_concat_not_all_indexes() -> None: ds1 = Dataset(coords={"x": ("x", [1, 2])}) # ds2.x has no default index ds2 = Dataset(coords={"x": ("y", [3, 4])}) with pytest.raises( ValueError, match=r"'x' must have either an index or no index in all datasets.*" ): concat([ds1, ds2], dim="x") def test_concat_index_not_same_dim() -> None: ds1 = Dataset(coords={"x": ("x", [1, 2])}) ds2 = Dataset(coords={"x": ("y", [3, 4])}) # TODO: use public API for setting a non-default index, when available ds2._indexes["x"] = PandasIndex([3, 4], "y") with pytest.raises( ValueError, match=r"Cannot concatenate along dimension 'x' indexes with dimensions.*", ): concat([ds1, ds2], dim="x") class TestNewDefaults: def test_concat_second_empty_with_scalar_data_var_only_on_first(self) -> None: ds1 = Dataset(data_vars={"a": ("y", [0.1]), "b": 0.1}, coords={"x": 0.1}) ds2 = Dataset(coords={"x": 0.1}) expected = Dataset( data_vars={"a": ("y", [0.1, np.nan]), "b": 0.1}, coords={"x": 0.1} ) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='equals' to compat='override'", ): actual = concat( [ds1, ds2], dim="y", coords="different", data_vars="different" ) assert_identical(actual, expected) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): concat([ds1, ds2], dim="y", coords="different", data_vars="different") def test_concat_multiple_datasets_missing_vars(self) -> None: vars_to_drop = [ "temperature", "pressure", "humidity", "precipitation", "cloud_cover", ] datasets = create_concat_datasets( len(vars_to_drop), seed=123, include_day=False ) # set up the test data datasets = [ ds.drop_vars(varname) for ds, varname in zip(datasets, vars_to_drop, strict=True) ] with set_options(use_new_combine_kwarg_defaults=False): old = concat(datasets, dim="day") with set_options(use_new_combine_kwarg_defaults=True): new = concat(datasets, dim="day") assert_identical(old, new) @pytest.mark.parametrize("coords", ["different", "minimal", "all"]) def 
test_concat_coords_kwarg( self, coords: Literal["all", "minimal", "different"] ) -> None: data = create_test_data().drop_dims("dim3") # make sure the coords argument behaves as expected data.coords["extra"] = ("dim4", np.arange(3)) datasets = [g for _, g in data.groupby("dim1")] with set_options(use_new_combine_kwarg_defaults=False): expectation: AbstractContextManager = ( pytest.warns( FutureWarning, match="will change from compat='equals' to compat='override'", ) if coords == "different" else nullcontext() ) with expectation: old = concat(datasets, data["dim1"], coords=coords) with set_options(use_new_combine_kwarg_defaults=True): if coords == "different": with pytest.raises(ValueError): concat(datasets, data["dim1"], coords=coords) else: new = concat(datasets, data["dim1"], coords=coords) assert_identical(old, new) def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim( self, ) -> None: # values should repeat objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})] expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from coords='different' to coords='minimal'", ): old = concat(objs, "x") assert_identical(old, expected) with set_options(use_new_combine_kwarg_defaults=True): new = concat(objs, "x") with pytest.raises(AssertionError): assert_identical(new, old) with pytest.raises(ValueError, match="might be related to new default"): concat(objs, "x", coords="different") with pytest.raises(merge.MergeError, match="conflicting values"): concat(objs, "x", compat="equals") new = concat(objs, "x", coords="different", compat="equals") assert_identical(old, new) def test_concat_multi_dim_index() -> None: ds1 = ( Dataset( {"foo": (("x", "y"), np.random.randn(2, 2))}, coords={"x": [1, 2], "y": [3, 4]}, ) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) ds2 = ( Dataset( {"foo": (("x", "y"), np.random.randn(2, 2))}, coords={"x": [1, 2], "y": [5, 6]}, ) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) expected = ( Dataset( { "foo": ( ("x", "y"), np.concatenate([ds1.foo.data, ds2.foo.data], axis=-1), ) }, coords={"x": [1, 2], "y": [3, 4, 5, 6]}, ) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) # note: missing 'override' joins: list[types.JoinOptions] = ["inner", "outer", "exact", "left", "right"] for join in joins: actual = concat([ds1, ds2], dim="y", join=join) assert_identical(actual, expected, check_default_indexes=False) with pytest.raises(AlignmentError): actual = concat([ds1, ds2], dim="x", join="exact") # TODO: fix these, or raise better error message with pytest.raises(AssertionError): joins_lr: list[types.JoinOptions] = ["left", "right"] for join in joins_lr: actual = concat([ds1, ds2], dim="x", join=join) xarray-2025.09.0/xarray/tests/test_conventions.py000066400000000000000000000646661505620616400220140ustar00rootroot00000000000000from __future__ import annotations import contextlib import warnings import numpy as np import pandas as pd import pytest from xarray import ( Dataset, SerializationWarning, Variable, coding, conventions, date_range, open_dataset, ) from xarray.backends.common import WritableCFDataStore from xarray.backends.memory import InMemoryDataStore from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder from xarray.conventions import decode_cf from xarray.testing import assert_identical from xarray.tests import ( assert_array_equal, requires_cftime, requires_dask, 
requires_netCDF4, ) from xarray.tests.test_backends import CFEncodedBase class TestBoolTypeArray: def test_booltype_array(self) -> None: x = np.array([1, 0, 1, 1, 0], dtype="i1") bx = coding.variables.BoolTypeArray(x) assert bx.dtype == bool assert_array_equal(bx, np.array([True, False, True, True, False], dtype=bool)) x = np.array([[1, 0, 1], [0, 1, 0]], dtype="i1") bx = coding.variables.BoolTypeArray(x) assert_array_equal(bx.transpose((1, 0)), x.transpose((1, 0))) class TestNativeEndiannessArray: def test(self) -> None: x = np.arange(5, dtype=">i8") expected = np.arange(5, dtype="int64") a = coding.variables.NativeEndiannessArray(x) assert a.dtype == expected.dtype assert a.dtype == expected[:].dtype assert_array_equal(a, expected) y = np.arange(6, dtype=">i8").reshape((2, 3)) b = coding.variables.NativeEndiannessArray(y) expected2 = np.arange(6, dtype="int64").reshape((2, 3)) assert_array_equal(b.transpose((1, 0)), expected2.transpose((1, 0))) def test_decode_cf_with_conflicting_fill_missing_value() -> None: expected = Variable(["t"], [np.nan, np.nan, 2], {"units": "foobar"}) var = Variable( ["t"], np.arange(3), {"units": "foobar", "missing_value": 0, "_FillValue": 1} ) with pytest.warns(SerializationWarning, match="has multiple fill"): actual = conventions.decode_cf_variable("t", var) assert_identical(actual, expected) expected = Variable(["t"], np.arange(10), {"units": "foobar"}) var = Variable( ["t"], np.arange(10), {"units": "foobar", "missing_value": np.nan, "_FillValue": np.nan}, ) # the following code issues two warnings, so we need to check for both with pytest.warns(SerializationWarning) as winfo: actual = conventions.decode_cf_variable("t", var) for aw in winfo: assert "non-conforming" in str(aw.message) assert_identical(actual, expected) var = Variable( ["t"], np.arange(10), { "units": "foobar", "missing_value": np.float32(np.nan), "_FillValue": np.float32(np.nan), }, ) # the following code issues two warnings, so we need to check for both with pytest.warns(SerializationWarning) as winfo: actual = conventions.decode_cf_variable("t", var) for aw in winfo: assert "non-conforming" in str(aw.message) assert_identical(actual, expected) def test_decode_cf_variable_with_mismatched_coordinates() -> None: # tests for decoding mismatched coordinates attributes # see GH #1809 zeros1 = np.zeros((1, 5, 3)) orig = Dataset( { "XLONG": (["x", "y"], zeros1.squeeze(0), {}), "XLAT": (["x", "y"], zeros1.squeeze(0), {}), "foo": (["time", "x", "y"], zeros1, {"coordinates": "XTIME XLONG XLAT"}), "time": ("time", [0.0], {"units": "hours since 2017-01-01"}), } ) decoded = conventions.decode_cf(orig, decode_coords=True) assert decoded["foo"].encoding["coordinates"] == "XTIME XLONG XLAT" assert list(decoded.coords.keys()) == ["XLONG", "XLAT", "time"] decoded = conventions.decode_cf(orig, decode_coords=False) assert "coordinates" not in decoded["foo"].encoding assert decoded["foo"].attrs.get("coordinates") == "XTIME XLONG XLAT" assert list(decoded.coords.keys()) == ["time"] @requires_cftime class TestEncodeCFVariable: def test_incompatible_attributes(self) -> None: invalid_vars = [ Variable( ["t"], pd.date_range("2000-01-01", periods=3), {"units": "foobar"} ), Variable(["t"], pd.to_timedelta(["1 day"]), {"units": "foobar"}), # type: ignore[arg-type, unused-ignore] Variable(["t"], [0, 1, 2], {"add_offset": 0}, {"add_offset": 2}), Variable(["t"], [0, 1, 2], {"_FillValue": 0}, {"_FillValue": 2}), ] for var in invalid_vars: with pytest.raises(ValueError): conventions.encode_cf_variable(var) def 
test_missing_fillvalue(self) -> None: v = Variable(["x"], np.array([np.nan, 1, 2, 3])) v.encoding = {"dtype": "int16"} # Expect both the SerializationWarning and the RuntimeWarning from numpy with pytest.warns(Warning) as record: conventions.encode_cf_variable(v) # Check we got the expected warnings warning_messages = [str(w.message) for w in record] assert any( "floating point data as an integer" in msg for msg in warning_messages ) assert any( "invalid value encountered in cast" in msg for msg in warning_messages ) def test_multidimensional_coordinates(self) -> None: # regression test for GH1763 # Set up test case with coordinates that have overlapping (but not # identical) dimensions. zeros1 = np.zeros((1, 5, 3)) zeros2 = np.zeros((1, 6, 3)) zeros3 = np.zeros((1, 5, 4)) orig = Dataset( { "lon1": (["x1", "y1"], zeros1.squeeze(0), {}), "lon2": (["x2", "y1"], zeros2.squeeze(0), {}), "lon3": (["x1", "y2"], zeros3.squeeze(0), {}), "lat1": (["x1", "y1"], zeros1.squeeze(0), {}), "lat2": (["x2", "y1"], zeros2.squeeze(0), {}), "lat3": (["x1", "y2"], zeros3.squeeze(0), {}), "foo1": (["time", "x1", "y1"], zeros1, {"coordinates": "lon1 lat1"}), "foo2": (["time", "x2", "y1"], zeros2, {"coordinates": "lon2 lat2"}), "foo3": (["time", "x1", "y2"], zeros3, {"coordinates": "lon3 lat3"}), "time": ("time", [0.0], {"units": "hours since 2017-01-01"}), } ) orig = conventions.decode_cf(orig) # Encode the coordinates, as they would be in a netCDF output file. enc, attrs = conventions.encode_dataset_coordinates(orig) # Make sure we have the right coordinates for each variable. foo1_coords = enc["foo1"].attrs.get("coordinates", "") foo2_coords = enc["foo2"].attrs.get("coordinates", "") foo3_coords = enc["foo3"].attrs.get("coordinates", "") assert foo1_coords == "lon1 lat1" assert foo2_coords == "lon2 lat2" assert foo3_coords == "lon3 lat3" # Should not have any global coordinates. assert "coordinates" not in attrs def test_var_with_coord_attr(self) -> None: # regression test for GH6310 # don't overwrite user-defined "coordinates" attributes orig = Dataset( {"values": ("time", np.zeros(2), {"coordinates": "time lon lat"})}, coords={ "time": ("time", np.zeros(2)), "lat": ("time", np.zeros(2)), "lon": ("time", np.zeros(2)), }, ) # Encode the coordinates, as they would be in a netCDF output file. enc, attrs = conventions.encode_dataset_coordinates(orig) # Make sure we have the right coordinates for each variable. values_coords = enc["values"].attrs.get("coordinates", "") assert values_coords == "time lon lat" # Should not have any global coordinates. assert "coordinates" not in attrs def test_do_not_overwrite_user_coordinates(self) -> None: # don't overwrite user-defined "coordinates" encoding orig = Dataset( coords={"x": [0, 1, 2], "y": ("x", [5, 6, 7]), "z": ("x", [8, 9, 10])}, data_vars={"a": ("x", [1, 2, 3]), "b": ("x", [3, 5, 6])}, ) orig["a"].encoding["coordinates"] = "y" orig["b"].encoding["coordinates"] = "z" enc, _ = conventions.encode_dataset_coordinates(orig) assert enc["a"].attrs["coordinates"] == "y" assert enc["b"].attrs["coordinates"] == "z" orig["a"].attrs["coordinates"] = "foo" with pytest.raises(ValueError, match=r"'coordinates' found in both attrs"): conventions.encode_dataset_coordinates(orig) def test_deterministic_coords_encoding(self) -> None: # the coordinates attribute is sorted when set by xarray.conventions ... # ... 
on a variable's coordinates attribute ds = Dataset({"foo": 0}, coords={"baz": 0, "bar": 0}) vars, attrs = conventions.encode_dataset_coordinates(ds) assert vars["foo"].attrs["coordinates"] == "bar baz" assert attrs.get("coordinates") is None # ... on the global coordinates attribute ds = ds.drop_vars("foo") vars, attrs = conventions.encode_dataset_coordinates(ds) assert attrs["coordinates"] == "bar baz" def test_emit_coordinates_attribute_in_attrs(self) -> None: orig = Dataset( {"a": 1, "b": 1}, coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)}, ) orig["a"].attrs["coordinates"] = None enc, _ = conventions.encode_dataset_coordinates(orig) # check coordinate attribute emitted for 'a' assert "coordinates" not in enc["a"].attrs assert "coordinates" not in enc["a"].encoding # check coordinate attribute not emitted for 'b' assert enc["b"].attrs.get("coordinates") == "t" assert "coordinates" not in enc["b"].encoding def test_emit_coordinates_attribute_in_encoding(self) -> None: orig = Dataset( {"a": 1, "b": 1}, coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)}, ) orig["a"].encoding["coordinates"] = None enc, _ = conventions.encode_dataset_coordinates(orig) # check coordinate attribute emitted for 'a' assert "coordinates" not in enc["a"].attrs assert "coordinates" not in enc["a"].encoding # check coordinate attribute not emitted for 'b' assert enc["b"].attrs.get("coordinates") == "t" assert "coordinates" not in enc["b"].encoding @requires_cftime class TestDecodeCF: def test_dataset(self) -> None: original = Dataset( { "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}), "foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}), "y": ("t", [5, 10, -999], {"_FillValue": -999}), } ) expected = Dataset( {"foo": ("t", [0, 0, 0], {"units": "bar"})}, { "t": pd.date_range("2000-01-01", periods=3), "y": ("t", [5.0, 10.0, np.nan]), }, ) actual = conventions.decode_cf(original) assert_identical(expected, actual) def test_invalid_coordinates(self) -> None: # regression test for GH308, GH1809 original = Dataset({"foo": ("t", [1, 2], {"coordinates": "invalid"})}) decoded = Dataset({"foo": ("t", [1, 2], {}, {"coordinates": "invalid"})}) actual = conventions.decode_cf(original) assert_identical(decoded, actual) actual = conventions.decode_cf(original, decode_coords=False) assert_identical(original, actual) def test_decode_coordinates(self) -> None: # regression test for GH610 original = Dataset( {"foo": ("t", [1, 2], {"coordinates": "x"}), "x": ("t", [4, 5])} ) actual = conventions.decode_cf(original) assert actual.foo.encoding["coordinates"] == "x" def test_decode_coordinates_with_key_values(self) -> None: # regression test for GH9761 original = Dataset( { "temp": ( ("y", "x"), np.random.rand(2, 2), { "long_name": "temperature", "units": "K", "coordinates": "lat lon", "grid_mapping": "crs", }, ), "x": ( ("x"), np.arange(2), {"standard_name": "projection_x_coordinate", "units": "m"}, ), "y": ( ("y"), np.arange(2), {"standard_name": "projection_y_coordinate", "units": "m"}, ), "lat": ( ("y", "x"), np.random.rand(2, 2), {"standard_name": "latitude", "units": "degrees_north"}, ), "lon": ( ("y", "x"), np.random.rand(2, 2), {"standard_name": "longitude", "units": "degrees_east"}, ), "crs": ( (), None, { "grid_mapping_name": "transverse_mercator", "longitude_of_central_meridian": -2.0, }, ), "crs2": ( (), None, { "grid_mapping_name": "longitude_latitude", "longitude_of_central_meridian": -2.0, }, ), }, ) original.temp.attrs["grid_mapping"] = "crs: x y" vars, attrs, coords = 
conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs"} original.temp.attrs["grid_mapping"] = "crs: x y crs2: lat lon" vars, attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs", "crs2"} # stray colon original.temp.attrs["grid_mapping"] = "crs: x y crs2 : lat lon" vars, attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs", "crs2"} original.temp.attrs["grid_mapping"] = "crs x y crs2: lat lon" with pytest.raises(ValueError, match="misses ':'"): conventions.decode_cf_variables(original.variables, {}, decode_coords="all") del original.temp.attrs["grid_mapping"] original.temp.attrs["formula_terms"] = "A: lat D: lon E: crs2" vars, attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs2"} original.temp.attrs["formula_terms"] = "A: lat lon D: crs E: crs2" with pytest.warns(UserWarning, match="has malformed content"): vars, attrs, coords = conventions.decode_cf_variables( original.variables, {}, decode_coords="all" ) assert coords == {"lat", "lon", "crs", "crs2"} def test_0d_int32_encoding(self) -> None: original = Variable((), np.int32(0), encoding={"dtype": "int64"}) expected = Variable((), np.int64(0)) actual = coding.variables.NonStringCoder().encode(original) assert_identical(expected, actual) def test_decode_cf_with_multiple_missing_values(self) -> None: original = Variable(["t"], [0, 1, 2], {"missing_value": np.array([0, 1])}) expected = Variable(["t"], [np.nan, np.nan, 2], {}) with pytest.warns(SerializationWarning, match="has multiple fill"): actual = conventions.decode_cf_variable("t", original) assert_identical(expected, actual) def test_decode_cf_with_drop_variables(self) -> None: original = Dataset( { "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}), "x": ("x", [9, 8, 7], {"units": "km"}), "foo": ( ("t", "x"), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {"units": "bar"}, ), "y": ("t", [5, 10, -999], {"_FillValue": -999}), } ) expected = Dataset( { "t": pd.date_range("2000-01-01", periods=3), "foo": ( ("t", "x"), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {"units": "bar"}, ), "y": ("t", [5, 10, np.nan]), } ) actual = conventions.decode_cf(original, drop_variables=("x",)) actual2 = conventions.decode_cf(original, drop_variables="x") assert_identical(expected, actual) assert_identical(expected, actual2) @pytest.mark.filterwarnings("ignore:Ambiguous reference date string") def test_invalid_time_units_raises_eagerly(self) -> None: ds = Dataset({"time": ("time", [0, 1], {"units": "foobar since 123"})}) with pytest.raises(ValueError, match=r"unable to decode time"): decode_cf(ds) @pytest.mark.parametrize("decode_times", [True, False]) def test_invalid_timedelta_units_do_not_decode(self, decode_times) -> None: # regression test for #8269 ds = Dataset( {"time": ("time", [0, 1, 20], {"units": "days invalid", "_FillValue": 20})} ) expected = Dataset( {"time": ("time", [0.0, 1.0, np.nan], {"units": "days invalid"})} ) assert_identical(expected, decode_cf(ds, decode_times=decode_times)) @requires_cftime @pytest.mark.parametrize("time_unit", ["s", "ms", "us", "ns"]) def test_dataset_repr_with_netcdf4_datetimes(self, time_unit) -> None: # regression test for #347 attrs = {"units": "days since 0001-01-01", "calendar": "noleap"} with warnings.catch_warnings(): warnings.filterwarnings("ignore", "unable to decode 
time") ds = decode_cf(Dataset({"time": ("time", [0, 1], attrs)})) assert "(time) object" in repr(ds) attrs = {"units": "days since 1900-01-01"} ds = decode_cf( Dataset({"time": ("time", [0, 1], attrs)}), decode_times=CFDatetimeCoder(time_unit=time_unit), ) assert f"(time) datetime64[{time_unit}]" in repr(ds) @requires_cftime def test_decode_cf_datetime_transition_to_invalid(self) -> None: # manually create dataset with not-decoded date from datetime import datetime ds = Dataset(coords={"time": [0, 266 * 365]}) units = "days since 2000-01-01 00:00:00" ds.time.attrs = dict(units=units) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "unable to decode time") ds_decoded = conventions.decode_cf(ds) expected = np.array([datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)]) assert_array_equal(ds_decoded.time.values, expected) @requires_dask def test_decode_cf_with_dask(self) -> None: import dask.array as da original = Dataset( { "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}), "foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}), "bar": ("string2", [b"a", b"b"]), "baz": (("x"), [b"abc"], {"_Encoding": "utf-8"}), "y": ("t", [5, 10, -999], {"_FillValue": -999}), } ).chunk() decoded = conventions.decode_cf(original) assert all( isinstance(var.data, da.Array) for name, var in decoded.variables.items() if name not in decoded.xindexes ) assert_identical(decoded, conventions.decode_cf(original).compute()) @requires_dask def test_decode_dask_times(self) -> None: original = Dataset.from_dict( { "coords": {}, "dims": {"time": 5}, "data_vars": { "average_T1": { "dims": ("time",), "attrs": {"units": "days since 1958-01-01 00:00:00"}, "data": [87659.0, 88024.0, 88389.0, 88754.0, 89119.0], } }, } ) assert_identical( conventions.decode_cf(original.chunk()), conventions.decode_cf(original).chunk(), ) @pytest.mark.parametrize("time_unit", ["s", "ms", "us", "ns"]) def test_decode_cf_time_kwargs(self, time_unit) -> None: ds = Dataset.from_dict( { "coords": { "timedelta": { "data": np.array([1, 2, 3], dtype="int64"), "dims": "timedelta", "attrs": {"units": "days"}, }, "time": { "data": np.array([1, 2, 3], dtype="int64"), "dims": "time", "attrs": {"units": "days since 2000-01-01"}, }, }, "dims": {"time": 3, "timedelta": 3}, "data_vars": { "a": {"dims": ("time", "timedelta"), "data": np.ones((3, 3))}, }, } ) dsc = conventions.decode_cf( ds, decode_times=CFDatetimeCoder(time_unit=time_unit), decode_timedelta=CFTimedeltaCoder(time_unit=time_unit), ) assert dsc.timedelta.dtype == np.dtype(f"m8[{time_unit}]") assert dsc.time.dtype == np.dtype(f"M8[{time_unit}]") dsc = conventions.decode_cf(ds, decode_times=False) assert dsc.timedelta.dtype == np.dtype("int64") assert dsc.time.dtype == np.dtype("int64") dsc = conventions.decode_cf( ds, decode_times=CFDatetimeCoder(time_unit=time_unit), decode_timedelta=False, ) assert dsc.timedelta.dtype == np.dtype("int64") assert dsc.time.dtype == np.dtype(f"M8[{time_unit}]") dsc = conventions.decode_cf(ds, decode_times=False, decode_timedelta=True) assert dsc.timedelta.dtype == np.dtype("m8[ns]") assert dsc.time.dtype == np.dtype("int64") class CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore): def encode_variable(self, var, name=None): """encode one variable""" coder = coding.strings.EncodedStringCoder(allows_unicode=True) var = coder.encode(var, name=name) return var @requires_netCDF4 class TestCFEncodedDataStore(CFEncodedBase): @contextlib.contextmanager def create_store(self): yield CFEncodedInMemoryStore() 
@contextlib.contextmanager def roundtrip( self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False ): if save_kwargs is None: save_kwargs = {} if open_kwargs is None: open_kwargs = {} store = CFEncodedInMemoryStore() data.dump_to_store(store, **save_kwargs) yield open_dataset(store, **open_kwargs) @pytest.mark.skip("cannot roundtrip coordinates yet for CFEncodedInMemoryStore") def test_roundtrip_coordinates(self) -> None: pass def test_invalid_dataarray_names_raise(self) -> None: # only relevant for on-disk file formats pass def test_encoding_kwarg(self) -> None: # we haven't bothered to raise errors yet for unexpected encodings in # this test dummy pass def test_encoding_kwarg_fixed_width_string(self) -> None: # CFEncodedInMemoryStore doesn't support explicit string encodings. pass def test_encoding_unlimited_dims(self) -> None: # CFEncodedInMemoryStore doesn't support unlimited_dims. pass class TestDecodeCFVariableWithArrayUnits: def test_decode_cf_variable_with_array_units(self) -> None: v = Variable(["t"], [1, 2, 3], {"units": np.array(["foobar"], dtype=object)}) v_decoded = conventions.decode_cf_variable("test2", v) assert_identical(v, v_decoded) def test_decode_cf_variable_timedelta64(): variable = Variable(["time"], pd.timedelta_range("1D", periods=2)) decoded = conventions.decode_cf_variable("time", variable) assert decoded.encoding == {} assert_identical(decoded, variable) def test_decode_cf_variable_datetime64(): variable = Variable(["time"], pd.date_range("2000", periods=2)) decoded = conventions.decode_cf_variable("time", variable) assert decoded.encoding == {} assert_identical(decoded, variable) @requires_cftime def test_decode_cf_variable_cftime(): variable = Variable(["time"], date_range("2000", periods=2, use_cftime=True)) decoded = conventions.decode_cf_variable("time", variable) assert decoded.encoding == {} assert_identical(decoded, variable) def test_scalar_units() -> None: # test that scalar units does not raise an exception var = Variable(["t"], [np.nan, np.nan, 2], {"units": np.nan}) actual = conventions.decode_cf_variable("t", var) assert_identical(actual, var) def test_decode_cf_error_includes_variable_name(): ds = Dataset({"invalid": ([], 1e36, {"units": "days since 2000-01-01"})}) with pytest.raises(ValueError, match="Failed to decode variable 'invalid'"): decode_cf(ds) def test_encode_cf_variable_with_vlen_dtype() -> None: v = Variable( ["x"], np.array(["a", "b"], dtype=coding.strings.create_vlen_dtype(str)) ) encoded_v = conventions.encode_cf_variable(v) assert encoded_v.data.dtype.kind == "O" assert coding.strings.check_vlen_dtype(encoded_v.data.dtype) is str # empty array v = Variable(["x"], np.array([], dtype=coding.strings.create_vlen_dtype(str))) encoded_v = conventions.encode_cf_variable(v) assert encoded_v.data.dtype.kind == "O" assert coding.strings.check_vlen_dtype(encoded_v.data.dtype) is str def test_decode_cf_variables_decode_timedelta_warning() -> None: v = Variable(["time"], [1, 2], attrs={"units": "seconds"}) variables = {"a": v} with warnings.catch_warnings(): warnings.filterwarnings("error", "decode_timedelta", FutureWarning) conventions.decode_cf_variables(variables, {}, decode_timedelta=True) with pytest.warns(FutureWarning, match="decode_timedelta"): conventions.decode_cf_variables(variables, {}) xarray-2025.09.0/xarray/tests/test_coordinate_transform.py000066400000000000000000000202461505620616400236530ustar00rootroot00000000000000from collections.abc import Hashable from typing import Any import numpy as np import pytest 
import xarray as xr
from xarray.core.coordinate_transform import CoordinateTransform
from xarray.core.indexes import CoordinateTransformIndex
from xarray.tests import assert_equal, assert_identical


class SimpleCoordinateTransform(CoordinateTransform):
    """Simple uniform scale transform in a 2D space (x/y coordinates)."""

    def __init__(self, shape: tuple[int, int], scale: float, dtype: Any = None):
        super().__init__(("x", "y"), {"x": shape[1], "y": shape[0]}, dtype=dtype)

        self.scale = scale

        # array dimensions in reverse order (y = rows, x = cols)
        self.xy_dims = tuple(self.dims)
        self.dims = (self.dims[1], self.dims[0])

    def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]:
        assert set(dim_positions) == set(self.dims)
        return {
            name: dim_positions[dim] * self.scale
            for name, dim in zip(self.coord_names, self.xy_dims, strict=False)
        }

    def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]:
        return {dim: coord_labels[dim] / self.scale for dim in self.xy_dims}

    def equals(
        self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None
    ) -> bool:
        if not isinstance(other, SimpleCoordinateTransform):
            return False
        return self.scale == other.scale

    def __repr__(self) -> str:
        return f"Scale({self.scale})"


def test_abstract_coordinate_transform() -> None:
    tr = CoordinateTransform(["x"], {"x": 5})

    with pytest.raises(NotImplementedError):
        tr.forward({"x": [1, 2]})
    with pytest.raises(NotImplementedError):
        tr.reverse({"x": [3.0, 4.0]})
    with pytest.raises(NotImplementedError):
        tr.equals(CoordinateTransform(["x"], {"x": 5}))


def test_coordinate_transform_init() -> None:
    tr = SimpleCoordinateTransform((4, 4), 2.0)

    assert tr.coord_names == ("x", "y")
    # array dimensions in reverse order (y = rows, x = cols)
    assert tr.dims == ("y", "x")
    assert tr.dim_size == {"x": 4, "y": 4}
    assert tr.dtype == np.dtype(np.float64)

    tr2 = SimpleCoordinateTransform((4, 4), 2.0, dtype=np.int64)
    assert tr2.dtype == np.dtype(np.int64)


@pytest.mark.parametrize("dims", [None, ("y", "x")])
def test_coordinate_transform_generate_coords(dims) -> None:
    tr = SimpleCoordinateTransform((2, 2), 2.0)

    actual = tr.generate_coords(dims)
    expected = {"x": [[0.0, 2.0], [0.0, 2.0]], "y": [[0.0, 0.0], [2.0, 2.0]]}
    assert set(actual) == set(expected)
    np.testing.assert_array_equal(actual["x"], expected["x"])
    np.testing.assert_array_equal(actual["y"], expected["y"])


def create_coords(scale: float, shape: tuple[int, int]) -> xr.Coordinates:
    """Create x/y Xarray coordinate variables from a simple coordinate transform."""
    tr = SimpleCoordinateTransform(shape, scale)
    index = CoordinateTransformIndex(tr)
    return xr.Coordinates.from_xindex(index)


def test_coordinate_transform_variable() -> None:
    coords = create_coords(scale=2.0, shape=(2, 2))

    assert coords["x"].dtype == np.dtype(np.float64)
    assert coords["y"].dtype == np.dtype(np.float64)
    assert coords["x"].shape == (2, 2)
    assert coords["y"].shape == (2, 2)

    np.testing.assert_array_equal(np.array(coords["x"]), [[0.0, 2.0], [0.0, 2.0]])
    np.testing.assert_array_equal(np.array(coords["y"]), [[0.0, 0.0], [2.0, 2.0]])

    def assert_repr(var: xr.Variable):
        assert (
            repr(var._data)
            == "CoordinateTransformIndexingAdapter(transform=Scale(2.0))"
        )

    assert_repr(coords["x"].variable)
    assert_repr(coords["y"].variable)


def test_coordinate_transform_variable_repr_inline() -> None:
    var = create_coords(scale=2.0, shape=(2, 2))["x"].variable

    actual = var._data._repr_inline_(70)  # type: ignore[union-attr]
    assert actual == "0.0 2.0 0.0 2.0"

    # truncated inline repr
    var2 = create_coords(scale=2.0,
shape=(10, 10))["x"].variable actual2 = var2._data._repr_inline_(70) # type: ignore[union-attr] assert ( actual2 == "0.0 2.0 4.0 6.0 8.0 10.0 12.0 ... 6.0 8.0 10.0 12.0 14.0 16.0 18.0" ) def test_coordinate_transform_variable_repr() -> None: var = create_coords(scale=2.0, shape=(2, 2))["x"].variable actual = repr(var) expected = """ Size: 32B [4 values with dtype=float64] """.strip() assert actual == expected def test_coordinate_transform_variable_basic_outer_indexing() -> None: var = create_coords(scale=2.0, shape=(4, 4))["x"].variable assert var[0, 0] == 0.0 assert var[0, 1] == 2.0 assert var[0, -1] == 6.0 np.testing.assert_array_equal(var[:, 0:2], [[0.0, 2.0]] * 4) with pytest.raises(IndexError, match="out of bounds index"): var[5] with pytest.raises(IndexError, match="out of bounds index"): var[-5] def test_coordinate_transform_variable_vectorized_indexing() -> None: var = create_coords(scale=2.0, shape=(4, 4))["x"].variable actual = var[{"x": xr.Variable("z", [0]), "y": xr.Variable("z", [0])}] expected = xr.Variable("z", [0.0]) assert_equal(actual, expected) with pytest.raises(IndexError, match="out of bounds index"): var[{"x": xr.Variable("z", [5]), "y": xr.Variable("z", [5])}] def test_coordinate_transform_setitem_error() -> None: var = create_coords(scale=2.0, shape=(4, 4))["x"].variable # basic indexing with pytest.raises(TypeError, match="setting values is not supported"): var[0, 0] = 1.0 # outer indexing with pytest.raises(TypeError, match="setting values is not supported"): var[[0, 2], 0] = [1.0, 2.0] # vectorized indexing with pytest.raises(TypeError, match="setting values is not supported"): var[{"x": xr.Variable("z", [0]), "y": xr.Variable("z", [0])}] = 1.0 def test_coordinate_transform_transpose() -> None: coords = create_coords(scale=2.0, shape=(2, 2)) actual = coords["x"].transpose().values expected = [[0.0, 0.0], [2.0, 2.0]] np.testing.assert_array_equal(actual, expected) def test_coordinate_transform_equals() -> None: ds1 = create_coords(scale=2.0, shape=(2, 2)).to_dataset() ds2 = create_coords(scale=2.0, shape=(2, 2)).to_dataset() ds3 = create_coords(scale=4.0, shape=(2, 2)).to_dataset() # cannot use `assert_equal()` test utility function here yet # (indexes invariant check are still based on IndexVariable, which # doesn't work with coordinate transform index coordinate variables) assert ds1.equals(ds2) assert not ds1.equals(ds3) def test_coordinate_transform_sel() -> None: ds = create_coords(scale=2.0, shape=(4, 4)).to_dataset() data = [ [0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0], [12.0, 13.0, 14.0, 15.0], ] ds["data"] = (("y", "x"), data) actual = ds.sel( x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5]), method="nearest" ) expected = ds.isel(x=xr.Variable("z", [0, 3]), y=xr.Variable("z", [0, 0])) # cannot use `assert_equal()` test utility function here yet # (indexes invariant check are still based on IndexVariable, which # doesn't work with coordinate transform index coordinate variables) assert actual.equals(expected) with pytest.raises(ValueError, match=".*only supports selection.*nearest"): ds.sel(x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5])) with pytest.raises(ValueError, match="missing labels for coordinate.*y"): ds.sel(x=[0.5, 5.5], method="nearest") with pytest.raises(TypeError, match=".*only supports advanced.*indexing"): ds.sel(x=[0.5, 5.5], y=[0.0, 0.5], method="nearest") with pytest.raises(ValueError, match=".*only supports advanced.*indexing"): ds.sel( x=xr.Variable("z", [0.5, 5.5]), 
y=xr.Variable("z", [0.0, 0.5, 1.5]), method="nearest", ) def test_coordinate_transform_rename() -> None: ds = xr.Dataset(coords=create_coords(scale=2.0, shape=(2, 2))) roundtripped = ds.rename(x="u", y="v").rename(u="x", v="y") assert_identical(ds, roundtripped, check_default_indexes=False) xarray-2025.09.0/xarray/tests/test_coordinates.py000066400000000000000000000247621505620616400217520ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Mapping import numpy as np import pandas as pd import pytest from xarray.core.coordinates import Coordinates from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.indexes import Index, PandasIndex, PandasMultiIndex from xarray.core.variable import IndexVariable, Variable from xarray.structure.alignment import align from xarray.tests import assert_identical, source_ndarray class TestCoordinates: def test_init_noindex(self) -> None: coords = Coordinates(coords={"foo": ("x", [0, 1, 2])}) expected = Dataset(coords={"foo": ("x", [0, 1, 2])}) assert_identical(coords.to_dataset(), expected) def test_init_default_index(self) -> None: coords = Coordinates(coords={"x": [1, 2]}) expected = Dataset(coords={"x": [1, 2]}) assert_identical(coords.to_dataset(), expected) assert "x" in coords.xindexes @pytest.mark.filterwarnings("error:IndexVariable") def test_init_no_default_index(self) -> None: # dimension coordinate with no default index (explicit) coords = Coordinates(coords={"x": [1, 2]}, indexes={}) assert "x" not in coords.xindexes assert not isinstance(coords["x"], IndexVariable) def test_init_from_coords(self) -> None: expected = Dataset(coords={"foo": ("x", [0, 1, 2])}) coords = Coordinates(coords=expected.coords) assert_identical(coords.to_dataset(), expected) # test variables copied assert coords.variables["foo"] is not expected.variables["foo"] # test indexes are extracted expected = Dataset(coords={"x": [0, 1, 2]}) coords = Coordinates(coords=expected.coords) assert_identical(coords.to_dataset(), expected) assert expected.xindexes == coords.xindexes # coords + indexes not supported with pytest.raises( ValueError, match="passing both.*Coordinates.*indexes.*not allowed" ): coords = Coordinates( coords=expected.coords, indexes={"x": PandasIndex([0, 1, 2], "x")} ) def test_init_empty(self) -> None: coords = Coordinates() assert len(coords) == 0 def test_init_index_error(self) -> None: idx = PandasIndex([1, 2, 3], "x") with pytest.raises(ValueError, match="no coordinate variables found"): Coordinates(indexes={"x": idx}) with pytest.raises(TypeError, match=".* is not an `xarray.indexes.Index`"): Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": "not_an_xarray_index"}, # type: ignore[dict-item] ) def test_init_dim_sizes_conflict(self) -> None: with pytest.raises(ValueError): Coordinates(coords={"foo": ("x", [1, 2]), "bar": ("x", [1, 2, 3, 4])}) def test_from_xindex(self) -> None: idx = PandasIndex([1, 2, 3], "x") coords = Coordinates.from_xindex(idx) assert isinstance(coords.xindexes["x"], PandasIndex) assert coords.xindexes["x"].equals(idx) expected = PandasIndex(idx, "x").create_variables() assert list(coords.variables) == list(expected) assert_identical(expected["x"], coords.variables["x"]) def test_from_xindex_error(self) -> None: class CustomIndexNoCoordsGenerated(Index): def create_variables(self, variables: Mapping | None = None): return {} idx = CustomIndexNoCoordsGenerated() with pytest.raises(ValueError, match=".*index.*did not create any coordinate"): 
Coordinates.from_xindex(idx) def test_from_pandas_multiindex(self) -> None: midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) coords = Coordinates.from_pandas_multiindex(midx, "x") assert isinstance(coords.xindexes["x"], PandasMultiIndex) assert coords.xindexes["x"].index.equals(midx) assert coords.xindexes["x"].dim == "x" expected = PandasMultiIndex(midx, "x").create_variables() assert list(coords.variables) == list(expected) for name in ("x", "one", "two"): assert_identical(expected[name], coords.variables[name]) @pytest.mark.filterwarnings("ignore:return type") def test_dims(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert set(coords.dims) == {"x"} def test_sizes(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.sizes == {"x": 3} def test_dtypes(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.dtypes == {"x": int} def test_getitem(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) assert_identical( coords["x"], DataArray([0, 1, 2], coords={"x": [0, 1, 2]}, name="x"), ) def test_delitem(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) del coords["x"] assert "x" not in coords with pytest.raises( KeyError, match="'nonexistent' is not in coordinate variables" ): del coords["nonexistent"] def test_update(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) coords.update({"y": ("y", [4, 5, 6])}) assert "y" in coords assert "y" in coords.xindexes expected = DataArray([4, 5, 6], coords={"y": [4, 5, 6]}, name="y") assert_identical(coords["y"], expected) def test_equals(self): coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.equals(coords) assert not coords.equals("not_a_coords") def test_identical(self): coords = Coordinates(coords={"x": [0, 1, 2]}) assert coords.identical(coords) assert not coords.identical("not_a_coords") def test_assign(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) expected = Coordinates(coords={"x": [0, 1, 2], "y": [3, 4]}) actual = coords.assign(y=[3, 4]) assert_identical(actual, expected) actual = coords.assign({"y": [3, 4]}) assert_identical(actual, expected) def test_copy(self) -> None: no_index_coords = Coordinates({"foo": ("x", [1, 2, 3])}) copied = no_index_coords.copy() assert_identical(no_index_coords, copied) v0 = no_index_coords.variables["foo"] v1 = copied.variables["foo"] assert v0 is not v1 assert source_ndarray(v0.data) is source_ndarray(v1.data) deep_copied = no_index_coords.copy(deep=True) assert_identical(no_index_coords.to_dataset(), deep_copied.to_dataset()) v0 = no_index_coords.variables["foo"] v1 = deep_copied.variables["foo"] assert v0 is not v1 assert source_ndarray(v0.data) is not source_ndarray(v1.data) def test_align(self) -> None: coords = Coordinates(coords={"x": [0, 1, 2]}) left = coords # test Coordinates._reindex_callback right = coords.to_dataset().isel(x=[0, 1]).coords left2, right2 = align(left, right, join="inner") assert_identical(left2, right2) # test Coordinates._overwrite_indexes right.update({"x": ("x", [4, 5, 6])}) left2, right2 = align(left, right, join="override") assert_identical(left2, left) assert_identical(left2, right2) def test_dataset_from_coords_with_multidim_var_same_name(self): # regression test for GH #8883 var = Variable(data=np.arange(6).reshape(2, 3), dims=["x", "y"]) coords = Coordinates(coords={"x": var}, indexes={}) ds = Dataset(coords=coords) assert ds.coords["x"].dims == ("x", "y") def test_drop_vars(self): coords = Coordinates( coords={ "x": Variable("x", 
range(3)), "y": Variable("y", list("ab")), "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)), }, indexes={}, ) actual = coords.drop_vars("x") assert isinstance(actual, Coordinates) assert set(actual.variables) == {"a", "y"} actual = coords.drop_vars(["x", "y"]) assert isinstance(actual, Coordinates) assert set(actual.variables) == {"a"} def test_drop_dims(self) -> None: coords = Coordinates( coords={ "x": Variable("x", range(3)), "y": Variable("y", list("ab")), "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)), }, indexes={}, ) actual = coords.drop_dims("x") assert isinstance(actual, Coordinates) assert set(actual.variables) == {"y"} actual = coords.drop_dims(["x", "y"]) assert isinstance(actual, Coordinates) assert set(actual.variables) == set() def test_rename_dims(self) -> None: coords = Coordinates( coords={ "x": Variable("x", range(3)), "y": Variable("y", list("ab")), "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)), }, indexes={}, ) actual = coords.rename_dims({"x": "X"}) assert isinstance(actual, Coordinates) assert set(actual.dims) == {"X", "y"} assert set(actual.variables) == {"a", "x", "y"} actual = coords.rename_dims({"x": "u", "y": "v"}) assert isinstance(actual, Coordinates) assert set(actual.dims) == {"u", "v"} assert set(actual.variables) == {"a", "x", "y"} def test_rename_vars(self) -> None: coords = Coordinates( coords={ "x": Variable("x", range(3)), "y": Variable("y", list("ab")), "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)), }, indexes={}, ) actual = coords.rename_vars({"x": "X"}) assert isinstance(actual, Coordinates) assert set(actual.dims) == {"x", "y"} assert set(actual.variables) == {"a", "X", "y"} actual = coords.rename_vars({"x": "u", "y": "v"}) assert isinstance(actual, Coordinates) assert set(actual.dims) == {"x", "y"} assert set(actual.variables) == {"a", "u", "v"} def test_operator_merge(self) -> None: coords1 = Coordinates({"x": ("x", [0, 1, 2])}) coords2 = Coordinates({"y": ("y", [3, 4, 5])}) expected = Dataset(coords={"x": [0, 1, 2], "y": [3, 4, 5]}) actual = coords1 | coords2 assert_identical(Dataset(coords=actual), expected) xarray-2025.09.0/xarray/tests/test_cupy.py000066400000000000000000000032211505620616400204030ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest import xarray as xr cp = pytest.importorskip("cupy") @pytest.fixture def toy_weather_data(): """Construct the example DataSet from the Toy weather data example. https://docs.xarray.dev/en/stable/examples/weather-data.html Here we construct the DataSet exactly as shown in the example and then convert the numpy arrays to cupy. 
""" np.random.seed(123) times = pd.date_range("2000-01-01", "2001-12-31", name="time") annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28)) base = 10 + 15 * annual_cycle.reshape(-1, 1) tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3) tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3) ds = xr.Dataset( { "tmin": (("time", "location"), tmin_values), "tmax": (("time", "location"), tmax_values), }, {"time": times, "location": ["IA", "IN", "IL"]}, ) ds.tmax.data = cp.asarray(ds.tmax.data) ds.tmin.data = cp.asarray(ds.tmin.data) return ds def test_cupy_import() -> None: """Check the import worked.""" assert cp def test_check_data_stays_on_gpu(toy_weather_data) -> None: """Perform some operations and check the data stays on the GPU.""" freeze = (toy_weather_data["tmin"] <= 0).groupby("time.month").mean("time") assert isinstance(freeze.data, cp.ndarray) def test_where() -> None: from xarray.core.duck_array_ops import where data = cp.zeros(10) output = where(data < 1, 1, data).all() assert output assert isinstance(output, cp.ndarray) xarray-2025.09.0/xarray/tests/test_dask.py000066400000000000000000001770601505620616400203620ustar00rootroot00000000000000from __future__ import annotations import operator import pickle import sys from contextlib import suppress from textwrap import dedent import numpy as np import pandas as pd import pytest import xarray as xr import xarray.ufuncs as xu from xarray import DataArray, Dataset, Variable from xarray.core import duck_array_ops from xarray.core.duck_array_ops import lazy_array_equiv from xarray.core.indexes import PandasIndex from xarray.testing import assert_chunks_equal from xarray.tests import ( assert_allclose, assert_array_equal, assert_equal, assert_frame_equal, assert_identical, mock, raise_if_dask_computes, requires_pint, requires_scipy_or_netCDF4, ) from xarray.tests.test_backends import create_tmp_file dask = pytest.importorskip("dask") da = pytest.importorskip("dask.array") dd = pytest.importorskip("dask.dataframe") ON_WINDOWS = sys.platform == "win32" def test_raise_if_dask_computes(): data = da.from_array(np.random.default_rng(0).random((4, 6)), chunks=(2, 2)) with pytest.raises(RuntimeError, match=r"Too many computes"): with raise_if_dask_computes(): data.compute() class DaskTestCase: def assertLazyAnd(self, expected, actual, test): with dask.config.set(scheduler="synchronous"): test(actual, expected) if isinstance(actual, Dataset): for k, v in actual.variables.items(): if k in actual.xindexes: assert isinstance(v.data, np.ndarray) else: assert isinstance(v.data, da.Array) elif isinstance(actual, DataArray): assert isinstance(actual.data, da.Array) for k, v in actual.coords.items(): if k in actual.xindexes: assert isinstance(v.data, np.ndarray) else: assert isinstance(v.data, da.Array) elif isinstance(actual, Variable): assert isinstance(actual.data, da.Array) else: raise AssertionError() class TestVariable(DaskTestCase): def assertLazyAndIdentical(self, expected, actual): self.assertLazyAnd(expected, actual, assert_identical) def assertLazyAndAllClose(self, expected, actual): self.assertLazyAnd(expected, actual, assert_allclose) @pytest.fixture(autouse=True) def setUp(self): self.values = np.random.default_rng(0).random((4, 6)) self.data = da.from_array(self.values, chunks=(2, 2)) self.eager_var = Variable(("x", "y"), self.values) self.lazy_var = Variable(("x", "y"), self.data) def test_basics(self): v = self.lazy_var assert self.data is v.data assert self.data.chunks == v.chunks 
assert_array_equal(self.values, v) def test_copy(self): self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy()) self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True)) def test_chunk(self): for chunks, expected in [ ({}, ((2, 2), (2, 2, 2))), (3, ((3, 1), (3, 3))), ({"x": 3, "y": 3}, ((3, 1), (3, 3))), ({"x": 3}, ((3, 1), (2, 2, 2))), ({"x": (3, 1)}, ((3, 1), (2, 2, 2))), ]: rechunked = self.lazy_var.chunk(chunks) assert rechunked.chunks == expected self.assertLazyAndIdentical(self.eager_var, rechunked) expected_chunksizes = dict(zip(self.lazy_var.dims, expected, strict=True)) assert rechunked.chunksizes == expected_chunksizes def test_indexing(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u[0], v[0]) self.assertLazyAndIdentical(u[:1], v[:1]) self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]]) @pytest.mark.parametrize( "expected_data, index", [ (da.array([99, 2, 3, 4]), 0), (da.array([99, 99, 99, 4]), slice(2, None, -1)), (da.array([99, 99, 3, 99]), [0, -1, 1]), (da.array([99, 99, 99, 4]), np.arange(3)), (da.array([1, 99, 99, 99]), [False, True, True, True]), (da.array([1, 99, 99, 99]), np.array([False, True, True, True])), (da.array([99, 99, 99, 99]), Variable(("x"), np.array([True] * 4))), ], ) def test_setitem_dask_array(self, expected_data, index): arr = Variable(("x"), da.array([1, 2, 3, 4])) expected = Variable(("x"), expected_data) with raise_if_dask_computes(): arr[index] = 99 assert_identical(arr, expected) def test_squeeze(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze()) def test_equals(self): v = self.lazy_var assert v.equals(v) assert isinstance(v.data, da.Array) assert v.identical(v) assert isinstance(v.data, da.Array) def test_transpose(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.T, v.T) def test_shift(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2)) self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2)) assert v.data.chunks == v.shift(x=1).data.chunks def test_roll(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2)) assert v.data.chunks == v.roll(x=1).data.chunks def test_unary_op(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(-u, -v) self.assertLazyAndIdentical(abs(u), abs(v)) self.assertLazyAndIdentical(u.round(), v.round()) def test_binary_op(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(2 * u, 2 * v) self.assertLazyAndIdentical(u + u, v + v) self.assertLazyAndIdentical(u[0] + u, v[0] + v) def test_binary_op_bitshift(self) -> None: # bit shifts only work on ints so we need to generate # new eager and lazy vars rng = np.random.default_rng(0) values = rng.integers(low=-10000, high=10000, size=(4, 6)) data = da.from_array(values, chunks=(2, 2)) u = Variable(("x", "y"), values) v = Variable(("x", "y"), data) self.assertLazyAndIdentical(u << 2, v << 2) self.assertLazyAndIdentical(u << 5, v << 5) self.assertLazyAndIdentical(u >> 2, v >> 2) self.assertLazyAndIdentical(u >> 5, v >> 5) def test_repr(self): expected = dedent( f"""\ Size: 192B {self.lazy_var.data!r}""" ) assert expected == repr(self.lazy_var) def test_pickle(self): # Test that pickling/unpickling does not convert the dask # backend to numpy a1 = Variable(["x"], build_dask_array("x")) a1.compute() assert not a1._in_memory assert kernel_call_count == 1 a2 = pickle.loads(pickle.dumps(a1)) assert kernel_call_count 
== 1 assert_identical(a1, a2) assert not a1._in_memory assert not a2._in_memory def test_reduce(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(u.std(), v.std()) with raise_if_dask_computes(): actual = v.argmax(dim="x") self.assertLazyAndAllClose(u.argmax(dim="x"), actual) with raise_if_dask_computes(): actual = v.argmin(dim="x") self.assertLazyAndAllClose(u.argmin(dim="x"), actual) self.assertLazyAndAllClose((u > 1).any(), (v > 1).any()) self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x")) with pytest.raises(NotImplementedError, match=r"only works along an axis"): v.median() with pytest.raises(NotImplementedError, match=r"only works along an axis"): v.median(v.dims) with raise_if_dask_computes(): v.reduce(duck_array_ops.mean) def test_missing_values(self): values = np.array([0, 1, np.nan, 3]) data = da.from_array(values, chunks=(2,)) eager_var = Variable("x", values) lazy_var = Variable("x", data) self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var)) self.assertLazyAndIdentical(Variable("x", range(4)), lazy_var.fillna(2)) self.assertLazyAndIdentical(eager_var.count(), lazy_var.count()) def test_concat(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], "x")) self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], "x")) self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], "x")) self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], "x")) self.assertLazyAndIdentical( u[:3], Variable.concat([v[[0, 2]], v[[1]]], "x", positions=[[0, 2], [1]]) ) def test_missing_methods(self): v = self.lazy_var with pytest.raises(NotImplementedError, match="dask"): v.argsort() with pytest.raises(NotImplementedError, match="dask"): v[0].item() def test_univariate_ufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.sin(u), np.sin(v)) def test_bivariate_ufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.maximum(u, 0), np.maximum(v, 0)) self.assertLazyAndAllClose(np.maximum(u, 0), np.maximum(0, v)) def test_univariate_xufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.sin(u), xu.sin(v)) def test_bivariate_xufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0)) self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v)) def test_compute(self): u = self.eager_var v = self.lazy_var assert dask.is_dask_collection(v) (v2,) = dask.compute(v + 1) assert not dask.is_dask_collection(v2) assert ((u + 1).data == v2.data).all() def test_persist(self): u = self.eager_var v = self.lazy_var + 1 (v2,) = dask.persist(v) assert v is not v2 assert len(v2.__dask_graph__()) < len(v.__dask_graph__()) assert v2.__dask_keys__() == v.__dask_keys__() assert dask.is_dask_collection(v) assert dask.is_dask_collection(v2) self.assertLazyAndAllClose(u + 1, v) self.assertLazyAndAllClose(u + 1, v2) @requires_pint def test_tokenize_duck_dask_array(self): import pint unit_registry = pint.UnitRegistry() q = unit_registry.Quantity(self.data, "meter") variable = xr.Variable(("x", "y"), q) token = dask.base.tokenize(variable) post_op = variable + 5 * unit_registry.meter assert dask.base.tokenize(variable) != dask.base.tokenize(post_op) # Immutability check assert dask.base.tokenize(variable) == token class TestDataArrayAndDataset(DaskTestCase): def assertLazyAndIdentical(self, expected, actual): self.assertLazyAnd(expected, actual, 
assert_identical) def assertLazyAndAllClose(self, expected, actual): self.assertLazyAnd(expected, actual, assert_allclose) def assertLazyAndEqual(self, expected, actual): self.assertLazyAnd(expected, actual, assert_equal) @pytest.fixture(autouse=True) def setUp(self): self.values = np.random.randn(4, 6) self.data = da.from_array(self.values, chunks=(2, 2)) self.eager_array = DataArray( self.values, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) self.lazy_array = DataArray( self.data, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) def test_chunk(self) -> None: for chunks, expected in [ ({}, ((2, 2), (2, 2, 2))), (3, ((3, 1), (3, 3))), ({"x": 3, "y": 3}, ((3, 1), (3, 3))), ({"x": 3}, ((3, 1), (2, 2, 2))), ({"x": (3, 1)}, ((3, 1), (2, 2, 2))), ({"x": "16B"}, ((1, 1, 1, 1), (2, 2, 2))), ("16B", ((1, 1, 1, 1), (1,) * 6)), ("16MB", ((4,), (6,))), ]: # Test DataArray rechunked = self.lazy_array.chunk(chunks) assert rechunked.chunks == expected self.assertLazyAndIdentical(self.eager_array, rechunked) expected_chunksizes = dict(zip(self.lazy_array.dims, expected, strict=True)) assert rechunked.chunksizes == expected_chunksizes # Test Dataset lazy_dataset = self.lazy_array.to_dataset() eager_dataset = self.eager_array.to_dataset() expected_chunksizes = dict(zip(lazy_dataset.dims, expected, strict=True)) rechunked = lazy_dataset.chunk(chunks) # Dataset.chunks has a different return type to DataArray.chunks - see issue #5843 assert rechunked.chunks == expected_chunksizes self.assertLazyAndIdentical(eager_dataset, rechunked) assert rechunked.chunksizes == expected_chunksizes def test_rechunk(self): chunked = self.eager_array.chunk({"x": 2}).chunk({"y": 2}) assert chunked.chunks == ((2,) * 2, (2,) * 3) self.assertLazyAndIdentical(self.lazy_array, chunked) def test_new_chunk(self): chunked = self.eager_array.chunk() assert chunked.data.name.startswith("xarray-") def test_lazy_dataset(self): lazy_ds = Dataset({"foo": (("x", "y"), self.data)}) assert isinstance(lazy_ds.foo.variable.data, da.Array) def test_lazy_array(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(u, v) self.assertLazyAndAllClose(-u, -v) self.assertLazyAndAllClose(u.T, v.T) self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(1 + u, 1 + v) actual = xr.concat([v[:2], v[2:]], "x") self.assertLazyAndAllClose(u, actual) def test_compute(self): u = self.eager_array v = self.lazy_array assert dask.is_dask_collection(v) (v2,) = dask.compute(v + 1) assert not dask.is_dask_collection(v2) assert ((u + 1).data == v2.data).all() def test_persist(self): u = self.eager_array v = self.lazy_array + 1 (v2,) = dask.persist(v) assert v is not v2 assert len(v2.__dask_graph__()) < len(v.__dask_graph__()) assert v2.__dask_keys__() == v.__dask_keys__() assert dask.is_dask_collection(v) assert dask.is_dask_collection(v2) self.assertLazyAndAllClose(u + 1, v) self.assertLazyAndAllClose(u + 1, v2) def test_concat_loads_variables(self): # Test that concat() computes not-in-memory variables at most once # and loads them in the output, while leaving the input unaltered. d1 = build_dask_array("d1") c1 = build_dask_array("c1") d2 = build_dask_array("d2") c2 = build_dask_array("c2") d3 = build_dask_array("d3") c3 = build_dask_array("c3") # Note: c is a non-index coord. # Index coords are loaded by IndexVariable.__init__. 
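# Each dataset below wraps fresh dask arrays built by build_dask_array(), whose kernel # increments the module-level kernel_call_count, so the assertions that follow can count # exactly how many times concat() materializes each variable.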
ds1 = Dataset(data_vars={"d": ("x", d1)}, coords={"c": ("x", c1)}) ds2 = Dataset(data_vars={"d": ("x", d2)}, coords={"c": ("x", c2)}) ds3 = Dataset(data_vars={"d": ("x", d3)}, coords={"c": ("x", c3)}) assert kernel_call_count == 0 out = xr.concat( [ds1, ds2, ds3], dim="n", data_vars="different", coords="different", compat="equals", ) # each kernel is computed exactly once assert kernel_call_count == 6 # variables are loaded in the output assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars="all", coords="all") # no extra kernel calls assert kernel_call_count == 6 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=["d"], coords=["c"]) # no extra kernel calls assert kernel_call_count == 6 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=[], coords=[]) # variables are loaded once as we are validating that they're identical assert kernel_call_count == 12 assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) out = xr.concat( [ds1, ds2, ds3], dim="n", data_vars="different", coords="different", compat="identical", ) # compat=identical doesn't do any more kernel calls than compat=equals assert kernel_call_count == 18 assert isinstance(out["d"].data, np.ndarray) assert isinstance(out["c"].data, np.ndarray) # When the test for different turns true halfway through, # stop computing variables as it would not have any benefit ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])}) out = xr.concat( [ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different", compat="equals", ) # the variables of ds1 and ds2 were computed, but those of ds3 didn't assert kernel_call_count == 22 assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) # the data of ds1 and ds2 was loaded into numpy and then # concatenated to the data of ds3. Thus, only ds3 is computed now. 
out.compute() assert kernel_call_count == 24 # Finally, test that originals are unaltered assert ds1["d"].data is d1 assert ds1["c"].data is c1 assert ds2["d"].data is d2 assert ds2["c"].data is c2 assert ds3["d"].data is d3 assert ds3["c"].data is c3 # now check that concat() is correctly using dask name equality to skip loads out = xr.concat( [ds1, ds1, ds1], dim="n", data_vars="different", coords="different", compat="equals", ) assert kernel_call_count == 24 # variables are not loaded in the output assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat( [ds1, ds1, ds1], dim="n", data_vars=[], coords=[], compat="identical" ) assert kernel_call_count == 24 # variables are not loaded in the output assert isinstance(out["d"].data, dask.array.Array) assert isinstance(out["c"].data, dask.array.Array) out = xr.concat( [ds1, ds2.compute(), ds3], dim="n", data_vars="all", coords="different", compat="identical", ) # c1,c3 must be computed for comparison since c2 is numpy; # d2 is computed too assert kernel_call_count == 28 out = xr.concat( [ds1, ds2.compute(), ds3], dim="n", data_vars="all", coords="all", compat="identical", ) # no extra computes assert kernel_call_count == 30 # Finally, test that originals are unaltered assert ds1["d"].data is d1 assert ds1["c"].data is c1 assert ds2["d"].data is d2 assert ds2["c"].data is c2 assert ds3["d"].data is d3 assert ds3["c"].data is c3 def test_groupby(self): u = self.eager_array v = self.lazy_array expected = u.groupby("x").mean(...) with raise_if_dask_computes(): actual = v.groupby("x").mean(...) self.assertLazyAndAllClose(expected, actual) def test_rolling(self): u = self.eager_array v = self.lazy_array expected = u.rolling(x=2).mean() with raise_if_dask_computes(): actual = v.rolling(x=2).mean() self.assertLazyAndAllClose(expected, actual) @pytest.mark.parametrize("func", ["first", "last"]) def test_groupby_first_last(self, func): method = operator.methodcaller(func) u = self.eager_array v = self.lazy_array for coords in [u.coords, v.coords]: coords["ab"] = ("x", ["a", "a", "b", "b"]) expected = method(u.groupby("ab")) with raise_if_dask_computes(): actual = method(v.groupby("ab")) self.assertLazyAndAllClose(expected, actual) with raise_if_dask_computes(): actual = method(v.groupby("ab")) self.assertLazyAndAllClose(expected, actual) def test_reindex(self): u = self.eager_array.assign_coords(y=range(6)) v = self.lazy_array.assign_coords(y=range(6)) for kwargs in [ {"x": [2, 3, 4]}, {"x": [1, 100, 2, 101, 3]}, {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]}, ]: expected = u.reindex(**kwargs) actual = v.reindex(**kwargs) self.assertLazyAndAllClose(expected, actual) def test_to_dataset_roundtrip(self): u = self.eager_array v = self.lazy_array expected = u.assign_coords(x=u["x"]) self.assertLazyAndEqual(expected, v.to_dataset("x").to_dataarray("x")) def test_merge(self): def duplicate_and_merge(array): return xr.merge([array, array.rename("bar")]).to_dataarray() expected = duplicate_and_merge(self.eager_array) actual = duplicate_and_merge(self.lazy_array) self.assertLazyAndEqual(expected, actual) def test_ufuncs(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(np.sin(u), np.sin(v)) def test_where_dispatching(self): a = np.arange(10) b = a > 3 x = da.from_array(a, 5) y = da.from_array(b, 5) expected = DataArray(a).where(b) self.assertLazyAndEqual(expected, DataArray(a).where(y)) self.assertLazyAndEqual(expected, DataArray(x).where(b)) self.assertLazyAndEqual(expected, 
DataArray(x).where(y)) def test_simultaneous_compute(self): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() count = [0] def counting_get(*args, **kwargs): count[0] += 1 return dask.get(*args, **kwargs) ds.load(scheduler=counting_get) assert count[0] == 1 def test_duplicate_dims(self): data = np.random.normal(size=(4, 4)) with pytest.warns(UserWarning, match="Duplicate dimension"): arr = DataArray(data, dims=("x", "x")) with pytest.warns(UserWarning, match="Duplicate dimension"): chunked_array = arr.chunk({"x": 2}) assert chunked_array.chunks == ((2, 2), (2, 2)) assert chunked_array.chunksizes == {"x": (2, 2)} def test_stack(self): data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4)) arr = DataArray(data, dims=("w", "x", "y")) stacked = arr.stack(z=("x", "y")) z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"]) expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"]) assert stacked.data.chunks == expected.data.chunks self.assertLazyAndEqual(expected, stacked) def test_dot(self): eager = self.eager_array.dot(self.eager_array[0]) lazy = self.lazy_array.dot(self.lazy_array[0]) self.assertLazyAndAllClose(eager, lazy) def test_dataarray_repr(self): data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) expected = dedent( f"""\ Size: 8B {data!r} Coordinates: y (x) int64 8B dask.array Dimensions without coordinates: x""" ) assert expected == repr(a) assert kernel_call_count == 0 # should not evaluate dask array def test_dataset_repr(self): data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) expected = dedent( """\ Size: 16B Dimensions: (x: 1) Coordinates: y (x) int64 8B dask.array Dimensions without coordinates: x Data variables: a (x) int64 8B dask.array""" ) assert expected == repr(ds) assert kernel_call_count == 0 # should not evaluate dask array def test_dataarray_pickle(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variable nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a1 = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) a1.compute() assert not a1._in_memory assert not a1.coords["y"]._in_memory assert kernel_call_count == 2 a2 = pickle.loads(pickle.dumps(a1)) assert kernel_call_count == 2 assert_identical(a1, a2) assert not a1._in_memory assert not a2._in_memory assert not a1.coords["y"]._in_memory assert not a2.coords["y"]._in_memory def test_dataset_pickle(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variables nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds1 = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) ds1.compute() assert not ds1["a"]._in_memory assert not ds1["y"]._in_memory assert kernel_call_count == 2 ds2 = pickle.loads(pickle.dumps(ds1)) assert kernel_call_count == 2 assert_identical(ds1, ds2) assert not ds1["a"]._in_memory assert not ds2["a"]._in_memory assert not ds1["y"]._in_memory assert not ds2["y"]._in_memory def test_dataarray_getattr(self): # ipython/jupyter does a long list of getattr() calls to when trying to # represent an object. # Make sure we're not accidentally computing dask variables. 
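# accessing a missing attribute must raise AttributeError without triggering any dask compute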
data = build_dask_array("data") nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): _ = a.NOTEXIST assert kernel_call_count == 0 def test_dataset_getattr(self): # Test that pickling/unpickling converts the dask backend # to numpy in neither the data variables nor the non-index coords data = build_dask_array("data") nonindex_coord = build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): _ = ds.NOTEXIST assert kernel_call_count == 0 def test_values(self): # Test that invoking the values property does not convert the dask # backend to numpy a = DataArray([1, 2]).chunk() assert not a._in_memory assert a.values.tolist() == [1, 2] assert not a._in_memory def test_from_dask_variable(self): # Test array creation from Variable with dask backend. # This is used e.g. in broadcast() a = DataArray(self.lazy_array.variable, coords={"x": range(4)}, name="foo") self.assertLazyAndIdentical(self.lazy_array, a) @requires_pint def test_tokenize_duck_dask_array(self): import pint unit_registry = pint.UnitRegistry() q = unit_registry.Quantity(self.data, unit_registry.meter) data_array = xr.DataArray( data=q, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) token = dask.base.tokenize(data_array) post_op = data_array + 5 * unit_registry.meter assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op) # Immutability check assert dask.base.tokenize(data_array) == token class TestToDaskDataFrame: @pytest.mark.xfail(reason="https://github.com/dask/dask/issues/11584") def test_to_dask_dataframe(self): # Test conversion of Datasets to dask DataFrames x = np.random.randn(10) y = np.arange(10, dtype="uint8") t = list("abcdefghij") ds = Dataset( {"a": ("t", da.from_array(x, chunks=4)), "b": ("t", y), "t": ("t", t)} ) expected_pd = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t")) # test if 1-D index is correctly set up expected = dd.from_pandas(expected_pd, chunksize=4) actual = ds.to_dask_dataframe(set_index=True) # test if we have dask dataframes assert isinstance(actual, dd.DataFrame) # use the .equals from pandas to check dataframes are equivalent assert_frame_equal(actual.compute(), expected.compute()) # test if no index is given expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4) actual = ds.to_dask_dataframe(set_index=False) assert isinstance(actual, dd.DataFrame) assert_frame_equal(actual.compute(), expected.compute()) @pytest.mark.xfail( reason="Currently pandas with pyarrow installed will return a `string[pyarrow]` type, " "which causes the `y` column to have a different type depending on whether pyarrow is installed" ) def test_to_dask_dataframe_2D(self): # Test if 2-D dataset is supplied w = np.random.randn(2, 3) ds = Dataset({"w": (("x", "y"), da.from_array(w, chunks=(1, 2)))}) ds["x"] = ("x", np.array([0, 1], np.int64)) ds["y"] = ("y", list("abc")) # dask dataframes do not (yet) support multiindex, # but when it does, this would be the expected index: exp_index = pd.MultiIndex.from_arrays( [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"] ) expected = pd.DataFrame({"w": w.reshape(-1)}, index=exp_index) # so for now, reset the index expected = expected.reset_index(drop=False) actual = ds.to_dask_dataframe(set_index=False) assert isinstance(actual, dd.DataFrame) assert_frame_equal(actual.compute(), expected) @pytest.mark.xfail(raises=NotImplementedError) def 
test_to_dask_dataframe_2D_set_index(self): # This will fail until dask implements MultiIndex support w = da.from_array(np.random.randn(2, 3), chunks=(1, 2)) ds = Dataset({"w": (("x", "y"), w)}) ds["x"] = ("x", np.array([0, 1], np.int64)) ds["y"] = ("y", list("abc")) expected = ds.compute().to_dataframe() actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) def test_to_dask_dataframe_coordinates(self): # Test if coordinate is also a dask array x = np.random.randn(10) t = np.arange(10) * 2 ds = Dataset( { "a": ("t", da.from_array(x, chunks=4)), "t": ("t", da.from_array(t, chunks=4)), } ) expected_pd = pd.DataFrame({"a": x}, index=pd.Index(t, name="t")) expected = dd.from_pandas(expected_pd, chunksize=4) actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected.compute(), actual.compute()) @pytest.mark.xfail( reason="Currently pandas with pyarrow installed will return a `string[pyarrow]` type, " "which causes the index to have a different type depending on whether pyarrow is installed" ) def test_to_dask_dataframe_not_daskarray(self): # Test if DataArray is not a dask array x = np.random.randn(10) y = np.arange(10, dtype="uint8") t = list("abcdefghij") ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)}) expected = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t")) actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) def test_to_dask_dataframe_no_coordinate(self): x = da.from_array(np.random.randn(10), chunks=4) ds = Dataset({"x": ("dim_0", x)}) expected = ds.compute().to_dataframe().reset_index() actual = ds.to_dask_dataframe() assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) expected = ds.compute().to_dataframe() actual = ds.to_dask_dataframe(set_index=True) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) def test_to_dask_dataframe_dim_order(self): values = np.array([[1, 2], [3, 4]], dtype=np.int64) ds = Dataset({"w": (("x", "y"), values)}).chunk(1) expected = ds["w"].to_series().reset_index() actual = ds.to_dask_dataframe(dim_order=["x", "y"]) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) expected = ds["w"].T.to_series().reset_index() actual = ds.to_dask_dataframe(dim_order=["y", "x"]) assert isinstance(actual, dd.DataFrame) assert_frame_equal(expected, actual.compute()) with pytest.raises(ValueError, match=r"does not match the set of dimensions"): ds.to_dask_dataframe(dim_order=["x"]) @pytest.mark.parametrize("method", ["load", "compute"]) def test_dask_kwargs_variable(method): chunked_array = da.from_array(np.arange(3), chunks=(2,)) x = Variable("y", chunked_array) # args should be passed on to dask.compute() (via DaskManager.compute()) with mock.patch.object(da, "compute", return_value=(np.arange(3),)) as mock_compute: getattr(x, method)(foo="bar") mock_compute.assert_called_with(chunked_array, foo="bar") @pytest.mark.parametrize("method", ["load", "compute", "persist"]) def test_dask_kwargs_dataarray(method): data = da.from_array(np.arange(3), chunks=(2,)) x = DataArray(data) if method in ["load", "compute"]: dask_func = "dask.array.compute" else: dask_func = "dask.persist" # args should be passed on to "dask_func" with mock.patch(dask_func) as mock_func: getattr(x, method)(foo="bar") mock_func.assert_called_with(data, foo="bar") 
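# The Dataset variant below mirrors the DataArray case above: "load"/"compute" should route # through dask.array.compute, while "persist" routes through dask.persist.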
@pytest.mark.parametrize("method", ["load", "compute", "persist"]) def test_dask_kwargs_dataset(method): data = da.from_array(np.arange(3), chunks=(2,)) x = Dataset({"x": (("y"), data)}) if method in ["load", "compute"]: dask_func = "dask.array.compute" else: dask_func = "dask.persist" # args should be passed on to "dask_func" with mock.patch(dask_func) as mock_func: getattr(x, method)(foo="bar") mock_func.assert_called_with(data, foo="bar") kernel_call_count = 0 def kernel(name): """Dask kernel to test pickling/unpickling and __repr__. Must be global to make it pickleable. """ global kernel_call_count kernel_call_count += 1 return np.ones(1, dtype=np.int64) def build_dask_array(name): global kernel_call_count kernel_call_count = 0 return dask.array.Array( dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64 ) @pytest.mark.parametrize( "persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]] ) def test_persist_Dataset(persist): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() ds = ds + 1 n = len(ds.foo.data.dask) ds2 = persist(ds) assert len(ds2.foo.data.dask) == 1 assert len(ds.foo.data.dask) == n # doesn't mutate in place @pytest.mark.parametrize( "persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]] ) def test_persist_DataArray(persist): x = da.arange(10, chunks=(5,)) y = DataArray(x) z = y + 1 n = len(z.data.dask) zz = persist(z) assert len(z.data.dask) == n assert len(zz.data.dask) == zz.data.npartitions def test_dataarray_with_dask_coords(): import toolz x = xr.Variable("x", da.arange(8, chunks=(4,))) y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2) data = da.random.random((8, 8), chunks=(4, 4)) + 1 array = xr.DataArray(data, dims=["x", "y"]) array.coords["xx"] = x array.coords["yy"] = y assert dict(array.__dask_graph__()) == toolz.merge( data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__() ) (array2,) = dask.compute(array) assert not dask.is_dask_collection(array2) assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values()) def test_basic_compute(): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2}) for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]: with dask.config.set(scheduler=get): ds.compute() ds.foo.compute() ds.foo.variable.compute() def test_dataset_as_delayed(): ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk() assert dask.delayed(ds).compute() == ds.compute() def make_da(): da = xr.DataArray( np.ones((10, 20)), dims=["x", "y"], coords={"x": np.arange(10), "y": np.arange(100, 120)}, name="a", ).chunk({"x": 4, "y": 5}) da.x.attrs["long_name"] = "x" da.attrs["test"] = "test" da.coords["c2"] = 0.5 da.coords["ndcoord"] = da.x * 2 da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5}) return da def make_ds(): map_ds = xr.Dataset() map_ds["a"] = make_da() map_ds["b"] = map_ds.a + 50 map_ds["c"] = map_ds.x + 20 map_ds = map_ds.chunk({"x": 4, "y": 5}) map_ds["d"] = ("z", [1, 1, 1, 1]) map_ds["z"] = [0, 1, 2, 3] map_ds["e"] = map_ds.x + map_ds.y map_ds.coords["c1"] = 0.5 map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x))) map_ds.coords["cx"].attrs["test2"] = "test2" map_ds.attrs["test"] = "test" map_ds.coords["xx"] = map_ds["a"] * map_ds.y map_ds.x.attrs["long_name"] = "x" map_ds.y.attrs["long_name"] = "y" return map_ds # fixtures cannot be used in parametrize statements # instead use this workaround # https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly @pytest.fixture def 
map_da(): return make_da() @pytest.fixture def map_ds(): return make_ds() def test_unify_chunks(map_ds): ds_copy = map_ds.copy() ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10}) with pytest.raises(ValueError, match=r"inconsistent chunks"): _ = ds_copy.chunks expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)} with raise_if_dask_computes(): actual_chunks = ds_copy.unify_chunks().chunks assert actual_chunks == expected_chunks assert_identical(map_ds, ds_copy.unify_chunks()) out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy")) assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5)) assert out_b.chunks == expected_chunks # Test unordered dims da = ds_copy["cxy"] out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1})) assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5)) assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2)) # Test mismatch with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"): xr.unify_chunks(da, da.isel(x=slice(2))) @pytest.mark.parametrize("obj", [make_ds(), make_da()]) @pytest.mark.parametrize( "transform", [lambda x: x.compute(), lambda x: x.unify_chunks()] ) def test_unify_chunks_shallow_copy(obj, transform): obj = transform(obj) unified = obj.unify_chunks() assert_identical(obj, unified) # assert obj is not unified @pytest.mark.parametrize("obj", [make_da()]) def test_auto_chunk_da(obj): actual = obj.chunk("auto").data expected = obj.data.rechunk("auto") np.testing.assert_array_equal(actual, expected) assert actual.chunks == expected.chunks def test_map_blocks_error(map_da, map_ds): def bad_func(darray): return (darray * darray.x + 5 * darray.y)[:1, :1] with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"): xr.map_blocks(bad_func, map_da).compute() def returns_numpy(darray): return (darray * darray.x + 5 * darray.y).values with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"): xr.map_blocks(returns_numpy, map_da) with pytest.raises(TypeError, match=r"args must be"): xr.map_blocks(operator.add, map_da, args=10) with pytest.raises(TypeError, match=r"kwargs must be"): xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20]) def really_bad_func(darray): raise ValueError("couldn't do anything.") with pytest.raises(Exception, match=r"Cannot infer"): xr.map_blocks(really_bad_func, map_da) ds_copy = map_ds.copy() ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10}) with pytest.raises(ValueError, match=r"inconsistent chunks"): xr.map_blocks(bad_func, ds_copy) with pytest.raises(TypeError, match=r"Cannot pass dask collections"): xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk())) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks(obj): def func(obj): result = obj + obj.x + 5 * obj.y return result with raise_if_dask_computes(): actual = xr.map_blocks(func, obj) expected = func(obj) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_mixed_type_inputs(obj): def func(obj1, non_xarray_input, obj2): result = obj1 + obj1.x + 5 * obj1.y return result with raise_if_dask_computes(): actual = xr.map_blocks(func, obj, args=["non_xarray_input", obj]) expected = func(obj, "non_xarray_input", obj) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_convert_args_to_list(obj): expected = obj + 10 with raise_if_dask_computes(): actual = xr.map_blocks(operator.add, obj, 
[10]) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) def test_map_blocks_dask_args(): da1 = xr.DataArray( np.ones((10, 20)), dims=["x", "y"], coords={"x": np.arange(10), "y": np.arange(20)}, ).chunk({"x": 5, "y": 4}) # check that block shapes are the same def sumda(da1, da2): assert da1.shape == da2.shape return da1 + da2 da2 = da1 + 1 with raise_if_dask_computes(): mapped = xr.map_blocks(sumda, da1, args=[da2]) xr.testing.assert_equal(da1 + da2, mapped) # one dimension in common da2 = (da1 + 1).isel(x=1, drop=True) with raise_if_dask_computes(): mapped = xr.map_blocks(operator.add, da1, args=[da2]) xr.testing.assert_equal(da1 + da2, mapped) # test that everything works when dimension names are different da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"}) with raise_if_dask_computes(): mapped = xr.map_blocks(operator.add, da1, args=[da2]) xr.testing.assert_equal(da1 + da2, mapped) with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"): xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})]) with pytest.raises(ValueError, match=r"cannot align.*index.*are not equal"): xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))]) # reduction da1 = da1.chunk({"x": -1}) da2 = da1 + 1 with raise_if_dask_computes(): mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2]) xr.testing.assert_equal((da1 + da2).sum("x"), mapped) # reduction with template da1 = da1.chunk({"x": -1}) da2 = da1 + 1 with raise_if_dask_computes(): mapped = xr.map_blocks( lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x") ) xr.testing.assert_equal((da1 + da2).sum("x"), mapped) # bad template: not chunked with pytest.raises(ValueError, match="Provided template has no dask arrays"): xr.map_blocks( lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x").compute(), ) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_add_attrs(obj): def add_attrs(obj): obj = obj.copy(deep=True) obj.attrs["new"] = "new" obj.cxy.attrs["new2"] = "new2" return obj expected = add_attrs(obj) with raise_if_dask_computes(): actual = xr.map_blocks(add_attrs, obj) assert_identical(actual, expected) # when template is specified, attrs are copied from template, not set by function with raise_if_dask_computes(): actual = xr.map_blocks(add_attrs, obj, template=obj) assert_identical(actual, obj) def test_map_blocks_change_name(map_da): def change_name(obj): obj = obj.copy(deep=True) obj.name = "new" return obj expected = change_name(map_da) with raise_if_dask_computes(): actual = xr.map_blocks(change_name, map_da) assert_identical(actual, expected) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_kwargs(obj): expected = xr.full_like(obj, fill_value=np.nan) with raise_if_dask_computes(): actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan)) assert_chunks_equal(expected.chunk(), actual) assert_identical(actual, expected) def test_map_blocks_to_dataarray(map_ds): with raise_if_dask_computes(): actual = xr.map_blocks(lambda x: x.to_dataarray(), map_ds) # to_dataarray does not preserve name, so cannot use assert_identical assert_equal(actual, map_ds.to_dataarray()) @pytest.mark.parametrize( "func", [ lambda x: x, lambda x: x.to_dataset(), lambda x: x.drop_vars("x"), lambda x: x.expand_dims(k=[1, 2, 3]), lambda x: x.expand_dims(k=3), lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)), lambda x: x.astype(np.int32), lambda x: x.x, ], ) def 
test_map_blocks_da_transformations(func, map_da): with raise_if_dask_computes(): actual = xr.map_blocks(func, map_da) assert_identical(actual, func(map_da)) @pytest.mark.parametrize( "func", [ lambda x: x, lambda x: x.drop_vars("cxy"), lambda x: x.drop_vars("a"), lambda x: x.drop_vars("x"), lambda x: x.expand_dims(k=[1, 2, 3]), lambda x: x.expand_dims(k=3), lambda x: x.rename({"a": "new1", "b": "new2"}), lambda x: x.x, ], ) def test_map_blocks_ds_transformations(func, map_ds): with raise_if_dask_computes(): actual = xr.map_blocks(func, map_ds) assert_identical(actual, func(map_ds)) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_da_ds_with_template(obj): func = lambda x: x.isel(x=[1]) # a simple .isel(x=[1, 5, 9]) puts all those in a single chunk. template = xr.concat([obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x") with raise_if_dask_computes(): actual = xr.map_blocks(func, obj, template=template) assert_identical(actual, template) # Check that indexes are written into the graph directly dsk = dict(actual.__dask_graph__()) assert {k for k in dsk if "x-coordinate" in k} assert all( isinstance(v, PandasIndex) for k, v in dsk.items() if "x-coordinate" in k ) with raise_if_dask_computes(): actual = obj.map_blocks(func, template=template) assert_identical(actual, template) def test_map_blocks_roundtrip_string_index(): ds = xr.Dataset( {"data": (["label"], [1, 2, 3])}, coords={"label": ["foo", "bar", "baz"]} ).chunk(label=1) assert ds.label.dtype == np.dtype("=U3") mapped = ds.map_blocks(lambda x: x, template=ds) assert mapped.label.dtype == ds.label.dtype mapped = ds.map_blocks(lambda x: x, template=None) assert mapped.label.dtype == ds.label.dtype mapped = ds.data.map_blocks(lambda x: x, template=ds.data) assert mapped.label.dtype == ds.label.dtype mapped = ds.data.map_blocks(lambda x: x, template=None) assert mapped.label.dtype == ds.label.dtype def test_map_blocks_template_convert_object(): da = make_da() ds = da.to_dataset() func = lambda x: x.to_dataset().isel(x=[1]) template = xr.concat([da.to_dataset().isel(x=[i]) for i in [1, 5, 9]], dim="x") with raise_if_dask_computes(): actual = xr.map_blocks(func, da, template=template) assert_identical(actual, template) func = lambda x: x.to_dataarray().isel(x=[1]) template = xr.concat([ds.to_dataarray().isel(x=[i]) for i in [1, 5, 9]], dim="x") with raise_if_dask_computes(): actual = xr.map_blocks(func, ds, template=template) assert_identical(actual, template) @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_errors_bad_template(obj): with pytest.raises(ValueError, match=r"unexpected coordinate variables"): xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute() with pytest.raises(ValueError, match=r"does not contain coordinate variables"): xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute() with pytest.raises(ValueError, match=r"Dimensions {'x'} missing"): xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute() with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"): xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute() with pytest.raises(TypeError, match=r"must be a DataArray"): xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute() with pytest.raises(ValueError, match=r"map_blocks requires that one block"): xr.map_blocks( lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1]) ).compute() with pytest.raises(ValueError, match=r"Expected index 'x' to be"): 
xr.map_blocks( lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values obj, template=xr.concat( [obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x" ), ).compute() def test_map_blocks_errors_bad_template_2(map_ds): with pytest.raises(ValueError, match=r"unexpected data variables {'xyz'}"): xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute() @pytest.mark.parametrize("obj", [make_da(), make_ds()]) def test_map_blocks_object_method(obj): def func(obj): result = obj + obj.x + 5 * obj.y return result with raise_if_dask_computes(): expected = xr.map_blocks(func, obj) actual = obj.map_blocks(func) assert_identical(expected, actual) def test_map_blocks_hlg_layers(): # regression test for #3599 ds = xr.Dataset( { "x": (("a",), dask.array.ones(10, chunks=(5,))), "z": (("b",), dask.array.ones(10, chunks=(5,))), } ) mapped = ds.map_blocks(lambda x: x) xr.testing.assert_equal(mapped, ds) def test_make_meta(map_ds): from xarray.core.parallel import make_meta meta = make_meta(map_ds) for variable in map_ds._coord_names: assert variable in meta._coord_names assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim for variable in map_ds.data_vars: assert variable in meta.data_vars assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim def test_identical_coords_no_computes(): lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x")) a = xr.DataArray( da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2} ) b = xr.DataArray( da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2} ) with raise_if_dask_computes(): c = a + b assert_identical(c, a) @pytest.mark.parametrize( "obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()] ) @pytest.mark.parametrize( "transform", [ lambda x: x.reset_coords(), lambda x: x.reset_coords(drop=True), lambda x: x.isel(x=1), lambda x: x.attrs.update(new_attrs=1), lambda x: x.assign_coords(cxy=1), lambda x: x.rename({"x": "xnew"}), lambda x: x.rename({"cxy": "cxynew"}), ], ) def test_token_changes_on_transform(obj, transform): with raise_if_dask_computes(): assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj)) @pytest.mark.parametrize( "obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()] ) def test_token_changes_when_data_changes(obj): with raise_if_dask_computes(): t1 = dask.base.tokenize(obj) # Change data_var if isinstance(obj, DataArray): obj *= 2 else: obj["a"] *= 2 with raise_if_dask_computes(): t2 = dask.base.tokenize(obj) assert t2 != t1 # Change non-index coord obj.coords["ndcoord"] *= 2 with raise_if_dask_computes(): t3 = dask.base.tokenize(obj) assert t3 != t2 # Change IndexVariable obj = obj.assign_coords(x=obj.x * 2) with raise_if_dask_computes(): t4 = dask.base.tokenize(obj) assert t4 != t3 @pytest.mark.parametrize("obj", [make_da().compute(), make_ds().compute()]) def test_token_changes_when_buffer_changes(obj): with raise_if_dask_computes(): t1 = dask.base.tokenize(obj) if isinstance(obj, DataArray): obj[0, 0] = 123 else: obj["a"][0, 0] = 123 with raise_if_dask_computes(): t2 = dask.base.tokenize(obj) assert t2 != t1 obj.coords["ndcoord"][0] = 123 with raise_if_dask_computes(): t3 = dask.base.tokenize(obj) assert t3 != t2 @pytest.mark.parametrize( "transform", [lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)], ) @pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]]) def test_token_identical(obj, transform): with raise_if_dask_computes(): 
assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj)) assert dask.base.tokenize(obj.compute()) == dask.base.tokenize( transform(obj.compute()) ) @pytest.mark.parametrize( "obj", [ make_ds(), # Dataset make_ds().variables["c2"], # Variable make_ds().variables["x"], # IndexVariable ], ) def test_tokenize_empty_attrs(obj): """Issues #6970 and #8788""" obj.attrs = {} assert obj._attrs is None a = dask.base.tokenize(obj) assert obj.attrs == {} assert obj._attrs == {} # attrs getter changed None to dict b = dask.base.tokenize(obj) assert a == b obj2 = obj.copy() c = dask.base.tokenize(obj2) assert a == c def test_recursive_token(): """Test that tokenization is invoked recursively, and doesn't just rely on the output of str() """ a = np.ones(10000) b = np.ones(10000) b[5000] = 2 assert str(a) == str(b) assert dask.base.tokenize(a) != dask.base.tokenize(b) # Test DataArray and Variable da_a = DataArray(a) da_b = DataArray(b) assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b) # Test Dataset ds_a = da_a.to_dataset(name="x") ds_b = da_b.to_dataset(name="x") assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b) # Test IndexVariable da_a = DataArray(a, dims=["x"], coords={"x": a}) da_b = DataArray(a, dims=["x"], coords={"x": b}) assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b) @requires_scipy_or_netCDF4 def test_normalize_token_with_backend(map_ds): with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file: map_ds.to_netcdf(tmp_file) read = xr.open_dataset(tmp_file) assert dask.base.tokenize(map_ds) != dask.base.tokenize(read) read.close() @pytest.mark.parametrize( "compat", ["broadcast_equals", "equals", "identical", "no_conflicts"] ) def test_lazy_array_equiv_variables(compat): var1 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2)) var2 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2)) var3 = xr.Variable(("y", "x"), da.zeros((20, 10), chunks=2)) with raise_if_dask_computes(): assert getattr(var1, compat)(var2, equiv=lazy_array_equiv) # values are actually equal, but we don't know that till we compute, return None with raise_if_dask_computes(): assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None # shapes are not equal, return False without computes with raise_if_dask_computes(): assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False # if one or both arrays are numpy, return None assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None assert ( getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None ) with raise_if_dask_computes(): assert getattr(var1, compat)(var2.transpose("y", "x")) @pytest.mark.parametrize( "compat", ["broadcast_equals", "equals", "identical", "no_conflicts"] ) def test_lazy_array_equiv_merge(compat): da1 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x")) da2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x")) da3 = xr.DataArray(da.ones((20, 10), chunks=2), dims=("y", "x")) with raise_if_dask_computes(): xr.merge([da1, da2], compat=compat) # shapes are not equal; no computes necessary with raise_if_dask_computes(max_computes=0): with pytest.raises(ValueError): xr.merge([da1, da3], compat=compat) with raise_if_dask_computes(max_computes=2): xr.merge([da1, da2 / 2], compat=compat) @pytest.mark.filterwarnings("ignore::FutureWarning") # transpose_coords @pytest.mark.parametrize("obj", [make_da(), make_ds()]) @pytest.mark.parametrize( "transform", [ lambda a: a.assign_attrs(new_attr="anew"), lambda a: a.assign_coords(cxy=a.cxy), 
lambda a: a.copy(), lambda a: a.isel(x=slice(None)), lambda a: a.loc[dict(x=slice(None))], lambda a: a.transpose(...), lambda a: a.squeeze(), # no dimensions to squeeze lambda a: a.reindex(x=a.x), lambda a: a.reindex_like(a), lambda a: a.rename({"cxy": "cnew"}).rename({"cnew": "cxy"}), lambda a: a.pipe(lambda x: x), lambda a: xr.align(a, xr.zeros_like(a))[0], # assign # swap_dims # set_index / reset_index ], ) def test_transforms_pass_lazy_array_equiv(obj, transform): with raise_if_dask_computes(): assert_equal(obj, transform(obj)) def test_more_transforms_pass_lazy_array_equiv(map_da, map_ds): with raise_if_dask_computes(): assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy) assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy) assert_equal(map_ds.map(lambda x: x), map_ds) assert_equal(map_ds.set_coords("a").reset_coords("a"), map_ds) assert_equal(map_ds.assign({"a": map_ds.a}), map_ds) # fails because of index error # assert_equal( # map_ds.rename_dims({"x": "xnew"}).rename_dims({"xnew": "x"}), map_ds # ) assert_equal( map_ds.rename_vars({"cxy": "cnew"}).rename_vars({"cnew": "cxy"}), map_ds ) assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da) assert_equal(map_da.astype(map_da.dtype), map_da) assert_equal(map_da.transpose("y", "x", transpose_coords=False).cxy, map_da.cxy) def test_optimize(): # https://github.com/pydata/xarray/issues/3698 a = dask.array.ones((10, 4), chunks=(5, 2)) arr = xr.DataArray(a).chunk(5) (arr2,) = dask.optimize(arr) arr2.compute() def test_graph_manipulation(): """dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder function returned by __dask_postpersist__; also, the dsk passed to the rebuilder is a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict. """ import dask.graph_manipulation as gm v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2 da = DataArray(v) ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])}) v2, da2, ds2 = gm.clone(v, da, ds) assert_equal(v2, v) assert_equal(da2, da) assert_equal(ds2, ds) for a, b in ((v, v2), (da, da2), (ds, ds2)): assert a.__dask_layers__() != b.__dask_layers__() assert len(a.__dask_layers__()) == len(b.__dask_layers__()) assert a.__dask_graph__().keys() != b.__dask_graph__().keys() assert len(a.__dask_graph__()) == len(b.__dask_graph__()) assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys() assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers) # Above we performed a slice operation; adding the two slices back together creates # a diamond-shaped dependency graph, which in turn will trigger a collision in layer # names if we were to use HighLevelGraph.cull() instead of # HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().
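# Rough restatement of the comment above, assuming dask's HighLevelGraph semantics: d1 and d2 are slices of the same chunked variable v, so summing them makes both clones depend on one shared upstream layer (the diamond shape); a per-key HighLevelGraph.cull() would then hit the layer-name collision described above, whereas HighLevelGraph.cull_layers() keeps whole layers by name and avoids it.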
assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2) def test_new_index_var_computes_once(): # regression test for GH1533 data = dask.array.from_array(np.array([100, 200])) with raise_if_dask_computes(max_computes=1): Dataset(coords={"z": ("z", data)}) def test_minimize_graph_size(): # regression test for https://github.com/pydata/xarray/issues/8409 ds = Dataset( { "foo": ( ("x", "y", "z"), dask.array.ones((120, 120, 120), chunks=(20, 20, 1)), ) }, coords={"x": np.arange(120), "y": np.arange(120), "z": np.arange(120)}, ) mapped = ds.map_blocks(lambda x: x) graph = dict(mapped.__dask_graph__()) numchunks = {k: len(v) for k, v in ds.chunksizes.items()} for var in "xyz": actual = len([key for key in graph if var in key[0]]) # assert that each chunk of an index variable is included only once, # not the product of the number of chunks of all the other dimensions. # e.g. previously for 'x', actual == numchunks['y'] * numchunks['z'] assert actual == numchunks[var], (actual, numchunks[var]) def test_idxmin_chunking(): # GH9425 x, y, t = 100, 100, 10 rang = np.arange(t * x * y) da = xr.DataArray( rang.reshape(t, x, y), coords={"time": range(t), "x": range(x), "y": range(y)} ) da = da.chunk(dict(time=-1, x=25, y=25)) actual = da.idxmin("time") assert actual.chunksizes == {k: da.chunksizes[k] for k in ["x", "y"]} assert_identical(actual, da.compute().idxmin("time")) def test_conjugate(): # Test for https://github.com/pydata/xarray/issues/10302 z = 1j * da.arange(100) data = xr.DataArray(z, coords={"x": np.arange(100)}) conj_data = data.conjugate() assert dask.is_dask_collection(conj_data) assert_equal(conj_data, data.conj()) xarray-2025.09.0/xarray/tests/test_dataarray.py000066400000000000000000010105431505620616400214010ustar00rootroot00000000000000from __future__ import annotations import pickle import re import sys import warnings from collections.abc import Hashable from copy import deepcopy from textwrap import dedent from typing import Any, Final, Literal, cast import numpy as np import pandas as pd import pytest # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning except ImportError: from numpy import RankWarning # type: ignore[no-redef,attr-defined,unused-ignore] import xarray as xr import xarray.core.missing from xarray import ( DataArray, Dataset, IndexVariable, Variable, align, broadcast, set_options, ) from xarray.coders import CFDatetimeCoder from xarray.core import dtypes from xarray.core.common import full_like from xarray.core.coordinates import Coordinates, CoordinateValidationError from xarray.core.indexes import Index, PandasIndex, filter_indexes_from_coords from xarray.core.types import QueryEngineOptions, QueryParserOptions from xarray.core.utils import is_scalar from xarray.testing import _assert_internal_invariants from xarray.tests import ( InaccessibleArray, ReturnItem, assert_allclose, assert_array_equal, assert_chunks_equal, assert_equal, assert_identical, assert_no_warnings, has_dask, has_dask_ge_2025_1_0, raise_if_dask_computes, requires_bottleneck, requires_cupy, requires_dask, requires_dask_expr, requires_iris, requires_numexpr, requires_pint, requires_scipy, requires_sparse, source_ndarray, ) try: from pandas.errors import UndefinedVariableError except ImportError: # TODO: remove once we stop supporting pandas<1.4.3 from pandas.core.computation.ops import UndefinedVariableError pytestmark = [ pytest.mark.filterwarnings("error:Mean of empty slice"), pytest.mark.filterwarnings("error:All-NaN (slice|axis)
encountered"), ] class TestDataArray: @pytest.fixture(autouse=True) def setup(self): self.attrs = {"attr1": "value1", "attr2": 2929} self.x = np.random.random((10, 20)) self.v = Variable(["x", "y"], self.x) self.va = Variable(["x", "y"], self.x, self.attrs) self.ds = Dataset({"foo": self.v}) self.dv = self.ds["foo"] self.mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) self.mda = DataArray([0, 1, 2, 3], coords={"x": self.mindex}, dims="x").astype( np.uint64 ) def test_repr(self) -> None: v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"}) v = v.astype(np.uint64) coords = {"x": np.arange(3, dtype=np.uint64), "other": np.uint64(0)} data_array = DataArray(v, coords, name="my_variable") expected = dedent( """\ Size: 48B array([[1, 2, 3], [4, 5, 6]], dtype=uint64) Coordinates: * x (x) uint64 24B 0 1 2 other uint64 8B 0 Dimensions without coordinates: time Attributes: foo: bar""" ) assert expected == repr(data_array) def test_repr_multiindex(self) -> None: obj_size = np.dtype("O").itemsize expected = dedent( f"""\ Size: 32B array([0, 1, 2, 3], dtype=uint64) Coordinates: * x (x) object {4 * obj_size}B MultiIndex * level_1 (x) object {4 * obj_size}B 'a' 'a' 'b' 'b' * level_2 (x) int64 32B 1 2 1 2""" ) assert expected == repr(self.mda) def test_repr_multiindex_long(self) -> None: mindex_long = pd.MultiIndex.from_product( [["a", "b", "c", "d"], [1, 2, 3, 4, 5, 6, 7, 8]], names=("level_1", "level_2"), ) mda_long = DataArray( list(range(32)), coords={"x": mindex_long}, dims="x" ).astype(np.uint64) obj_size = np.dtype("O").itemsize expected = dedent( f"""\ Size: 256B array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], dtype=uint64) Coordinates: * x (x) object {32 * obj_size}B MultiIndex * level_1 (x) object {32 * obj_size}B 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd' * level_2 (x) int64 256B 1 2 3 4 5 6 7 8 1 2 3 4 ... 
5 6 7 8 1 2 3 4 5 6 7 8""" ) assert expected == repr(mda_long) def test_properties(self) -> None: assert_equal(self.dv.variable, self.v) assert_array_equal(self.dv.values, self.v.values) for attr in ["dims", "dtype", "shape", "size", "nbytes", "ndim", "attrs"]: assert getattr(self.dv, attr) == getattr(self.v, attr) assert len(self.dv) == len(self.v) assert_equal(self.dv.variable, self.v) assert set(self.dv.coords) == set(self.ds.coords) for k, v in self.dv.coords.items(): assert_array_equal(v, self.ds.coords[k]) with pytest.raises(AttributeError): _ = self.dv.dataset assert isinstance(self.ds["x"].to_index(), pd.Index) with pytest.raises(ValueError, match=r"must be 1-dimensional"): self.ds["foo"].to_index() with pytest.raises(AttributeError): self.dv.variable = self.v def test_data_property(self) -> None: array = DataArray(np.zeros((3, 4))) actual = array.copy() actual.values = np.ones((3, 4)) assert_array_equal(np.ones((3, 4)), actual.values) actual.data = 2 * np.ones((3, 4)) assert_array_equal(2 * np.ones((3, 4)), actual.data) assert_array_equal(actual.data, actual.values) def test_indexes(self) -> None: array = DataArray(np.zeros((2, 3)), [("x", [0, 1]), ("y", ["a", "b", "c"])]) expected_indexes = {"x": pd.Index([0, 1]), "y": pd.Index(["a", "b", "c"])} expected_xindexes = { k: PandasIndex(idx, k) for k, idx in expected_indexes.items() } assert array.xindexes.keys() == expected_xindexes.keys() assert array.indexes.keys() == expected_indexes.keys() assert all(isinstance(idx, pd.Index) for idx in array.indexes.values()) assert all(isinstance(idx, Index) for idx in array.xindexes.values()) for k in expected_indexes: assert array.xindexes[k].equals(expected_xindexes[k]) assert array.indexes[k].equals(expected_indexes[k]) def test_get_index(self) -> None: array = DataArray(np.zeros((2, 3)), coords={"x": ["a", "b"]}, dims=["x", "y"]) assert array.get_index("x").equals(pd.Index(["a", "b"])) assert array.get_index("y").equals(pd.Index([0, 1, 2])) with pytest.raises(KeyError): array.get_index("z") def test_get_index_size_zero(self) -> None: array = DataArray(np.zeros((0,)), dims=["x"]) actual = array.get_index("x") expected = pd.Index([], dtype=np.int64) assert actual.equals(expected) assert actual.dtype == expected.dtype def test_struct_array_dims(self) -> None: """ This test checks subtraction of two DataArrays for the case when dimension is a structured array. 
""" # GH837, GH861 # checking array subtraction when dims are the same p_data = np.array( [("Abe", 180), ("Stacy", 150), ("Dick", 200)], dtype=[("name", "|S256"), ("height", object)], ) weights_0 = DataArray( [80, 56, 120], dims=["participant"], coords={"participant": p_data} ) weights_1 = DataArray( [81, 52, 115], dims=["participant"], coords={"participant": p_data} ) actual = weights_1 - weights_0 expected = DataArray( [1, -4, -5], dims=["participant"], coords={"participant": p_data} ) assert_identical(actual, expected) # checking array subtraction when dims are not the same p_data_alt = np.array( [("Abe", 180), ("Stacy", 151), ("Dick", 200)], dtype=[("name", "|S256"), ("height", object)], ) weights_1 = DataArray( [81, 52, 115], dims=["participant"], coords={"participant": p_data_alt} ) actual = weights_1 - weights_0 expected = DataArray( [1, -5], dims=["participant"], coords={"participant": p_data[[0, 2]]} ) assert_identical(actual, expected) # checking array subtraction when dims are not the same and one # is np.nan p_data_nan = np.array( [("Abe", 180), ("Stacy", np.nan), ("Dick", 200)], dtype=[("name", "|S256"), ("height", object)], ) weights_1 = DataArray( [81, 52, 115], dims=["participant"], coords={"participant": p_data_nan} ) actual = weights_1 - weights_0 expected = DataArray( [1, -5], dims=["participant"], coords={"participant": p_data[[0, 2]]} ) assert_identical(actual, expected) def test_name(self) -> None: arr = self.dv assert arr.name == "foo" copied = arr.copy() arr.name = "bar" assert arr.name == "bar" assert_equal(copied, arr) actual = DataArray(IndexVariable("x", [3])) actual.name = "y" expected = DataArray([3], [("x", [3])], name="y") assert_identical(actual, expected) def test_dims(self) -> None: arr = self.dv assert arr.dims == ("x", "y") with pytest.raises(AttributeError, match=r"you cannot assign"): arr.dims = ("w", "z") def test_sizes(self) -> None: array = DataArray(np.zeros((3, 4)), dims=["x", "y"]) assert array.sizes == {"x": 3, "y": 4} assert tuple(array.sizes) == array.dims with pytest.raises(TypeError): array.sizes["foo"] = 5 # type: ignore[index] def test_encoding(self) -> None: expected = {"foo": "bar"} self.dv.encoding["foo"] = "bar" assert expected == self.dv.encoding expected2 = {"baz": 0} self.dv.encoding = expected2 assert expected2 is not self.dv.encoding def test_drop_encoding(self) -> None: array = self.mda encoding = {"scale_factor": 10} array.encoding = encoding array["x"].encoding = encoding assert array.encoding == encoding assert array["x"].encoding == encoding actual = array.drop_encoding() # did not modify in place assert array.encoding == encoding assert array["x"].encoding == encoding # variable and coord encoding is empty assert actual.encoding == {} assert actual["x"].encoding == {} def test_constructor(self) -> None: data = np.random.random((2, 3)) # w/o coords, w/o dims actual = DataArray(data) expected = Dataset({None: (["dim_0", "dim_1"], data)})[None] assert_identical(expected, actual) actual = DataArray(data, [["a", "b"], [-1, -2, -3]]) expected = Dataset( { None: (["dim_0", "dim_1"], data), "dim_0": ("dim_0", ["a", "b"]), "dim_1": ("dim_1", [-1, -2, -3]), } )[None] assert_identical(expected, actual) # pd.Index coords, w/o dims actual = DataArray( data, [pd.Index(["a", "b"], name="x"), pd.Index([-1, -2, -3], name="y")] ) expected = Dataset( {None: (["x", "y"], data), "x": ("x", ["a", "b"]), "y": ("y", [-1, -2, -3])} )[None] assert_identical(expected, actual) # list coords, w dims coords1: list[Any] = [["a", "b"], [-1, -2, -3]] 
actual = DataArray(data, coords1, ["x", "y"]) assert_identical(expected, actual) # pd.Index coords, w dims coords2: list[pd.Index] = [ pd.Index(["a", "b"], name="A"), pd.Index([-1, -2, -3], name="B"), ] actual = DataArray(data, coords2, ["x", "y"]) assert_identical(expected, actual) # dict coords, w dims coords3 = {"x": ["a", "b"], "y": [-1, -2, -3]} actual = DataArray(data, coords3, ["x", "y"]) assert_identical(expected, actual) # dict coords, w/o dims actual = DataArray(data, coords3) assert_identical(expected, actual) # tuple[dim, list] coords, w/o dims coords4 = [("x", ["a", "b"]), ("y", [-1, -2, -3])] actual = DataArray(data, coords4) assert_identical(expected, actual) # partial dict coords, w dims expected = Dataset({None: (["x", "y"], data), "x": ("x", ["a", "b"])})[None] actual = DataArray(data, {"x": ["a", "b"]}, ["x", "y"]) assert_identical(expected, actual) # w/o coords, w dims actual = DataArray(data, dims=["x", "y"]) expected = Dataset({None: (["x", "y"], data)})[None] assert_identical(expected, actual) # w/o coords, w dims, w name actual = DataArray(data, dims=["x", "y"], name="foo") expected = Dataset({"foo": (["x", "y"], data)})["foo"] assert_identical(expected, actual) # w/o coords, w/o dims, w name actual = DataArray(data, name="foo") expected = Dataset({"foo": (["dim_0", "dim_1"], data)})["foo"] assert_identical(expected, actual) # w/o coords, w dims, w attrs actual = DataArray(data, dims=["x", "y"], attrs={"bar": 2}) expected = Dataset({None: (["x", "y"], data, {"bar": 2})})[None] assert_identical(expected, actual) # w/o coords, w dims (ds has attrs) actual = DataArray(data, dims=["x", "y"]) expected = Dataset({None: (["x", "y"], data, {}, {"bar": 2})})[None] assert_identical(expected, actual) # data is list, w coords actual = DataArray([1, 2, 3], coords={"x": [0, 1, 2]}) expected = DataArray([1, 2, 3], coords=[("x", [0, 1, 2])]) assert_identical(expected, actual) def test_constructor_invalid(self) -> None: data = np.random.randn(3, 2) with pytest.raises(ValueError, match=r"coords is not dict-like"): DataArray(data, [[0, 1, 2]], ["x", "y"]) with pytest.raises(ValueError, match=r"not a subset of the .* dim"): DataArray(data, {"x": [0, 1, 2]}, ["a", "b"]) with pytest.raises(ValueError, match=r"not a subset of the .* dim"): DataArray(data, {"x": [0, 1, 2]}) with pytest.raises(TypeError, match=r"is not hashable"): DataArray(data, dims=["x", []]) # type: ignore[list-item] with pytest.raises( CoordinateValidationError, match=r"conflicting sizes for dim" ): DataArray([1, 2, 3], coords=[("x", [0, 1])]) with pytest.raises( CoordinateValidationError, match=r"conflicting sizes for dim" ): DataArray([1, 2], coords={"x": [0, 1], "y": ("x", [1])}, dims="x") with pytest.raises(ValueError, match=r"conflicting MultiIndex"): DataArray(np.random.rand(4, 4), [("x", self.mindex), ("y", self.mindex)]) with pytest.raises(ValueError, match=r"conflicting MultiIndex"): DataArray(np.random.rand(4, 4), [("x", self.mindex), ("level_1", range(4))]) def test_constructor_from_self_described(self) -> None: data: list[list[float]] = [[-0.1, 21], [0, 2]] expected = DataArray( data, coords={"x": ["a", "b"], "y": [-1, -2]}, dims=["x", "y"], name="foobar", attrs={"bar": 2}, ) actual = DataArray(expected) assert_identical(expected, actual) actual = DataArray(expected.values, actual.coords) assert_equal(expected, actual) frame = pd.DataFrame( data, index=pd.Index(["a", "b"], name="x"), columns=pd.Index([-1, -2], name="y"), ) actual = DataArray(frame) assert_equal(expected, actual) series = pd.Series(data[0], 
index=pd.Index([-1, -2], name="y")) actual = DataArray(series) assert_equal(expected[0].reset_coords("x", drop=True), actual) expected = DataArray( data, coords={"x": ["a", "b"], "y": [-1, -2], "a": 0, "z": ("x", [-0.5, 0.5])}, dims=["x", "y"], ) actual = DataArray(expected) assert_identical(expected, actual) actual = DataArray(expected.values, expected.coords) assert_identical(expected, actual) expected = Dataset({"foo": ("foo", ["a", "b"])})["foo"] actual = DataArray(pd.Index(["a", "b"], name="foo")) assert_identical(expected, actual) actual = DataArray(IndexVariable("foo", ["a", "b"])) assert_identical(expected, actual) @requires_dask def test_constructor_from_self_described_chunked(self) -> None: expected = DataArray( [[-0.1, 21], [0, 2]], coords={"x": ["a", "b"], "y": [-1, -2]}, dims=["x", "y"], name="foobar", attrs={"bar": 2}, ).chunk() actual = DataArray(expected) assert_identical(expected, actual) assert_chunks_equal(expected, actual) def test_constructor_from_0d(self) -> None: expected = Dataset({None: ([], 0)})[None] actual = DataArray(0) assert_identical(expected, actual) @requires_dask def test_constructor_dask_coords(self) -> None: # regression test for GH1684 import dask.array as da coord = da.arange(8, chunks=(4,)) data = da.random.random((8, 8), chunks=(4, 4)) + 1 actual = DataArray(data, coords={"x": coord, "y": coord}, dims=["x", "y"]) ecoord = np.arange(8) expected = DataArray(data, coords={"x": ecoord, "y": ecoord}, dims=["x", "y"]) assert_equal(actual, expected) def test_constructor_no_default_index(self) -> None: # explicitly passing a Coordinates object skips the creation of default index da = DataArray(range(3), coords=Coordinates({"x": [1, 2, 3]}, indexes={})) assert "x" in da.coords assert "x" not in da.xindexes def test_constructor_multiindex(self) -> None: midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) coords = Coordinates.from_pandas_multiindex(midx, "x") da = DataArray(range(4), coords=coords, dims="x") assert_identical(da.coords, coords) def test_constructor_custom_index(self) -> None: class CustomIndex(Index): ... 
coords = Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()} ) da = DataArray(range(3), coords=coords) assert isinstance(da.xindexes["x"], CustomIndex) # test coordinate variables copied assert da.coords["x"] is not coords.variables["x"] def test_constructor_extra_dim_index_coord(self) -> None: class AnyIndex(Index): def should_add_coord_to_array(self, name, var, dims): return True idx = AnyIndex() coords = Coordinates( coords={ "x": ("x", [1, 2]), "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]), }, indexes={"x": idx, "x_bounds": idx}, ) actual = DataArray([1.0, 2.0], coords=coords, dims="x") assert_identical(actual.coords, coords, check_default_indexes=False) assert "x_bnds" not in actual.dims def test_equals_and_identical(self) -> None: orig = DataArray(np.arange(5.0), {"a": 42}, dims="x") expected = orig actual = orig.copy() assert expected.equals(actual) assert expected.identical(actual) actual = expected.rename("baz") assert expected.equals(actual) assert not expected.identical(actual) actual = expected.rename({"x": "xxx"}) assert not expected.equals(actual) assert not expected.identical(actual) actual = expected.copy() actual.attrs["foo"] = "bar" assert expected.equals(actual) assert not expected.identical(actual) actual = expected.copy() actual["x"] = ("x", -np.arange(5)) assert not expected.equals(actual) assert not expected.identical(actual) actual = expected.reset_coords(drop=True) assert not expected.equals(actual) assert not expected.identical(actual) actual = orig.copy() actual[0] = np.nan expected = actual.copy() assert expected.equals(actual) assert expected.identical(actual) actual[:] = np.nan assert not expected.equals(actual) assert not expected.identical(actual) actual = expected.copy() actual["a"] = 100000 assert not expected.equals(actual) assert not expected.identical(actual) def test_equals_failures(self) -> None: orig = DataArray(np.arange(5.0), {"a": 42}, dims="x") assert not orig.equals(np.arange(5)) # type: ignore[arg-type] assert not orig.identical(123) # type: ignore[arg-type] assert not orig.broadcast_equals({1: 2}) # type: ignore[arg-type] def test_broadcast_equals(self) -> None: a = DataArray([0, 0], {"y": 0}, dims="x") b = DataArray([0, 0], {"y": ("x", [0, 0])}, dims="x") assert a.broadcast_equals(b) assert b.broadcast_equals(a) assert not a.equals(b) assert not a.identical(b) c = DataArray([0], coords={"x": 0}, dims="y") assert not a.broadcast_equals(c) assert not c.broadcast_equals(a) def test_getitem(self) -> None: # strings pull out dataarrays assert_identical(self.dv, self.ds["foo"]) x = self.dv["x"] y = self.dv["y"] assert_identical(self.ds["x"], x) assert_identical(self.ds["y"], y) arr = ReturnItem() for i in [ arr[:], arr[...], arr[x.values], arr[x.variable], arr[x], arr[x, y], arr[x.values > -1], arr[x.variable > -1], arr[x > -1], arr[x > -1, y > -1], ]: assert_equal(self.dv, self.dv[i]) for i in [ arr[0], arr[:, 0], arr[:3, :2], arr[x.values[:3]], arr[x.variable[:3]], arr[x[:3]], arr[x[:3], y[:4]], arr[x.values > 3], arr[x.variable > 3], arr[x > 3], arr[x > 3, y > 3], ]: assert_array_equal(self.v[i], self.dv[i]) def test_getitem_dict(self) -> None: actual = self.dv[{"x": slice(3), "y": 0}] expected = self.dv.isel(x=slice(3), y=0) assert_identical(expected, actual) def test_getitem_coords(self) -> None: orig = DataArray( [[10], [20]], { "x": [1, 2], "y": [3], "z": 4, "x2": ("x", ["a", "b"]), "y2": ("y", ["c"]), "xy": (["y", "x"], [["d", "e"]]), }, dims=["x", "y"], ) assert_identical(orig, orig[:]) 
assert_identical(orig, orig[:, :]) assert_identical(orig, orig[...]) assert_identical(orig, orig[:2, :1]) assert_identical(orig, orig[[0, 1], [0]]) actual = orig[0, 0] expected = DataArray( 10, {"x": 1, "y": 3, "z": 4, "x2": "a", "y2": "c", "xy": "d"} ) assert_identical(expected, actual) actual = orig[0, :] expected = DataArray( [10], { "x": 1, "y": [3], "z": 4, "x2": "a", "y2": ("y", ["c"]), "xy": ("y", ["d"]), }, dims="y", ) assert_identical(expected, actual) actual = orig[:, 0] expected = DataArray( [10, 20], { "x": [1, 2], "y": 3, "z": 4, "x2": ("x", ["a", "b"]), "y2": "c", "xy": ("x", ["d", "e"]), }, dims="x", ) assert_identical(expected, actual) def test_getitem_dataarray(self) -> None: # It should not conflict da = DataArray(np.arange(12).reshape((3, 4)), dims=["x", "y"]) ind = DataArray([[0, 1], [0, 1]], dims=["x", "z"]) actual = da[ind] assert_array_equal(actual, da.values[[[0, 1], [0, 1]], :]) da = DataArray( np.arange(12).reshape((3, 4)), dims=["x", "y"], coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]}, ) ind = xr.DataArray([[0, 1], [0, 1]], dims=["X", "Y"]) actual = da[ind] expected = da.values[[[0, 1], [0, 1]], :] assert_array_equal(actual, expected) assert actual.dims == ("X", "Y", "y") # boolean indexing ind = xr.DataArray([True, True, False], dims=["x"]) assert_equal(da[ind], da[[0, 1], :]) assert_equal(da[ind], da[[0, 1]]) assert_equal(da[ind], da[ind.values]) def test_getitem_empty_index(self) -> None: da = DataArray(np.arange(12).reshape((3, 4)), dims=["x", "y"]) assert_identical(da[{"x": []}], DataArray(np.zeros((0, 4)), dims=["x", "y"])) assert_identical( da.loc[{"y": []}], DataArray(np.zeros((3, 0)), dims=["x", "y"]) ) assert_identical(da[[]], DataArray(np.zeros((0, 4)), dims=["x", "y"])) def test_getitem_typeerror(self) -> None: with pytest.raises(TypeError, match=r"unexpected indexer type"): self.dv[True] with pytest.raises(TypeError, match=r"unexpected indexer type"): self.dv[np.array(True)] with pytest.raises(TypeError, match=r"invalid indexer array"): self.dv[3.0] with pytest.raises(TypeError, match=r"invalid indexer array"): self.dv[None] def test_setitem(self) -> None: # basic indexing should work as numpy's indexing tuples: list[tuple[int | list[int] | slice, int | list[int] | slice]] = [ (0, 0), (0, slice(None, None)), (slice(None, None), slice(None, None)), (slice(None, None), 0), ([1, 0], slice(None, None)), (slice(None, None), [1, 0]), ] for t in tuples: expected = np.arange(6).reshape(3, 2) orig = DataArray( np.arange(6).reshape(3, 2), { "x": [1, 2, 3], "y": ["a", "b"], "z": 4, "x2": ("x", ["a", "b", "c"]), "y2": ("y", ["d", "e"]), }, dims=["x", "y"], ) orig[t] = 1 expected[t] = 1 assert_array_equal(orig.values, expected) def test_setitem_fancy(self) -> None: # vectorized indexing da = DataArray(np.ones((3, 2)), dims=["x", "y"]) ind = Variable(["a"], [0, 1]) da[dict(x=ind, y=ind)] = 0 expected = DataArray([[0, 1], [1, 0], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) # assign another 0d-variable da[dict(x=ind, y=ind)] = Variable((), 0) expected = DataArray([[0, 1], [1, 0], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) # assign another 1d-variable da[dict(x=ind, y=ind)] = Variable(["a"], [2, 3]) expected = DataArray([[2, 1], [1, 3], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) # 2d-vectorized indexing da = DataArray(np.ones((3, 2)), dims=["x", "y"]) ind_x = DataArray([[0, 1]], dims=["a", "b"]) ind_y = DataArray([[1, 0]], dims=["a", "b"]) da[dict(x=ind_x, y=ind_y)] = 0 expected = DataArray([[1, 0], [0, 1], [1, 1]], 
dims=["x", "y"]) assert_identical(expected, da) da = DataArray(np.ones((3, 2)), dims=["x", "y"]) ind = Variable(["a"], [0, 1]) da[ind] = 0 expected = DataArray([[0, 0], [0, 0], [1, 1]], dims=["x", "y"]) assert_identical(expected, da) def test_setitem_dataarray(self) -> None: def get_data(): return DataArray( np.ones((4, 3, 2)), dims=["x", "y", "z"], coords={ "x": np.arange(4), "y": ["a", "b", "c"], "non-dim": ("x", [1, 3, 4, 2]), }, ) da = get_data() # indexer with inconsistent coordinates. ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.random.randn(3)}) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da[dict(x=ind)] = 0 # indexer with consistent coordinates. ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.arange(1, 4)}) da[dict(x=ind)] = 0 # should not raise assert np.allclose(da[dict(x=ind)].values, 0) assert_identical(da["x"], get_data()["x"]) assert_identical(da["non-dim"], get_data()["non-dim"]) da = get_data() # conflict in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])}, ) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da[dict(x=ind)] = value # consistent coordinate in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da[dict(x=ind)] = value assert np.allclose(da[dict(x=ind)].values, 0) assert_identical(da["x"], get_data()["x"]) assert_identical(da["non-dim"], get_data()["non-dim"]) # Conflict in the non-dimension coordinate value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da[dict(x=ind)] = value # should not raise # conflict in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])}, ) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da[dict(x=ind)] = value # consistent coordinate in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da[dict(x=ind)] = value # should not raise def test_setitem_vectorized(self) -> None: # Regression test for GH:7030 # Positional indexing v = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"]) b = xr.DataArray([[0, 0], [1, 0]], dims=["u", "v"]) c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"]) w = xr.DataArray([-1, -2], dims=["u"]) index = dict(b=b, c=c) v[index] = w assert (v[index] == w).all() # Indexing with coordinates v = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"]) v.coords["b"] = [2, 4, 6] b = xr.DataArray([[2, 2], [4, 2]], dims=["u", "v"]) c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"]) w = xr.DataArray([-1, -2], dims=["u"]) index = dict(b=b, c=c) v.loc[index] = w assert (v.loc[index] == w).all() def test_contains(self) -> None: data_array = DataArray([1, 2]) assert 1 in data_array assert 3 not in data_array def test_pickle(self) -> None: data = DataArray(np.random.random((3, 3)), dims=("id", "time")) roundtripped = pickle.loads(pickle.dumps(data)) assert_identical(data, roundtripped) @requires_dask def test_chunk(self) -> None: unblocked = DataArray(np.ones((3, 4))) assert unblocked.chunks is None blocked = unblocked.chunk() assert blocked.chunks == ((3,), (4,)) first_dask_name = blocked.data.name with pytest.warns(DeprecationWarning): blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) 
# type: ignore[arg-type] assert blocked.chunks == ((2, 1), (2, 2)) assert blocked.data.name != first_dask_name blocked = unblocked.chunk(chunks=(3, 3)) assert blocked.chunks == ((3,), (3, 1)) assert blocked.data.name != first_dask_name with pytest.raises(ValueError): blocked.chunk(chunks=(3, 3, 3)) # name doesn't change when rechunking by same amount # this fails if ReprObject doesn't have __dask_tokenize__ defined assert unblocked.chunk(2).data.name == unblocked.chunk(2).data.name assert blocked.load().chunks is None # Check that kwargs are passed import dask.array as da blocked = unblocked.chunk(name_prefix="testname_") assert isinstance(blocked.data, da.Array) assert "testname_" in blocked.data.name # test kwargs form of chunks blocked = unblocked.chunk(dim_0=3, dim_1=3) assert blocked.chunks == ((3,), (3, 1)) assert blocked.data.name != first_dask_name def test_isel(self) -> None: assert_identical(self.dv[0], self.dv.isel(x=0)) assert_identical(self.dv, self.dv.isel(x=slice(None))) assert_identical(self.dv[:3], self.dv.isel(x=slice(3))) assert_identical(self.dv[:3, :5], self.dv.isel(x=slice(3), y=slice(5))) with pytest.raises( ValueError, match=r"Dimensions {'not_a_dim'} do not exist. Expected " r"one or more of \('x', 'y'\)", ): self.dv.isel(not_a_dim=0) with pytest.warns( UserWarning, match=r"Dimensions {'not_a_dim'} do not exist. " r"Expected one or more of \('x', 'y'\)", ): self.dv.isel(not_a_dim=0, missing_dims="warn") assert_identical(self.dv, self.dv.isel(not_a_dim=0, missing_dims="ignore")) def test_isel_types(self) -> None: # regression test for #1405 da = DataArray([1, 2, 3], dims="x") # uint64 assert_identical( da.isel(x=np.array([0], dtype="uint64")), da.isel(x=np.array([0])) ) # uint32 assert_identical( da.isel(x=np.array([0], dtype="uint32")), da.isel(x=np.array([0])) ) # int64 assert_identical( da.isel(x=np.array([0], dtype="int64")), da.isel(x=np.array([0])) ) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_isel_fancy(self) -> None: shape = (10, 7, 6) np_array = np.random.random(shape) da = DataArray( np_array, dims=["time", "y", "x"], coords={"time": np.arange(0, 100, 10)} ) y = [1, 3] x = [3, 0] expected = da.values[:, y, x] actual = da.isel(y=(("test_coord",), y), x=(("test_coord",), x)) assert actual.coords["test_coord"].shape == (len(y),) assert list(actual.coords) == ["time"] assert actual.dims == ("time", "test_coord") np.testing.assert_equal(actual, expected) # a few corner cases da.isel( time=(("points",), [1, 2]), x=(("points",), [2, 2]), y=(("points",), [3, 4]) ) np.testing.assert_allclose( da.isel( time=(("p",), [1]), x=(("p",), [2]), y=(("p",), [4]) ).values.squeeze(), np_array[1, 4, 2].squeeze(), ) da.isel(time=(("points",), [1, 2])) y = [-1, 0] x = [-2, 2] expected2 = da.values[:, y, x] actual2 = da.isel(x=(("points",), x), y=(("points",), y)).values np.testing.assert_equal(actual2, expected2) # test that the order of the indexers doesn't matter assert_identical( da.isel(y=(("points",), y), x=(("points",), x)), da.isel(x=(("points",), x), y=(("points",), y)), ) # make sure we're raising errors in the right places with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"): da.isel(y=(("points",), [1, 2]), x=(("points",), [1, 2, 3])) # tests using index or DataArray as indexers stations = Dataset() stations["station"] = (("station",), ["A", "B", "C"]) stations["dim1s"] = (("station",), [1, 2, 3]) stations["dim2s"] = (("station",), [4, 5, 1]) actual3 = da.isel(x=stations["dim1s"], y=stations["dim2s"]) assert "station" in 
actual3.coords assert "station" in actual3.dims assert_identical(actual3["station"], stations["station"]) with pytest.raises(ValueError, match=r"conflicting values/indexes on "): da.isel( x=DataArray([0, 1, 2], dims="station", coords={"station": [0, 1, 2]}), y=DataArray([0, 1, 2], dims="station", coords={"station": [0, 1, 3]}), ) # multi-dimensional selection stations = Dataset() stations["a"] = (("a",), ["A", "B", "C"]) stations["b"] = (("b",), [0, 1]) stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]]) stations["dim2s"] = (("a",), [4, 5, 1]) actual4 = da.isel(x=stations["dim1s"], y=stations["dim2s"]) assert "a" in actual4.coords assert "a" in actual4.dims assert "b" in actual4.coords assert "b" in actual4.dims assert_identical(actual4["a"], stations["a"]) assert_identical(actual4["b"], stations["b"]) expected4 = da.variable[ :, stations["dim2s"].variable, stations["dim1s"].variable ] assert_array_equal(actual4, expected4) def test_sel(self) -> None: self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] assert_identical(da, da.sel(x=slice(None))) assert_identical(da[1], da.sel(x="b")) assert_identical(da[:3], da.sel(x=slice("c"))) assert_identical(da[:3], da.sel(x=["a", "b", "c"])) assert_identical(da[:, :4], da.sel(y=(self.ds["y"] < 4))) # verify that indexing with a dataarray works b = DataArray("b") assert_identical(da[1], da.sel(x=b)) assert_identical(da[[1]], da.sel(x=slice(b, b))) def test_sel_dataarray(self) -> None: # indexing with DataArray self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] ind = DataArray(["a", "b", "c"], dims=["x"]) actual = da.sel(x=ind) assert_identical(actual, da.isel(x=[0, 1, 2])) # along new dimension ind = DataArray(["a", "b", "c"], dims=["new_dim"]) actual = da.sel(x=ind) assert_array_equal(actual, da.isel(x=[0, 1, 2])) assert "new_dim" in actual.dims # with coordinate ind = DataArray( ["a", "b", "c"], dims=["new_dim"], coords={"new_dim": [0, 1, 2]} ) actual = da.sel(x=ind) assert_array_equal(actual, da.isel(x=[0, 1, 2])) assert "new_dim" in actual.dims assert "new_dim" in actual.coords assert_equal(actual["new_dim"].drop_vars("x"), ind["new_dim"]) def test_sel_invalid_slice(self) -> None: array = DataArray(np.arange(10), [("x", np.arange(10))]) with pytest.raises(ValueError, match=r"cannot use non-scalar arrays"): array.sel(x=slice(array.x)) def test_sel_dataarray_datetime_slice(self) -> None: # regression test for GH1240 times = pd.date_range("2000-01-01", freq="D", periods=365) array = DataArray(np.arange(365), [("time", times)]) result = array.sel(time=slice(array.time[0], array.time[-1])) assert_equal(result, array) array = DataArray(np.arange(365), [("delta", times - times[0])]) result = array.sel(delta=slice(array.delta[0], array.delta[-1])) assert_equal(result, array) @pytest.mark.parametrize( ["coord_values", "indices"], ( pytest.param( np.array([0.0, 0.111, 0.222, 0.333], dtype="float64"), slice(1, 3), id="float64", ), pytest.param( np.array([0.0, 0.111, 0.222, 0.333], dtype="float32"), slice(1, 3), id="float32", ), pytest.param( np.array([0.0, 0.111, 0.222, 0.333], dtype="float32"), [2], id="scalar" ), ), ) def test_sel_float(self, coord_values, indices) -> None: data_values = np.arange(4) arr = DataArray(data_values, coords={"x": coord_values}, dims="x") actual = arr.sel(x=coord_values[indices]) expected = DataArray( data_values[indices], coords={"x": coord_values[indices]}, dims="x" ) assert_equal(actual, expected) def test_sel_float16(self) -> None: data_values = np.arange(4) coord_values = 
np.array([0.0, 0.111, 0.222, 0.333], dtype="float16") indices = slice(1, 3) message = "`pandas.Index` does not support the `float16` dtype.*" with pytest.warns(DeprecationWarning, match=message): arr = DataArray(data_values, coords={"x": coord_values}, dims="x") with pytest.warns(DeprecationWarning, match=message): expected = DataArray( data_values[indices], coords={"x": coord_values[indices]}, dims="x" ) actual = arr.sel(x=coord_values[indices]) assert_equal(actual, expected) def test_sel_float_multiindex(self) -> None: # regression test https://github.com/pydata/xarray/issues/5691 # test multi-index created from coordinates, one with dtype=float32 lvl1 = ["a", "a", "b", "b"] lvl2 = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) da = xr.DataArray( [1, 2, 3, 4], dims="x", coords={"lvl1": ("x", lvl1), "lvl2": ("x", lvl2)} ) da = da.set_index(x=["lvl1", "lvl2"]) actual = da.sel(lvl1="a", lvl2=0.1) expected = da.isel(x=0) assert_equal(actual, expected) def test_sel_no_index(self) -> None: array = DataArray(np.arange(10), dims="x") assert_identical(array[0], array.sel(x=0)) assert_identical(array[:5], array.sel(x=slice(5))) assert_identical(array[[0, -1]], array.sel(x=[0, -1])) assert_identical(array[array < 5], array.sel(x=(array < 5))) def test_sel_method(self) -> None: data = DataArray(np.random.randn(3, 4), [("x", [0, 1, 2]), ("y", list("abcd"))]) with pytest.raises(KeyError, match="Try setting the `method`"): data.sel(y="ab") expected = data.sel(y=["a", "b"]) actual = data.sel(y=["ab", "ba"], method="pad") assert_identical(expected, actual) expected = data.sel(x=[1, 2]) actual = data.sel(x=[0.9, 1.9], method="backfill", tolerance=1) assert_identical(expected, actual) def test_sel_drop(self) -> None: data = DataArray([1, 2, 3], [("x", [0, 1, 2])]) expected = DataArray(1) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) expected = DataArray(1, {"x": 0}) selected = data.sel(x=0, drop=False) assert_identical(expected, selected) data = DataArray([1, 2, 3], dims=["x"]) expected = DataArray(1) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) def test_isel_drop(self) -> None: data = DataArray([1, 2, 3], [("x", [0, 1, 2])]) expected = DataArray(1) selected = data.isel(x=0, drop=True) assert_identical(expected, selected) expected = DataArray(1, {"x": 0}) selected = data.isel(x=0, drop=False) assert_identical(expected, selected) def test_head(self) -> None: assert_equal(self.dv.isel(x=slice(5)), self.dv.head(x=5)) assert_equal(self.dv.isel(x=slice(0)), self.dv.head(x=0)) assert_equal( self.dv.isel({dim: slice(6) for dim in self.dv.dims}), self.dv.head(6) ) assert_equal( self.dv.isel({dim: slice(5) for dim in self.dv.dims}), self.dv.head() ) with pytest.raises(TypeError, match=r"either dict-like or a single int"): self.dv.head([3]) with pytest.raises(TypeError, match=r"expected integer type"): self.dv.head(x=3.1) with pytest.raises(ValueError, match=r"expected positive int"): self.dv.head(-3) def test_tail(self) -> None: assert_equal(self.dv.isel(x=slice(-5, None)), self.dv.tail(x=5)) assert_equal(self.dv.isel(x=slice(0)), self.dv.tail(x=0)) assert_equal( self.dv.isel({dim: slice(-6, None) for dim in self.dv.dims}), self.dv.tail(6), ) assert_equal( self.dv.isel({dim: slice(-5, None) for dim in self.dv.dims}), self.dv.tail() ) with pytest.raises(TypeError, match=r"either dict-like or a single int"): self.dv.tail([3]) with pytest.raises(TypeError, match=r"expected integer type"): self.dv.tail(x=3.1) with pytest.raises(ValueError, match=r"expected 
positive int"): self.dv.tail(-3) def test_thin(self) -> None: assert_equal(self.dv.isel(x=slice(None, None, 5)), self.dv.thin(x=5)) assert_equal( self.dv.isel({dim: slice(None, None, 6) for dim in self.dv.dims}), self.dv.thin(6), ) with pytest.raises(TypeError, match=r"either dict-like or a single int"): self.dv.thin([3]) with pytest.raises(TypeError, match=r"expected integer type"): self.dv.thin(x=3.1) with pytest.raises(ValueError, match=r"expected positive int"): self.dv.thin(-3) with pytest.raises(ValueError, match=r"cannot be zero"): self.dv.thin(time=0) def test_loc(self) -> None: self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] # typing issue: see https://github.com/python/mypy/issues/2410 assert_identical(da[:3], da.loc[:"c"]) # type: ignore[misc] assert_identical(da[1], da.loc["b"]) assert_identical(da[1], da.loc[{"x": "b"}]) assert_identical(da[1], da.loc["b", ...]) assert_identical(da[:3], da.loc[["a", "b", "c"]]) assert_identical(da[:3, :4], da.loc[["a", "b", "c"], np.arange(4)]) assert_identical(da[:, :4], da.loc[:, self.ds["y"] < 4]) def test_loc_datetime64_value(self) -> None: # regression test for https://github.com/pydata/xarray/issues/4283 t = np.array(["2017-09-05T12", "2017-09-05T15"], dtype="datetime64[ns]") array = DataArray(np.ones(t.shape), dims=("time",), coords=(t,)) assert_identical(array.loc[{"time": t[0]}], array[0]) def test_loc_assign(self) -> None: self.ds["x"] = ("x", np.array(list("abcdefghij"))) da = self.ds["foo"] # assignment # typing issue: see https://github.com/python/mypy/issues/2410 da.loc["a":"j"] = 0 # type: ignore[misc] assert np.all(da.values == 0) da.loc[{"x": slice("a", "j")}] = 2 assert np.all(da.values == 2) da.loc[{"x": slice("a", "j")}] = 2 assert np.all(da.values == 2) # Multi dimensional case da = DataArray(np.arange(12).reshape(3, 4), dims=["x", "y"]) da.loc[0, 0] = 0 assert da.values[0, 0] == 0 assert da.values[0, 1] != 0 da = DataArray(np.arange(12).reshape(3, 4), dims=["x", "y"]) da.loc[0] = 0 assert np.all(da.values[0] == np.zeros(4)) assert da.values[1, 0] != 0 def test_loc_assign_dataarray(self) -> None: def get_data(): return DataArray( np.ones((4, 3, 2)), dims=["x", "y", "z"], coords={ "x": np.arange(4), "y": ["a", "b", "c"], "non-dim": ("x", [1, 3, 4, 2]), }, ) da = get_data() # indexer with inconsistent coordinates. ind = DataArray(np.arange(1, 4), dims=["y"], coords={"y": np.random.randn(3)}) with pytest.raises(IndexError, match=r"dimension coordinate 'y'"): da.loc[dict(x=ind)] = 0 # indexer with consistent coordinates. 
ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.arange(1, 4)}) da.loc[dict(x=ind)] = 0 # should not raise assert np.allclose(da[dict(x=ind)].values, 0) assert_identical(da["x"], get_data()["x"]) assert_identical(da["non-dim"], get_data()["non-dim"]) da = get_data() # conflict in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])}, ) with pytest.raises(IndexError, match=r"dimension coordinate 'x'"): da.loc[dict(x=ind)] = value # consistent coordinate in the assigning values value = xr.DataArray( np.zeros((3, 3, 2)), dims=["x", "y", "z"], coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])}, ) da.loc[dict(x=ind)] = value assert np.allclose(da[dict(x=ind)].values, 0) assert_identical(da["x"], get_data()["x"]) assert_identical(da["non-dim"], get_data()["non-dim"]) def test_loc_single_boolean(self) -> None: data = DataArray([0, 1], coords=[[True, False]]) assert data.loc[True] == 0 assert data.loc[False] == 1 def test_loc_dim_name_collision_with_sel_params(self) -> None: da = xr.DataArray( [[0, 0], [1, 1]], dims=["dim1", "method"], coords={"dim1": ["x", "y"], "method": ["a", "b"]}, ) np.testing.assert_array_equal( da.loc[dict(dim1=["x", "y"], method=["a"])], [[0], [1]] ) def test_selection_multiindex(self) -> None: mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three") ) mdata = DataArray(range(8), [("x", mindex)]) def test_sel( lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None ) -> None: da = mdata.sel(x=lab_indexer) expected_da = mdata.isel(x=pos_indexer) if not replaced_idx: assert_identical(da, expected_da) else: if renamed_dim: assert da.dims[0] == renamed_dim da = da.rename({renamed_dim: "x"}) assert_identical(da.variable, expected_da.variable) assert not da["x"].equals(expected_da["x"]) test_sel(("a", 1, -1), 0) test_sel(("b", 2, -2), -1) test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three") test_sel(("a",), range(4), replaced_idx=True) test_sel("a", range(4), replaced_idx=True) test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7]) test_sel(slice("a", "b"), range(8)) test_sel(slice(("a", 1), ("b", 1)), range(6)) test_sel({"one": "a", "two": 1, "three": -1}, 0) test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three") test_sel({"one": "a"}, range(4), replaced_idx=True) assert_identical(mdata.loc["a"], mdata.sel(x="a")) assert_identical(mdata.loc[("a", 1), ...], mdata.sel(x=("a", 1))) assert_identical(mdata.loc[{"one": "a"}, ...], mdata.sel(x={"one": "a"})) with pytest.raises(IndexError): mdata.loc[("a", 1)] assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1)) def test_selection_multiindex_remove_unused(self) -> None: # GH2619. For MultiIndex, we need to call remove_unused. 
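# A short gloss, assuming pandas MultiIndex behavior: after a boolean selection like the isel below, the dropped label values (here x >= 4) can linger in the MultiIndex levels even though no remaining row uses them; "remove_unused" presumably refers to pandas' pd.MultiIndex.remove_unused_levels(), which strips those stale level values so that later selection and unstacking only see labels that are actually present.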
ds = xr.DataArray( np.arange(40).reshape(8, 5), dims=["x", "y"], coords={"x": np.arange(8), "y": np.arange(5)}, ) ds = ds.stack(xy=["x", "y"]) ds_isel = ds.isel(xy=ds["x"] < 4) with pytest.raises(KeyError): ds_isel.sel(x=5) actual = ds_isel.unstack() expected = ds.reset_index("xy").isel(xy=ds["x"] < 4) expected = expected.set_index(xy=["x", "y"]).unstack() assert_identical(expected, actual) def test_selection_multiindex_from_level(self) -> None: # GH: 3512 da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"}) db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"}) data = xr.concat( [da, db], dim="x", coords="different", compat="equals" ).set_index(xy=["x", "y"]) assert data.dims == ("xy",) actual = data.sel(y="a") expected = data.isel(xy=[0, 1]).unstack("xy").squeeze("y") assert_equal(actual, expected) def test_concat_with_default_coords_warns(self) -> None: da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"}) db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"}) with pytest.warns(FutureWarning): original = xr.concat([da, db], dim="x") assert original.y.size == 4 with set_options(use_new_combine_kwarg_defaults=True): # default compat="override" will pick the first one new = xr.concat([da, db], dim="x") assert new.y.size == 1 def test_virtual_default_coords(self) -> None: array = DataArray(np.zeros((5,)), dims="x") expected = DataArray(range(5), dims="x", name="x") assert_identical(expected, array["x"]) assert_identical(expected, array.coords["x"]) def test_virtual_time_components(self) -> None: dates = pd.date_range("2000-01-01", periods=10) da = DataArray(np.arange(1, 11), [("time", dates)]) assert_array_equal(da["time.dayofyear"], da.values) assert_array_equal(da.coords["time.dayofyear"], da.values) def test_coords(self) -> None: # use int64 to ensure repr() consistency on windows coords = [ IndexVariable("x", np.array([-1, -2], "int64")), IndexVariable("y", np.array([0, 1, 2], "int64")), ] da = DataArray(np.random.randn(2, 3), coords, name="foo") # len assert len(da.coords) == 2 # iter assert list(da.coords) == ["x", "y"] assert coords[0].identical(da.coords["x"]) assert coords[1].identical(da.coords["y"]) assert "x" in da.coords assert 0 not in da.coords assert "foo" not in da.coords with pytest.raises(KeyError): da.coords[0] with pytest.raises(KeyError): da.coords["foo"] # repr expected_repr = dedent( """\ Coordinates: * x (x) int64 16B -1 -2 * y (y) int64 24B 0 1 2""" ) actual = repr(da.coords) assert expected_repr == actual # dtypes assert da.coords.dtypes == {"x": np.dtype("int64"), "y": np.dtype("int64")} del da.coords["x"] da._indexes = filter_indexes_from_coords(da.xindexes, set(da.coords)) expected = DataArray(da.values, {"y": [0, 1, 2]}, dims=["x", "y"], name="foo") assert_identical(da, expected) with pytest.raises( ValueError, match=r"cannot drop or update coordinate.*corrupt.*index " ): self.mda["level_1"] = ("x", np.arange(4)) self.mda.coords["level_1"] = ("x", np.arange(4)) def test_coords_to_index(self) -> None: da = DataArray(np.zeros((2, 3)), [("x", [1, 2]), ("y", list("abc"))]) with pytest.raises(ValueError, match=r"no valid index"): da[0, 0].coords.to_index() expected = pd.Index(["a", "b", "c"], name="y") actual = da[0].coords.to_index() assert expected.equals(actual) expected = pd.MultiIndex.from_product( [[1, 2], ["a", "b", "c"]], names=["x", "y"] ) actual = da.coords.to_index() assert expected.equals(actual) expected = pd.MultiIndex.from_product( [["a", "b", "c"], [1, 2]], names=["y", "x"] ) actual = 
da.coords.to_index(["y", "x"]) assert expected.equals(actual) with pytest.raises(ValueError, match=r"ordered_dims must match"): da.coords.to_index(["x"]) def test_coord_coords(self) -> None: orig = DataArray( [10, 20], {"x": [1, 2], "x2": ("x", ["a", "b"]), "z": 4}, dims="x" ) actual = orig.coords["x"] expected = DataArray( [1, 2], {"z": 4, "x2": ("x", ["a", "b"]), "x": [1, 2]}, dims="x", name="x" ) assert_identical(expected, actual) del actual.coords["x2"] assert_identical(expected.reset_coords("x2", drop=True), actual) actual.coords["x3"] = ("x", ["a", "b"]) expected = DataArray( [1, 2], {"z": 4, "x3": ("x", ["a", "b"]), "x": [1, 2]}, dims="x", name="x" ) assert_identical(expected, actual) def test_reset_coords(self) -> None: data = DataArray( np.zeros((3, 4)), {"bar": ("x", ["a", "b", "c"]), "baz": ("y", range(4)), "y": range(4)}, dims=["x", "y"], name="foo", ) actual1 = data.reset_coords() expected1 = Dataset( { "foo": (["x", "y"], np.zeros((3, 4))), "bar": ("x", ["a", "b", "c"]), "baz": ("y", range(4)), "y": range(4), } ) assert_identical(actual1, expected1) actual2 = data.reset_coords(["bar", "baz"]) assert_identical(actual2, expected1) actual3 = data.reset_coords("bar") expected3 = Dataset( {"foo": (["x", "y"], np.zeros((3, 4))), "bar": ("x", ["a", "b", "c"])}, {"baz": ("y", range(4)), "y": range(4)}, ) assert_identical(actual3, expected3) actual4 = data.reset_coords(["bar"]) assert_identical(actual4, expected3) actual5 = data.reset_coords(drop=True) expected5 = DataArray( np.zeros((3, 4)), coords={"y": range(4)}, dims=["x", "y"], name="foo" ) assert_identical(actual5, expected5) actual6 = data.copy().reset_coords(drop=True) assert_identical(actual6, expected5) actual7 = data.reset_coords("bar", drop=True) expected7 = DataArray( np.zeros((3, 4)), {"baz": ("y", range(4)), "y": range(4)}, dims=["x", "y"], name="foo", ) assert_identical(actual7, expected7) with pytest.raises(ValueError, match=r"cannot be found"): data.reset_coords("foo", drop=True) with pytest.raises(ValueError, match=r"cannot be found"): data.reset_coords("not_found") with pytest.raises(ValueError, match=r"cannot remove index"): data.reset_coords("y") # non-dimension index coordinate midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("lvl1", "lvl2")) data = DataArray([1, 2, 3, 4], coords={"x": midx}, dims="x", name="foo") with pytest.raises(ValueError, match=r"cannot remove index"): data.reset_coords("lvl1") def test_assign_coords(self) -> None: array = DataArray(10) actual = array.assign_coords(c=42) expected = DataArray(10, {"c": 42}) assert_identical(actual, expected) with pytest.raises( ValueError, match=r"cannot drop or update coordinate.*corrupt.*index " ): self.mda.assign_coords(level_1=("x", range(4))) # GH: 2112 da = xr.DataArray([0, 1, 2], dims="x") with pytest.raises(CoordinateValidationError): da["x"] = [0, 1, 2, 3] # size conflict with pytest.raises(CoordinateValidationError): da.coords["x"] = [0, 1, 2, 3] # size conflict with pytest.raises(CoordinateValidationError): da.coords["x"] = ("y", [1, 2, 3]) # no new dimension to a DataArray def test_assign_coords_existing_multiindex(self) -> None: data = self.mda with pytest.warns( FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent" ): data.assign_coords(x=range(4)) def test_assign_coords_custom_index(self) -> None: class CustomIndex(Index): pass coords = Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()} ) da = xr.DataArray([0, 1, 2], dims="x") actual = da.assign_coords(coords) assert 
isinstance(actual.xindexes["x"], CustomIndex) def test_assign_coords_no_default_index(self) -> None: coords = Coordinates({"y": [1, 2, 3]}, indexes={}) da = DataArray([1, 2, 3], dims="y") actual = da.assign_coords(coords) assert_identical(actual.coords, coords, check_default_indexes=False) assert "y" not in actual.xindexes def test_assign_coords_extra_dim_index_coord(self) -> None: class AnyIndex(Index): def should_add_coord_to_array(self, name, var, dims): return True idx = AnyIndex() coords = Coordinates( coords={ "x": ("x", [1, 2]), "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]), }, indexes={"x": idx, "x_bounds": idx}, ) da = DataArray([1.0, 2.0], dims="x") actual = da.assign_coords(coords) expected = DataArray([1.0, 2.0], coords=coords, dims="x") assert_identical(actual, expected, check_default_indexes=False) assert "x_bnds" not in actual.dims def test_coords_alignment(self) -> None: lhs = DataArray([1, 2, 3], [("x", [0, 1, 2])]) rhs = DataArray([2, 3, 4], [("x", [1, 2, 3])]) lhs.coords["rhs"] = rhs expected = DataArray( [1, 2, 3], coords={"rhs": ("x", [np.nan, 2, 3]), "x": [0, 1, 2]}, dims="x" ) assert_identical(lhs, expected) def test_set_coords_update_index(self) -> None: actual = DataArray([1, 2, 3], [("x", [1, 2, 3])]) actual.coords["x"] = ["a", "b", "c"] assert actual.xindexes["x"].to_pandas_index().equals(pd.Index(["a", "b", "c"])) def test_set_coords_multiindex_level(self) -> None: with pytest.raises( ValueError, match=r"cannot drop or update coordinate.*corrupt.*index " ): self.mda["level_1"] = range(4) def test_coords_replacement_alignment(self) -> None: # regression test for GH725 arr = DataArray([0, 1, 2], dims=["abc"]) new_coord = DataArray([1, 2, 3], dims=["abc"], coords=[[1, 2, 3]]) arr["abc"] = new_coord expected = DataArray([0, 1, 2], coords=[("abc", [1, 2, 3])]) assert_identical(arr, expected) def test_coords_non_string(self) -> None: arr = DataArray(0, coords={1: 2}) actual = arr.coords[1] expected = DataArray(2, coords={1: 2}, name=1) assert_identical(actual, expected) def test_coords_delitem_delete_indexes(self) -> None: # regression test for GH3746 arr = DataArray(np.ones((2,)), dims="x", coords={"x": [0, 1]}) del arr.coords["x"] assert "x" not in arr.xindexes def test_coords_delitem_multiindex_level(self) -> None: with pytest.raises( ValueError, match=r"cannot remove coordinate.*corrupt.*index " ): del self.mda.coords["level_1"] def test_broadcast_like(self) -> None: arr1 = DataArray( np.ones((2, 3)), dims=["x", "y"], coords={"x": ["a", "b"], "y": ["a", "b", "c"]}, ) arr2 = DataArray( np.ones((3, 2)), dims=["x", "y"], coords={"x": ["a", "b", "c"], "y": ["a", "b"]}, ) orig1, orig2 = broadcast(arr1, arr2) new1 = arr1.broadcast_like(arr2) new2 = arr2.broadcast_like(arr1) assert_identical(orig1, new1) assert_identical(orig2, new2) orig3 = DataArray(np.random.randn(5), [("x", range(5))]) orig4 = DataArray(np.random.randn(6), [("y", range(6))]) new3, new4 = broadcast(orig3, orig4) assert_identical(orig3.broadcast_like(orig4), new3.transpose("y", "x")) assert_identical(orig4.broadcast_like(orig3), new4) def test_reindex_like(self) -> None: foo = DataArray(np.random.randn(5, 6), [("x", range(5)), ("y", range(6))]) bar = foo[:2, :2] assert_identical(foo.reindex_like(bar), bar) expected = foo.copy() expected[:] = np.nan expected[:2, :2] = bar assert_identical(bar.reindex_like(foo), expected) def test_reindex_like_no_index(self) -> None: foo = DataArray(np.random.randn(5, 6), dims=["x", "y"]) assert_identical(foo, foo.reindex_like(foo)) bar = foo[:4] with 
pytest.raises(ValueError, match=r"different size for unlabeled"): foo.reindex_like(bar) def test_reindex_regressions(self) -> None: da = DataArray(np.random.randn(5), coords=[("time", range(5))]) time2 = DataArray(np.arange(5), dims="time2") with pytest.raises(ValueError): da.reindex(time=time2) # regression test for #736, reindex can not change complex nums dtype xnp = np.array([1, 2, 3], dtype=complex) x = DataArray(xnp, coords=[[0.1, 0.2, 0.3]]) y = DataArray([2, 5, 6, 7, 8], coords=[[-1.1, 0.21, 0.31, 0.41, 0.51]]) re_dtype = x.reindex_like(y, method="pad").dtype assert x.dtype == re_dtype def test_reindex_method(self) -> None: x = DataArray([10, 20], dims="y", coords={"y": [0, 1]}) y = [-0.1, 0.5, 1.1] actual = x.reindex(y=y, method="backfill", tolerance=0.2) expected = DataArray([10, np.nan, np.nan], coords=[("y", y)]) assert_identical(expected, actual) actual = x.reindex(y=y, method="backfill", tolerance=[0.1, 0.1, 0.01]) expected = DataArray([10, np.nan, np.nan], coords=[("y", y)]) assert_identical(expected, actual) alt = Dataset({"y": y}) actual = x.reindex_like(alt, method="backfill") expected = DataArray([10, 20, np.nan], coords=[("y", y)]) assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {None: 2, "u": 1}]) def test_reindex_fill_value(self, fill_value) -> None: x = DataArray([10, 20], dims="y", coords={"y": [0, 1], "u": ("y", [1, 2])}) y = [0, 1, 2] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_var = fill_value_u = np.nan elif isinstance(fill_value, dict): fill_value_var = fill_value[None] fill_value_u = fill_value["u"] else: fill_value_var = fill_value_u = fill_value actual = x.reindex(y=y, fill_value=fill_value) expected = DataArray( [10, 20, fill_value_var], dims="y", coords={"y": y, "u": ("y", [1, 2, fill_value_u])}, ) assert_identical(expected, actual) @pytest.mark.parametrize("dtype", [str, bytes]) def test_reindex_str_dtype(self, dtype) -> None: data = DataArray( [1, 2], dims="x", coords={"x": np.array(["a", "b"], dtype=dtype)} ) actual = data.reindex(x=data.x) expected = data assert_identical(expected, actual) assert actual.dtype == expected.dtype def test_reindex_empty_array_dtype(self) -> None: # Dtype of reindex result should match dtype of the original DataArray. # See GH issue #7299 x = xr.DataArray([], dims=("x",), coords={"x": []}).astype("float32") y = x.reindex(x=[1.0, 2.0]) assert x.dtype == y.dtype, ( "Dtype of reindexed DataArray should match dtype of the original DataArray" ) assert y.dtype == np.float32, ( "Dtype of reindexed DataArray should remain float32" ) def test_rename(self) -> None: da = xr.DataArray( [1, 2, 3], dims="dim", name="name", coords={"coord": ("dim", [5, 6, 7])} ) # change name renamed_name = da.rename("name_new") assert renamed_name.name == "name_new" expected_name = da.copy() expected_name.name = "name_new" assert_identical(renamed_name, expected_name) # change name to None? 
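        # Both `da.rename(None)` and a bare `da.rename()` should clear the
        # name; the checks below are expected to confirm this.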
        renamed_noname = da.rename(None)
        assert renamed_noname.name is None
        expected_noname = da.copy()
        expected_noname.name = None
        assert_identical(renamed_noname, expected_noname)
        renamed_noname = da.rename()
        assert renamed_noname.name is None
        assert_identical(renamed_noname, expected_noname)

        # change dim
        renamed_dim = da.rename({"dim": "dim_new"})
        assert renamed_dim.dims == ("dim_new",)
        expected_dim = xr.DataArray(
            [1, 2, 3],
            dims="dim_new",
            name="name",
            coords={"coord": ("dim_new", [5, 6, 7])},
        )
        assert_identical(renamed_dim, expected_dim)

        # change dim with kwargs
        renamed_dimkw = da.rename(dim="dim_new")
        assert renamed_dimkw.dims == ("dim_new",)
        assert_identical(renamed_dimkw, expected_dim)

        # change coords
        renamed_coord = da.rename({"coord": "coord_new"})
        assert "coord_new" in renamed_coord.coords
        expected_coord = xr.DataArray(
            [1, 2, 3], dims="dim", name="name", coords={"coord_new": ("dim", [5, 6, 7])}
        )
        assert_identical(renamed_coord, expected_coord)

        # change coords with kwargs
        renamed_coordkw = da.rename(coord="coord_new")
        assert "coord_new" in renamed_coordkw.coords
        assert_identical(renamed_coordkw, expected_coord)

        # change coord and dim
        renamed_both = da.rename({"dim": "dim_new", "coord": "coord_new"})
        assert renamed_both.dims == ("dim_new",)
        assert "coord_new" in renamed_both.coords
        expected_both = xr.DataArray(
            [1, 2, 3],
            dims="dim_new",
            name="name",
            coords={"coord_new": ("dim_new", [5, 6, 7])},
        )
        assert_identical(renamed_both, expected_both)

        # change coord and dim with kwargs
        renamed_bothkw = da.rename(dim="dim_new", coord="coord_new")
        assert renamed_bothkw.dims == ("dim_new",)
        assert "coord_new" in renamed_bothkw.coords
        assert_identical(renamed_bothkw, expected_both)

        # change all
        renamed_all = da.rename("name_new", dim="dim_new", coord="coord_new")
        assert renamed_all.name == "name_new"
        assert renamed_all.dims == ("dim_new",)
        assert "coord_new" in renamed_all.coords
        expected_all = xr.DataArray(
            [1, 2, 3],
            dims="dim_new",
            name="name_new",
            coords={"coord_new": ("dim_new", [5, 6, 7])},
        )
        assert_identical(renamed_all, expected_all)

    def test_rename_dimension_coord_warnings(self) -> None:
        # create a dimension coordinate by renaming a dimension or coordinate
        # should raise a warning (no index created)
        da = DataArray([0, 0], coords={"x": ("y", [0, 1])}, dims="y")
        with pytest.warns(
            UserWarning, match="rename 'x' to 'y' does not create an index.*"
        ):
            da.rename(x="y")

        da = xr.DataArray([0, 0], coords={"y": ("x", [0, 1])}, dims="x")
        with pytest.warns(
            UserWarning, match="rename 'x' to 'y' does not create an index.*"
        ):
            da.rename(x="y")

        # No operation should not raise a warning
        da = xr.DataArray(
            data=np.ones((2, 3)),
            dims=["x", "y"],
            coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])},
        )
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            da.rename(x="x")

    def test_replace(self) -> None:
        # Tests the `attrs` replacement and whether it interferes with a
        # `variable` replacement
        da = self.mda
        attrs1 = {"a1": "val1", "a2": 161}
        x = np.ones((10, 20))
        v = Variable(["x", "y"], x)
        assert da._replace(variable=v, attrs=attrs1).attrs == attrs1
        attrs2 = {"b1": "val2", "b2": 1312}
        va = Variable(["x", "y"], x, attrs2)
        # assuming passed `attrs` should prevail
        assert da._replace(variable=va, attrs=attrs1).attrs == attrs1
        # assuming `va.attrs` should be adopted
        assert da._replace(variable=va).attrs == attrs2

    def test_init_value(self) -> None:
        expected = DataArray(
            np.full((3, 4), 3), dims=["x", "y"], coords=[range(3), range(4)]
        )
        actual = DataArray(3, dims=["x", "y"], coords=[range(3), range(4)])
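        # A scalar data value should be broadcast to the shape implied by the
        # coordinates, so this is expected to match the explicit
        # np.full((3, 4), 3) construction above.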
assert_identical(expected, actual) expected = DataArray( np.full((1, 10, 2), 0), dims=["w", "x", "y"], coords={"x": np.arange(10), "y": ["north", "south"]}, ) actual = DataArray(0, dims=expected.dims, coords=expected.coords) assert_identical(expected, actual) expected = DataArray( np.full((10, 2), np.nan), coords=[("x", np.arange(10)), ("y", ["a", "b"])] ) actual = DataArray(coords=[("x", np.arange(10)), ("y", ["a", "b"])]) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"different number of dim"): DataArray(np.array(1), coords={"x": np.arange(10)}, dims=["x"]) with pytest.raises(ValueError, match=r"does not match the 0 dim"): DataArray(np.array(1), coords=[("x", np.arange(10))]) def test_swap_dims(self) -> None: array = DataArray(np.random.randn(3), {"x": list("abc")}, "x") expected = DataArray(array.values, {"x": ("y", list("abc"))}, dims="y") actual = array.swap_dims({"x": "y"}) assert_identical(expected, actual) for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()): assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name]) # as kwargs array = DataArray(np.random.randn(3), {"x": list("abc")}, "x") expected = DataArray(array.values, {"x": ("y", list("abc"))}, dims="y") actual = array.swap_dims(x="y") assert_identical(expected, actual) for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()): assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name]) # multiindex case idx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"]) array = DataArray(np.random.randn(3), {"y": ("x", idx)}, "x") expected = DataArray(array.values, {"y": idx}, "y") actual = array.swap_dims({"x": "y"}) assert_identical(expected, actual) for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()): assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name]) def test_expand_dims_error(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) with pytest.raises(TypeError, match=r"dim should be Hashable or"): array.expand_dims(0) with pytest.raises(ValueError, match=r"lengths of dim and axis"): # dims and axis argument should be the same length array.expand_dims(dim=["a", "b"], axis=[1, 2, 3]) with pytest.raises(ValueError, match=r"Dimension x already"): # Should not pass the already existing dimension. 
array.expand_dims(dim=["x"]) # raise if duplicate with pytest.raises(ValueError, match=r"duplicate values"): array.expand_dims(dim=["y", "y"]) with pytest.raises(ValueError, match=r"duplicate values"): array.expand_dims(dim=["y", "z"], axis=[1, 1]) with pytest.raises(ValueError, match=r"duplicate values"): array.expand_dims(dim=["y", "z"], axis=[2, -2]) # out of bounds error, axis must be in [-4, 3] with pytest.raises(IndexError): array.expand_dims(dim=["y", "z"], axis=[2, 4]) with pytest.raises(IndexError): array.expand_dims(dim=["y", "z"], axis=[2, -5]) # Does not raise an IndexError array.expand_dims(dim=["y", "z"], axis=[2, -4]) array.expand_dims(dim=["y", "z"], axis=[2, 3]) array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) with pytest.raises(TypeError): array.expand_dims({"new_dim": 3.2}) # Attempt to use both dim and kwargs with pytest.raises(ValueError): array.expand_dims({"d": 4}, e=4) def test_expand_dims(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) # pass only dim label actual = array.expand_dims(dim="y") expected = DataArray( np.expand_dims(array.values, 0), dims=["y", "x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) roundtripped = actual.squeeze("y", drop=True) assert_identical(array, roundtripped) # pass multiple dims actual = array.expand_dims(dim=["y", "z"]) expected = DataArray( np.expand_dims(np.expand_dims(array.values, 0), 0), dims=["y", "z", "x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) roundtripped = actual.squeeze(["y", "z"], drop=True) assert_identical(array, roundtripped) # pass multiple dims and axis. 
Axis is out of order actual = array.expand_dims(dim=["z", "y"], axis=[2, 1]) expected = DataArray( np.expand_dims(np.expand_dims(array.values, 1), 2), dims=["x", "y", "z", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) # make sure the attrs are tracked assert actual.attrs["key"] == "entry" roundtripped = actual.squeeze(["z", "y"], drop=True) assert_identical(array, roundtripped) # Negative axis and they are out of order actual = array.expand_dims(dim=["y", "z"], axis=[-1, -2]) expected = DataArray( np.expand_dims(np.expand_dims(array.values, -1), -1), dims=["x", "dim_0", "z", "y"], coords={"x": np.linspace(0.0, 1.0, 3)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) assert actual.attrs["key"] == "entry" roundtripped = actual.squeeze(["y", "z"], drop=True) assert_identical(array, roundtripped) def test_expand_dims_with_scalar_coordinate(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3), "z": 1.0}, attrs={"key": "entry"}, ) actual = array.expand_dims(dim="z") expected = DataArray( np.expand_dims(array.values, 0), dims=["z", "x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3), "z": np.ones(1)}, attrs={"key": "entry"}, ) assert_identical(expected, actual) roundtripped = actual.squeeze(["z"], drop=False) assert_identical(array, roundtripped) def test_expand_dims_with_greater_dim_size(self) -> None: array = DataArray( np.random.randn(3, 4), dims=["x", "dim_0"], coords={"x": np.linspace(0.0, 1.0, 3), "z": 1.0}, attrs={"key": "entry"}, ) actual = array.expand_dims({"y": 2, "z": 1, "dim_1": ["a", "b", "c"]}) expected_coords = { "y": [0, 1], "z": [1.0], "dim_1": ["a", "b", "c"], "x": np.linspace(0, 1, 3), "dim_0": range(4), } expected = DataArray( array.values * np.ones([2, 1, 3, 3, 4]), coords=expected_coords, dims=list(expected_coords.keys()), attrs={"key": "entry"}, ).drop_vars(["y", "dim_0"]) assert_identical(expected, actual) # Test with kwargs instead of passing dict to dim arg. other_way = array.expand_dims(dim_1=["a", "b", "c"]) other_way_expected = DataArray( array.values * np.ones([3, 3, 4]), coords={ "dim_1": ["a", "b", "c"], "x": np.linspace(0, 1, 3), "dim_0": range(4), "z": 1.0, }, dims=["dim_1", "x", "dim_0"], attrs={"key": "entry"}, ).drop_vars("dim_0") assert_identical(other_way_expected, other_way) def test_set_index(self) -> None: indexes = [self.mindex.get_level_values(n) for n in self.mindex.names] coords = {idx.name: ("x", idx) for idx in indexes} array = DataArray(self.mda.values, coords=coords, dims="x") expected = self.mda.copy() level_3 = ("x", [1, 2, 3, 4]) array["level_3"] = level_3 expected["level_3"] = level_3 obj = array.set_index(x=self.mindex.names) assert_identical(obj, expected) obj = obj.set_index(x="level_3", append=True) expected = array.set_index(x=["level_1", "level_2", "level_3"]) assert_identical(obj, expected) array = array.set_index(x=["level_1", "level_2", "level_3"]) assert_identical(array, expected) array2d = DataArray( np.random.rand(2, 2), coords={"x": ("x", [0, 1]), "level": ("y", [1, 2])}, dims=("x", "y"), ) with pytest.raises(ValueError, match=r"dimension mismatch"): array2d.set_index(x="level") # Issue 3176: Ensure clear error message on key error. 
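        # ("level_4" is not an existing variable here, so set_index should
        # raise a ValueError naming the missing variable(s) rather than a
        # bare KeyError.)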
with pytest.raises(ValueError, match=r".*variable\(s\) do not exist"): obj.set_index(x="level_4") def test_reset_index(self) -> None: indexes = [self.mindex.get_level_values(n) for n in self.mindex.names] coords = {idx.name: ("x", idx) for idx in indexes} expected = DataArray(self.mda.values, coords=coords, dims="x") obj = self.mda.reset_index("x") assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 obj = self.mda.reset_index(self.mindex.names) assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 obj = self.mda.reset_index(["x", "level_1"]) assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 coords = { "x": ("x", self.mindex.droplevel("level_1")), "level_1": ("x", self.mindex.get_level_values("level_1")), } expected = DataArray(self.mda.values, coords=coords, dims="x") obj = self.mda.reset_index(["level_1"]) assert_identical(obj, expected, check_default_indexes=False) assert list(obj.xindexes) == ["x"] assert type(obj.xindexes["x"]) is PandasIndex expected = DataArray(self.mda.values, dims="x") obj = self.mda.reset_index("x", drop=True) assert_identical(obj, expected, check_default_indexes=False) array = self.mda.copy() array = array.reset_index(["x"], drop=True) assert_identical(array, expected, check_default_indexes=False) # single index array = DataArray([1, 2], coords={"x": ["a", "b"]}, dims="x") obj = array.reset_index("x") print(obj.x.variable) print(array.x.variable) assert_equal(obj.x.variable, array.x.variable.to_base_variable()) assert len(obj.xindexes) == 0 def test_reset_index_keep_attrs(self) -> None: coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True}) da = DataArray([1, 0], [coord_1]) obj = da.reset_index("coord_1") assert obj.coord_1.attrs == da.coord_1.attrs assert len(obj.xindexes) == 0 def test_reorder_levels(self) -> None: midx = self.mindex.reorder_levels(["level_2", "level_1"]) expected = DataArray(self.mda.values, coords={"x": midx}, dims="x") obj = self.mda.reorder_levels(x=["level_2", "level_1"]) assert_identical(obj, expected) array = DataArray([1, 2], dims="x") with pytest.raises(KeyError): array.reorder_levels(x=["level_1", "level_2"]) array["x"] = [0, 1] with pytest.raises(ValueError, match=r"has no MultiIndex"): array.reorder_levels(x=["level_1", "level_2"]) def test_set_xindex(self) -> None: da = DataArray( [1, 2, 3, 4], coords={"foo": ("x", ["a", "a", "b", "b"])}, dims="x" ) class IndexWithOptions(Index): def __init__(self, opt): self.opt = opt @classmethod def from_variables(cls, variables, options): return cls(options["opt"]) indexed = da.set_xindex("foo", IndexWithOptions, opt=1) assert "foo" in indexed.xindexes assert indexed.xindexes["foo"].opt == 1 # type: ignore[attr-defined] def test_dataset_getitem(self) -> None: dv = self.ds["foo"] assert_identical(dv, self.dv) def test_array_interface(self) -> None: assert_array_equal(np.asarray(self.dv), self.x) # test patched in methods assert_array_equal(self.dv.astype(float), self.v.astype(float)) assert_array_equal(self.dv.argsort(), self.v.argsort()) assert_array_equal(self.dv.clip(2, 3), self.v.clip(2, 3)) # test ufuncs expected = deepcopy(self.ds) expected["foo"][:] = np.sin(self.x) assert_equal(expected["foo"], np.sin(self.dv)) assert_array_equal(self.dv, np.maximum(self.v, self.dv)) bar = Variable(["x", "y"], np.zeros((10, 20))) assert_equal(self.dv, np.maximum(self.dv, bar)) def test_astype_attrs(self) -> None: for v in [self.va.copy(), self.mda.copy(), self.ds.copy()]: 
v.attrs["foo"] = "bar" assert v.attrs == v.astype(float).attrs assert not v.astype(float, keep_attrs=False).attrs def test_astype_dtype(self) -> None: original = DataArray([-1, 1, 2, 3, 1000]) converted = original.astype(float) assert_array_equal(original, converted) assert np.issubdtype(original.dtype, np.integer) assert np.issubdtype(converted.dtype, np.floating) def test_astype_order(self) -> None: original = DataArray([[1, 2], [3, 4]]) converted = original.astype("d", order="F") assert_equal(original, converted) assert original.values.flags["C_CONTIGUOUS"] assert converted.values.flags["F_CONTIGUOUS"] def test_astype_subok(self) -> None: class NdArraySubclass(np.ndarray): pass original = DataArray(NdArraySubclass(np.arange(3))) converted_not_subok = original.astype("d", subok=False) converted_subok = original.astype("d", subok=True) if not isinstance(original.data, NdArraySubclass): pytest.xfail("DataArray cannot be backed yet by a subclasses of np.ndarray") assert isinstance(converted_not_subok.data, np.ndarray) assert not isinstance(converted_not_subok.data, NdArraySubclass) assert isinstance(converted_subok.data, NdArraySubclass) def test_is_null(self) -> None: x = np.random.default_rng(42).random((5, 6)) x[x < 0] = np.nan original = DataArray(x, [-np.arange(5), np.arange(6)], ["x", "y"]) expected = DataArray(pd.isnull(x), [-np.arange(5), np.arange(6)], ["x", "y"]) assert_identical(expected, original.isnull()) assert_identical(~expected, original.notnull()) def test_math(self) -> None: x = self.x v = self.v a = self.dv # variable math was already tested extensively, so let's just make sure # that all types are properly converted here assert_equal(a, +a) assert_equal(a, a + 0) assert_equal(a, 0 + a) assert_equal(a, a + 0 * v) assert_equal(a, 0 * v + a) assert_equal(a, a + 0 * x) assert_equal(a, 0 * x + a) assert_equal(a, a + 0 * a) assert_equal(a, 0 * a + a) def test_math_automatic_alignment(self) -> None: a = DataArray(range(5), [("x", range(5))]) b = DataArray(range(5), [("x", range(1, 6))]) expected = DataArray(np.ones(4), [("x", [1, 2, 3, 4])]) assert_identical(a - b, expected) def test_non_overlapping_dataarrays_return_empty_result(self) -> None: a = DataArray(range(5), [("x", range(5))]) result = a.isel(x=slice(2)) + a.isel(x=slice(2, None)) assert len(result["x"]) == 0 def test_empty_dataarrays_return_empty_result(self) -> None: a = DataArray(data=[]) result = a * a assert len(result["dim_0"]) == 0 def test_inplace_math_basics(self) -> None: x = self.x a = self.dv v = a.variable b = a b += 1 assert b is a assert b.variable is v assert_array_equal(b.values, x) assert source_ndarray(b.values) is x def test_inplace_math_error(self) -> None: data = np.random.rand(4) times = np.arange(4) foo = DataArray(data, coords=[times], dims=["time"]) b = times.copy() with pytest.raises( TypeError, match=r"Values of an IndexVariable are immutable" ): foo.coords["time"] += 1 # Check error throwing prevented inplace operation assert_array_equal(foo.coords["time"], b) def test_inplace_math_automatic_alignment(self) -> None: a = DataArray(range(5), [("x", range(5))]) b = DataArray(range(1, 6), [("x", range(1, 6))]) with pytest.raises(xr.MergeError, match="Automatic alignment is not supported"): a += b with pytest.raises(xr.MergeError, match="Automatic alignment is not supported"): b += a def test_math_name(self) -> None: # Verify that name is preserved only when it can be done unambiguously. 
# The rule (copied from pandas.Series) is keep the current name only if # the other object has the same name or no name attribute and this # object isn't a coordinate; otherwise reset to None. a = self.dv assert (+a).name == "foo" assert (a + 0).name == "foo" assert (a + a.rename(None)).name is None assert (a + a.rename("bar")).name is None assert (a + a).name == "foo" assert (+a["x"]).name == "x" assert (a["x"] + 0).name == "x" assert (a + a["x"]).name is None def test_math_with_coords(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray(np.random.randn(2, 3), coords, dims=["x", "y"]) actual = orig + 1 expected = DataArray(orig.values + 1, orig.coords) assert_identical(expected, actual) actual = 1 + orig assert_identical(expected, actual) actual = orig + orig[0, 0] exp_coords = {k: v for k, v in coords.items() if k != "lat"} expected = DataArray( orig.values + orig.values[0, 0], exp_coords, dims=["x", "y"] ) assert_identical(expected, actual) actual = orig[0, 0] + orig assert_identical(expected, actual) actual = orig[0, 0] + orig[-1, -1] expected = DataArray(orig.values[0, 0] + orig.values[-1, -1], {"c": -999}) assert_identical(expected, actual) actual = orig[:, 0] + orig[0, :] exp_values = orig[:, 0].values[:, None] + orig[0, :].values[None, :] expected = DataArray(exp_values, exp_coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig[0, :] + orig[:, 0] assert_identical(expected.transpose(transpose_coords=True), actual) actual = orig - orig.transpose(transpose_coords=True) expected = DataArray(np.zeros((2, 3)), orig.coords) assert_identical(expected, actual) actual = orig.transpose(transpose_coords=True) - orig assert_identical(expected.transpose(transpose_coords=True), actual) alt = DataArray([1, 1], {"x": [-1, -2], "c": "foo", "d": 555}, "x") actual = orig + alt expected = orig + 1 expected.coords["d"] = 555 del expected.coords["c"] assert_identical(expected, actual) actual = alt + orig assert_identical(expected, actual) def test_index_math(self) -> None: orig = DataArray(range(3), dims="x", name="x") actual = orig + 1 expected = DataArray(1 + np.arange(3), dims="x", name="x") assert_identical(expected, actual) # regression tests for #254 actual = orig[0] < orig expected = DataArray([False, True, True], dims="x", name="x") assert_identical(expected, actual) actual = orig > orig[0] assert_identical(expected, actual) def test_dataset_math(self) -> None: # more comprehensive tests with multiple dataset variables obs = Dataset( {"tmin": ("x", np.arange(5)), "tmax": ("x", 10 + np.arange(5))}, {"x": ("x", 0.5 * np.arange(5)), "loc": ("x", range(-2, 3))}, ) actual1 = 2 * obs["tmax"] expected1 = DataArray(2 * (10 + np.arange(5)), obs.coords, name="tmax") assert_identical(actual1, expected1) actual2 = obs["tmax"] - obs["tmin"] expected2 = DataArray(10 * np.ones(5), obs.coords) assert_identical(actual2, expected2) sim = Dataset( { "tmin": ("x", 1 + np.arange(5)), "tmax": ("x", 11 + np.arange(5)), # does *not* include 'loc' as a coordinate "x": ("x", 0.5 * np.arange(5)), } ) actual3 = sim["tmin"] - obs["tmin"] expected3 = DataArray(np.ones(5), obs.coords, name="tmin") assert_identical(actual3, expected3) actual4 = -obs["tmin"] + sim["tmin"] assert_identical(actual4, expected3) actual5 = sim["tmin"].copy() actual5 -= obs["tmin"] assert_identical(actual5, expected3) actual6 = sim.copy() actual6["tmin"] = sim["tmin"] - obs["tmin"] expected6 = Dataset( {"tmin": ("x", np.ones(5)), "tmax": 
("x", sim["tmax"].values)}, obs.coords ) assert_identical(actual6, expected6) actual7 = sim.copy() actual7["tmin"] -= obs["tmin"] assert_identical(actual7, expected6) def test_stack_unstack(self) -> None: orig = DataArray( [[0, 1], [2, 3]], dims=["x", "y"], attrs={"foo": 2}, ) assert_identical(orig, orig.unstack()) # test GH3000 a = orig[:0, :1].stack(new_dim=("x", "y")).indexes["new_dim"] b = pd.MultiIndex( levels=[ pd.Index([], dtype=np.int64), # type: ignore[list-item,unused-ignore] pd.Index([0], dtype=np.int64), # type: ignore[list-item,unused-ignore] ], codes=[[], []], names=["x", "y"], ) pd.testing.assert_index_equal(a, b) actual = orig.stack(z=["x", "y"]).unstack("z").drop_vars(["x", "y"]) assert_identical(orig, actual) actual = orig.stack(z=[...]).unstack("z").drop_vars(["x", "y"]) assert_identical(orig, actual) dims = ["a", "b", "c", "d", "e"] coords = { "a": [0], "b": [1, 2], "c": [3, 4, 5], "d": [6, 7], "e": [8], } orig = xr.DataArray(np.random.rand(1, 2, 3, 2, 1), coords=coords, dims=dims) stacked = orig.stack(ab=["a", "b"], cd=["c", "d"]) unstacked = stacked.unstack(["ab", "cd"]) assert_identical(orig, unstacked.transpose(*dims)) unstacked = stacked.unstack() assert_identical(orig, unstacked.transpose(*dims)) def test_stack_unstack_decreasing_coordinate(self) -> None: # regression test for GH980 orig = DataArray( np.random.rand(3, 4), dims=("y", "x"), coords={"x": np.arange(4), "y": np.arange(3, 0, -1)}, ) stacked = orig.stack(allpoints=["y", "x"]) actual = stacked.unstack("allpoints") assert_identical(orig, actual) def test_unstack_pandas_consistency(self) -> None: df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]}) s = df.set_index(["x", "y"])["foo"] expected = DataArray(s.unstack(), name="foo") actual = DataArray(s, dims="z").unstack("z") assert_identical(expected, actual) def test_unstack_requires_unique(self) -> None: df = pd.DataFrame({"foo": range(2), "x": ["a", "a"], "y": [0, 0]}) s = df.set_index(["x", "y"])["foo"] with pytest.raises( ValueError, match="Cannot unstack MultiIndex containing duplicates" ): DataArray(s, dims="z").unstack("z") @pytest.mark.filterwarnings("error") def test_unstack_roundtrip_integer_array(self) -> None: arr = xr.DataArray( np.arange(6).reshape(2, 3), coords={"x": ["a", "b"], "y": [0, 1, 2]}, dims=["x", "y"], ) stacked = arr.stack(z=["x", "y"]) roundtripped = stacked.unstack() assert_identical(arr, roundtripped) def test_stack_nonunique_consistency(self, da) -> None: da = da.isel(time=0, drop=True) # 2D actual = da.stack(z=["a", "x"]) expected = DataArray(da.to_pandas().stack(), dims="z") assert_identical(expected, actual) def test_to_unstacked_dataset_raises_value_error(self) -> None: data = DataArray([0, 1], dims="x", coords={"x": [0, 1]}) with pytest.raises(ValueError, match="'x' is not a stacked coordinate"): data.to_unstacked_dataset("x", 0) def test_transpose(self) -> None: da = DataArray( np.random.randn(3, 4, 5), dims=("x", "y", "z"), coords={ "x": range(3), "y": range(4), "z": range(5), "xy": (("x", "y"), np.random.randn(3, 4)), }, ) actual = da.transpose(transpose_coords=False) expected = DataArray(da.values.T, dims=("z", "y", "x"), coords=da.coords) assert_equal(expected, actual) actual = da.transpose("z", "y", "x", transpose_coords=True) expected = DataArray( da.values.T, dims=("z", "y", "x"), coords={ "x": da.x.values, "y": da.y.values, "z": da.z.values, "xy": (("y", "x"), da.xy.values.T), }, ) assert_equal(expected, actual) # same as previous but with ellipsis actual = da.transpose("z", ..., "x", 
transpose_coords=True) assert_equal(expected, actual) # same as previous but with a missing dimension actual = da.transpose( "z", "y", "x", "not_a_dim", transpose_coords=True, missing_dims="ignore" ) assert_equal(expected, actual) with pytest.raises(ValueError): da.transpose("x", "y") with pytest.raises(ValueError): da.transpose("not_a_dim", "z", "x", ...) with pytest.warns(UserWarning): da.transpose("not_a_dim", "y", "x", ..., missing_dims="warn") def test_squeeze(self) -> None: assert_equal(self.dv.variable.squeeze(), self.dv.squeeze().variable) def test_squeeze_drop(self) -> None: array = DataArray([1], [("x", [0])]) expected = DataArray(1) actual = array.squeeze(drop=True) assert_identical(expected, actual) expected = DataArray(1, {"x": 0}) actual = array.squeeze(drop=False) assert_identical(expected, actual) array = DataArray([[[0.0, 1.0]]], dims=["dim_0", "dim_1", "dim_2"]) expected = DataArray([[0.0, 1.0]], dims=["dim_1", "dim_2"]) actual = array.squeeze(axis=0) assert_identical(expected, actual) array = DataArray([[[[0.0, 1.0]]]], dims=["dim_0", "dim_1", "dim_2", "dim_3"]) expected = DataArray([[0.0, 1.0]], dims=["dim_1", "dim_3"]) actual = array.squeeze(axis=(0, 2)) assert_identical(expected, actual) array = DataArray([[[0.0, 1.0]]], dims=["dim_0", "dim_1", "dim_2"]) with pytest.raises(ValueError): array.squeeze(axis=0, dim="dim_1") def test_drop_coordinates(self) -> None: expected = DataArray(np.random.randn(2, 3), dims=["x", "y"]) arr = expected.copy() arr.coords["z"] = 2 actual = arr.drop_vars("z") assert_identical(expected, actual) with pytest.raises(ValueError): arr.drop_vars("not found") actual = expected.drop_vars("not found", errors="ignore") assert_identical(actual, expected) with pytest.raises(ValueError, match=r"cannot be found"): arr.drop_vars("w") actual = expected.drop_vars("w", errors="ignore") assert_identical(actual, expected) renamed = arr.rename("foo") with pytest.raises(ValueError, match=r"cannot be found"): renamed.drop_vars("foo") actual = renamed.drop_vars("foo", errors="ignore") assert_identical(actual, renamed) def test_drop_vars_callable(self) -> None: A = DataArray( np.random.randn(2, 3), dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4, 5]} ) expected = A.drop_vars(["x", "y"]) actual = A.drop_vars(lambda x: x.indexes) assert_identical(expected, actual) def test_drop_multiindex_level(self) -> None: # GH6505 expected = self.mda.drop_vars(["x", "level_1", "level_2"]) with pytest.warns(DeprecationWarning): actual = self.mda.drop_vars("level_1") assert_identical(expected, actual) def test_drop_all_multiindex_levels(self) -> None: dim_levels = ["x", "level_1", "level_2"] actual = self.mda.drop_vars(dim_levels) # no error, multi-index dropped for key in dim_levels: assert key not in actual.xindexes def test_drop_index_labels(self) -> None: arr = DataArray(np.random.randn(2, 3), coords={"y": [0, 1, 2]}, dims=["x", "y"]) actual = arr.drop_sel(y=[0, 1]) expected = arr[:, 2:] assert_identical(actual, expected) with pytest.raises((KeyError, ValueError), match=r"not .* in axis"): actual = arr.drop_sel(y=[0, 1, 3]) actual = arr.drop_sel(y=[0, 1, 3], errors="ignore") assert_identical(actual, expected) with pytest.warns(DeprecationWarning): arr.drop([0, 1, 3], dim="y", errors="ignore") # type: ignore[arg-type] def test_drop_index_positions(self) -> None: arr = DataArray(np.random.randn(2, 3), dims=["x", "y"]) actual = arr.drop_isel(y=[0, 1]) expected = arr[:, 2:] assert_identical(actual, expected) def test_drop_indexes(self) -> None: arr = DataArray([1, 2, 3], 
coords={"x": ("x", [1, 2, 3])}, dims="x") actual = arr.drop_indexes("x") assert "x" not in actual.xindexes actual = arr.drop_indexes("not_a_coord", errors="ignore") assert_identical(actual, arr) def test_dropna(self) -> None: x = np.random.randn(4, 4) x[::2, 0] = np.nan arr = DataArray(x, dims=["a", "b"]) actual = arr.dropna("a") expected = arr[1::2] assert_identical(actual, expected) actual = arr.dropna("b", how="all") assert_identical(actual, arr) actual = arr.dropna("a", thresh=1) assert_identical(actual, arr) actual = arr.dropna("b", thresh=3) expected = arr[:, 1:] assert_identical(actual, expected) def test_where(self) -> None: arr = DataArray(np.arange(4), dims="x") expected = arr.sel(x=slice(2)) actual = arr.where(arr.x < 2, drop=True) assert_identical(actual, expected) def test_where_lambda(self) -> None: arr = DataArray(np.arange(4), dims="y") expected = arr.sel(y=slice(2)) actual = arr.where(lambda x: x.y < 2, drop=True) assert_identical(actual, expected) def test_where_other_lambda(self) -> None: arr = DataArray(np.arange(4), dims="y") expected = xr.concat( [arr.sel(y=slice(2)), arr.sel(y=slice(2, None)) + 1], dim="y" ) actual = arr.where(lambda x: x.y < 2, lambda x: x + 1) assert_identical(actual, expected) def test_where_string(self) -> None: array = DataArray(["a", "b"]) expected = DataArray(np.array(["a", np.nan], dtype=object)) actual = array.where([True, False]) assert_identical(actual, expected) def test_cumops(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) actual = orig.cumsum() expected = DataArray([[-1, -1, 0], [-4, -4, 0]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumsum("x") expected = DataArray([[-1, 0, 1], [-4, 0, 4]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumsum("y") expected = DataArray([[-1, -1, 0], [-3, -3, 0]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumprod("x") expected = DataArray([[-1, 0, 1], [3, 0, 3]], coords, dims=["x", "y"]) assert_identical(expected, actual) actual = orig.cumprod("y") expected = DataArray([[-1, 0, 0], [-3, 0, 0]], coords, dims=["x", "y"]) assert_identical(expected, actual) def test_reduce(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) actual = orig.mean() expected = DataArray(0, {"c": -999}) assert_identical(expected, actual) actual = orig.mean(["x", "y"]) assert_identical(expected, actual) actual = orig.mean("x") expected = DataArray([-2, 0, 2], {"y": coords["y"], "c": -999}, "y") assert_identical(expected, actual) actual = orig.mean(["x"]) assert_identical(expected, actual) actual = orig.mean("y") expected = DataArray([0, 0], {"x": coords["x"], "c": -999}, "x") assert_identical(expected, actual) assert_equal(self.dv.reduce(np.mean, "x").variable, self.v.reduce(np.mean, "x")) orig = DataArray([[1, 0, np.nan], [3, 0, 3]], coords, dims=["x", "y"]) actual = orig.count() expected = DataArray(5, {"c": -999}) assert_identical(expected, actual) # uint support orig = DataArray(np.arange(6).reshape(3, 2).astype("uint"), dims=["x", "y"]) assert orig.dtype.kind == "u" actual = orig.mean(dim="x", skipna=True) expected = DataArray(orig.values.astype(int), dims=["x", "y"]).mean("x") assert_equal(actual, expected) def test_reduce_keepdims(self) -> None: 
coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) # Mean on all axes loses non-constant coordinates actual = orig.mean(keepdims=True) expected = DataArray( orig.data.mean(keepdims=True), dims=orig.dims, coords={k: v for k, v in coords.items() if k == "c"}, ) assert_equal(actual, expected) assert actual.sizes["x"] == 1 assert actual.sizes["y"] == 1 # Mean on specific axes loses coordinates not involving that axis actual = orig.mean("y", keepdims=True) expected = DataArray( orig.data.mean(axis=1, keepdims=True), dims=orig.dims, coords={k: v for k, v in coords.items() if k not in ["y", "lat"]}, ) assert_equal(actual, expected) @requires_bottleneck def test_reduce_keepdims_bottleneck(self) -> None: import bottleneck coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) # Bottleneck does not have its own keepdims implementation actual = orig.reduce(bottleneck.nanmean, keepdims=True) expected = orig.mean(keepdims=True) assert_equal(actual, expected) def test_reduce_dtype(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) for dtype in [np.float16, np.float32, np.float64]: assert orig.astype(float).mean(dtype=dtype).dtype == dtype def test_reduce_out(self) -> None: coords = { "x": [-1, -2], "y": ["ab", "cd", "ef"], "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]), "c": -999, } orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"]) with pytest.raises(TypeError): orig.mean(out=np.ones(orig.shape)) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) @pytest.mark.parametrize( "axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]], strict=True), ) def test_quantile(self, q, axis, dim, skipna, compute_backend) -> None: va = self.va.copy(deep=True) va[0, 0] = np.nan actual = DataArray(va).quantile(q, dim=dim, keep_attrs=True, skipna=skipna) _percentile_func = np.nanpercentile if skipna in (True, None) else np.percentile expected = _percentile_func(va.values, np.array(q) * 100, axis=axis) np.testing.assert_allclose(actual.values, expected) if is_scalar(q): assert "quantile" not in actual.dims else: assert "quantile" in actual.dims assert actual.attrs == self.attrs @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_method(self, method) -> None: q = [0.25, 0.5, 0.75] actual = DataArray(self.va).quantile(q, method=method) expected = np.nanquantile(self.dv.values, np.array(q), method=method) np.testing.assert_allclose(actual.values, expected) @pytest.mark.filterwarnings( "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning" ) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_interpolation_deprecated(self, method) -> None: da = DataArray(self.va) q = [0.25, 0.5, 0.75] with pytest.warns( FutureWarning, match="`interpolation` argument to quantile was renamed to `method`", ): actual = da.quantile(q, interpolation=method) expected = da.quantile(q, method=method) np.testing.assert_allclose(actual.values, expected.values) with warnings.catch_warnings(record=True): with 
pytest.raises(TypeError, match="interpolation and method keywords"): da.quantile(q, method=method, interpolation=method) def test_reduce_keep_attrs(self) -> None: # Test dropped attrs vm = self.va.mean() assert len(vm.attrs) == 0 assert vm.attrs == {} # Test kept attrs vm = self.va.mean(keep_attrs=True) assert len(vm.attrs) == len(self.attrs) assert vm.attrs == self.attrs def test_assign_attrs(self) -> None: expected = DataArray([], attrs=dict(a=1, b=2)) expected.attrs["a"] = 1 expected.attrs["b"] = 2 new = DataArray([]) actual = DataArray([]).assign_attrs(a=1, b=2) assert_identical(actual, expected) assert new.attrs == {} expected.attrs["c"] = 3 new_actual = actual.assign_attrs({"c": 3}) assert_identical(new_actual, expected) assert actual.attrs == {"a": 1, "b": 2} def test_drop_attrs(self) -> None: # Mostly tested in test_dataset.py, but adding a very small test here coord_ = DataArray([], attrs=dict(d=3, e=4)) da = DataArray([], attrs=dict(a=1, b=2)).assign_coords(dict(coord_=coord_)) assert da.drop_attrs().attrs == {} assert da.drop_attrs().coord_.attrs == {} assert da.drop_attrs(deep=False).coord_.attrs == dict(d=3, e=4) @pytest.mark.parametrize( "func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs] ) def test_propagate_attrs(self, func) -> None: da = DataArray(self.va) # test defaults assert func(da).attrs == da.attrs with set_options(keep_attrs=False): assert func(da).attrs == {} with set_options(keep_attrs=True): assert func(da).attrs == da.attrs def test_fillna(self) -> None: a = DataArray([np.nan, 1, np.nan, 3], coords={"x": range(4)}, dims="x") actual = a.fillna(-1) expected = DataArray([-1, 1, -1, 3], coords={"x": range(4)}, dims="x") assert_identical(expected, actual) b = DataArray(range(4), coords={"x": range(4)}, dims="x") actual = a.fillna(b) expected = b.copy() assert_identical(expected, actual) actual = a.fillna(np.arange(4)) assert_identical(expected, actual) actual = a.fillna(b[:3]) assert_identical(expected, actual) actual = a.fillna(b[:0]) assert_identical(a, actual) with pytest.raises(TypeError, match=r"fillna on a DataArray"): a.fillna({0: 0}) with pytest.raises(ValueError, match=r"broadcast"): a.fillna(np.array([1, 2])) def test_align(self) -> None: array = DataArray( np.random.random((6, 8)), coords={"x": list("abcdef")}, dims=["x", "y"] ) array1, array2 = align(array, array[:5], join="inner") assert_identical(array1, array[:5]) assert_identical(array2, array[:5]) def test_align_dtype(self) -> None: # regression test for #264 x1 = np.arange(30) x2 = np.arange(5, 35) a = DataArray(np.random.random((30,)).astype(np.float32), [("x", x1)]) b = DataArray(np.random.random((30,)).astype(np.float32), [("x", x2)]) c, d = align(a, b, join="outer") assert c.dtype == np.float32 def test_align_copy(self) -> None: x = DataArray([1, 2, 3], coords=[("a", [1, 2, 3])]) y = DataArray([1, 2], coords=[("a", [3, 1])]) expected_x2 = x expected_y2 = DataArray([2, np.nan, 1], coords=[("a", [1, 2, 3])]) x2, y2 = align(x, y, join="outer", copy=False) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x2.data) is source_ndarray(x.data) x2, y2 = align(x, y, join="outer", copy=True) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x2.data) is not source_ndarray(x.data) # Trivial align - 1 element x = DataArray([1, 2, 3], coords=[("a", [1, 2, 3])]) (x2,) = align(x, copy=False) assert_identical(x, x2) assert source_ndarray(x2.data) is source_ndarray(x.data) (x2,) = align(x, copy=True) 
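        # With copy=True even this trivial single-object alignment should
        # return a new underlying array, in contrast to the copy=False case
        # above, which may return a view of the original data.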
assert_identical(x, x2) assert source_ndarray(x2.data) is not source_ndarray(x.data) def test_align_override(self) -> None: left = DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]}) right = DataArray( np.arange(9).reshape((3, 3)), dims=["x", "y"], coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]}, ) expected_right = DataArray( np.arange(9).reshape(3, 3), dims=["x", "y"], coords={"x": [0, 1, 2], "y": [1, 2, 3]}, ) new_left, new_right = align(left, right, join="override") assert_identical(left, new_left) assert_identical(new_right, expected_right) new_left, new_right = align(left, right, exclude="x", join="override") assert_identical(left, new_left) assert_identical(right, new_right) new_left, new_right = xr.align( left.isel(x=0, drop=True), right, exclude="x", join="override" ) assert_identical(left.isel(x=0, drop=True), new_left) assert_identical(right, new_right) with pytest.raises( ValueError, match=r"cannot align.*join.*override.*same size" ): align(left.isel(x=0).expand_dims("x"), right, join="override") @pytest.mark.parametrize( "darrays", [ [ DataArray(0), DataArray([1], [("x", [1])]), DataArray([2, 3], [("x", [2, 3])]), ], [ DataArray([2, 3], [("x", [2, 3])]), DataArray([1], [("x", [1])]), DataArray(0), ], ], ) def test_align_override_error(self, darrays) -> None: with pytest.raises( ValueError, match=r"cannot align.*join.*override.*same size" ): xr.align(*darrays, join="override") def test_align_exclude(self) -> None: x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])]) y = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, 20]), ("b", [5, 6])]) z = DataArray([1], dims=["a"], coords={"a": [20], "b": 7}) x2, y2, z2 = align(x, y, z, join="outer", exclude=["b"]) expected_x2 = DataArray( [[3, 4], [1, 2], [np.nan, np.nan]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) expected_y2 = DataArray( [[np.nan, np.nan], [1, 2], [3, 4]], coords=[("a", [-2, -1, 20]), ("b", [5, 6])], ) expected_z2 = DataArray( [np.nan, np.nan, 1], dims=["a"], coords={"a": [-2, -1, 20], "b": 7} ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert_identical(expected_z2, z2) def test_align_indexes(self) -> None: x = DataArray([1, 2, 3], coords=[("a", [-1, 10, -2])]) y = DataArray([1, 2], coords=[("a", [-2, -1])]) x2, y2 = align(x, y, join="outer", indexes={"a": [10, -1, -2]}) expected_x2 = DataArray([2, 1, 3], coords=[("a", [10, -1, -2])]) expected_y2 = DataArray([np.nan, 2, 1], coords=[("a", [10, -1, -2])]) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) (x2,) = align(x, join="outer", indexes={"a": [-2, 7, 10, -1]}) expected_x2 = DataArray([3, np.nan, 2, 1], coords=[("a", [-2, 7, 10, -1])]) assert_identical(expected_x2, x2) def test_align_without_indexes_exclude(self) -> None: arrays = [DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], dims=["x"])] result0, result1 = align(*arrays, exclude=["x"]) assert_identical(result0, arrays[0]) assert_identical(result1, arrays[1]) def test_align_mixed_indexes(self) -> None: array_no_coord = DataArray([1, 2], dims=["x"]) array_with_coord = DataArray([1, 2], coords=[("x", ["a", "b"])]) result0, result1 = align(array_no_coord, array_with_coord) assert_identical(result0, array_with_coord) assert_identical(result1, array_with_coord) result0, result1 = align(array_no_coord, array_with_coord, exclude=["x"]) assert_identical(result0, array_no_coord) assert_identical(result1, array_with_coord) def test_align_without_indexes_errors(self) -> None: with pytest.raises( ValueError, 
match=r"cannot.*align.*dimension.*conflicting.*sizes.*", ): align(DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], dims=["x"])) with pytest.raises( ValueError, match=r"cannot.*align.*dimension.*conflicting.*sizes.*", ): align( DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], coords=[("x", [0, 1])]), ) def test_align_str_dtype(self) -> None: a = DataArray([0, 1], dims=["x"], coords={"x": ["a", "b"]}) b = DataArray([1, 2], dims=["x"], coords={"x": ["b", "c"]}) expected_a = DataArray( [0, 1, np.nan], dims=["x"], coords={"x": ["a", "b", "c"]} ) expected_b = DataArray( [np.nan, 1, 2], dims=["x"], coords={"x": ["a", "b", "c"]} ) actual_a, actual_b = xr.align(a, b, join="outer") assert_identical(expected_a, actual_a) assert expected_a.x.dtype == actual_a.x.dtype assert_identical(expected_b, actual_b) assert expected_b.x.dtype == actual_b.x.dtype def test_broadcast_on_vs_off_global_option_different_dims(self) -> None: xda_1 = xr.DataArray([1], dims="x1") xda_2 = xr.DataArray([1], dims="x2") with xr.set_options(arithmetic_broadcast=True): expected_xda = xr.DataArray([[1.0]], dims=("x1", "x2")) actual_xda = xda_1 / xda_2 assert_identical(actual_xda, expected_xda) with xr.set_options(arithmetic_broadcast=False): with pytest.raises( ValueError, match=re.escape( "Broadcasting is necessary but automatic broadcasting is disabled via " "global option `'arithmetic_broadcast'`. " "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting." ), ): xda_1 / xda_2 @pytest.mark.parametrize("arithmetic_broadcast", [True, False]) def test_broadcast_on_vs_off_global_option_same_dims( self, arithmetic_broadcast: bool ) -> None: # Ensure that no error is raised when arithmetic broadcasting is disabled, # when broadcasting is not needed. The two DataArrays have the same # dimensions of the same size. 
xda_1 = xr.DataArray([1], dims="x") xda_2 = xr.DataArray([1], dims="x") expected_xda = xr.DataArray([2.0], dims=("x",)) with xr.set_options(arithmetic_broadcast=arithmetic_broadcast): assert_identical(xda_1 + xda_2, expected_xda) assert_identical(xda_1 + np.array([1.0]), expected_xda) assert_identical(np.array([1.0]) + xda_1, expected_xda) def test_broadcast_arrays(self) -> None: x = DataArray([1, 2], coords=[("a", [-1, -2])], name="x") y = DataArray([1, 2], coords=[("b", [3, 4])], name="y") x2, y2 = broadcast(x, y) expected_coords = [("a", [-1, -2]), ("b", [3, 4])] expected_x2 = DataArray([[1, 1], [2, 2]], expected_coords, name="x") expected_y2 = DataArray([[1, 2], [1, 2]], expected_coords, name="y") assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) x = DataArray(np.random.randn(2, 3), dims=["a", "b"]) y = DataArray(np.random.randn(3, 2), dims=["b", "a"]) x2, y2 = broadcast(x, y) expected_x2 = x expected_y2 = y.T assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_arrays_misaligned(self) -> None: # broadcast on misaligned coords must auto-align x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])]) y = DataArray([1, 2], coords=[("a", [-1, 20])]) expected_x2 = DataArray( [[3, 4], [1, 2], [np.nan, np.nan]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) expected_y2 = DataArray( [[np.nan, np.nan], [1, 1], [2, 2]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) x2, y2 = broadcast(x, y) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_arrays_nocopy(self) -> None: # Test that input data is not copied over in case # no alteration is needed x = DataArray([1, 2], coords=[("a", [-1, -2])], name="x") y = DataArray(3, name="y") expected_x2 = DataArray([1, 2], coords=[("a", [-1, -2])], name="x") expected_y2 = DataArray([3, 3], coords=[("a", [-1, -2])], name="y") x2, y2 = broadcast(x, y) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x2.data) is source_ndarray(x.data) # single-element broadcast (trivial case) (x2,) = broadcast(x) assert_identical(x, x2) assert source_ndarray(x2.data) is source_ndarray(x.data) def test_broadcast_arrays_exclude(self) -> None: x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])]) y = DataArray([1, 2], coords=[("a", [-1, 20])]) z = DataArray(5, coords={"b": 5}) x2, y2, z2 = broadcast(x, y, z, exclude=["b"]) expected_x2 = DataArray( [[3, 4], [1, 2], [np.nan, np.nan]], coords=[("a", [-2, -1, 20]), ("b", [3, 4])], ) expected_y2 = DataArray([np.nan, 1, 2], coords=[("a", [-2, -1, 20])]) expected_z2 = DataArray( [5, 5, 5], dims=["a"], coords={"a": [-2, -1, 20], "b": 5} ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert_identical(expected_z2, z2) def test_broadcast_coordinates(self) -> None: # regression test for GH649 ds = Dataset({"a": (["x", "y"], np.ones((5, 6)))}) x_bc, y_bc, a_bc = broadcast(ds.x, ds.y, ds.a) assert_identical(ds.a, a_bc) X, Y = np.meshgrid(np.arange(5), np.arange(6), indexing="ij") exp_x = DataArray(X, dims=["x", "y"], name="x") exp_y = DataArray(Y, dims=["x", "y"], name="y") assert_identical(exp_x, x_bc) assert_identical(exp_y, y_bc) def test_to_pandas(self) -> None: # 0d actual_xr = DataArray(42).to_pandas() expected = np.array(42) assert_array_equal(actual_xr, expected) # 1d values = np.random.randn(3) index = pd.Index(["a", "b", "c"], name="x") da = DataArray(values, coords=[index]) actual_s = da.to_pandas() assert_array_equal(np.asarray(actual_s.values), 
values) assert_array_equal(actual_s.index, index) assert_array_equal(actual_s.index.name, "x") # 2d values = np.random.randn(3, 2) da = DataArray( values, coords=[("x", ["a", "b", "c"]), ("y", [0, 1])], name="foo" ) actual_df = da.to_pandas() assert_array_equal(np.asarray(actual_df.values), values) assert_array_equal(actual_df.index, ["a", "b", "c"]) assert_array_equal(actual_df.columns, [0, 1]) # roundtrips for shape in [(3,), (3, 4)]: dims = list("abc")[: len(shape)] da = DataArray(np.random.randn(*shape), dims=dims) roundtripped = DataArray(da.to_pandas()).drop_vars(dims) assert_identical(da, roundtripped) with pytest.raises(ValueError, match=r"Cannot convert"): DataArray(np.random.randn(1, 2, 3, 4, 5)).to_pandas() def test_to_dataframe(self) -> None: # regression test for #260 arr_np = np.random.randn(3, 4) arr = DataArray(arr_np, [("B", [1, 2, 3]), ("A", list("cdef"))], name="foo") expected_s = arr.to_series() actual_s = arr.to_dataframe()["foo"] assert_array_equal(np.asarray(expected_s.values), np.asarray(actual_s.values)) assert_array_equal(np.asarray(expected_s.name), np.asarray(actual_s.name)) assert_array_equal(expected_s.index.values, actual_s.index.values) actual_s = arr.to_dataframe(dim_order=["A", "B"])["foo"] assert_array_equal(arr_np.transpose().reshape(-1), np.asarray(actual_s.values)) # regression test for coords with different dimensions arr.coords["C"] = ("B", [-1, -2, -3]) expected_df = arr.to_series().to_frame() expected_df["C"] = [-1] * 4 + [-2] * 4 + [-3] * 4 expected_df = expected_df[["C", "foo"]] actual_df = arr.to_dataframe() assert_array_equal(np.asarray(expected_df.values), np.asarray(actual_df.values)) assert_array_equal(expected_df.columns.values, actual_df.columns.values) assert_array_equal(expected_df.index.values, actual_df.index.values) with pytest.raises(ValueError, match="does not match the set of dimensions"): arr.to_dataframe(dim_order=["B", "A", "C"]) with pytest.raises(ValueError, match=r"cannot convert a scalar"): arr.sel(A="c", B=2).to_dataframe() arr.name = None # unnamed with pytest.raises(ValueError, match=r"unnamed"): arr.to_dataframe() def test_to_dataframe_multiindex(self) -> None: # regression test for #3008 arr_np = np.random.randn(4, 3) mindex = pd.MultiIndex.from_product([[1, 2], list("ab")], names=["A", "B"]) arr = DataArray(arr_np, [("MI", mindex), ("C", [5, 6, 7])], name="foo") actual = arr.to_dataframe() index_pd = actual.index assert isinstance(index_pd, pd.MultiIndex) assert_array_equal(np.asarray(actual["foo"].values), arr_np.flatten()) assert_array_equal(index_pd.names, list("ABC")) assert_array_equal(index_pd.levels[0], [1, 2]) assert_array_equal(index_pd.levels[1], ["a", "b"]) assert_array_equal(index_pd.levels[2], [5, 6, 7]) def test_to_dataframe_0length(self) -> None: # regression test for #3008 arr_np = np.random.randn(4, 0) mindex = pd.MultiIndex.from_product([[1, 2], list("ab")], names=["A", "B"]) arr = DataArray(arr_np, [("MI", mindex), ("C", [])], name="foo") actual = arr.to_dataframe() assert len(actual) == 0 assert_array_equal(actual.index.names, list("ABC")) @requires_dask_expr @requires_dask @pytest.mark.xfail(not has_dask_ge_2025_1_0, reason="dask-expr is broken") def test_to_dask_dataframe(self) -> None: arr_np = np.arange(3 * 4).reshape(3, 4) arr = DataArray(arr_np, [("B", [1, 2, 3]), ("A", list("cdef"))], name="foo") expected_s = arr.to_series() actual = arr.to_dask_dataframe()["foo"] assert_array_equal(actual.values, np.asarray(expected_s.values)) actual = arr.to_dask_dataframe(dim_order=["A", "B"])["foo"] 
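        # dim_order controls how the array is unravelled into the frame's row
        # index; listing "A" before "B" should match flattening the transposed
        # values, which the following check verifies against arr_np.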
assert_array_equal(arr_np.transpose().reshape(-1), actual.values) # regression test for coords with different dimensions arr.coords["C"] = ("B", [-1, -2, -3]) expected_df = arr.to_series().to_frame() expected_df["C"] = [-1] * 4 + [-2] * 4 + [-3] * 4 expected_df = expected_df[["C", "foo"]] actual = arr.to_dask_dataframe()[["C", "foo"]] assert_array_equal(expected_df.values, np.asarray(actual.values)) assert_array_equal( expected_df.columns.values, np.asarray(actual.columns.values) ) with pytest.raises(ValueError, match="does not match the set of dimensions"): arr.to_dask_dataframe(dim_order=["B", "A", "C"]) arr.name = None with pytest.raises( ValueError, match="Cannot convert an unnamed DataArray", ): arr.to_dask_dataframe() def test_to_pandas_name_matches_coordinate(self) -> None: # coordinate with same name as array arr = DataArray([1, 2, 3], dims="x", name="x") series = arr.to_series() assert_array_equal([1, 2, 3], list(series.values)) assert_array_equal([0, 1, 2], list(series.index.values)) assert "x" == series.name assert "x" == series.index.name frame = arr.to_dataframe() expected = series.to_frame() assert expected.equals(frame) def test_to_and_from_series(self) -> None: expected = self.dv.to_dataframe()["foo"] actual = self.dv.to_series() assert_array_equal(expected.values, actual.values) assert_array_equal(expected.index.values, actual.index.values) assert "foo" == actual.name # test roundtrip assert_identical(self.dv, DataArray.from_series(actual).drop_vars(["x", "y"])) # test name is None actual.name = None expected_da = self.dv.rename(None) assert_identical( expected_da, DataArray.from_series(actual).drop_vars(["x", "y"]) ) def test_from_series_multiindex(self) -> None: # GH:3951 df = pd.DataFrame({"B": [1, 2, 3], "A": [4, 5, 6]}) df = df.rename_axis("num").rename_axis("alpha", axis=1) actual = df.stack("alpha").to_xarray() assert (actual.sel(alpha="B") == [1, 2, 3]).all() assert (actual.sel(alpha="A") == [4, 5, 6]).all() @requires_sparse def test_from_series_sparse(self) -> None: import sparse series = pd.Series([1, 2], index=[("a", 1), ("b", 2)]) actual_sparse = DataArray.from_series(series, sparse=True) actual_dense = DataArray.from_series(series, sparse=False) assert isinstance(actual_sparse.data, sparse.COO) actual_sparse.data = actual_sparse.data.todense() assert_identical(actual_sparse, actual_dense) @requires_sparse def test_from_multiindex_series_sparse(self) -> None: # regression test for GH4019 import sparse idx = pd.MultiIndex.from_product( [list(np.arange(3)), list(np.arange(5))], names=["a", "b"] ) series: pd.Series = pd.Series( np.random.default_rng(0).random(len(idx)), index=idx ).sample(n=5, random_state=3) dense = DataArray.from_series(series, sparse=False) expected_coords = sparse.COO.from_numpy(dense.data, np.nan).coords actual_sparse = xr.DataArray.from_series(series, sparse=True) actual_coords = actual_sparse.data.coords np.testing.assert_equal(actual_coords, expected_coords) def test_nbytes_does_not_load_data(self) -> None: array = InaccessibleArray(np.zeros((3, 3), dtype="uint8")) da = xr.DataArray(array, dims=["x", "y"]) # If xarray tries to instantiate the InaccessibleArray to compute # nbytes, the following will raise an error. # However, it should still be able to accurately give us information # about the number of bytes from the metadata assert da.nbytes == 9 # Here we confirm that this does not depend on array having the # nbytes property, since it isn't really required by the array # interface. 
nbytes is more a property of arrays that have been # cast to numpy arrays. assert not hasattr(array, "nbytes") def test_to_and_from_empty_series(self) -> None: # GH697 expected: pd.Series[Any] = pd.Series([], dtype=np.float64) da = DataArray.from_series(expected) assert len(da) == 0 actual = da.to_series() assert len(actual) == 0 assert expected.equals(actual) def test_series_categorical_index(self) -> None: # regression test for GH700 if not hasattr(pd, "CategoricalIndex"): pytest.skip("requires pandas with CategoricalIndex") s = pd.Series(np.arange(5), index=pd.CategoricalIndex(list("aabbc"))) arr = DataArray(s) assert "a a b b" in repr(arr) # should not error @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("data", ["list", "array", True]) @pytest.mark.parametrize("encoding", [True, False]) def test_to_and_from_dict( self, encoding: bool, data: bool | Literal["list", "array"], use_dask: bool ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") encoding_data = {"bar": "spam"} array = DataArray( np.random.randn(2, 3), {"x": ["a", "b"]}, ["x", "y"], name="foo" ) array.encoding = encoding_data return_data = array.to_numpy() coords_data = np.array(["a", "b"]) if data == "list" or data is True: return_data = return_data.tolist() coords_data = coords_data.tolist() expected: dict[str, Any] = { "name": "foo", "dims": ("x", "y"), "data": return_data, "attrs": {}, "coords": {"x": {"dims": ("x",), "data": coords_data, "attrs": {}}}, } if encoding: expected["encoding"] = encoding_data if has_dask: da = array.chunk() else: da = array if data == "array" or data is False: with raise_if_dask_computes(): actual = da.to_dict(encoding=encoding, data=data) else: actual = da.to_dict(encoding=encoding, data=data) # check that they are identical np.testing.assert_equal(expected, actual) # check roundtrip assert_identical(da, DataArray.from_dict(actual)) # a more bare bones representation still roundtrips d = { "name": "foo", "dims": ("x", "y"), "data": da.values.tolist(), "coords": {"x": {"dims": "x", "data": ["a", "b"]}}, } assert_identical(da, DataArray.from_dict(d)) # and the most bare bones representation still roundtrips d = {"name": "foo", "dims": ("x", "y"), "data": da.values} assert_identical(da.drop_vars("x"), DataArray.from_dict(d)) # missing a dims in the coords d = { "dims": ("x", "y"), "data": da.values, "coords": {"x": {"data": ["a", "b"]}}, } with pytest.raises( ValueError, match=r"cannot convert dict when coords are missing the key 'dims'", ): DataArray.from_dict(d) # this one is missing some necessary information d = {"dims": "t"} with pytest.raises( ValueError, match=r"cannot convert dict without the key 'data'" ): DataArray.from_dict(d) # check the data=False option expected_no_data = expected.copy() del expected_no_data["data"] del expected_no_data["coords"]["x"]["data"] endiantype = "U1" expected_no_data["coords"]["x"].update({"dtype": endiantype, "shape": (2,)}) expected_no_data.update({"dtype": "float64", "shape": (2, 3)}) actual_no_data = da.to_dict(data=False, encoding=encoding) assert expected_no_data == actual_no_data def test_to_and_from_dict_with_time_dim(self) -> None: x = np.random.randn(10, 3) t = pd.date_range("20130101", periods=10) lat = [77.7, 83.2, 76] da = DataArray(x, {"t": t, "lat": lat}, dims=["t", "lat"]) roundtripped = DataArray.from_dict(da.to_dict()) assert_identical(da, roundtripped) def test_to_and_from_dict_with_nan_nat(self) -> None: y = np.random.randn(10, 3) y[2] = np.nan t = pd.Series(pd.date_range("20130101", 
periods=10)) t[2] = np.nan lat = [77.7, 83.2, 76] da = DataArray(y, {"t": t, "lat": lat}, dims=["t", "lat"]) roundtripped = DataArray.from_dict(da.to_dict()) assert_identical(da, roundtripped) def test_to_dict_with_numpy_attrs(self) -> None: # this doesn't need to roundtrip x = np.random.randn(10, 3) t = list("abcdefghij") lat = [77.7, 83.2, 76] attrs = { "created": np.float64(1998), "coords": np.array([37, -110.1, 100]), "maintainer": "bar", } da = DataArray(x, {"t": t, "lat": lat}, dims=["t", "lat"], attrs=attrs) expected_attrs = { "created": attrs["created"].item(), # type: ignore[attr-defined] "coords": attrs["coords"].tolist(), # type: ignore[attr-defined] "maintainer": "bar", } actual = da.to_dict() # check that they are identical assert expected_attrs == actual["attrs"] def test_to_masked_array(self) -> None: rs = np.random.default_rng(44) x = rs.random(size=(10, 20)) x_masked = np.ma.masked_where(x < 0.5, x) da = DataArray(x_masked) # Test round trip x_masked_2 = da.to_masked_array() da_2 = DataArray(x_masked_2) assert_array_equal(x_masked, x_masked_2) assert_equal(da, da_2) da_masked_array = da.to_masked_array(copy=True) assert isinstance(da_masked_array, np.ma.MaskedArray) # Test masks assert_array_equal(da_masked_array.mask, x_masked.mask) # Test that mask is unpacked correctly assert_array_equal(da.values, x_masked.filled(np.nan)) # Test that the underlying data (including nans) hasn't changed assert_array_equal(da_masked_array, x_masked.filled(np.nan)) # Test that copy=False gives access to values masked_array = da.to_masked_array(copy=False) masked_array[0, 0] = 10.0 assert masked_array[0, 0] == 10.0 assert da[0, 0].values == 10.0 assert masked_array.base is da.values assert isinstance(masked_array, np.ma.MaskedArray) # Test with some odd arrays for v in [4, np.nan, True, "4", "four"]: da = DataArray(v) ma = da.to_masked_array() assert isinstance(ma, np.ma.MaskedArray) # Fix GH issue 684 - masked arrays mask should be an array not a scalar N = 4 v = range(N) da = DataArray(v) ma = da.to_masked_array() assert len(ma.mask) == N def test_to_dataset_whole(self) -> None: unnamed = DataArray([1, 2], dims="x") with pytest.raises(ValueError, match=r"unable to convert unnamed"): unnamed.to_dataset() actual = unnamed.to_dataset(name="foo") expected = Dataset({"foo": ("x", [1, 2])}) assert_identical(expected, actual) named = DataArray([1, 2], dims="x", name="foo", attrs={"y": "testattr"}) actual = named.to_dataset() expected = Dataset({"foo": ("x", [1, 2], {"y": "testattr"})}) assert_identical(expected, actual) # Test promoting attrs actual = named.to_dataset(promote_attrs=True) expected = Dataset( {"foo": ("x", [1, 2], {"y": "testattr"})}, attrs={"y": "testattr"} ) assert_identical(expected, actual) with pytest.raises(TypeError): actual = named.to_dataset("bar") def test_to_dataset_split(self) -> None: array = DataArray( [[1, 2], [3, 4], [5, 6]], coords=[("x", list("abc")), ("y", [0.0, 0.1])], attrs={"a": 1}, ) expected = Dataset( {"a": ("y", [1, 2]), "b": ("y", [3, 4]), "c": ("y", [5, 6])}, coords={"y": [0.0, 0.1]}, attrs={"a": 1}, ) actual = array.to_dataset("x") assert_identical(expected, actual) with pytest.raises(TypeError): array.to_dataset("x", name="foo") roundtripped = actual.to_dataarray(dim="x") assert_identical(array, roundtripped) array = DataArray([1, 2, 3], dims="x") expected = Dataset({0: 1, 1: 2, 2: 3}) actual = array.to_dataset("x") assert_identical(expected, actual) def test_to_dataset_retains_keys(self) -> None: # use dates as convenient non-str objects. 
Not a specific date test import datetime dates = [datetime.date(2000, 1, d) for d in range(1, 4)] array = DataArray([1, 2, 3], coords=[("x", dates)], attrs={"a": 1}) # convert to dataset and back again result = array.to_dataset("x").to_dataarray(dim="x") assert_equal(array, result) def test_to_dataset_coord_value_is_dim(self) -> None: # github issue #7823 array = DataArray( np.zeros((3, 3)), coords={ # 'a' is both a coordinate value and the name of a coordinate "x": ["a", "b", "c"], "a": [1, 2, 3], }, ) with pytest.raises( ValueError, match=( re.escape("dimension 'x' would produce the variables ('a',)") + ".*" + re.escape("DataArray.rename(a=...) or DataArray.assign_coords(x=...)") ), ): array.to_dataset("x") # test error message formatting when there are multiple ambiguous # values/coordinates array2 = DataArray( np.zeros((3, 3, 2)), coords={ "x": ["a", "b", "c"], "a": [1, 2, 3], "b": [0.0, 0.1], }, ) with pytest.raises( ValueError, match=( re.escape("dimension 'x' would produce the variables ('a', 'b')") + ".*" + re.escape( "DataArray.rename(a=..., b=...) or DataArray.assign_coords(x=...)" ) ), ): array2.to_dataset("x") def test__title_for_slice(self) -> None: array = DataArray( np.ones((4, 3, 2)), dims=["a", "b", "c"], coords={"a": range(4), "b": range(3), "c": range(2)}, ) assert "" == array._title_for_slice() assert "c = 0" == array.isel(c=0)._title_for_slice() title = array.isel(b=1, c=0)._title_for_slice() assert title in {"b = 1, c = 0", "c = 0, b = 1"} a2 = DataArray(np.ones((4, 1)), dims=["a", "b"]) assert "" == a2._title_for_slice() def test__title_for_slice_truncate(self) -> None: array = DataArray(np.ones(4)) array.coords["a"] = "a" * 100 array.coords["b"] = "b" * 100 nchar = 80 title = array._title_for_slice(truncate=nchar) assert nchar == len(title) assert title.endswith("...") def test_dataarray_diff_n1(self) -> None: da = DataArray(np.random.randn(3, 4), dims=["x", "y"]) actual = da.diff("y") expected = DataArray(np.diff(da.values, axis=1), dims=["x", "y"]) assert_equal(expected, actual) def test_coordinate_diff(self) -> None: # regression test for GH634 arr = DataArray(range(0, 20, 2), dims=["lon"], coords=[range(10)]) lon = arr.coords["lon"] expected = DataArray([1] * 9, dims=["lon"], coords=[range(1, 10)], name="lon") actual = lon.diff("lon") assert_equal(expected, actual) @pytest.mark.parametrize("offset", [-5, 0, 1, 2]) @pytest.mark.parametrize("fill_value, dtype", [(2, int), (dtypes.NA, float)]) def test_shift(self, offset, fill_value, dtype) -> None: arr = DataArray([1, 2, 3], dims="x") actual = arr.shift(x=1, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan expected = DataArray([fill_value, 1, 2], dims="x") assert_identical(expected, actual) assert actual.dtype == dtype arr = DataArray([1, 2, 3], [("x", ["a", "b", "c"])]) expected = DataArray(arr.to_pandas().shift(offset)) actual = arr.shift(x=offset) assert_identical(expected, actual) def test_roll_coords(self) -> None: arr = DataArray([1, 2, 3], coords={"x": range(3)}, dims="x") actual = arr.roll(x=1, roll_coords=True) expected = DataArray([3, 1, 2], coords=[("x", [2, 0, 1])]) assert_identical(expected, actual) def test_roll_no_coords(self) -> None: arr = DataArray([1, 2, 3], coords={"x": range(3)}, dims="x") actual = arr.roll(x=1) expected = DataArray([3, 1, 2], coords=[("x", [0, 1, 2])]) assert_identical(expected, actual) def test_copy_with_data(self) -> None: orig = DataArray( np.random.random(size=(2, 2)), 
dims=("x", "y"), attrs={"attr1": "value1"}, coords={"x": [4, 3]}, name="helloworld", ) new_data = np.arange(4).reshape(2, 2) actual = orig.copy(data=new_data) expected = orig.copy() expected.data = new_data assert_identical(expected, actual) @pytest.mark.xfail(raises=AssertionError) @pytest.mark.parametrize( "deep, expected_orig", [ [ True, xr.DataArray( xr.IndexVariable("a", np.array([1, 2])), coords={"a": [1, 2]}, dims=["a"], ), ], [ False, xr.DataArray( xr.IndexVariable("a", np.array([999, 2])), coords={"a": [999, 2]}, dims=["a"], ), ], ], ) def test_copy_coords(self, deep, expected_orig) -> None: """The test fails for the shallow copy, and apparently only on Windows for some reason. In windows coords seem to be immutable unless it's one dataarray deep copied from another.""" da = xr.DataArray( np.ones([2, 2, 2]), coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]}, dims=["a", "b", "c"], ) da_cp = da.copy(deep) new_a = np.array([999, 2]) da_cp.coords["a"] = da_cp["a"].copy(data=new_a) expected_cp = xr.DataArray( xr.IndexVariable("a", np.array([999, 2])), coords={"a": [999, 2]}, dims=["a"], ) assert_identical(da_cp["a"], expected_cp) assert_identical(da["a"], expected_orig) def test_real_and_imag(self) -> None: array = DataArray(1 + 2j) assert_identical(array.real, DataArray(1)) assert_identical(array.imag, DataArray(2)) def test_setattr_raises(self) -> None: array = DataArray(0, coords={"scalar": 1}, attrs={"foo": "bar"}) with pytest.raises(AttributeError, match=r"cannot set attr"): array.scalar = 2 with pytest.raises(AttributeError, match=r"cannot set attr"): array.foo = 2 with pytest.raises(AttributeError, match=r"cannot set attr"): array.other = 2 def test_full_like(self) -> None: # For more thorough tests, see test_variable.py da = DataArray( np.random.random(size=(2, 2)), dims=("x", "y"), attrs={"attr1": "value1"}, coords={"x": [4, 3]}, name="helloworld", ) actual = full_like(da, 2) expect = da.copy(deep=True) expect.values = np.array([[2.0, 2.0], [2.0, 2.0]]) assert_identical(expect, actual) # override dtype actual = full_like(da, fill_value=True, dtype=bool) expect.values = np.array([[True, True], [True, True]]) assert expect.dtype == bool assert_identical(expect, actual) with pytest.raises(ValueError, match="'dtype' cannot be dict-like"): full_like(da, fill_value=True, dtype={"x": bool}) def test_dot(self) -> None: x = np.linspace(-3, 3, 6) y = np.linspace(-3, 3, 5) z = range(4) da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4)) da = DataArray(da_vals, coords=[x, y, z], dims=["x", "y", "z"]) dm_vals1 = range(4) dm1 = DataArray(dm_vals1, coords=[z], dims=["z"]) # nd dot 1d actual1 = da.dot(dm1) expected_vals1 = np.tensordot(da_vals, dm_vals1, (2, 0)) expected1 = DataArray(expected_vals1, coords=[x, y], dims=["x", "y"]) assert_equal(expected1, actual1) # all shared dims actual2 = da.dot(da) expected_vals2 = np.tensordot(da_vals, da_vals, axes=([0, 1, 2], [0, 1, 2])) expected2 = DataArray(expected_vals2) assert_equal(expected2, actual2) # multiple shared dims dm_vals3 = np.arange(20 * 5 * 4).reshape((20, 5, 4)) j = np.linspace(-3, 3, 20) dm3 = DataArray(dm_vals3, coords=[j, y, z], dims=["j", "y", "z"]) actual3 = da.dot(dm3) expected_vals3 = np.tensordot(da_vals, dm_vals3, axes=([1, 2], [1, 2])) expected3 = DataArray(expected_vals3, coords=[x, j], dims=["x", "j"]) assert_equal(expected3, actual3) # Ellipsis: all dims are shared actual4 = da.dot(da, dim=...) expected4 = da.dot(da) assert_equal(expected4, actual4) # Ellipsis: not all dims are shared actual5 = da.dot(dm3, dim=...) 
expected5 = da.dot(dm3, dim=("j", "x", "y", "z")) assert_equal(expected5, actual5) with pytest.raises(NotImplementedError): da.dot(dm3.to_dataset(name="dm")) with pytest.raises(TypeError): da.dot(dm3.values) # type: ignore[type-var] def test_dot_align_coords(self) -> None: # GH 3694 x = np.linspace(-3, 3, 6) y = np.linspace(-3, 3, 5) z_a = range(4) da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4)) da = DataArray(da_vals, coords=[x, y, z_a], dims=["x", "y", "z"]) z_m = range(2, 6) dm_vals1 = range(4) dm1 = DataArray(dm_vals1, coords=[z_m], dims=["z"]) with xr.set_options(arithmetic_join="exact"): with pytest.raises( ValueError, match=r"cannot align.*join.*exact.*not equal.*" ): da.dot(dm1) da_aligned, dm_aligned = xr.align(da, dm1, join="inner") # nd dot 1d actual1 = da.dot(dm1) expected_vals1 = np.tensordot(da_aligned.values, dm_aligned.values, (2, 0)) expected1 = DataArray(expected_vals1, coords=[x, da_aligned.y], dims=["x", "y"]) assert_equal(expected1, actual1) # multiple shared dims dm_vals2 = np.arange(20 * 5 * 4).reshape((20, 5, 4)) j = np.linspace(-3, 3, 20) dm2 = DataArray(dm_vals2, coords=[j, y, z_m], dims=["j", "y", "z"]) da_aligned, dm_aligned = xr.align(da, dm2, join="inner") actual2 = da.dot(dm2) expected_vals2 = np.tensordot( da_aligned.values, dm_aligned.values, axes=([1, 2], [1, 2]) ) expected2 = DataArray(expected_vals2, coords=[x, j], dims=["x", "j"]) assert_equal(expected2, actual2) def test_matmul(self) -> None: # copied from above (could make a fixture) x = np.linspace(-3, 3, 6) y = np.linspace(-3, 3, 5) z = range(4) da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4)) da = DataArray(da_vals, coords=[x, y, z], dims=["x", "y", "z"]) result = da @ da expected = da.dot(da) assert_identical(result, expected) def test_matmul_align_coords(self) -> None: # GH 3694 x_a = np.arange(6) x_b = np.arange(2, 8) da_vals = np.arange(6) da_a = DataArray(da_vals, coords=[x_a], dims=["x"]) da_b = DataArray(da_vals, coords=[x_b], dims=["x"]) # only test arithmetic_join="inner" (=default) result = da_a @ da_b expected = da_a.dot(da_b) assert_identical(result, expected) with xr.set_options(arithmetic_join="exact"): with pytest.raises( ValueError, match=r"cannot align.*join.*exact.*not equal.*" ): da_a @ da_b def test_binary_op_propagate_indexes(self) -> None: # regression test for GH2227 self.dv["x"] = np.arange(self.dv.sizes["x"]) expected = self.dv.xindexes["x"] actual = (self.dv * 10).xindexes["x"] assert expected is actual actual = (self.dv > 10).xindexes["x"] assert expected is actual # use mda for bitshift test as it's type int actual = (self.mda << 2).xindexes["x"] expected = self.mda.xindexes["x"] assert expected is actual def test_binary_op_join_setting(self) -> None: dim = "x" align_type: Final = "outer" coords_l, coords_r = [0, 1, 2], [1, 2, 3] missing_3 = xr.DataArray(coords_l, [(dim, coords_l)]) missing_0 = xr.DataArray(coords_r, [(dim, coords_r)]) with xr.set_options(arithmetic_join=align_type): actual = missing_0 + missing_3 missing_0_aligned, missing_3_aligned = xr.align( missing_0, missing_3, join=align_type ) expected = xr.DataArray([np.nan, 2, 4, np.nan], [(dim, [0, 1, 2, 3])]) assert_equal(actual, expected) def test_combine_first(self) -> None: ar0 = DataArray([[0, 0], [0, 0]], [("x", ["a", "b"]), ("y", [-1, 0])]) ar1 = DataArray([[1, 1], [1, 1]], [("x", ["b", "c"]), ("y", [0, 1])]) ar2 = DataArray([2], [("x", ["d"])]) actual = ar0.combine_first(ar1) expected = DataArray( [[0, 0, np.nan], [0, 0, 1], [np.nan, 1, 1]], [("x", ["a", "b", "c"]), ("y", [-1, 0, 1])], ) 
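        # Descriptive note (added): combine_first keeps values from the calling array
        # and only fills the gaps from the argument, after aligning both on the union
        # of their coordinates (x: a/b/c, y: -1/0/1 here), hence the NaNs at the corners.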
assert_equal(actual, expected) actual = ar1.combine_first(ar0) expected = DataArray( [[0, 0, np.nan], [0, 1, 1], [np.nan, 1, 1]], [("x", ["a", "b", "c"]), ("y", [-1, 0, 1])], ) assert_equal(actual, expected) actual = ar0.combine_first(ar2) expected = DataArray( [[0, 0], [0, 0], [2, 2]], [("x", ["a", "b", "d"]), ("y", [-1, 0])] ) assert_equal(actual, expected) def test_sortby(self) -> None: da = DataArray( [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])] ) sorted1d = DataArray( [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])] ) sorted2d = DataArray( [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])] ) expected = sorted1d dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])]) actual = da.sortby(dax) assert_equal(actual, expected) # test descending order sort actual = da.sortby(dax, ascending=False) assert_equal(actual, da) # test alignment (fills in nan for 'c') dax_short = DataArray([98, 97], [("x", ["b", "a"])]) actual = da.sortby(dax_short) assert_equal(actual, expected) # test multi-dim sort by 1D dataarray values expected = sorted2d dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])]) day = DataArray([90, 80], [("y", [1, 0])]) actual = da.sortby([day, dax]) assert_equal(actual, expected) expected = sorted1d actual = da.sortby("x") assert_equal(actual, expected) expected = sorted2d actual = da.sortby(["x", "y"]) assert_equal(actual, expected) @requires_bottleneck def test_rank(self) -> None: # floats ar = DataArray([[3, 4, np.nan, 1]]) expect_0 = DataArray([[1, 1, np.nan, 1]]) expect_1 = DataArray([[2, 3, np.nan, 1]]) assert_equal(ar.rank("dim_0"), expect_0) assert_equal(ar.rank("dim_1"), expect_1) # int x = DataArray([3, 2, 1]) assert_equal(x.rank("dim_0"), x) # str y = DataArray(["c", "b", "a"]) assert_equal(y.rank("dim_0"), x) x = DataArray([3.0, 1.0, np.nan, 2.0, 4.0], dims=("z",)) y = DataArray([0.75, 0.25, np.nan, 0.5, 1.0], dims=("z",)) assert_equal(y.rank("z", pct=True), y) @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("use_datetime", [True, False]) @pytest.mark.filterwarnings("ignore:overflow encountered in multiply") def test_polyfit(self, use_dask, use_datetime) -> None: if use_dask and not has_dask: pytest.skip("requires dask") xcoord = xr.DataArray( pd.date_range("1970-01-01", freq="D", periods=10), dims=("x",), name="x" ) x = xr.core.missing.get_clean_interp_index(xcoord, "x") if not use_datetime: xcoord = x da_raw = DataArray( np.stack((10 + 1e-15 * x + 2e-28 * x**2, 30 + 2e-14 * x + 1e-29 * x**2)), dims=("d", "x"), coords={"x": xcoord, "d": [0, 1]}, ) if use_dask: da = da_raw.chunk({"d": 1}) else: da = da_raw out = da.polyfit("x", 2) expected = DataArray( [[2e-28, 1e-15, 10], [1e-29, 2e-14, 30]], dims=("d", "degree"), coords={"degree": [2, 1, 0], "d": [0, 1]}, ).T assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3) # Full output and deficient rank with warnings.catch_warnings(): warnings.simplefilter("ignore", RankWarning) out = da.polyfit("x", 12, full=True) assert out.polyfit_residuals.isnull().all() # With NaN da_raw[0, 1:3] = np.nan if use_dask: da = da_raw.chunk({"d": 1}) else: da = da_raw out = da.polyfit("x", 2, skipna=True, cov=True) assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3) assert "polyfit_covariance" in out # Skipna + Full output out = da.polyfit("x", 2, skipna=True, full=True) assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3) assert out.x_matrix_rank == 3 np.testing.assert_almost_equal(out.polyfit_residuals, [0, 0]) with 
warnings.catch_warnings(): warnings.simplefilter("ignore", RankWarning) out = da.polyfit("x", 8, full=True) np.testing.assert_array_equal(out.polyfit_residuals.isnull(), [True, False]) @requires_dask def test_polyfit_nd_dask(self) -> None: da = ( DataArray(np.arange(120), dims="time", coords={"time": np.arange(120)}) .chunk({"time": 20}) .expand_dims(lat=5, lon=5) .chunk({"lat": 2, "lon": 2}) ) actual = da.polyfit("time", 1, skipna=False) expected = da.compute().polyfit("time", 1, skipna=False) assert_allclose(actual, expected) def test_pad_constant(self) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad(dim_0=(1, 3)) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5).astype(np.float32), mode="constant", pad_width=((1, 3), (0, 0), (0, 0)), constant_values=np.nan, ) ) assert actual.shape == (7, 4, 5) assert_identical(actual, expected) ar = xr.DataArray([9], dims="x") actual = ar.pad(x=1) expected = xr.DataArray([np.nan, 9, np.nan], dims="x") assert_identical(actual, expected) actual = ar.pad(x=1, constant_values=1.23456) expected = xr.DataArray([1, 9, 1], dims="x") assert_identical(actual, expected) with pytest.raises(ValueError, match="cannot convert float NaN to integer"): ar.pad(x=1, constant_values=np.nan) def test_pad_coords(self) -> None: ar = DataArray( np.arange(3 * 4 * 5).reshape(3, 4, 5), [("x", np.arange(3)), ("y", np.arange(4)), ("z", np.arange(5))], ) actual = ar.pad(x=(1, 3), constant_values=1) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5), mode="constant", pad_width=((1, 3), (0, 0), (0, 0)), constant_values=1, ), [ ( "x", np.pad( np.arange(3).astype(np.float32), mode="constant", pad_width=(1, 3), constant_values=np.nan, ), ), ("y", np.arange(4)), ("z", np.arange(5)), ], ) assert_identical(actual, expected) @pytest.mark.parametrize("mode", ("minimum", "maximum", "mean", "median")) @pytest.mark.parametrize( "stat_length", (None, 3, (1, 3), {"dim_0": (2, 1), "dim_2": (4, 2)}) ) def test_pad_stat_length(self, mode, stat_length) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad(dim_0=(1, 3), dim_2=(2, 2), mode=mode, stat_length=stat_length) if isinstance(stat_length, dict): stat_length = (stat_length["dim_0"], (4, 4), stat_length["dim_2"]) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5), pad_width=((1, 3), (0, 0), (2, 2)), mode=mode, stat_length=stat_length, ) ) assert actual.shape == (7, 4, 9) assert_identical(actual, expected) @pytest.mark.parametrize( "end_values", (None, 3, (3, 5), {"dim_0": (2, 1), "dim_2": (4, 2)}) ) def test_pad_linear_ramp(self, end_values) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad( dim_0=(1, 3), dim_2=(2, 2), mode="linear_ramp", end_values=end_values ) if end_values is None: end_values = 0 elif isinstance(end_values, dict): end_values = (end_values["dim_0"], (4, 4), end_values["dim_2"]) expected = DataArray( np.pad( np.arange(3 * 4 * 5).reshape(3, 4, 5), pad_width=((1, 3), (0, 0), (2, 2)), mode="linear_ramp", end_values=end_values, ) ) assert actual.shape == (7, 4, 9) assert_identical(actual, expected) @pytest.mark.parametrize("mode", ("reflect", "symmetric")) @pytest.mark.parametrize("reflect_type", (None, "even", "odd")) def test_pad_reflect(self, mode, reflect_type) -> None: ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5)) actual = ar.pad( dim_0=(1, 3), dim_2=(2, 2), mode=mode, reflect_type=reflect_type ) np_kwargs = { "array": np.arange(3 * 4 * 5).reshape(3, 4, 5), "pad_width": ((1, 3), (0, 0), 
(2, 2)), "mode": mode, } # numpy does not support reflect_type=None if reflect_type is not None: np_kwargs["reflect_type"] = reflect_type expected = DataArray(np.pad(**np_kwargs)) assert actual.shape == (7, 4, 9) assert_identical(actual, expected) @pytest.mark.parametrize( ["keep_attrs", "attrs", "expected"], [ pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"), pytest.param(False, {"a": 1, "b": 2}, {}, id="False"), pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"), ], ) def test_pad_keep_attrs(self, keep_attrs, attrs, expected) -> None: arr = xr.DataArray( [1, 2], dims="x", coords={"c": ("x", [-1, 1], attrs)}, attrs=attrs ) expected = xr.DataArray( [0, 1, 2, 0], dims="x", coords={"c": ("x", [np.nan, -1, 1, np.nan], expected)}, attrs=expected, ) keep_attrs_ = "default" if keep_attrs is None else keep_attrs with set_options(keep_attrs=keep_attrs_): actual = arr.pad({"x": (1, 1)}, mode="constant", constant_values=0) xr.testing.assert_identical(actual, expected) actual = arr.pad( {"x": (1, 1)}, mode="constant", constant_values=0, keep_attrs=keep_attrs ) xr.testing.assert_identical(actual, expected) @pytest.mark.parametrize("parser", ["pandas", "python"]) @pytest.mark.parametrize( "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])] ) @pytest.mark.parametrize( "backend", ["numpy", pytest.param("dask", marks=[requires_dask])] ) def test_query( self, backend, engine: QueryEngineOptions, parser: QueryParserOptions ) -> None: """Test querying a dataset.""" # setup test data np.random.seed(42) a = np.arange(0, 10, 1) b = np.random.randint(0, 100, size=10) c = np.linspace(0, 1, 20) d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype( object ) aa = DataArray(data=a, dims=["x"], name="a", coords={"a2": ("x", a)}) bb = DataArray(data=b, dims=["x"], name="b", coords={"b2": ("x", b)}) cc = DataArray(data=c, dims=["y"], name="c", coords={"c2": ("y", c)}) dd = DataArray(data=d, dims=["z"], name="d", coords={"d2": ("z", d)}) if backend == "dask": import dask.array as da aa = aa.copy(data=da.from_array(a, chunks=3)) bb = bb.copy(data=da.from_array(b, chunks=3)) cc = cc.copy(data=da.from_array(c, chunks=7)) dd = dd.copy(data=da.from_array(d, chunks=12)) # query single dim, single variable with raise_if_dask_computes(): actual = aa.query(x="a2 > 5", engine=engine, parser=parser) expect = aa.isel(x=(a > 5)) assert_identical(expect, actual) # query single dim, single variable, via dict with raise_if_dask_computes(): actual = aa.query(dict(x="a2 > 5"), engine=engine, parser=parser) expect = aa.isel(dict(x=(a > 5))) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = bb.query(x="b2 > 50", engine=engine, parser=parser) expect = bb.isel(x=(b > 50)) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = cc.query(y="c2 < .5", engine=engine, parser=parser) expect = cc.isel(y=(c < 0.5)) assert_identical(expect, actual) # query single dim, single string variable if parser == "pandas": # N.B., this query currently only works with the pandas parser # xref https://github.com/pandas-dev/pandas/issues/40436 with raise_if_dask_computes(): actual = dd.query(z='d2 == "bar"', engine=engine, parser=parser) expect = dd.isel(z=(d == "bar")) assert_identical(expect, actual) # test error handling with pytest.raises(ValueError): aa.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs with pytest.raises(ValueError): aa.query(x=(a > 5)) 
# must be query string with pytest.raises(UndefinedVariableError): aa.query(x="spam > 50") # name not present @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit(self, use_dask) -> None: if use_dask and not has_dask: pytest.skip("requires dask") def exp_decay(t, n0, tau=1): return n0 * np.exp(-t / tau) t = np.arange(0, 5, 0.5) da = DataArray( np.stack([exp_decay(t, 3, 3), exp_decay(t, 5, 4), np.nan * t], axis=-1), dims=("t", "x"), coords={"t": t, "x": [0, 1, 2]}, ) da[0, 0] = np.nan expected = DataArray( [[3, 3], [5, 4], [np.nan, np.nan]], dims=("x", "param"), coords={"x": [0, 1, 2], "param": ["n0", "tau"]}, ) if use_dask: da = da.chunk({"x": 1}) fit = da.curvefit( coords=[da.t], func=exp_decay, p0={"n0": 4}, bounds={"tau": (2, 6)} ) assert_allclose(fit.curvefit_coefficients, expected, rtol=1e-3) da = da.compute() fit = da.curvefit(coords="t", func=np.power, reduce_dims="x", param_names=["a"]) assert "a" in fit.param assert "x" not in fit.dims def test_curvefit_helpers(self) -> None: def exp_decay(t, n0, tau=1): return n0 * np.exp(-t / tau) from xarray.computation.fit import _get_func_args, _initialize_curvefit_params params, func_args = _get_func_args(exp_decay, []) assert params == ["n0", "tau"] param_defaults, bounds_defaults = _initialize_curvefit_params( params, {"n0": 4}, {"tau": [5, np.inf]}, func_args ) assert param_defaults == {"n0": 4, "tau": 6} assert bounds_defaults == {"n0": (-np.inf, np.inf), "tau": (5, np.inf)} # DataArray as bound param_defaults, bounds_defaults = _initialize_curvefit_params( params=params, p0={"n0": 4}, bounds={"tau": [DataArray([3, 4], coords=[("x", [1, 2])]), np.inf]}, func_args=func_args, ) assert param_defaults["n0"] == 4 assert ( param_defaults["tau"] == xr.DataArray([4, 5], coords=[("x", [1, 2])]) ).all() assert bounds_defaults["n0"] == (-np.inf, np.inf) assert ( bounds_defaults["tau"][0] == DataArray([3, 4], coords=[("x", [1, 2])]) ).all() assert bounds_defaults["tau"][1] == np.inf param_names = ["a"] params, func_args = _get_func_args(np.power, param_names) assert params == param_names with pytest.raises(ValueError): _get_func_args(np.power, []) @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit_multidimensional_guess(self, use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("requires dask") def sine(t, a, f, p): return a * np.sin(2 * np.pi * (f * t + p)) t = np.arange(0, 2, 0.02) da = DataArray( np.stack([sine(t, 1.0, 2, 0), sine(t, 1.0, 2, 0)]), coords={"x": [0, 1], "t": t}, ) # Fitting to a sine curve produces a different result depending on the # initial guess: either the phase is zero and the amplitude is positive # or the phase is 0.5 * 2pi and the amplitude is negative. 
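        # Descriptive note (added): the per-x initial guesses constructed below
        # (a_guess = [1, -1], p_guess = [0, 0.5]) steer the optimizer into each of
        # these two equivalent minima, e.g. (illustrative sketch only):
        #     da.curvefit(coords=[da.t], func=sine, p0={"a": a_guess, "p": p_guess, "f": 2})
        # which is what makes the expected coefficients deterministic.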
expected = DataArray( [[1, 2, 0], [-1, 2, 0.5]], coords={"x": [0, 1], "param": ["a", "f", "p"]}, ) # Different initial guesses for different values of x a_guess = DataArray([1, -1], coords=[da.x]) p_guess = DataArray([0, 0.5], coords=[da.x]) if use_dask: da = da.chunk({"x": 1}) fit = da.curvefit( coords=[da.t], func=sine, p0={"a": a_guess, "p": p_guess, "f": 2}, ) assert_allclose(fit.curvefit_coefficients, expected) with pytest.raises( ValueError, match=r"Initial guess for 'a' has unexpected dimensions .* should only have " "dimensions that are in data dimensions", ): # initial guess with additional dimensions should be an error da.curvefit( coords=[da.t], func=sine, p0={"a": DataArray([1, 2], coords={"foo": [1, 2]})}, ) @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit_multidimensional_bounds(self, use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("requires dask") def sine(t, a, f, p): return a * np.sin(2 * np.pi * (f * t + p)) t = np.arange(0, 2, 0.02) da = xr.DataArray( np.stack([sine(t, 1.0, 2, 0), sine(t, 1.0, 2, 0)]), coords={"x": [0, 1], "t": t}, ) # Fit a sine with different bounds: positive amplitude should result in a fit with # phase 0 and negative amplitude should result in phase 0.5 * 2pi. expected = DataArray( [[1, 2, 0], [-1, 2, 0.5]], coords={"x": [0, 1], "param": ["a", "f", "p"]}, ) if use_dask: da = da.chunk({"x": 1}) fit = da.curvefit( coords=[da.t], func=sine, p0={"f": 2, "p": 0.25}, # this guess is needed to get the expected result bounds={ "a": ( DataArray([0, -2], coords=[da.x]), DataArray([2, 0], coords=[da.x]), ), }, ) assert_allclose(fit.curvefit_coefficients, expected) # Scalar lower bound with array upper bound fit2 = da.curvefit( coords=[da.t], func=sine, p0={"f": 2, "p": 0.25}, # this guess is needed to get the expected result bounds={ "a": (-2, DataArray([2, 0], coords=[da.x])), }, ) assert_allclose(fit2.curvefit_coefficients, expected) with pytest.raises( ValueError, match=r"Upper bound for 'a' has unexpected dimensions .* should only have " "dimensions that are in data dimensions", ): # bounds with additional dimensions should be an error da.curvefit( coords=[da.t], func=sine, bounds={"a": (0, DataArray([1], coords={"foo": [1]}))}, ) @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_curvefit_ignore_errors(self, use_dask: bool) -> None: if use_dask and not has_dask: pytest.skip("requires dask") # nonsense function to make the optimization fail def line(x, a, b): if a > 10: return 0 return a * x + b da = DataArray( [[1, 3, 5], [0, 20, 40]], coords={"i": [1, 2], "x": [0.0, 1.0, 2.0]}, ) if use_dask: da = da.chunk({"i": 1}) expected = DataArray( [[2, 1], [np.nan, np.nan]], coords={"i": [1, 2], "param": ["a", "b"]} ) with pytest.raises(RuntimeError, match="calls to function has reached maxfev"): da.curvefit( coords="x", func=line, # limit maximum number of calls so the optimization fails kwargs=dict(maxfev=5), ).compute() # have to compute to raise the error fit = da.curvefit( coords="x", func=line, errors="ignore", # limit maximum number of calls so the optimization fails kwargs=dict(maxfev=5), ).compute() assert_allclose(fit.curvefit_coefficients, expected) class TestReduce: @pytest.fixture(autouse=True) def setup(self): self.attrs = {"attr1": "value1", "attr2": 2929} @pytest.mark.parametrize( ["x", "minindex", "maxindex", "nanindex"], [ pytest.param(np.array([0, 1, 2, 0, -2, -4, 2]), 5, 2, None, id="int"), pytest.param( np.array([0.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0]), 5, 2, None, 
id="float" ), pytest.param( np.array([1.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0]), 5, 2, 1, id="nan" ), pytest.param( np.array([1.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0]).astype("object"), 5, 2, 1, marks=pytest.mark.filterwarnings( "ignore:invalid value encountered in reduce:RuntimeWarning" ), id="obj", ), pytest.param(np.array([np.nan, np.nan]), np.nan, np.nan, 0, id="allnan"), pytest.param( np.array( ["2015-12-31", "2020-01-02", "2020-01-01", "2016-01-01"], dtype="datetime64[ns]", ), 0, 1, None, id="datetime", ), ], ) class TestReduce1D(TestReduce): def test_min( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if np.isnan(minindex): minindex = 0 expected0 = ar.isel(x=minindex, drop=True) result0 = ar.min(keep_attrs=True) assert_identical(result0, expected0) result1 = ar.min() expected1 = expected0.copy() expected1.attrs = {} assert_identical(result1, expected1) result2 = ar.min(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = ar.isel(x=nanindex, drop=True) expected2.attrs = {} else: expected2 = expected1 assert_identical(result2, expected2) def test_max( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if np.isnan(minindex): maxindex = 0 expected0 = ar.isel(x=maxindex, drop=True) result0 = ar.max(keep_attrs=True) assert_identical(result0, expected0) result1 = ar.max() expected1 = expected0.copy() expected1.attrs = {} assert_identical(result1, expected1) result2 = ar.max(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = ar.isel(x=nanindex, drop=True) expected2.attrs = {} else: expected2 = expected1 assert_identical(result2, expected2) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmin( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(minindex): with pytest.raises(ValueError): ar.argmin() return expected0 = indarr[minindex] result0 = ar.argmin() assert_identical(result0, expected0) result1 = ar.argmin(keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result1, expected1) result2 = ar.argmin(skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = indarr.isel(x=nanindex, drop=True) expected2.attrs = {} else: expected2 = expected0 assert_identical(result2, expected2) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmax( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(maxindex): with pytest.raises(ValueError): ar.argmax() return expected0 = indarr[maxindex] result0 = ar.argmax() assert_identical(result0, expected0) result1 = ar.argmax(keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result1, expected1) result2 = ar.argmax(skipna=False) if nanindex is not None and ar.dtype.kind != "O": 
expected2 = indarr.isel(x=nanindex, drop=True) expected2.attrs = {} else: expected2 = expected0 assert_identical(result2, expected2) @pytest.mark.parametrize( "use_dask", [ pytest.param( True, marks=pytest.mark.skipif(not has_dask, reason="no dask") ), False, ], ) def test_idxmin( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, use_dask: bool, ) -> None: ar0_raw = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if use_dask: ar0 = ar0_raw.chunk() else: ar0 = ar0_raw with pytest.raises( KeyError, match=r"'spam' not found in array dimensions", ): ar0.idxmin(dim="spam") # Scalar Dataarray with pytest.raises(ValueError): xr.DataArray(5).idxmin() coordarr0 = xr.DataArray(ar0.coords["x"].data, dims=["x"]) coordarr1 = coordarr0.copy() hasna = np.isnan(minindex) if np.isnan(minindex): minindex = 0 if hasna: coordarr1[...] = 1 fill_value_0 = np.nan else: fill_value_0 = 1 expected0 = ( (coordarr1 * fill_value_0).isel(x=minindex, drop=True).astype("float") ) expected0.name = "x" # Default fill value (NaN) result0 = ar0.idxmin() assert_identical(result0, expected0) # Manually specify NaN fill_value result1 = ar0.idxmin(fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs result2 = ar0.idxmin(keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False if nanindex is not None and ar0.dtype.kind != "O": expected3 = coordarr0.isel(x=nanindex, drop=True).astype("float") expected3.name = "x" expected3.attrs = {} else: expected3 = expected0.copy() result3 = ar0.idxmin(skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False result4 = ar0.idxmin(skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value if hasna: fill_value_5 = -1.1 else: fill_value_5 = 1 expected5 = (coordarr1 * fill_value_5).isel(x=minindex, drop=True) expected5.name = "x" result5 = ar0.idxmin(fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value if hasna: fill_value_6 = -1 else: fill_value_6 = 1 expected6 = (coordarr1 * fill_value_6).isel(x=minindex, drop=True) expected6.name = "x" result6 = ar0.idxmin(fill_value=-1) assert_identical(result6, expected6) # Complex fill_value if hasna: fill_value_7 = -1j else: fill_value_7 = 1 expected7 = (coordarr1 * fill_value_7).isel(x=minindex, drop=True) expected7.name = "x" result7 = ar0.idxmin(fill_value=-1j) assert_identical(result7, expected7) @pytest.mark.parametrize("use_dask", [True, False]) def test_idxmax( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, use_dask: bool, ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") if use_dask and x.dtype.kind == "M": pytest.xfail("dask operation 'argmax' breaks when dtype is datetime64 (M)") ar0_raw = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) if use_dask: ar0 = ar0_raw.chunk({}) else: ar0 = ar0_raw with pytest.raises( KeyError, match=r"'spam' not found in array dimensions", ): ar0.idxmax(dim="spam") # Scalar Dataarray with pytest.raises(ValueError): xr.DataArray(5).idxmax() coordarr0 = xr.DataArray(ar0.coords["x"].data, dims=["x"]) coordarr1 = coordarr0.copy() hasna = np.isnan(maxindex) if np.isnan(maxindex): maxindex = 0 if hasna: coordarr1[...] 
= 1 fill_value_0 = np.nan else: fill_value_0 = 1 expected0 = ( (coordarr1 * fill_value_0).isel(x=maxindex, drop=True).astype("float") ) expected0.name = "x" # Default fill value (NaN) result0 = ar0.idxmax() assert_identical(result0, expected0) # Manually specify NaN fill_value result1 = ar0.idxmax(fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs result2 = ar0.idxmax(keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False if nanindex is not None and ar0.dtype.kind != "O": expected3 = coordarr0.isel(x=nanindex, drop=True).astype("float") expected3.name = "x" expected3.attrs = {} else: expected3 = expected0.copy() result3 = ar0.idxmax(skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False result4 = ar0.idxmax(skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value if hasna: fill_value_5 = -1.1 else: fill_value_5 = 1 expected5 = (coordarr1 * fill_value_5).isel(x=maxindex, drop=True) expected5.name = "x" result5 = ar0.idxmax(fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value if hasna: fill_value_6 = -1 else: fill_value_6 = 1 expected6 = (coordarr1 * fill_value_6).isel(x=maxindex, drop=True) expected6.name = "x" result6 = ar0.idxmax(fill_value=-1) assert_identical(result6, expected6) # Complex fill_value if hasna: fill_value_7 = -1j else: fill_value_7 = 1 expected7 = (coordarr1 * fill_value_7).isel(x=maxindex, drop=True) expected7.name = "x" result7 = ar0.idxmax(fill_value=-1j) assert_identical(result7, expected7) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmin_dim( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(minindex): with pytest.raises(ValueError): ar.argmin() return expected0 = {"x": indarr[minindex]} result0 = ar.argmin(...) for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmin(..., keep_attrs=True) expected1 = deepcopy(expected0) for da in expected1.values(): da.attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) result2 = ar.argmin(..., skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = {"x": indarr.isel(x=nanindex, drop=True)} expected2["x"].attrs = {} else: expected2 = expected0 for key in expected2: assert_identical(result2[key], expected2[key]) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmax_dim( self, x: np.ndarray, minindex: int | float, maxindex: int | float, nanindex: int | None, ) -> None: ar = xr.DataArray( x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs ) indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"]) if np.isnan(maxindex): with pytest.raises(ValueError): ar.argmax() return expected0 = {"x": indarr[maxindex]} result0 = ar.argmax(...) 
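        # Descriptive note (added): argmax with dim=Ellipsis reduces over every
        # dimension and returns a dict mapping each reduced dimension name to a
        # DataArray of indices, hence the key-wise comparison below.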
for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmax(..., keep_attrs=True) expected1 = deepcopy(expected0) for da in expected1.values(): da.attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) result2 = ar.argmax(..., skipna=False) if nanindex is not None and ar.dtype.kind != "O": expected2 = {"x": indarr.isel(x=nanindex, drop=True)} expected2["x"].attrs = {} else: expected2 = expected0 for key in expected2: assert_identical(result2[key], expected2[key]) @pytest.mark.parametrize( ["x", "minindex", "maxindex", "nanindex"], [ pytest.param( np.array( [ [0, 1, 2, 0, -2, -4, 2], [1, 1, 1, 1, 1, 1, 1], [0, 0, -10, 5, 20, 0, 0], ] ), [5, 0, 2], [2, 0, 4], [None, None, None], id="int", ), pytest.param( np.array( [ [2.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0], [-4.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0], [np.nan] * 7, ] ), [5, 0, np.nan], [0, 2, np.nan], [None, 1, 0], id="nan", ), pytest.param( np.array( [ [2.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0], [-4.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0], [np.nan] * 7, ] ).astype("object"), [5, 0, np.nan], [0, 2, np.nan], [None, 1, 0], marks=pytest.mark.filterwarnings( "ignore:invalid value encountered in reduce:RuntimeWarning:" ), id="obj", ), pytest.param( np.array( [ ["2015-12-31", "2020-01-02", "2020-01-01", "2016-01-01"], ["2020-01-02", "2020-01-02", "2020-01-02", "2020-01-02"], ["1900-01-01", "1-02-03", "1900-01-02", "1-02-03"], ], dtype="datetime64[ns]", ), [0, 0, 1], [1, 0, 2], [None, None, None], id="datetime", ), ], ) class TestReduce2D(TestReduce): def test_min( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) minindex = [x if not np.isnan(x) else 0 for x in minindex] expected0list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected0 = xr.concat(expected0list, dim="y") result0 = ar.min(dim="x", keep_attrs=True) assert_identical(result0, expected0) result1 = ar.min(dim="x") expected1 = expected0 expected1.attrs = {} assert_identical(result1, expected1) result2 = ar.min(axis=1) assert_identical(result2, expected1) minindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = {} result3 = ar.min(dim="x", skipna=False) assert_identical(result3, expected2) def test_max( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) maxindex = [x if not np.isnan(x) else 0 for x in maxindex] expected0list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected0 = xr.concat(expected0list, dim="y") result0 = ar.max(dim="x", keep_attrs=True) assert_identical(result0, expected0) result1 = ar.max(dim="x") expected1 = expected0.copy() expected1.attrs = {} assert_identical(result1, expected1) result2 = ar.max(axis=1) assert_identical(result2, expected1) maxindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) 
] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = {} result3 = ar.max(dim="x", skipna=False) assert_identical(result3, expected2) def test_argmin( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords) if np.isnan(minindex).any(): with pytest.raises(ValueError): ar.argmin(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected0 = xr.concat(expected0list, dim="y") result0 = ar.argmin(dim="x") assert_identical(result0, expected0) result1 = ar.argmin(axis=1) assert_identical(result1, expected0) result2 = ar.argmin(dim="x", keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result2, expected1) minindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = {} result3 = ar.argmin(dim="x", skipna=False) assert_identical(result3, expected2) def test_argmax( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarr_np = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarr_np, dims=ar.dims, coords=ar.coords) if np.isnan(maxindex).any(): with pytest.raises(ValueError): ar.argmax(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected0 = xr.concat(expected0list, dim="y") result0 = ar.argmax(dim="x") assert_identical(result0, expected0) result1 = ar.argmax(axis=1) assert_identical(result1, expected0) result2 = ar.argmax(dim="x", keep_attrs=True) expected1 = expected0.copy() expected1.attrs = self.attrs assert_identical(result2, expected1) maxindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected2 = xr.concat(expected2list, dim="y") expected2.attrs = {} result3 = ar.argmax(dim="x", skipna=False) assert_identical(result3, expected2) @pytest.mark.parametrize( "use_dask", [pytest.param(True, id="dask"), pytest.param(False, id="nodask")] ) def test_idxmin( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], use_dask: bool, ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") if use_dask and x.dtype.kind == "M": pytest.xfail("dask operation 'argmin' breaks when dtype is datetime64 (M)") if x.dtype.kind == "O": # TODO: nanops._nan_argminmax_object computes once to check for all-NaN slices. 
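        # Descriptive note (added): object arrays therefore get a budget of one dask
        # compute; every other dtype is expected to stay fully lazy (max_computes=0)
        # inside raise_if_dask_computes below.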
max_computes = 1 else: max_computes = 0 ar0_raw = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) if use_dask: ar0 = ar0_raw.chunk({}) else: ar0 = ar0_raw assert_identical(ar0, ar0) # No dimension specified with pytest.raises(ValueError): ar0.idxmin() # dim doesn't exist with pytest.raises(KeyError): ar0.idxmin(dim="Y") assert_identical(ar0, ar0) coordarr0 = xr.DataArray( np.tile(ar0.coords["x"], [x.shape[0], 1]), dims=ar0.dims, coords=ar0.coords ) hasna = [np.isnan(x) for x in minindex] coordarr1 = coordarr0.copy() coordarr1[hasna, :] = 1 minindex0 = [x if not np.isnan(x) else 0 for x in minindex] nan_mult_0 = np.array([np.nan if x else 1 for x in hasna])[:, None] expected0list = [ (coordarr1 * nan_mult_0).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected0 = xr.concat(expected0list, dim="y") expected0.name = "x" # Default fill value (NaN) with raise_if_dask_computes(max_computes=max_computes): result0 = ar0.idxmin(dim="x") assert_identical(result0, expected0) # Manually specify NaN fill_value with raise_if_dask_computes(max_computes=max_computes): result1 = ar0.idxmin(dim="x", fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs with raise_if_dask_computes(max_computes=max_computes): result2 = ar0.idxmin(dim="x", keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False minindex3 = [ x if y is None or ar0.dtype.kind == "O" else y for x, y in zip(minindex0, nanindex, strict=True) ] expected3list = [ coordarr0.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex3) ] expected3 = xr.concat(expected3list, dim="y") expected3.name = "x" expected3.attrs = {} with raise_if_dask_computes(max_computes=max_computes): result3 = ar0.idxmin(dim="x", skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False with raise_if_dask_computes(max_computes=max_computes): result4 = ar0.idxmin(dim="x", skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value nan_mult_5 = np.array([-1.1 if x else 1 for x in hasna])[:, None] expected5list = [ (coordarr1 * nan_mult_5).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected5 = xr.concat(expected5list, dim="y") expected5.name = "x" with raise_if_dask_computes(max_computes=max_computes): result5 = ar0.idxmin(dim="x", fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value nan_mult_6 = np.array([-1 if x else 1 for x in hasna])[:, None] expected6list = [ (coordarr1 * nan_mult_6).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected6 = xr.concat(expected6list, dim="y") expected6.name = "x" with raise_if_dask_computes(max_computes=max_computes): result6 = ar0.idxmin(dim="x", fill_value=-1) assert_identical(result6, expected6) # Complex fill_value nan_mult_7 = np.array([-5j if x else 1 for x in hasna])[:, None] expected7list = [ (coordarr1 * nan_mult_7).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex0) ] expected7 = xr.concat(expected7list, dim="y") expected7.name = "x" with raise_if_dask_computes(max_computes=max_computes): result7 = ar0.idxmin(dim="x", fill_value=-5j) assert_identical(result7, expected7) @pytest.mark.parametrize( "use_dask", [pytest.param(True, id="dask"), pytest.param(False, id="nodask")] ) def test_idxmax( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | 
float], nanindex: list[int | None], use_dask: bool, ) -> None: if use_dask and not has_dask: pytest.skip("requires dask") if use_dask and x.dtype.kind == "M": pytest.xfail("dask operation 'argmax' breaks when dtype is datetime64 (M)") if x.dtype.kind == "O": # TODO: nanops._nan_argminmax_object computes once to check for all-NaN slices. max_computes = 1 else: max_computes = 0 ar0_raw = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) if use_dask: ar0 = ar0_raw.chunk({}) else: ar0 = ar0_raw # No dimension specified with pytest.raises(ValueError): ar0.idxmax() # dim doesn't exist with pytest.raises(KeyError): ar0.idxmax(dim="Y") ar1 = ar0.copy() del ar1.coords["y"] with pytest.raises(KeyError): ar1.idxmax(dim="y") coordarr0 = xr.DataArray( np.tile(ar0.coords["x"], [x.shape[0], 1]), dims=ar0.dims, coords=ar0.coords ) hasna = [np.isnan(x) for x in maxindex] coordarr1 = coordarr0.copy() coordarr1[hasna, :] = 1 maxindex0 = [x if not np.isnan(x) else 0 for x in maxindex] nan_mult_0 = np.array([np.nan if x else 1 for x in hasna])[:, None] expected0list = [ (coordarr1 * nan_mult_0).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected0 = xr.concat(expected0list, dim="y") expected0.name = "x" # Default fill value (NaN) with raise_if_dask_computes(max_computes=max_computes): result0 = ar0.idxmax(dim="x") assert_identical(result0, expected0) # Manually specify NaN fill_value with raise_if_dask_computes(max_computes=max_computes): result1 = ar0.idxmax(dim="x", fill_value=np.nan) assert_identical(result1, expected0) # keep_attrs with raise_if_dask_computes(max_computes=max_computes): result2 = ar0.idxmax(dim="x", keep_attrs=True) expected2 = expected0.copy() expected2.attrs = self.attrs assert_identical(result2, expected2) # skipna=False maxindex3 = [ x if y is None or ar0.dtype.kind == "O" else y for x, y in zip(maxindex0, nanindex, strict=True) ] expected3list = [ coordarr0.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex3) ] expected3 = xr.concat(expected3list, dim="y") expected3.name = "x" expected3.attrs = {} with raise_if_dask_computes(max_computes=max_computes): result3 = ar0.idxmax(dim="x", skipna=False) assert_identical(result3, expected3) # fill_value should be ignored with skipna=False with raise_if_dask_computes(max_computes=max_computes): result4 = ar0.idxmax(dim="x", skipna=False, fill_value=-100j) assert_identical(result4, expected3) # Float fill_value nan_mult_5 = np.array([-1.1 if x else 1 for x in hasna])[:, None] expected5list = [ (coordarr1 * nan_mult_5).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected5 = xr.concat(expected5list, dim="y") expected5.name = "x" with raise_if_dask_computes(max_computes=max_computes): result5 = ar0.idxmax(dim="x", fill_value=-1.1) assert_identical(result5, expected5) # Integer fill_value nan_mult_6 = np.array([-1 if x else 1 for x in hasna])[:, None] expected6list = [ (coordarr1 * nan_mult_6).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected6 = xr.concat(expected6list, dim="y") expected6.name = "x" with raise_if_dask_computes(max_computes=max_computes): result6 = ar0.idxmax(dim="x", fill_value=-1) assert_identical(result6, expected6) # Complex fill_value nan_mult_7 = np.array([-5j if x else 1 for x in hasna])[:, None] expected7list = [ (coordarr1 * nan_mult_7).isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex0) ] expected7 = 
xr.concat(expected7list, dim="y") expected7.name = "x" with raise_if_dask_computes(max_computes=max_computes): result7 = ar0.idxmax(dim="x", fill_value=-5j) assert_identical(result7, expected7) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmin_dim( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords) if np.isnan(minindex).any(): with pytest.raises(ValueError): ar.argmin(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected0 = {"x": xr.concat(expected0list, dim="y")} result0 = ar.argmin(dim=["x"]) for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmin(dim=["x"], keep_attrs=True) expected1 = deepcopy(expected0) expected1["x"].attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) minindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) ] expected2 = {"x": xr.concat(expected2list, dim="y")} expected2["x"].attrs = {} result2 = ar.argmin(dim=["x"], skipna=False) for key in expected2: assert_identical(result2[key], expected2[key]) result3 = ar.argmin(...) # TODO: remove cast once argmin typing is overloaded min_xind = cast(DataArray, ar.isel(expected0).argmin()) expected3 = { "y": DataArray(min_xind), "x": DataArray(minindex[min_xind.item()]), } for key in expected3: assert_identical(result3[key], expected3[key]) @pytest.mark.filterwarnings( "ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning" ) def test_argmax_dim( self, x: np.ndarray, minindex: list[int | float], maxindex: list[int | float], nanindex: list[int | None], ) -> None: ar = xr.DataArray( x, dims=["y", "x"], coords={"x": np.arange(x.shape[1]) * 4, "y": 1 - np.arange(x.shape[0])}, attrs=self.attrs, ) indarrnp = np.tile(np.arange(x.shape[1], dtype=np.intp), [x.shape[0], 1]) indarr = xr.DataArray(indarrnp, dims=ar.dims, coords=ar.coords) if np.isnan(maxindex).any(): with pytest.raises(ValueError): ar.argmax(dim="x") return expected0list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected0 = {"x": xr.concat(expected0list, dim="y")} result0 = ar.argmax(dim=["x"]) for key in expected0: assert_identical(result0[key], expected0[key]) result1 = ar.argmax(dim=["x"], keep_attrs=True) expected1 = deepcopy(expected0) expected1["x"].attrs = self.attrs for key in expected1: assert_identical(result1[key], expected1[key]) maxindex = [ x if y is None or ar.dtype.kind == "O" else y for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) ] expected2 = {"x": xr.concat(expected2list, dim="y")} expected2["x"].attrs = {} result2 = ar.argmax(dim=["x"], skipna=False) for key in expected2: assert_identical(result2[key], expected2[key]) result3 = ar.argmax(...) 
# TODO: remove cast once argmax typing is overloaded max_xind = cast(DataArray, ar.isel(expected0).argmax()) expected3 = { "y": DataArray(max_xind), "x": DataArray(maxindex[max_xind.item()]), } for key in expected3: assert_identical(result3[key], expected3[key]) @pytest.mark.parametrize( "x, minindices_x, minindices_y, minindices_z, minindices_xy, " "minindices_xz, minindices_yz, minindices_xyz, maxindices_x, " "maxindices_y, maxindices_z, maxindices_xy, maxindices_xz, maxindices_yz, " "maxindices_xyz, nanindices_x, nanindices_y, nanindices_z, nanindices_xy, " "nanindices_xz, nanindices_yz, nanindices_xyz", [ pytest.param( np.array( [ [[0, 1, 2, 0], [-2, -4, 2, 0]], [[1, 1, 1, 1], [1, 1, 1, 1]], [[0, 0, -10, 5], [20, 0, 0, 0]], ] ), {"x": np.array([[0, 2, 2, 0], [0, 0, 2, 0]])}, {"y": np.array([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]])}, {"z": np.array([[0, 1], [0, 0], [2, 1]])}, {"x": np.array([0, 0, 2, 0]), "y": np.array([1, 1, 0, 0])}, {"x": np.array([2, 0]), "z": np.array([2, 1])}, {"y": np.array([1, 0, 0]), "z": np.array([1, 0, 2])}, {"x": np.array(2), "y": np.array(0), "z": np.array(2)}, {"x": np.array([[1, 0, 0, 2], [2, 1, 0, 1]])}, {"y": np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 1, 0]])}, {"z": np.array([[2, 2], [0, 0], [3, 0]])}, {"x": np.array([2, 0, 0, 2]), "y": np.array([1, 0, 0, 0])}, {"x": np.array([2, 2]), "z": np.array([3, 0])}, {"y": np.array([0, 0, 1]), "z": np.array([2, 0, 0])}, {"x": np.array(2), "y": np.array(1), "z": np.array(0)}, {"x": np.array([[None, None, None, None], [None, None, None, None]])}, { "y": np.array( [ [None, None, None, None], [None, None, None, None], [None, None, None, None], ] ) }, {"z": np.array([[None, None], [None, None], [None, None]])}, { "x": np.array([None, None, None, None]), "y": np.array([None, None, None, None]), }, {"x": np.array([None, None]), "z": np.array([None, None])}, {"y": np.array([None, None, None]), "z": np.array([None, None, None])}, {"x": np.array(None), "y": np.array(None), "z": np.array(None)}, id="int", ), pytest.param( np.array( [ [[2.0, 1.0, 2.0, 0.0], [-2.0, -4.0, 2.0, 0.0]], [[-4.0, np.nan, 2.0, np.nan], [-2.0, -4.0, 2.0, 0.0]], [[np.nan] * 4, [np.nan] * 4], ] ), {"x": np.array([[1, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[1, 1, 0, 0], [0, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[3, 1], [0, 1], [np.nan, np.nan]])}, {"x": np.array([1, 0, 0, 0]), "y": np.array([0, 1, 0, 0])}, {"x": np.array([1, 0]), "z": np.array([0, 1])}, {"y": np.array([1, 0, np.nan]), "z": np.array([1, 0, np.nan])}, {"x": np.array(0), "y": np.array(1), "z": np.array(1)}, {"x": np.array([[0, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[0, 0, 0, 0], [1, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[0, 2], [2, 2], [np.nan, np.nan]])}, {"x": np.array([0, 0, 0, 0]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([0, 0]), "z": np.array([2, 2])}, {"y": np.array([0, 0, np.nan]), "z": np.array([0, 2, np.nan])}, {"x": np.array(0), "y": np.array(0), "z": np.array(0)}, {"x": np.array([[2, 1, 2, 1], [2, 2, 2, 2]])}, { "y": np.array( [[None, None, None, None], [None, 0, None, 0], [0, 0, 0, 0]] ) }, {"z": np.array([[None, None], [1, None], [0, 0]])}, {"x": np.array([2, 1, 2, 1]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([1, 2]), "z": np.array([1, 0])}, {"y": np.array([None, 0, 0]), "z": np.array([None, 1, 0])}, {"x": np.array(1), "y": np.array(0), "z": np.array(1)}, id="nan", ), pytest.param( np.array( [ [[2.0, 1.0, 2.0, 0.0], [-2.0, -4.0, 2.0, 0.0]], [[-4.0, np.nan, 2.0, np.nan], [-2.0, -4.0, 2.0, 0.0]], 
[[np.nan] * 4, [np.nan] * 4], ] ).astype("object"), {"x": np.array([[1, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[1, 1, 0, 0], [0, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[3, 1], [0, 1], [np.nan, np.nan]])}, {"x": np.array([1, 0, 0, 0]), "y": np.array([0, 1, 0, 0])}, {"x": np.array([1, 0]), "z": np.array([0, 1])}, {"y": np.array([1, 0, np.nan]), "z": np.array([1, 0, np.nan])}, {"x": np.array(0), "y": np.array(1), "z": np.array(1)}, {"x": np.array([[0, 0, 0, 0], [0, 0, 0, 0]])}, { "y": np.array( [[0, 0, 0, 0], [1, 1, 0, 1], [np.nan, np.nan, np.nan, np.nan]] ) }, {"z": np.array([[0, 2], [2, 2], [np.nan, np.nan]])}, {"x": np.array([0, 0, 0, 0]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([0, 0]), "z": np.array([2, 2])}, {"y": np.array([0, 0, np.nan]), "z": np.array([0, 2, np.nan])}, {"x": np.array(0), "y": np.array(0), "z": np.array(0)}, {"x": np.array([[2, 1, 2, 1], [2, 2, 2, 2]])}, { "y": np.array( [[None, None, None, None], [None, 0, None, 0], [0, 0, 0, 0]] ) }, {"z": np.array([[None, None], [1, None], [0, 0]])}, {"x": np.array([2, 1, 2, 1]), "y": np.array([0, 0, 0, 0])}, {"x": np.array([1, 2]), "z": np.array([1, 0])}, {"y": np.array([None, 0, 0]), "z": np.array([None, 1, 0])}, {"x": np.array(1), "y": np.array(0), "z": np.array(1)}, id="obj", ), pytest.param( np.array( [ [["2015-12-31", "2020-01-02"], ["2020-01-01", "2016-01-01"]], [["2020-01-02", "2020-01-02"], ["2020-01-02", "2020-01-02"]], [["1900-01-01", "1-02-03"], ["1900-01-02", "1-02-03"]], ], dtype="datetime64[ns]", ), {"x": np.array([[2, 2], [2, 2]])}, {"y": np.array([[0, 1], [0, 0], [0, 0]])}, {"z": np.array([[0, 1], [0, 0], [1, 1]])}, {"x": np.array([2, 2]), "y": np.array([0, 0])}, {"x": np.array([2, 2]), "z": np.array([1, 1])}, {"y": np.array([0, 0, 0]), "z": np.array([0, 0, 1])}, {"x": np.array(2), "y": np.array(0), "z": np.array(1)}, {"x": np.array([[1, 0], [1, 1]])}, {"y": np.array([[1, 0], [0, 0], [1, 0]])}, {"z": np.array([[1, 0], [0, 0], [0, 0]])}, {"x": np.array([1, 0]), "y": np.array([0, 0])}, {"x": np.array([0, 1]), "z": np.array([1, 0])}, {"y": np.array([0, 0, 1]), "z": np.array([1, 0, 0])}, {"x": np.array(0), "y": np.array(0), "z": np.array(1)}, {"x": np.array([[None, None], [None, None]])}, {"y": np.array([[None, None], [None, None], [None, None]])}, {"z": np.array([[None, None], [None, None], [None, None]])}, {"x": np.array([None, None]), "y": np.array([None, None])}, {"x": np.array([None, None]), "z": np.array([None, None])}, {"y": np.array([None, None, None]), "z": np.array([None, None, None])}, {"x": np.array(None), "y": np.array(None), "z": np.array(None)}, id="datetime", ), ], ) class TestReduce3D(TestReduce): def test_argmin_dim( self, x: np.ndarray, minindices_x: dict[str, np.ndarray], minindices_y: dict[str, np.ndarray], minindices_z: dict[str, np.ndarray], minindices_xy: dict[str, np.ndarray], minindices_xz: dict[str, np.ndarray], minindices_yz: dict[str, np.ndarray], minindices_xyz: dict[str, np.ndarray], maxindices_x: dict[str, np.ndarray], maxindices_y: dict[str, np.ndarray], maxindices_z: dict[str, np.ndarray], maxindices_xy: dict[str, np.ndarray], maxindices_xz: dict[str, np.ndarray], maxindices_yz: dict[str, np.ndarray], maxindices_xyz: dict[str, np.ndarray], nanindices_x: dict[str, np.ndarray], nanindices_y: dict[str, np.ndarray], nanindices_z: dict[str, np.ndarray], nanindices_xy: dict[str, np.ndarray], nanindices_xz: dict[str, np.ndarray], nanindices_yz: dict[str, np.ndarray], nanindices_xyz: dict[str, np.ndarray], ) -> None: ar = xr.DataArray( x, dims=["x", "y", 
"z"], coords={ "x": np.arange(x.shape[0]) * 4, "y": 1 - np.arange(x.shape[1]), "z": 2 + 3 * np.arange(x.shape[2]), }, attrs=self.attrs, ) for inds in [ minindices_x, minindices_y, minindices_z, minindices_xy, minindices_xz, minindices_yz, minindices_xyz, ]: if np.array([np.isnan(i) for i in inds.values()]).any(): with pytest.raises(ValueError): ar.argmin(dim=list(inds)) return result0 = ar.argmin(dim=["x"]) assert isinstance(result0, dict) expected0 = { key: xr.DataArray(value, dims=("y", "z")) for key, value in minindices_x.items() } for key in expected0: assert_identical(result0[key].drop_vars(["y", "z"]), expected0[key]) result1 = ar.argmin(dim=["y"]) assert isinstance(result1, dict) expected1 = { key: xr.DataArray(value, dims=("x", "z")) for key, value in minindices_y.items() } for key in expected1: assert_identical(result1[key].drop_vars(["x", "z"]), expected1[key]) result2 = ar.argmin(dim=["z"]) assert isinstance(result2, dict) expected2 = { key: xr.DataArray(value, dims=("x", "y")) for key, value in minindices_z.items() } for key in expected2: assert_identical(result2[key].drop_vars(["x", "y"]), expected2[key]) result3 = ar.argmin(dim=("x", "y")) assert isinstance(result3, dict) expected3 = { key: xr.DataArray(value, dims=("z")) for key, value in minindices_xy.items() } for key in expected3: assert_identical(result3[key].drop_vars("z"), expected3[key]) result4 = ar.argmin(dim=("x", "z")) assert isinstance(result4, dict) expected4 = { key: xr.DataArray(value, dims=("y")) for key, value in minindices_xz.items() } for key in expected4: assert_identical(result4[key].drop_vars("y"), expected4[key]) result5 = ar.argmin(dim=("y", "z")) assert isinstance(result5, dict) expected5 = { key: xr.DataArray(value, dims=("x")) for key, value in minindices_yz.items() } for key in expected5: assert_identical(result5[key].drop_vars("x"), expected5[key]) result6 = ar.argmin(...) 
assert isinstance(result6, dict) expected6 = {key: xr.DataArray(value) for key, value in minindices_xyz.items()} for key in expected6: assert_identical(result6[key], expected6[key]) minindices_x = { key: xr.where( nanindices_x[key] == None, # noqa: E711 minindices_x[key], nanindices_x[key], ) for key in minindices_x } expected7 = { key: xr.DataArray(value, dims=("y", "z")) for key, value in minindices_x.items() } result7 = ar.argmin(dim=["x"], skipna=False) assert isinstance(result7, dict) for key in expected7: assert_identical(result7[key].drop_vars(["y", "z"]), expected7[key]) minindices_y = { key: xr.where( nanindices_y[key] == None, # noqa: E711 minindices_y[key], nanindices_y[key], ) for key in minindices_y } expected8 = { key: xr.DataArray(value, dims=("x", "z")) for key, value in minindices_y.items() } result8 = ar.argmin(dim=["y"], skipna=False) assert isinstance(result8, dict) for key in expected8: assert_identical(result8[key].drop_vars(["x", "z"]), expected8[key]) minindices_z = { key: xr.where( nanindices_z[key] == None, # noqa: E711 minindices_z[key], nanindices_z[key], ) for key in minindices_z } expected9 = { key: xr.DataArray(value, dims=("x", "y")) for key, value in minindices_z.items() } result9 = ar.argmin(dim=["z"], skipna=False) assert isinstance(result9, dict) for key in expected9: assert_identical(result9[key].drop_vars(["x", "y"]), expected9[key]) minindices_xy = { key: xr.where( nanindices_xy[key] == None, # noqa: E711 minindices_xy[key], nanindices_xy[key], ) for key in minindices_xy } expected10 = { key: xr.DataArray(value, dims="z") for key, value in minindices_xy.items() } result10 = ar.argmin(dim=("x", "y"), skipna=False) assert isinstance(result10, dict) for key in expected10: assert_identical(result10[key].drop_vars("z"), expected10[key]) minindices_xz = { key: xr.where( nanindices_xz[key] == None, # noqa: E711 minindices_xz[key], nanindices_xz[key], ) for key in minindices_xz } expected11 = { key: xr.DataArray(value, dims="y") for key, value in minindices_xz.items() } result11 = ar.argmin(dim=("x", "z"), skipna=False) assert isinstance(result11, dict) for key in expected11: assert_identical(result11[key].drop_vars("y"), expected11[key]) minindices_yz = { key: xr.where( nanindices_yz[key] == None, # noqa: E711 minindices_yz[key], nanindices_yz[key], ) for key in minindices_yz } expected12 = { key: xr.DataArray(value, dims="x") for key, value in minindices_yz.items() } result12 = ar.argmin(dim=("y", "z"), skipna=False) assert isinstance(result12, dict) for key in expected12: assert_identical(result12[key].drop_vars("x"), expected12[key]) minindices_xyz = { key: xr.where( nanindices_xyz[key] == None, # noqa: E711 minindices_xyz[key], nanindices_xyz[key], ) for key in minindices_xyz } expected13 = {key: xr.DataArray(value) for key, value in minindices_xyz.items()} result13 = ar.argmin(..., skipna=False) assert isinstance(result13, dict) for key in expected13: assert_identical(result13[key], expected13[key]) def test_argmax_dim( self, x: np.ndarray, minindices_x: dict[str, np.ndarray], minindices_y: dict[str, np.ndarray], minindices_z: dict[str, np.ndarray], minindices_xy: dict[str, np.ndarray], minindices_xz: dict[str, np.ndarray], minindices_yz: dict[str, np.ndarray], minindices_xyz: dict[str, np.ndarray], maxindices_x: dict[str, np.ndarray], maxindices_y: dict[str, np.ndarray], maxindices_z: dict[str, np.ndarray], maxindices_xy: dict[str, np.ndarray], maxindices_xz: dict[str, np.ndarray], maxindices_yz: dict[str, np.ndarray], maxindices_xyz: dict[str, 
np.ndarray], nanindices_x: dict[str, np.ndarray], nanindices_y: dict[str, np.ndarray], nanindices_z: dict[str, np.ndarray], nanindices_xy: dict[str, np.ndarray], nanindices_xz: dict[str, np.ndarray], nanindices_yz: dict[str, np.ndarray], nanindices_xyz: dict[str, np.ndarray], ) -> None: ar = xr.DataArray( x, dims=["x", "y", "z"], coords={ "x": np.arange(x.shape[0]) * 4, "y": 1 - np.arange(x.shape[1]), "z": 2 + 3 * np.arange(x.shape[2]), }, attrs=self.attrs, ) for inds in [ maxindices_x, maxindices_y, maxindices_z, maxindices_xy, maxindices_xz, maxindices_yz, maxindices_xyz, ]: if np.array([np.isnan(i) for i in inds.values()]).any(): with pytest.raises(ValueError): ar.argmax(dim=list(inds)) return result0 = ar.argmax(dim=["x"]) assert isinstance(result0, dict) expected0 = { key: xr.DataArray(value, dims=("y", "z")) for key, value in maxindices_x.items() } for key in expected0: assert_identical(result0[key].drop_vars(["y", "z"]), expected0[key]) result1 = ar.argmax(dim=["y"]) assert isinstance(result1, dict) expected1 = { key: xr.DataArray(value, dims=("x", "z")) for key, value in maxindices_y.items() } for key in expected1: assert_identical(result1[key].drop_vars(["x", "z"]), expected1[key]) result2 = ar.argmax(dim=["z"]) assert isinstance(result2, dict) expected2 = { key: xr.DataArray(value, dims=("x", "y")) for key, value in maxindices_z.items() } for key in expected2: assert_identical(result2[key].drop_vars(["x", "y"]), expected2[key]) result3 = ar.argmax(dim=("x", "y")) assert isinstance(result3, dict) expected3 = { key: xr.DataArray(value, dims=("z")) for key, value in maxindices_xy.items() } for key in expected3: assert_identical(result3[key].drop_vars("z"), expected3[key]) result4 = ar.argmax(dim=("x", "z")) assert isinstance(result4, dict) expected4 = { key: xr.DataArray(value, dims=("y")) for key, value in maxindices_xz.items() } for key in expected4: assert_identical(result4[key].drop_vars("y"), expected4[key]) result5 = ar.argmax(dim=("y", "z")) assert isinstance(result5, dict) expected5 = { key: xr.DataArray(value, dims=("x")) for key, value in maxindices_yz.items() } for key in expected5: assert_identical(result5[key].drop_vars("x"), expected5[key]) result6 = ar.argmax(...) 
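        # As with ``argmin`` above, ``...`` reduces over all dimensions and returns a dict
        # of per-dimension indices of the overall maximum.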
assert isinstance(result6, dict) expected6 = {key: xr.DataArray(value) for key, value in maxindices_xyz.items()} for key in expected6: assert_identical(result6[key], expected6[key]) maxindices_x = { key: xr.where( nanindices_x[key] == None, # noqa: E711 maxindices_x[key], nanindices_x[key], ) for key in maxindices_x } expected7 = { key: xr.DataArray(value, dims=("y", "z")) for key, value in maxindices_x.items() } result7 = ar.argmax(dim=["x"], skipna=False) assert isinstance(result7, dict) for key in expected7: assert_identical(result7[key].drop_vars(["y", "z"]), expected7[key]) maxindices_y = { key: xr.where( nanindices_y[key] == None, # noqa: E711 maxindices_y[key], nanindices_y[key], ) for key in maxindices_y } expected8 = { key: xr.DataArray(value, dims=("x", "z")) for key, value in maxindices_y.items() } result8 = ar.argmax(dim=["y"], skipna=False) assert isinstance(result8, dict) for key in expected8: assert_identical(result8[key].drop_vars(["x", "z"]), expected8[key]) maxindices_z = { key: xr.where( nanindices_z[key] == None, # noqa: E711 maxindices_z[key], nanindices_z[key], ) for key in maxindices_z } expected9 = { key: xr.DataArray(value, dims=("x", "y")) for key, value in maxindices_z.items() } result9 = ar.argmax(dim=["z"], skipna=False) assert isinstance(result9, dict) for key in expected9: assert_identical(result9[key].drop_vars(["x", "y"]), expected9[key]) maxindices_xy = { key: xr.where( nanindices_xy[key] == None, # noqa: E711 maxindices_xy[key], nanindices_xy[key], ) for key in maxindices_xy } expected10 = { key: xr.DataArray(value, dims="z") for key, value in maxindices_xy.items() } result10 = ar.argmax(dim=("x", "y"), skipna=False) assert isinstance(result10, dict) for key in expected10: assert_identical(result10[key].drop_vars("z"), expected10[key]) maxindices_xz = { key: xr.where( nanindices_xz[key] == None, # noqa: E711 maxindices_xz[key], nanindices_xz[key], ) for key in maxindices_xz } expected11 = { key: xr.DataArray(value, dims="y") for key, value in maxindices_xz.items() } result11 = ar.argmax(dim=("x", "z"), skipna=False) assert isinstance(result11, dict) for key in expected11: assert_identical(result11[key].drop_vars("y"), expected11[key]) maxindices_yz = { key: xr.where( nanindices_yz[key] == None, # noqa: E711 maxindices_yz[key], nanindices_yz[key], ) for key in maxindices_yz } expected12 = { key: xr.DataArray(value, dims="x") for key, value in maxindices_yz.items() } result12 = ar.argmax(dim=("y", "z"), skipna=False) assert isinstance(result12, dict) for key in expected12: assert_identical(result12[key].drop_vars("x"), expected12[key]) maxindices_xyz = { key: xr.where( nanindices_xyz[key] == None, # noqa: E711 maxindices_xyz[key], nanindices_xyz[key], ) for key in maxindices_xyz } expected13 = {key: xr.DataArray(value) for key, value in maxindices_xyz.items()} result13 = ar.argmax(..., skipna=False) assert isinstance(result13, dict) for key in expected13: assert_identical(result13[key], expected13[key]) class TestReduceND(TestReduce): @pytest.mark.parametrize("op", ["idxmin", "idxmax"]) @pytest.mark.parametrize("ndim", [3, 5]) def test_idxminmax_dask(self, op: str, ndim: int) -> None: if not has_dask: pytest.skip("requires dask") ar0_raw = xr.DataArray( np.random.random_sample(size=[10] * ndim), dims=list("abcdefghij"[: ndim - 1]) + ["x"], coords={"x": np.arange(10)}, attrs=self.attrs, ) ar0_dsk = ar0_raw.chunk({}) # Assert idx is the same with dask and without assert_equal(getattr(ar0_dsk, op)(dim="x"), getattr(ar0_raw, op)(dim="x")) 
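# --- Editorial illustration (not part of the upstream test suite) -------------------
# The TestReduce* classes above exercise ``DataArray.idxmin``/``idxmax``, which return
# the *coordinate label* of an extremum, in contrast to ``argmin``/``argmax``, which
# return the integer position. A minimal usage sketch follows; the helper name is
# hypothetical and it relies only on the module-level ``xr`` import.
def _example_idxmin_vs_argmin_sketch() -> None:
    da = xr.DataArray([3.0, 1.0, 2.0], dims="x", coords={"x": [10, 20, 30]})
    assert int(da.argmin(dim="x")) == 1  # positional index of the minimum value
    assert int(da.idxmin(dim="x")) == 20  # coordinate label at that position
    assert int(da.idxmax(dim="x")) == 10  # label of the maximum (3.0 occurs at x=10)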
@pytest.mark.parametrize("da", ("repeating_ints",), indirect=True) def test_isin(da) -> None: expected = DataArray( np.asarray([[0, 0, 0], [1, 0, 0]]), dims=list("yx"), coords={"x": list("abc"), "y": list("de")}, ).astype("bool") result = da.isin([3]).sel(y=list("de"), z=0) assert_equal(result, expected) expected = DataArray( np.asarray([[0, 0, 1], [1, 0, 0]]), dims=list("yx"), coords={"x": list("abc"), "y": list("de")}, ).astype("bool") result = da.isin([2, 3]).sel(y=list("de"), z=0) assert_equal(result, expected) def test_raise_no_warning_for_nan_in_binary_ops() -> None: with assert_no_warnings(): _ = xr.DataArray([1, 2, np.nan]) > 0 @pytest.mark.filterwarnings("error") def test_no_warning_for_all_nan() -> None: _ = xr.DataArray([np.nan, np.nan]).mean() def test_name_in_masking() -> None: name = "RingoStarr" da = xr.DataArray(range(10), coords=[("x", range(10))], name=name) assert da.where(da > 5).name == name assert da.where((da > 5).rename("YokoOno")).name == name assert da.where(da > 5, drop=True).name == name assert da.where((da > 5).rename("YokoOno"), drop=True).name == name class TestIrisConversion: @requires_iris def test_to_and_from_iris(self) -> None: import cf_units # iris requirement import iris # to iris coord_dict: dict[Hashable, Any] = {} coord_dict["distance"] = ("distance", [-2, 2], {"units": "meters"}) coord_dict["time"] = ("time", pd.date_range("2000-01-01", periods=3)) coord_dict["height"] = 10 coord_dict["distance2"] = ("distance", [0, 1], {"foo": "bar"}) coord_dict["time2"] = (("distance", "time"), [[0, 1, 2], [2, 3, 4]]) original = DataArray( np.arange(6, dtype="float").reshape(2, 3), coord_dict, name="Temperature", attrs={ "baz": 123, "units": "Kelvin", "standard_name": "fire_temperature", "long_name": "Fire Temperature", }, dims=("distance", "time"), ) # Set a bad value to test the masking logic original.data[0, 2] = np.nan original.attrs["cell_methods"] = "height: mean (comment: A cell method)" actual = original.to_iris() assert_array_equal(actual.data, original.data) assert actual.var_name == original.name assert tuple(d.var_name for d in actual.dim_coords) == original.dims assert actual.cell_methods == ( iris.coords.CellMethod( method="mean", coords=("height",), intervals=(), comments=("A cell method",), ), ) for coord, original_key in zip((actual.coords()), original.coords, strict=True): original_coord = original.coords[original_key] assert coord.var_name == original_coord.name assert_array_equal( coord.points, CFDatetimeCoder().encode(original_coord.variable).values ) assert actual.coord_dims(coord) == original.get_axis_num( original.coords[coord.var_name].dims ) assert ( actual.coord("distance2").attributes["foo"] == original.coords["distance2"].attrs["foo"] ) assert actual.coord("distance").units == cf_units.Unit( original.coords["distance"].units ) assert actual.attributes["baz"] == original.attrs["baz"] assert actual.standard_name == original.attrs["standard_name"] roundtripped = DataArray.from_iris(actual) assert_identical(original, roundtripped) actual.remove_coord("time") auto_time_dimension = DataArray.from_iris(actual) assert auto_time_dimension.dims == ("distance", "dim_1") @requires_iris @requires_dask def test_to_and_from_iris_dask(self) -> None: import cf_units # iris requirement import dask.array as da import iris coord_dict: dict[Hashable, Any] = {} coord_dict["distance"] = ("distance", [-2, 2], {"units": "meters"}) coord_dict["time"] = ("time", pd.date_range("2000-01-01", periods=3)) coord_dict["height"] = 10 coord_dict["distance2"] = 
("distance", [0, 1], {"foo": "bar"}) coord_dict["time2"] = (("distance", "time"), [[0, 1, 2], [2, 3, 4]]) original = DataArray( da.from_array(np.arange(-1, 5, dtype="float").reshape(2, 3), 3), coord_dict, name="Temperature", attrs=dict( baz=123, units="Kelvin", standard_name="fire_temperature", long_name="Fire Temperature", ), dims=("distance", "time"), ) # Set a bad value to test the masking logic original.data = da.ma.masked_less(original.data, 0) original.attrs["cell_methods"] = "height: mean (comment: A cell method)" actual = original.to_iris() # Be careful not to trigger the loading of the iris data actual_data = ( actual.core_data() if hasattr(actual, "core_data") else actual.data ) assert_array_equal(actual_data, original.data) assert actual.var_name == original.name assert tuple(d.var_name for d in actual.dim_coords) == original.dims assert actual.cell_methods == ( iris.coords.CellMethod( method="mean", coords=("height",), intervals=(), comments=("A cell method",), ), ) for coord, original_key in zip((actual.coords()), original.coords, strict=True): original_coord = original.coords[original_key] assert coord.var_name == original_coord.name assert_array_equal( coord.points, CFDatetimeCoder().encode(original_coord.variable).values ) assert actual.coord_dims(coord) == original.get_axis_num( original.coords[coord.var_name].dims ) assert ( actual.coord("distance2").attributes["foo"] == original.coords["distance2"].attrs["foo"] ) assert actual.coord("distance").units == cf_units.Unit( original.coords["distance"].units ) assert actual.attributes["baz"] == original.attrs["baz"] assert actual.standard_name == original.attrs["standard_name"] roundtripped = DataArray.from_iris(actual) assert_identical(original, roundtripped) # If the Iris version supports it then we should have a dask array # at each stage of the conversion if hasattr(actual, "core_data"): assert isinstance(original.data, type(actual.core_data())) assert isinstance(original.data, type(roundtripped.data)) actual.remove_coord("time") auto_time_dimension = DataArray.from_iris(actual) assert auto_time_dimension.dims == ("distance", "dim_1") @requires_iris @pytest.mark.parametrize( "var_name, std_name, long_name, name, attrs", [ ( "var_name", "height", "Height", "var_name", {"standard_name": "height", "long_name": "Height"}, ), ( None, "height", "Height", "height", {"standard_name": "height", "long_name": "Height"}, ), (None, None, "Height", "Height", {"long_name": "Height"}), (None, None, None, None, {}), ], ) def test_da_name_from_cube( self, std_name, long_name, var_name, name, attrs ) -> None: from iris.cube import Cube cube = Cube([], var_name=var_name, standard_name=std_name, long_name=long_name) result = xr.DataArray.from_iris(cube) expected = xr.DataArray([], name=name, attrs=attrs) xr.testing.assert_identical(result, expected) @requires_iris @pytest.mark.parametrize( "var_name, std_name, long_name, name, attrs", [ ( "var_name", "height", "Height", "var_name", {"standard_name": "height", "long_name": "Height"}, ), ( None, "height", "Height", "height", {"standard_name": "height", "long_name": "Height"}, ), (None, None, "Height", "Height", {"long_name": "Height"}), (None, None, None, "unknown", {}), ], ) def test_da_coord_name_from_cube( self, std_name, long_name, var_name, name, attrs ) -> None: from iris.coords import DimCoord from iris.cube import Cube latitude = DimCoord( [-90, 0, 90], standard_name=std_name, var_name=var_name, long_name=long_name ) data = [0, 0, 0] cube = Cube(data, dim_coords_and_dims=[(latitude, 0)]) 
result = xr.DataArray.from_iris(cube) expected = xr.DataArray(data, coords=[(name, [-90, 0, 90], attrs)]) xr.testing.assert_identical(result, expected) @requires_iris def test_prevent_duplicate_coord_names(self) -> None: from iris.coords import DimCoord from iris.cube import Cube # Iris enforces unique coordinate names. Because we use a different # name resolution order a valid iris Cube with coords that have the # same var_name would lead to duplicate dimension names in the # DataArray longitude = DimCoord([0, 360], standard_name="longitude", var_name="duplicate") latitude = DimCoord( [-90, 0, 90], standard_name="latitude", var_name="duplicate" ) data = [[0, 0, 0], [0, 0, 0]] cube = Cube(data, dim_coords_and_dims=[(longitude, 0), (latitude, 1)]) with pytest.raises(ValueError): xr.DataArray.from_iris(cube) @requires_iris @pytest.mark.parametrize( "coord_values", [["IA", "IL", "IN"], [0, 2, 1]], # non-numeric values # non-monotonic values ) def test_fallback_to_iris_AuxCoord(self, coord_values) -> None: from iris.coords import AuxCoord from iris.cube import Cube data = [0, 0, 0] da = xr.DataArray(data, coords=[coord_values], dims=["space"]) result = xr.DataArray.to_iris(da) expected = Cube( data, aux_coords_and_dims=[(AuxCoord(coord_values, var_name="space"), 0)] ) assert result == expected def test_no_dict() -> None: d = DataArray() with pytest.raises(AttributeError): _ = d.__dict__ def test_subclass_slots() -> None: """Test that DataArray subclasses must explicitly define ``__slots__``. .. note:: As of 0.13.0, this is actually mitigated into a FutureWarning for any class defined outside of the xarray package. """ with pytest.raises(AttributeError) as e: class MyArray(DataArray): pass assert str(e.value) == "MyArray must explicitly define __slots__" def test_weakref() -> None: """Classes with __slots__ are incompatible with the weakref module unless they explicitly state __weakref__ among their slots """ from weakref import ref a = DataArray(1) r = ref(a) assert r() is a def test_delete_coords() -> None: """Make sure that deleting a coordinate doesn't corrupt the DataArray. See issue #3899. Also test that deleting succeeds and produces the expected output. 
""" a0 = DataArray( np.array([[1, 2, 3], [4, 5, 6]]), dims=["y", "x"], coords={"x": ["a", "b", "c"], "y": [-1, 1]}, ) assert_identical(a0, a0) a1 = a0.copy() del a1.coords["y"] # This test will detect certain sorts of corruption in the DataArray assert_identical(a0, a0) assert a0.dims == ("y", "x") assert a1.dims == ("y", "x") assert set(a0.coords.keys()) == {"x", "y"} assert set(a1.coords.keys()) == {"x"} def test_deepcopy_nested_attrs() -> None: """Check attrs deep copy, see :issue:`2835`""" da1 = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"), coords={"x": [10, 20]}) da1.attrs["flat"] = "0" da1.attrs["nested"] = {"level1a": "1", "level1b": "1"} da2 = da1.copy(deep=True) da2.attrs["new"] = "2" da2.attrs.update({"new2": "2"}) da2.attrs["flat"] = "2" da2.attrs["nested"]["level1a"] = "2" da2.attrs["nested"].update({"level1b": "2"}) # Coarse test assert not da1.identical(da2) # Check attrs levels assert da1.attrs["flat"] != da2.attrs["flat"] assert da1.attrs["nested"] != da2.attrs["nested"] assert "new" not in da1.attrs assert "new2" not in da1.attrs def test_deepcopy_obj_array() -> None: x0 = DataArray(np.array([object()])) x1 = deepcopy(x0) assert x0.values[0] is not x1.values[0] def test_deepcopy_recursive() -> None: # GH:issue:7111 # direct recursion da = xr.DataArray([1, 2], dims=["x"]) da.attrs["other"] = da # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError da.copy(deep=True) # indirect recursion da2 = xr.DataArray([5, 6], dims=["y"]) da.attrs["other"] = da2 da2.attrs["other"] = da # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError da.copy(deep=True) da2.copy(deep=True) def test_clip(da: DataArray) -> None: with raise_if_dask_computes(): result = da.clip(min=0.5) assert result.min() >= 0.5 result = da.clip(max=0.5) assert result.max() <= 0.5 result = da.clip(min=0.25, max=0.75) assert result.min() >= 0.25 assert result.max() <= 0.75 with raise_if_dask_computes(): result = da.clip(min=da.mean("x"), max=da.mean("a")) assert result.dims == da.dims assert_array_equal( result.data, np.clip(da.data, da.mean("x").data[:, :, np.newaxis], da.mean("a").data), ) with_nans = da.isel(time=[0, 1]).reindex_like(da) with raise_if_dask_computes(): result = da.clip(min=da.mean("x"), max=da.mean("a")) result = da.clip(with_nans) # The values should be the same where there were NaNs. assert_array_equal(result.isel(time=[0, 1]), with_nans.isel(time=[0, 1])) # Unclear whether we want this work, OK to adjust the test when we have decided. 
with pytest.raises(ValueError, match="cannot reindex or align along dimension.*"): result = da.clip(min=da.mean("x"), max=da.mean("a").isel(x=[0, 1])) class TestDropDuplicates: @pytest.mark.parametrize("keep", ["first", "last", False]) def test_drop_duplicates_1d(self, keep) -> None: da = xr.DataArray( [0, 5, 6, 7], dims="time", coords={"time": [0, 0, 1, 2]}, name="test" ) if keep == "first": data = [0, 6, 7] time = [0, 1, 2] elif keep == "last": data = [5, 6, 7] time = [0, 1, 2] else: data = [6, 7] time = [1, 2] expected = xr.DataArray(data, dims="time", coords={"time": time}, name="test") result = da.drop_duplicates("time", keep=keep) assert_equal(expected, result) with pytest.raises( ValueError, match=re.escape( "Dimensions ('space',) not found in data dimensions ('time',)" ), ): da.drop_duplicates("space", keep=keep) def test_drop_duplicates_2d(self) -> None: da = xr.DataArray( [[0, 5, 6, 7], [2, 1, 3, 4]], dims=["space", "time"], coords={"space": [10, 10], "time": [0, 0, 1, 2]}, name="test", ) expected = xr.DataArray( [[0, 6, 7]], dims=["space", "time"], coords={"time": ("time", [0, 1, 2]), "space": ("space", [10])}, name="test", ) result = da.drop_duplicates(["time", "space"], keep="first") assert_equal(expected, result) result = da.drop_duplicates(..., keep="first") assert_equal(expected, result) class TestNumpyCoercion: # TODO once flexible indexes refactor complete also test coercion of dimension coords def test_from_numpy(self) -> None: da = xr.DataArray([1, 2, 3], dims="x", coords={"lat": ("x", [4, 5, 6])}) assert_identical(da.as_numpy(), da) np.testing.assert_equal(da.to_numpy(), np.array([1, 2, 3])) np.testing.assert_equal(da["lat"].to_numpy(), np.array([4, 5, 6])) def test_to_numpy(self) -> None: arr = np.array([1, 2, 3]) da = xr.DataArray(arr, dims="x", coords={"lat": ("x", [4, 5, 6])}) with assert_no_warnings(): np.testing.assert_equal(np.asarray(da), arr) np.testing.assert_equal(np.array(da), arr) @requires_dask def test_from_dask(self) -> None: da = xr.DataArray([1, 2, 3], dims="x", coords={"lat": ("x", [4, 5, 6])}) da_chunked = da.chunk(1) assert_identical(da_chunked.as_numpy(), da.compute()) np.testing.assert_equal(da.to_numpy(), np.array([1, 2, 3])) np.testing.assert_equal(da["lat"].to_numpy(), np.array([4, 5, 6])) @requires_pint def test_from_pint(self) -> None: from pint import Quantity arr = np.array([1, 2, 3]) da = xr.DataArray( Quantity(arr, units="Pa"), dims="x", coords={"lat": ("x", Quantity(arr + 3, units="m"))}, ) expected = xr.DataArray(arr, dims="x", coords={"lat": ("x", arr + 3)}) assert_identical(da.as_numpy(), expected) np.testing.assert_equal(da.to_numpy(), arr) np.testing.assert_equal(da["lat"].to_numpy(), arr + 3) @requires_sparse def test_from_sparse(self) -> None: import sparse arr = np.diagflat([1, 2, 3]) sparr = sparse.COO.from_numpy(arr) da = xr.DataArray( sparr, dims=["x", "y"], coords={"elev": (("x", "y"), sparr + 3)} ) expected = xr.DataArray( arr, dims=["x", "y"], coords={"elev": (("x", "y"), arr + 3)} ) assert_identical(da.as_numpy(), expected) np.testing.assert_equal(da.to_numpy(), arr) @requires_cupy def test_from_cupy(self) -> None: import cupy as cp arr = np.array([1, 2, 3]) da = xr.DataArray( cp.array(arr), dims="x", coords={"lat": ("x", cp.array(arr + 3))} ) expected = xr.DataArray(arr, dims="x", coords={"lat": ("x", arr + 3)}) assert_identical(da.as_numpy(), expected) np.testing.assert_equal(da.to_numpy(), arr) @requires_dask @requires_pint def test_from_pint_wrapping_dask(self) -> None: import dask from pint import Quantity arr = 
np.array([1, 2, 3]) d = dask.array.from_array(arr) da = xr.DataArray( Quantity(d, units="Pa"), dims="x", coords={"lat": ("x", Quantity(d, units="m") * 2)}, ) result = da.as_numpy() result.name = None # remove dask-assigned name expected = xr.DataArray(arr, dims="x", coords={"lat": ("x", arr * 2)}) assert_identical(result, expected) np.testing.assert_equal(da.to_numpy(), arr) class TestStackEllipsis: # https://github.com/pydata/xarray/issues/6051 def test_result_as_expected(self) -> None: da = DataArray([[1, 2], [1, 2]], dims=("x", "y")) result = da.stack(flat=[...]) expected = da.stack(flat=da.dims) assert_identical(result, expected) def test_error_on_ellipsis_without_list(self) -> None: da = DataArray([[1, 2], [1, 2]], dims=("x", "y")) with pytest.raises(ValueError): da.stack(flat=...) # type: ignore[arg-type] def test_nD_coord_dataarray() -> None: # should succeed da = DataArray( np.ones((2, 4)), dims=("x", "y"), coords={ "x": (("x", "y"), np.arange(8).reshape((2, 4))), "y": ("y", np.arange(4)), }, ) _assert_internal_invariants(da, check_default_indexes=True) da2 = DataArray(np.ones(4), dims=("y"), coords={"y": ("y", np.arange(4))}) da3 = DataArray(np.ones(4), dims=("z")) _, actual = xr.align(da, da2) assert_identical(da2, actual) expected = da.drop_vars("x") _, actual = xr.broadcast(da, da2) assert_identical(expected, actual) actual, _ = xr.broadcast(da, da3) expected = da.expand_dims(z=4, axis=-1) assert_identical(actual, expected) da4 = DataArray(np.ones((2, 4)), coords={"x": 0}, dims=["x", "y"]) _assert_internal_invariants(da4, check_default_indexes=True) assert "x" not in da4.xindexes assert "x" in da4.coords def test_lazy_data_variable_not_loaded(): # GH8753 array = InaccessibleArray(np.array([1, 2, 3])) v = Variable(data=array, dims="x") # No data needs to be accessed, so no error should be raised da = xr.DataArray(v) # No data needs to be accessed, so no error should be raised xr.DataArray(da) def test_unstack_index_var() -> None: source = xr.DataArray(range(2), dims=["x"], coords=[["a", "b"]]) da = source.x da = da.assign_coords(y=("x", ["c", "d"]), z=("x", ["e", "f"])) da = da.set_index(x=["y", "z"]) actual = da.unstack("x") expected = xr.DataArray( np.array([["a", np.nan], [np.nan, "b"]], dtype=object), coords={"y": ["c", "d"], "z": ["e", "f"]}, name="x", ) assert_identical(actual, expected) xarray-2025.09.0/xarray/tests/test_dataarray_typing.yml000066400000000000000000000200121505620616400231330ustar00rootroot00000000000000- case: test_mypy_pipe_lambda_noarg_return_type main: | from xarray import DataArray da = DataArray().pipe(lambda data: data) reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" - case: test_mypy_pipe_lambda_posarg_return_type main: | from xarray import DataArray da = DataArray().pipe(lambda data, arg: arg, "foo") reveal_type(da) # N: Revealed type is "builtins.str" - case: test_mypy_pipe_lambda_chaining_return_type main: | from xarray import DataArray answer = DataArray().pipe(lambda data, arg: arg, "foo").count("o") reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_lambda_missing_arg main: | from xarray import DataArray # Call to pipe missing argument for lambda parameter `arg` da = DataArray().pipe(lambda data, arg: data) out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], Any]" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: 
P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_lambda_extra_arg main: | from xarray import DataArray # Call to pipe with extra argument for lambda da = DataArray().pipe(lambda data: data, "oops!") out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any], Any]", "str" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_posarg main: | from xarray import DataArray def f(da: DataArray, arg: int) -> DataArray: return da # Call to pipe missing argument for function parameter `arg` da = DataArray().pipe(f) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[DataArray, int], DataArray]" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_extra_posarg main: | from xarray import DataArray def f(da: DataArray, arg: int) -> DataArray: return da # Call to pipe missing keyword for kwonly parameter `kwonly` da = DataArray().pipe(f, 42, "oops!") out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[DataArray, int], DataArray]", "int", "str" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_kwarg main: | from xarray import DataArray def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray: return da # Call to pipe missing argument for kwonly parameter `kwonly` da = DataArray().pipe(f, 42) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[DataArray, int, NamedArg(int, 'kwonly')], DataArray]", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_keyword main: | from xarray import DataArray def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray: return da # Call to pipe missing keyword for kwonly parameter `kwonly` da = DataArray().pipe(f, 42, 99) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[DataArray, int, NamedArg(int, 'kwonly')], DataArray]", "int", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_unexpected_keyword main: | from xarray import DataArray def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray: return da # Call to pipe using wrong 
keyword: `kw` instead of `kwonly` da = DataArray().pipe(f, 42, kw=99) out: | main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords" [call-arg] - case: test_mypy_pipe_tuple_return_type_dataarray main: | from xarray import DataArray def f(arg: int, da: DataArray) -> DataArray: return da da = DataArray().pipe((f, "da"), 42) reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" - case: test_mypy_pipe_tuple_return_type_other main: | from xarray import DataArray def f(arg: int, da: DataArray) -> int: return arg answer = DataArray().pipe((f, "da"), 42) reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_tuple_missing_arg main: | from xarray import DataArray def f(arg: int, da: DataArray) -> DataArray: return da # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are missing an argument for parameter `arg`, so we get no error here. da = DataArray().pipe((f, "da")) reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we failed to pass an argument for `arg`. da = DataArray().pipe(lambda data, arg: f(arg, data)) out: | main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], DataArray]" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_tuple_extra_arg main: | from xarray import DataArray def f(arg: int, da: DataArray) -> DataArray: return da # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are providing too many args for `f`, so we get no error here. da = DataArray().pipe((f, "da"), 42, "foo") reveal_type(da) # N: Revealed type is "xarray.core.dataarray.DataArray" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we passed too many arguments. 
da = DataArray().pipe(lambda data, arg: f(arg, data), 42, "foo") out: | main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any, Any], DataArray]", "int", "str" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T xarray-2025.09.0/xarray/tests/test_dataset.py000066400000000000000000011056661505620616400210710ustar00rootroot00000000000000from __future__ import annotations import pickle import re import sys import warnings from collections.abc import Hashable from copy import copy, deepcopy from io import StringIO from textwrap import dedent from typing import Any, Literal, cast import numpy as np import pandas as pd import pytest from packaging.version import Version from pandas.core.indexes.datetimes import DatetimeIndex # remove once numpy 2.0 is the oldest supported version try: from numpy.exceptions import RankWarning except ImportError: from numpy import RankWarning # type: ignore[no-redef,attr-defined,unused-ignore] import contextlib from pandas.errors import UndefinedVariableError import xarray as xr from xarray import ( AlignmentError, DataArray, Dataset, IndexVariable, MergeError, Variable, align, backends, broadcast, open_dataset, set_options, ) from xarray.coding.cftimeindex import CFTimeIndex from xarray.core import dtypes, indexing, utils from xarray.core.common import duck_array_ops, full_like from xarray.core.coordinates import Coordinates, DatasetCoordinates from xarray.core.indexes import Index, PandasIndex from xarray.core.types import ArrayLike from xarray.core.utils import is_scalar from xarray.groupers import SeasonResampler, TimeResampler from xarray.namedarray.pycompat import array_type, integer_types from xarray.testing import _assert_internal_invariants from xarray.tests import ( DuckArrayWrapper, InaccessibleArray, UnexpectedDataAccess, assert_allclose, assert_array_equal, assert_equal, assert_identical, assert_no_warnings, assert_writeable, create_test_data, has_cftime, has_dask, has_pyarrow, raise_if_dask_computes, requires_bottleneck, requires_cftime, requires_cupy, requires_dask, requires_numexpr, requires_pint, requires_scipy, requires_sparse, source_ndarray, ) from xarray.tests.indexes import ScalarIndex, XYIndex with contextlib.suppress(ImportError): import dask.array as da # from numpy version 2.0 trapz is deprecated and renamed to trapezoid # remove once numpy 2.0 is the oldest supported version try: from numpy import trapezoid # type: ignore[attr-defined,unused-ignore] except ImportError: from numpy import ( # type: ignore[arg-type,no-redef,attr-defined,unused-ignore] trapz as trapezoid, ) sparse_array_type = array_type("sparse") pytestmark = [ pytest.mark.filterwarnings("error:Mean of empty slice"), pytest.mark.filterwarnings("error:All-NaN (slice|axis) encountered"), ] def create_append_test_data(seed=None) -> tuple[Dataset, Dataset, Dataset]: rs = np.random.default_rng(seed) lat = [2, 1, 0] lon = [0, 1, 2] nt1 = 3 nt2 = 2 time1 = pd.date_range("2000-01-01", periods=nt1).as_unit("ns") time2 = pd.date_range("2000-02-01", periods=nt2).as_unit("ns") string_var = np.array(["a", "bc", "def"], dtype=object) string_var_to_append = np.array(["asdf", "asdfg"], dtype=object) string_var_fixed_length = np.array(["aa", "bb", "cc"], dtype="|S2") string_var_fixed_length_to_append = np.array(["dd", 
"ee"], dtype="|S2") unicode_var = np.array(["Ñó", "Ñó", "Ñó"]) datetime_var = np.array( ["2019-01-01", "2019-01-02", "2019-01-03"], dtype="datetime64[ns]" ) datetime_var_to_append = np.array( ["2019-01-04", "2019-01-05"], dtype="datetime64[ns]" ) bool_var = np.array([True, False, True], dtype=bool) bool_var_to_append = np.array([False, True], dtype=bool) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Converting non-default") ds = xr.Dataset( data_vars={ "da": xr.DataArray( rs.random((3, 3, nt1)), coords=[lat, lon, time1], dims=["lat", "lon", "time"], ), "string_var": ("time", string_var), "string_var_fixed_length": ("time", string_var_fixed_length), "unicode_var": ("time", unicode_var), "datetime_var": ("time", datetime_var), "bool_var": ("time", bool_var), } ) ds_to_append = xr.Dataset( data_vars={ "da": xr.DataArray( rs.random((3, 3, nt2)), coords=[lat, lon, time2], dims=["lat", "lon", "time"], ), "string_var": ("time", string_var_to_append), "string_var_fixed_length": ("time", string_var_fixed_length_to_append), "unicode_var": ("time", unicode_var[:nt2]), "datetime_var": ("time", datetime_var_to_append), "bool_var": ("time", bool_var_to_append), } ) ds_with_new_var = xr.Dataset( data_vars={ "new_var": xr.DataArray( rs.random((3, 3, nt1 + nt2)), coords=[lat, lon, time1.append(time2)], dims=["lat", "lon", "time"], ) } ) assert_writeable(ds) assert_writeable(ds_to_append) assert_writeable(ds_with_new_var) return ds, ds_to_append, ds_with_new_var def create_append_string_length_mismatch_test_data(dtype) -> tuple[Dataset, Dataset]: def make_datasets(data, data_to_append) -> tuple[Dataset, Dataset]: ds = xr.Dataset( {"temperature": (["time"], data)}, coords={"time": [0, 1, 2]}, ) ds_to_append = xr.Dataset( {"temperature": (["time"], data_to_append)}, coords={"time": [0, 1, 2]} ) assert_writeable(ds) assert_writeable(ds_to_append) return ds, ds_to_append u2_strings = ["ab", "cd", "ef"] u5_strings = ["abc", "def", "ghijk"] s2_strings = np.array(["aa", "bb", "cc"], dtype="|S2") s3_strings = np.array(["aaa", "bbb", "ccc"], dtype="|S3") if dtype == "U": return make_datasets(u2_strings, u5_strings) elif dtype == "S": return make_datasets(s2_strings, s3_strings) else: raise ValueError(f"unsupported dtype {dtype}.") def create_test_multiindex() -> Dataset: mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) return Dataset({}, Coordinates.from_pandas_multiindex(mindex, "x")) def create_test_stacked_array() -> tuple[DataArray, DataArray]: x = DataArray(pd.Index(np.r_[:10], name="x")) y = DataArray(pd.Index(np.r_[:20], name="y")) a = x * y b = x * y * y return a, b class InaccessibleVariableDataStore(backends.InMemoryDataStore): """ Store that does not allow any data access. 
""" def __init__(self): super().__init__() self._indexvars = set() def store(self, variables, *args, **kwargs) -> None: super().store(variables, *args, **kwargs) for k, v in variables.items(): if isinstance(v, IndexVariable): self._indexvars.add(k) def get_variables(self): def lazy_inaccessible(k, v): if k in self._indexvars: return v data = indexing.LazilyIndexedArray(InaccessibleArray(v.values)) return Variable(v.dims, data, v.attrs) return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()} class DuckBackendArrayWrapper(backends.common.BackendArray): """Mimic a BackendArray wrapper around DuckArrayWrapper""" def __init__(self, array): self.array = DuckArrayWrapper(array) self.shape = array.shape self.dtype = array.dtype def get_array(self): return self.array def __getitem__(self, key): return self.array[key.tuple] class AccessibleAsDuckArrayDataStore(backends.InMemoryDataStore): """ Store that returns a duck array, not convertible to numpy array, on read. Modeled after nVIDIA's kvikio. """ def __init__(self): super().__init__() self._indexvars = set() def store(self, variables, *args, **kwargs) -> None: super().store(variables, *args, **kwargs) for k, v in variables.items(): if isinstance(v, IndexVariable): self._indexvars.add(k) def get_variables(self) -> dict[Any, xr.Variable]: def lazy_accessible(k, v) -> xr.Variable: if k in self._indexvars: return v data = indexing.LazilyIndexedArray(DuckBackendArrayWrapper(v.values)) return Variable(v.dims, data, v.attrs) return {k: lazy_accessible(k, v) for k, v in self._variables.items()} class TestDataset: def test_repr(self) -> None: data = create_test_data(seed=123, use_extension_array=True) data.attrs["foo"] = "bar" # need to insert str dtype at runtime to handle different endianness var5 = ( "\n var5 (dim1) int64[pyarrow] 64B 5 9 7 2 6 2 8 1" if has_pyarrow else "" ) expected = dedent( f"""\ Size: 2kB Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8) Coordinates: * dim2 (dim2) float64 72B 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 * dim3 (dim3) {data["dim3"].dtype} 40B 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' * time (time) datetime64[ns] 160B 2000-01-01 2000-01-02 ... 2000-01-20 numbers (dim3) int64 80B 0 1 2 0 0 1 1 2 2 3 Dimensions without coordinates: dim1 Data variables: var1 (dim1, dim2) float64 576B -0.9891 -0.3678 1.288 ... -0.2116 0.364 var2 (dim1, dim2) float64 576B 0.953 1.52 1.704 ... 0.1347 -0.6423 var3 (dim3, dim1) float64 640B 0.4107 0.9941 0.1665 ... 0.716 1.555 var4 (dim1) category 3{6 if Version(pd.__version__) >= Version("3.0.0dev0") else 2}B b c b a c a c a{var5} Attributes: foo: bar""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) assert expected == actual with set_options(display_width=100): max_len = max(map(len, repr(data).split("\n"))) assert 90 < max_len < 100 expected = dedent( """\ Size: 0B Dimensions: () Data variables: *empty*""" ) actual = "\n".join(x.rstrip() for x in repr(Dataset()).split("\n")) print(actual) assert expected == actual # verify that ... 
doesn't appear for scalar coordinates data = Dataset({"foo": ("x", np.ones(10))}).mean() expected = dedent( """\ Size: 8B Dimensions: () Data variables: foo float64 8B 1.0""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) assert expected == actual # verify long attributes are truncated data = Dataset(attrs={"foo": "bar" * 1000}) assert len(repr(data)) < 1000 def test_repr_multiindex(self) -> None: data = create_test_multiindex() obj_size = np.dtype("O").itemsize expected = dedent( f"""\ Size: {8 * obj_size + 32}B Dimensions: (x: 4) Coordinates: * x (x) object {4 * obj_size}B MultiIndex * level_1 (x) object {4 * obj_size}B 'a' 'a' 'b' 'b' * level_2 (x) int64 32B 1 2 1 2 Data variables: *empty*""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) assert expected == actual # verify that long level names are not truncated midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("a_quite_long_level_name", "level_2") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") data = Dataset({}, midx_coords) expected = dedent( f"""\ Size: {8 * obj_size + 32}B Dimensions: (x: 4) Coordinates: * x (x) object {4 * obj_size}B MultiIndex * a_quite_long_level_name (x) object {4 * obj_size}B 'a' 'a' 'b' 'b' * level_2 (x) int64 32B 1 2 1 2 Data variables: *empty*""" ) actual = "\n".join(x.rstrip() for x in repr(data).split("\n")) print(actual) assert expected == actual def test_repr_period_index(self) -> None: data = create_test_data(seed=456) data.coords["time"] = pd.period_range("2000-01-01", periods=20, freq="D") # check that creating the repr doesn't raise an error #GH645 repr(data) def test_unicode_data(self) -> None: # regression test for GH834 data = Dataset({"foΓΈ": ["baΒ"]}, attrs={"Γ₯": "βˆ‘"}) repr(data) # should not raise byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( f"""\ Size: 12B Dimensions: (foΓΈ: 1) Coordinates: * foΓΈ (foΓΈ) {byteorder}U3 12B {"baΒ"!r} Data variables: *empty* Attributes: Γ₯: βˆ‘""" ) actual = str(data) assert expected == actual def test_repr_nep18(self) -> None: class Array: def __init__(self): self.shape = (2,) self.ndim = 1 self.dtype = np.dtype(np.float64) def __array_function__(self, *args, **kwargs): return NotImplemented def __array_ufunc__(self, *args, **kwargs): return NotImplemented def __repr__(self): return "Custom\nArray" dataset = Dataset({"foo": ("x", Array())}) expected = dedent( """\ Size: 16B Dimensions: (x: 2) Dimensions without coordinates: x Data variables: foo (x) float64 16B Custom Array""" ) assert expected == repr(dataset) def test_info(self) -> None: ds = create_test_data(seed=123) ds = ds.drop_vars("dim3") # string type prints differently in PY2 vs PY3 ds.attrs["unicode_attr"] = "baΒ" ds.attrs["string_attr"] = "bar" buf = StringIO() ds.info(buf=buf) expected = dedent( """\ xarray.Dataset { dimensions: \tdim2 = 9 ; \ttime = 20 ; \tdim1 = 8 ; \tdim3 = 10 ; variables: \tfloat64 dim2(dim2) ; \tdatetime64[ns] time(time) ; \tfloat64 var1(dim1, dim2) ; \t\tvar1:foo = variable ; \tfloat64 var2(dim1, dim2) ; \t\tvar2:foo = variable ; \tfloat64 var3(dim3, dim1) ; \t\tvar3:foo = variable ; \tint64 numbers(dim3) ; // global attributes: \t:unicode_attr = baΒ ; \t:string_attr = bar ; }""" ) actual = buf.getvalue() assert expected == actual buf.close() def test_constructor(self) -> None: x1 = ("x", 2 * np.arange(100)) x2 = ("x", np.arange(1000)) z = (["x", "y"], np.arange(1000).reshape(100, 10)) with pytest.raises(ValueError, match=r"conflicting sizes"): 
Dataset({"a": x1, "b": x2}) with pytest.raises(TypeError, match=r"tuple of form"): Dataset({"x": (1, 2, 3, 4, 5, 6, 7)}) with pytest.raises(ValueError, match=r"already exists as a scalar"): Dataset({"x": 0, "y": ("x", [1, 2, 3])}) # nD coordinate variable "x" sharing name with dimension actual = Dataset({"a": x1, "x": z}) assert "x" not in actual.xindexes _assert_internal_invariants(actual, check_default_indexes=True) # verify handling of DataArrays expected = Dataset({"x": x1, "z": z}) actual = Dataset({"z": expected["z"]}) assert_identical(expected, actual) def test_constructor_1d(self) -> None: expected = Dataset({"x": (["x"], 5.0 + np.arange(5))}) actual = Dataset({"x": 5.0 + np.arange(5)}) assert_identical(expected, actual) actual = Dataset({"x": [5, 6, 7, 8, 9]}) assert_identical(expected, actual) def test_constructor_0d(self) -> None: expected = Dataset({"x": ([], 1)}) for arg in [1, np.array(1), expected["x"]]: actual = Dataset({"x": arg}) assert_identical(expected, actual) class Arbitrary: pass d = pd.Timestamp("2000-01-01T12") args = [ True, None, 3.4, np.nan, "hello", b"raw", np.datetime64("2000-01-01"), d, d.to_pydatetime(), Arbitrary(), ] for arg in args: print(arg) expected = Dataset({"x": ([], arg)}) actual = Dataset({"x": arg}) assert_identical(expected, actual) def test_constructor_auto_align(self) -> None: a = DataArray([1, 2], [("x", [0, 1])]) b = DataArray([3, 4], [("x", [1, 2])]) # verify align uses outer join expected = Dataset( {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]} ) actual = Dataset({"a": a, "b": b}) assert_identical(expected, actual) # regression test for GH346 assert isinstance(actual.variables["x"], IndexVariable) # variable with different dimensions c = ("y", [3, 4]) expected2 = expected.merge({"c": c}) actual = Dataset({"a": a, "b": b, "c": c}) assert_identical(expected2, actual) # variable that is only aligned against the aligned variables d = ("x", [3, 2, 1]) expected3 = expected.merge({"d": d}) actual = Dataset({"a": a, "b": b, "d": d}) assert_identical(expected3, actual) e = ("x", [0, 0]) with pytest.raises(ValueError, match=r"conflicting sizes"): Dataset({"a": a, "b": b, "e": e}) def test_constructor_pandas_sequence(self) -> None: ds = self.make_example_math_dataset() pandas_objs = { var_name: ds[var_name].to_pandas() for var_name in ["foo", "bar"] } ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs) del ds_based_on_pandas["x"] assert_equal(ds, ds_based_on_pandas) # reindex pandas obj, check align works rearranged_index = reversed(pandas_objs["foo"].index) pandas_objs["foo"] = pandas_objs["foo"].reindex(rearranged_index) ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs) del ds_based_on_pandas["x"] assert_equal(ds, ds_based_on_pandas) def test_constructor_pandas_single(self) -> None: das = [ DataArray(np.random.rand(4), dims=["a"]), # series DataArray(np.random.rand(4, 3), dims=["a", "b"]), # df ] for a in das: pandas_obj = a.to_pandas() ds_based_on_pandas = Dataset(pandas_obj) # type: ignore[arg-type] # TODO: improve typing of __init__ for dim in ds_based_on_pandas.data_vars: assert isinstance(dim, int) assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim]) def test_constructor_compat(self) -> None: data = {"x": DataArray(0, coords={"y": 1}), "y": ("z", [1, 1, 1])} expected = Dataset({"x": 0}, {"y": ("z", [1, 1, 1])}) actual = Dataset(data) assert_identical(expected, actual) data = {"y": ("z", [1, 1, 1]), "x": DataArray(0, coords={"y": 1})} actual = Dataset(data) 
assert_identical(expected, actual) original = Dataset( {"a": (("x", "y"), np.ones((2, 3)))}, {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]}, ) expected = Dataset( {"a": ("x", np.ones(2)), "b": ("y", np.ones(3))}, {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]}, ) actual = Dataset( {"a": original["a"][:, 0], "b": original["a"][0].drop_vars("x")} ) assert_identical(expected, actual) data = {"x": DataArray(0, coords={"y": 3}), "y": ("z", [1, 1, 1])} with pytest.raises(MergeError): Dataset(data) data = {"x": DataArray(0, coords={"y": 1}), "y": [1, 1]} actual = Dataset(data) expected = Dataset({"x": 0}, {"y": [1, 1]}) assert_identical(expected, actual) def test_constructor_with_coords(self) -> None: with pytest.raises(ValueError, match=r"found in both data_vars and"): Dataset({"a": ("x", [1])}, {"a": ("x", [1])}) ds = Dataset({}, {"a": ("x", [1])}) assert not ds.data_vars assert list(ds.coords.keys()) == ["a"] mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) with pytest.raises(ValueError, match=r"conflicting MultiIndex"): with pytest.warns( FutureWarning, match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): Dataset({}, {"x": mindex, "y": mindex}) Dataset({}, {"x": mindex, "level_1": range(4)}) def test_constructor_no_default_index(self) -> None: # explicitly passing a Coordinates object skips the creation of default index ds = Dataset(coords=Coordinates({"x": [1, 2, 3]}, indexes={})) assert "x" in ds assert "x" not in ds.xindexes def test_constructor_multiindex(self) -> None: midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords=coords) assert_identical(ds, coords.to_dataset()) with pytest.warns( FutureWarning, match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): Dataset(data_vars={"x": midx}) with pytest.warns( FutureWarning, match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): Dataset(coords={"x": midx}) def test_constructor_custom_index(self) -> None: class CustomIndex(Index): ... 
coords = Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()} ) ds = Dataset(coords=coords) assert isinstance(ds.xindexes["x"], CustomIndex) # test coordinate variables copied assert ds.variables["x"] is not coords.variables["x"] @pytest.mark.filterwarnings("ignore:return type") def test_properties(self) -> None: ds = create_test_data() # dims / sizes # These exact types aren't public API, but this makes sure we don't # change them inadvertently: assert isinstance(ds.dims, utils.Frozen) # TODO change after deprecation cycle in GH #8500 is complete assert isinstance(ds.dims.mapping, dict) assert type(ds.dims.mapping) is dict with pytest.warns( FutureWarning, match=" To access a mapping from dimension names to lengths, please use `Dataset.sizes`", ): assert ds.dims == ds.sizes assert ds.sizes == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20} # dtypes assert isinstance(ds.dtypes, utils.Frozen) assert isinstance(ds.dtypes.mapping, dict) assert ds.dtypes == { "var1": np.dtype("float64"), "var2": np.dtype("float64"), "var3": np.dtype("float64"), } # data_vars assert list(ds) == list(ds.data_vars) assert list(ds.keys()) == list(ds.data_vars) assert "aasldfjalskdfj" not in ds.variables assert "dim1" in repr(ds.variables) assert len(ds) == 3 assert bool(ds) assert list(ds.data_vars) == ["var1", "var2", "var3"] assert list(ds.data_vars.keys()) == ["var1", "var2", "var3"] assert "var1" in ds.data_vars assert "dim1" not in ds.data_vars assert "numbers" not in ds.data_vars assert len(ds.data_vars) == 3 # xindexes assert set(ds.xindexes) == {"dim2", "dim3", "time"} assert len(ds.xindexes) == 3 assert "dim2" in repr(ds.xindexes) assert all(isinstance(idx, Index) for idx in ds.xindexes.values()) # indexes assert set(ds.indexes) == {"dim2", "dim3", "time"} assert len(ds.indexes) == 3 assert "dim2" in repr(ds.indexes) assert all(isinstance(idx, pd.Index) for idx in ds.indexes.values()) # coords assert list(ds.coords) == ["dim2", "dim3", "time", "numbers"] assert "dim2" in ds.coords assert "numbers" in ds.coords assert "var1" not in ds.coords assert "dim1" not in ds.coords assert len(ds.coords) == 4 # nbytes assert ( Dataset({"x": np.int64(1), "y": np.array([1, 2], dtype=np.float32)}).nbytes == 16 ) def test_warn_ds_dims_deprecation(self) -> None: # TODO remove after deprecation cycle in GH #8500 is complete ds = create_test_data() with pytest.warns(FutureWarning, match="return type"): ds.dims["dim1"] with pytest.warns(FutureWarning, match="return type"): ds.dims.keys() with pytest.warns(FutureWarning, match="return type"): ds.dims.values() with pytest.warns(FutureWarning, match="return type"): ds.dims.items() with assert_no_warnings(): len(ds.dims) ds.dims.__iter__() _ = "dim1" in ds.dims def test_asarray(self) -> None: ds = Dataset({"x": 0}) with pytest.raises(TypeError, match=r"cannot directly convert"): np.asarray(ds) def test_get_index(self) -> None: ds = Dataset({"foo": (("x", "y"), np.zeros((2, 3)))}, coords={"x": ["a", "b"]}) assert ds.get_index("x").equals(pd.Index(["a", "b"])) assert ds.get_index("y").equals(pd.Index([0, 1, 2])) with pytest.raises(KeyError): ds.get_index("z") def test_attr_access(self) -> None: ds = Dataset( {"tmin": ("x", [42], {"units": "Celsius"})}, attrs={"title": "My test data"} ) assert_identical(ds.tmin, ds["tmin"]) assert_identical(ds.tmin.x, ds.x) assert ds.title == ds.attrs["title"] assert ds.tmin.units == ds["tmin"].attrs["units"] assert {"tmin", "title"} <= set(dir(ds)) assert "units" in set(dir(ds.tmin)) # should defer to variable of same name 
ds.attrs["tmin"] = -999 assert ds.attrs["tmin"] == -999 assert_identical(ds.tmin, ds["tmin"]) def test_variable(self) -> None: a = Dataset() d = np.random.random((10, 3)) a["foo"] = (("time", "x"), d) assert "foo" in a.variables assert "foo" in a a["bar"] = (("time", "x"), d) # order of creation is preserved assert list(a.variables) == ["foo", "bar"] assert_array_equal(a["foo"].values, d) # try to add variable with dim (10,3) with data that's (3,10) with pytest.raises(ValueError): a["qux"] = (("time", "x"), d.T) def test_modify_inplace(self) -> None: a = Dataset() vec = np.random.random((10,)) attributes = {"foo": "bar"} a["x"] = ("x", vec, attributes) assert "x" in a.coords assert isinstance(a.coords["x"].to_index(), pd.Index) assert_identical(a.coords["x"].variable, a.variables["x"]) b = Dataset() b["x"] = ("x", vec, attributes) assert_identical(a["x"], b["x"]) assert a.sizes == b.sizes # this should work a["x"] = ("x", vec[:5]) a["z"] = ("x", np.arange(5)) with pytest.raises(ValueError): # now it shouldn't, since there is a conflicting length a["x"] = ("x", vec[:4]) arr = np.random.random((10, 1)) scal = np.array(0) with pytest.raises(ValueError): a["y"] = ("y", arr) with pytest.raises(ValueError): a["y"] = ("y", scal) assert "y" not in a.dims def test_coords_properties(self) -> None: # use int64 for repr consistency on windows data = Dataset( { "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "foo": (["x", "y"], np.random.randn(2, 3)), }, {"a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10)}, ) coords = data.coords assert isinstance(coords, DatasetCoordinates) # len assert len(coords) == 4 # iter assert list(coords) == ["x", "y", "a", "b"] assert_identical(coords["x"].variable, data["x"].variable) assert_identical(coords["y"].variable, data["y"].variable) assert "x" in coords assert "a" in coords assert 0 not in coords assert "foo" not in coords with pytest.raises(KeyError): coords["foo"] with pytest.raises(KeyError): coords[0] # repr expected = dedent( """\ Coordinates: * x (x) int64 16B -1 -2 * y (y) int64 24B 0 1 2 a (x) int64 16B 4 5 b int64 8B -10""" ) actual = repr(coords) assert expected == actual # dims assert coords.sizes == {"x": 2, "y": 3} # dtypes assert coords.dtypes == { "x": np.dtype("int64"), "y": np.dtype("int64"), "a": np.dtype("int64"), "b": np.dtype("int64"), } def test_coords_modify(self) -> None: data = Dataset( { "x": ("x", [-1, -2]), "y": ("y", [0, 1, 2]), "foo": (["x", "y"], np.random.randn(2, 3)), }, {"a": ("x", [4, 5]), "b": -10}, ) actual = data.copy(deep=True) actual.coords["x"] = ("x", ["a", "b"]) assert_array_equal(actual["x"], ["a", "b"]) actual = data.copy(deep=True) actual.coords["z"] = ("z", ["a", "b"]) assert_array_equal(actual["z"], ["a", "b"]) actual = data.copy(deep=True) with pytest.raises(ValueError, match=r"conflicting dimension sizes"): actual.coords["x"] = ("x", [-1]) assert_identical(actual, data) # should not be modified actual = data.copy() del actual.coords["b"] expected = data.reset_coords("b", drop=True) assert_identical(expected, actual) with pytest.raises(KeyError): del data.coords["not_found"] with pytest.raises(KeyError): del data.coords["foo"] actual = data.copy(deep=True) actual.coords.update({"c": 11}) expected = data.merge({"c": 11}).set_coords("c") assert_identical(expected, actual) # regression test for GH3746 del actual.coords["x"] assert "x" not in actual.xindexes def test_update_index(self) -> None: actual = Dataset(coords={"x": [1, 2, 3]}) actual["x"] = ["a", "b", "c"] assert 
actual.xindexes["x"].to_pandas_index().equals(pd.Index(["a", "b", "c"])) def test_coords_setitem_with_new_dimension(self) -> None: actual = Dataset() actual.coords["foo"] = ("x", [1, 2, 3]) expected = Dataset(coords={"foo": ("x", [1, 2, 3])}) assert_identical(expected, actual) def test_coords_setitem_multiindex(self) -> None: data = create_test_multiindex() with pytest.raises(ValueError, match=r"cannot drop or update.*corrupt.*index "): data.coords["level_1"] = range(4) def test_coords_set(self) -> None: one_coord = Dataset({"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])}) two_coords = Dataset({"zzz": ("x", [2])}, {"x": ("x", [0]), "yy": ("x", [1])}) all_coords = Dataset( coords={"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])} ) actual = one_coord.set_coords("x") assert_identical(one_coord, actual) actual = one_coord.set_coords(["x"]) assert_identical(one_coord, actual) actual = one_coord.set_coords("yy") assert_identical(two_coords, actual) actual = one_coord.set_coords(["yy", "zzz"]) assert_identical(all_coords, actual) actual = one_coord.reset_coords() assert_identical(one_coord, actual) actual = two_coords.reset_coords() assert_identical(one_coord, actual) actual = all_coords.reset_coords() assert_identical(one_coord, actual) actual = all_coords.reset_coords(["yy", "zzz"]) assert_identical(one_coord, actual) actual = all_coords.reset_coords("zzz") assert_identical(two_coords, actual) with pytest.raises(ValueError, match=r"cannot remove index"): one_coord.reset_coords("x") actual = all_coords.reset_coords("zzz", drop=True) expected = all_coords.drop_vars("zzz") assert_identical(expected, actual) expected = two_coords.drop_vars("zzz") assert_identical(expected, actual) def test_coords_to_dataset(self) -> None: orig = Dataset({"foo": ("y", [-1, 0, 1])}, {"x": 10, "y": [2, 3, 4]}) expected = Dataset(coords={"x": 10, "y": [2, 3, 4]}) actual = orig.coords.to_dataset() assert_identical(expected, actual) def test_coords_merge(self) -> None: orig_coords = Dataset(coords={"a": ("x", [1, 2]), "x": [0, 1]}).coords other_coords = Dataset(coords={"b": ("x", ["a", "b"]), "x": [0, 1]}).coords expected = Dataset( coords={"a": ("x", [1, 2]), "b": ("x", ["a", "b"]), "x": [0, 1]} ) actual = orig_coords.merge(other_coords) assert_identical(expected, actual) actual = other_coords.merge(orig_coords) assert_identical(expected, actual) other_coords = Dataset(coords={"x": ("x", ["a"])}).coords with pytest.raises(MergeError): orig_coords.merge(other_coords) other_coords = Dataset(coords={"x": ("x", ["a", "b"])}).coords with pytest.raises(MergeError): orig_coords.merge(other_coords) other_coords = Dataset(coords={"x": ("x", ["a", "b", "c"])}).coords with pytest.raises(MergeError): orig_coords.merge(other_coords) other_coords = Dataset(coords={"a": ("x", [8, 9])}).coords expected = Dataset(coords={"x": range(2)}) actual = orig_coords.merge(other_coords) assert_identical(expected, actual) actual = other_coords.merge(orig_coords) assert_identical(expected, actual) other_coords = Dataset(coords={"x": np.nan}).coords actual = orig_coords.merge(other_coords) assert_identical(orig_coords.to_dataset(), actual) actual = other_coords.merge(orig_coords) assert_identical(orig_coords.to_dataset(), actual) def test_coords_merge_mismatched_shape(self) -> None: orig_coords = Dataset(coords={"a": ("x", [1, 1])}).coords other_coords = Dataset(coords={"a": 1}).coords expected = orig_coords.to_dataset() actual = orig_coords.merge(other_coords) assert_identical(expected, actual) other_coords = Dataset(coords={"a": ("y", 
[1])}).coords expected = Dataset(coords={"a": (["x", "y"], [[1], [1]])}) actual = orig_coords.merge(other_coords) assert_identical(expected, actual) actual = other_coords.merge(orig_coords) assert_identical(expected.transpose(), actual) orig_coords = Dataset(coords={"a": ("x", [np.nan])}).coords other_coords = Dataset(coords={"a": np.nan}).coords expected = orig_coords.to_dataset() actual = orig_coords.merge(other_coords) assert_identical(expected, actual) def test_data_vars_properties(self) -> None: ds = Dataset() ds["foo"] = (("x",), [1.0]) ds["bar"] = 2.0 # iter assert set(ds.data_vars) == {"foo", "bar"} assert "foo" in ds.data_vars assert "x" not in ds.data_vars assert_identical(ds["foo"], ds.data_vars["foo"]) # repr expected = dedent( """\ Data variables: foo (x) float64 8B 1.0 bar float64 8B 2.0""" ) actual = repr(ds.data_vars) assert expected == actual # dtypes assert ds.data_vars.dtypes == { "foo": np.dtype("float64"), "bar": np.dtype("float64"), } # len ds.coords["x"] = [1] assert len(ds.data_vars) == 2 # https://github.com/pydata/xarray/issues/7588 with pytest.raises( AssertionError, match="something is wrong with Dataset._coord_names" ): ds._coord_names = {"w", "x", "y", "z"} len(ds.data_vars) def test_equals_and_identical(self) -> None: data = create_test_data(seed=42) assert data.equals(data) assert data.identical(data) data2 = create_test_data(seed=42) data2.attrs["foobar"] = "baz" assert data.equals(data2) assert not data.identical(data2) del data2["time"] assert not data.equals(data2) data = create_test_data(seed=42).rename({"var1": None}) assert data.equals(data) assert data.identical(data) data2 = data.reset_coords() assert not data2.equals(data) assert not data2.identical(data) def test_equals_failures(self) -> None: data = create_test_data() assert not data.equals("foo") # type: ignore[arg-type] assert not data.identical(123) # type: ignore[arg-type] assert not data.broadcast_equals({1: 2}) # type: ignore[arg-type] def test_broadcast_equals(self) -> None: data1 = Dataset(coords={"x": 0}) data2 = Dataset(coords={"x": [0]}) assert data1.broadcast_equals(data2) assert not data1.equals(data2) assert not data1.identical(data2) def test_attrs(self) -> None: data = create_test_data(seed=42) data.attrs = {"foobar": "baz"} assert data.attrs["foobar"], "baz" assert isinstance(data.attrs, dict) def test_chunks_does_not_load_data(self) -> None: # regression test for GH6538 store = InaccessibleVariableDataStore() create_test_data().dump_to_store(store) ds = open_dataset(store) assert ds.chunks == {} @requires_dask @pytest.mark.parametrize( "use_cftime,calendar", [ (False, "standard"), (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "standard"), (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "noleap"), (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "360_day"), ], ) def test_chunk_by_season_resampler(self, use_cftime: bool, calendar: str) -> None: import dask.array N = 365 + 365 # 2 years - 1 day time = xr.date_range( "2000-01-01", periods=N, freq="D", use_cftime=use_cftime, calendar=calendar ) ds = Dataset( { "pr": ("time", dask.array.random.random((N), chunks=(20))), "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))), "ones": ("time", np.ones((N,))), }, coords={"time": time}, ) # Standard seasons rechunked = ds.chunk( {"x": 2, "time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])} ) assert rechunked.chunksizes["x"] == (2,) * 5 assert len(rechunked.chunksizes["time"]) == 9 assert rechunked.chunksizes["x"] == (2,) * 5 
assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"] if calendar == "standard": assert rechunked.chunksizes["time"] == (60, 92, 92, 91, 90, 92, 92, 91, 30) elif calendar == "noleap": assert rechunked.chunksizes["time"] == (59, 92, 92, 91, 90, 92, 92, 91, 31) elif calendar == "360_day": assert rechunked.chunksizes["time"] == (60, 90, 90, 90, 90, 90, 90, 90, 40) else: raise AssertionError("unreachable") # Custom seasons rechunked = ds.chunk( {"x": 2, "time": SeasonResampler(["DJFM", "AM", "JJA", "SON"])} ) assert len(rechunked.chunksizes["time"]) == 9 assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"] assert rechunked.chunksizes["x"] == (2,) * 5 if calendar == "standard": assert rechunked.chunksizes["time"] == (91, 61, 92, 91, 121, 61, 92, 91, 30) elif calendar == "noleap": assert rechunked.chunksizes["time"] == (90, 61, 92, 91, 121, 61, 92, 91, 31) elif calendar == "360_day": assert rechunked.chunksizes["time"] == (90, 60, 90, 90, 120, 60, 90, 90, 40) else: raise AssertionError("unreachable") # Test that drop_incomplete doesn't affect chunking rechunked_drop_true = ds.chunk( time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=True) ) rechunked_drop_false = ds.chunk( time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False) ) assert ( rechunked_drop_true.chunksizes["time"] == rechunked_drop_false.chunksizes["time"] ) @requires_dask def test_chunk_by_season_resampler_errors(self): """Test error handling for SeasonResampler chunking.""" # Test error on missing season (should fail with incomplete seasons) ds = Dataset( {"x": ("time", np.arange(12))}, coords={"time": pd.date_range("2000-01-01", periods=12, freq="MS")}, ) with pytest.raises(ValueError, match="does not cover all 12 months"): ds.chunk(time=SeasonResampler(["DJF", "MAM", "SON"])) ds = Dataset({"foo": ("x", [1, 2, 3])}) # Test error on virtual variable with pytest.raises(ValueError, match="virtual variable"): ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"])) # Test error on non-datetime variable ds["x"] = ("x", [1, 2, 3]) with pytest.raises(ValueError, match="datetime variables"): ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"])) # Test successful case with 1D datetime variable ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D")) # This should work result = ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"])) assert result.chunks is not None # Test error on missing season (should fail with incomplete seasons) with pytest.raises(ValueError): ds.chunk(x=SeasonResampler(["DJF", "MAM", "SON"])) @requires_dask def test_chunk(self) -> None: data = create_test_data() for v in data.variables.values(): assert isinstance(v.data, np.ndarray) assert data.chunks == {} reblocked = data.chunk() for k, v in reblocked.variables.items(): if k in reblocked.dims: assert isinstance(v.data, np.ndarray) else: assert isinstance(v.data, da.Array) expected_chunks: dict[Hashable, tuple[int, ...]] = { "dim1": (8,), "dim2": (9,), "dim3": (10,), } assert reblocked.chunks == expected_chunks # test kwargs form of chunks assert data.chunk(expected_chunks).chunks == expected_chunks def get_dask_names(ds): return {k: v.data.name for k, v in ds.items()} orig_dask_names = get_dask_names(reblocked) reblocked = data.chunk({"time": 5, "dim1": 5, "dim2": 5, "dim3": 5}) # time is not a dim in any of the data_vars, so it # doesn't get chunked expected_chunks = {"dim1": (5, 3), "dim2": (5, 4), "dim3": (5, 5)} assert reblocked.chunks == expected_chunks # make sure dask names change when 
rechunking by different amounts # regression test for GH3350 new_dask_names = get_dask_names(reblocked) for k, v in new_dask_names.items(): assert v != orig_dask_names[k] reblocked = data.chunk(expected_chunks) assert reblocked.chunks == expected_chunks # reblock on already blocked data orig_dask_names = get_dask_names(reblocked) reblocked = reblocked.chunk(expected_chunks) new_dask_names = get_dask_names(reblocked) assert reblocked.chunks == expected_chunks assert_identical(reblocked, data) # rechunking with same chunk sizes should not change names for k, v in new_dask_names.items(): assert v == orig_dask_names[k] with pytest.raises( ValueError, match=re.escape( "chunks keys ('foo',) not found in data dimensions ('dim2', 'dim3', 'time', 'dim1')" ), ): data.chunk({"foo": 10}) @requires_dask @pytest.mark.parametrize( "calendar", ( "standard", pytest.param( "gregorian", marks=pytest.mark.skipif(not has_cftime, reason="needs cftime"), ), ), ) @pytest.mark.parametrize("freq", ["D", "W", "5ME", "YE"]) @pytest.mark.parametrize("add_gap", [True, False]) def test_chunk_by_frequency(self, freq: str, calendar: str, add_gap: bool) -> None: import dask.array N = 365 * 2 Ξ”N = 28 # noqa: PLC2401 time = xr.date_range( "2001-01-01", periods=N + Ξ”N, freq="D", calendar=calendar ).to_numpy(copy=True) if add_gap: # introduce an empty bin time[31 : 31 + Ξ”N] = np.datetime64("NaT") time = time[~np.isnat(time)] else: time = time[:N] ds = Dataset( { "pr": ("time", dask.array.random.random((N), chunks=(20))), "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))), "ones": ("time", np.ones((N,))), }, coords={"time": time}, ) rechunked = ds.chunk(x=2, time=TimeResampler(freq)) expected = tuple( ds.ones.resample(time=freq).sum().dropna("time").astype(int).data.tolist() ) assert rechunked.chunksizes["time"] == expected assert rechunked.chunksizes["x"] == (2,) * 5 rechunked = ds.chunk({"x": 2, "time": TimeResampler(freq)}) assert rechunked.chunksizes["time"] == expected assert rechunked.chunksizes["x"] == (2,) * 5 def test_chunk_by_frequency_errors(self): ds = Dataset({"foo": ("x", [1, 2, 3])}) with pytest.raises(ValueError, match="virtual variable"): ds.chunk(x=TimeResampler("YE")) ds["x"] = ("x", [1, 2, 3]) with pytest.raises(ValueError, match="datetime variables"): ds.chunk(x=TimeResampler("YE")) ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D")) with pytest.raises(ValueError, match="Invalid frequency"): ds.chunk(x=TimeResampler("foo")) @requires_dask def test_dask_is_lazy(self) -> None: store = InaccessibleVariableDataStore() create_test_data().dump_to_store(store) ds = open_dataset(store).chunk() with pytest.raises(UnexpectedDataAccess): ds.load() with pytest.raises(UnexpectedDataAccess): _ = ds["var1"].values # these should not raise UnexpectedDataAccess: _ = ds.var1.data ds.isel(time=10) ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1) ds.transpose() ds.mean() ds.fillna(0) ds.rename({"dim1": "foobar"}) ds.set_coords("var1") ds.drop_vars("var1") def test_isel(self) -> None: data = create_test_data() slicers: dict[Hashable, slice] = { "dim1": slice(None, None, 2), "dim2": slice(0, 2), } ret = data.isel(slicers) # Verify that only the specified dimension was altered assert list(data.dims) == list(ret.dims) for d in data.dims: if d in slicers: assert ret.sizes[d] == np.arange(data.sizes[d])[slicers[d]].size else: assert data.sizes[d] == ret.sizes[d] # Verify that the data is what we expect for v in data.variables: assert data[v].dims == ret[v].dims assert data[v].attrs == 
ret[v].attrs slice_list = [slice(None)] * data[v].values.ndim for d, s in slicers.items(): if d in data[v].dims: inds = np.nonzero(np.array(data[v].dims) == d)[0] for ind in inds: slice_list[ind] = s expected = data[v].values[tuple(slice_list)] actual = ret[v].values np.testing.assert_array_equal(expected, actual) with pytest.raises(ValueError): data.isel(not_a_dim=slice(0, 2)) with pytest.raises( ValueError, match=r"Dimensions {'not_a_dim'} do not exist. Expected " r"one or more of " r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*", ): data.isel(not_a_dim=slice(0, 2)) with pytest.warns( UserWarning, match=r"Dimensions {'not_a_dim'} do not exist. " r"Expected one or more of " r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*", ): data.isel(not_a_dim=slice(0, 2), missing_dims="warn") assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims="ignore")) ret = data.isel(dim1=0) assert {"time": 20, "dim2": 9, "dim3": 10} == ret.sizes assert set(data.data_vars) == set(ret.data_vars) assert set(data.coords) == set(ret.coords) assert set(data.xindexes) == set(ret.xindexes) ret = data.isel(time=slice(2), dim1=0, dim2=slice(5)) assert {"time": 2, "dim2": 5, "dim3": 10} == ret.sizes assert set(data.data_vars) == set(ret.data_vars) assert set(data.coords) == set(ret.coords) assert set(data.xindexes) == set(ret.xindexes) ret = data.isel(time=0, dim1=0, dim2=slice(5)) assert {"dim2": 5, "dim3": 10} == ret.sizes assert set(data.data_vars) == set(ret.data_vars) assert set(data.coords) == set(ret.coords) assert set(data.xindexes) == set(list(ret.xindexes) + ["time"]) def test_isel_fancy(self) -> None: # isel with fancy indexing. data = create_test_data() pdim1 = [1, 2, 3] pdim2 = [4, 5, 1] pdim3 = [1, 2, 3] actual = data.isel( dim1=(("test_coord",), pdim1), dim2=(("test_coord",), pdim2), dim3=(("test_coord",), pdim3), ) assert "test_coord" in actual.dims assert actual.coords["test_coord"].shape == (len(pdim1),) # Should work with DataArray actual = data.isel( dim1=DataArray(pdim1, dims="test_coord"), dim2=(("test_coord",), pdim2), dim3=(("test_coord",), pdim3), ) assert "test_coord" in actual.dims assert actual.coords["test_coord"].shape == (len(pdim1),) expected = data.isel( dim1=(("test_coord",), pdim1), dim2=(("test_coord",), pdim2), dim3=(("test_coord",), pdim3), ) assert_identical(actual, expected) # DataArray with coordinate idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)}) idx2 = DataArray(pdim2, dims=["b"], coords={"b": np.random.randn(3)}) idx3 = DataArray(pdim3, dims=["c"], coords={"c": np.random.randn(3)}) # Should work with DataArray actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3) assert "a" in actual.dims assert "b" in actual.dims assert "c" in actual.dims assert "time" in actual.coords assert "dim2" in actual.coords assert "dim3" in actual.coords expected = data.isel( dim1=(("a",), pdim1), dim2=(("b",), pdim2), dim3=(("c",), pdim3) ) expected = expected.assign_coords(a=idx1["a"], b=idx2["b"], c=idx3["c"]) assert_identical(actual, expected) idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)}) idx2 = DataArray(pdim2, dims=["a"]) idx3 = DataArray(pdim3, dims=["a"]) # Should work with DataArray actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3) assert "a" in actual.dims assert "time" in actual.coords assert "dim2" in actual.coords assert "dim3" in actual.coords expected = data.isel( dim1=(("a",), pdim1), dim2=(("a",), pdim2), dim3=(("a",), pdim3) ) expected = expected.assign_coords(a=idx1["a"]) 
assert_identical(actual, expected) actual = data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)) assert "points" in actual.dims assert "dim3" in actual.dims assert "dim3" not in actual.data_vars np.testing.assert_array_equal(data["dim2"][pdim2], actual["dim2"]) # test that the order of the indexers doesn't matter assert_identical( data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)), data.isel(dim2=(("points",), pdim2), dim1=(("points",), pdim1)), ) # make sure we're raising errors in the right places with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"): data.isel(dim1=(("points",), [1, 2]), dim2=(("points",), [1, 2, 3])) with pytest.raises(TypeError, match=r"cannot use a Dataset"): data.isel(dim1=Dataset({"points": [1, 2]})) # test to be sure we keep around variables that were not indexed ds = Dataset({"x": [1, 2, 3, 4], "y": 0}) actual = ds.isel(x=(("points",), [0, 1, 2])) assert_identical(ds["y"], actual["y"]) # tests using index or DataArray as indexers stations = Dataset() stations["station"] = (("station",), ["A", "B", "C"]) stations["dim1s"] = (("station",), [1, 2, 3]) stations["dim2s"] = (("station",), [4, 5, 1]) actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"]) assert "station" in actual.coords assert "station" in actual.dims assert_identical(actual["station"].drop_vars(["dim2"]), stations["station"]) with pytest.raises(ValueError, match=r"conflicting values/indexes on "): data.isel( dim1=DataArray( [0, 1, 2], dims="station", coords={"station": [0, 1, 2]} ), dim2=DataArray( [0, 1, 2], dims="station", coords={"station": [0, 1, 3]} ), ) # multi-dimensional selection stations = Dataset() stations["a"] = (("a",), ["A", "B", "C"]) stations["b"] = (("b",), [0, 1]) stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]]) stations["dim2s"] = (("a",), [4, 5, 1]) actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"]) assert "a" in actual.coords assert "a" in actual.dims assert "b" in actual.coords assert "b" in actual.dims assert "dim2" in actual.coords assert "a" in actual["dim2"].dims assert_identical(actual["a"].drop_vars(["dim2"]), stations["a"]) assert_identical(actual["b"], stations["b"]) expected_var1 = data["var1"].variable[ stations["dim1s"].variable, stations["dim2s"].variable ] expected_var2 = data["var2"].variable[ stations["dim1s"].variable, stations["dim2s"].variable ] expected_var3 = data["var3"].variable[slice(None), stations["dim1s"].variable] assert_equal(actual["a"].drop_vars("dim2"), stations["a"]) assert_array_equal(actual["var1"], expected_var1) assert_array_equal(actual["var2"], expected_var2) assert_array_equal(actual["var3"], expected_var3) # test that drop works ds = xr.Dataset({"a": (("x",), [1, 2, 3])}, coords={"b": (("x",), [5, 6, 7])}) actual = ds.isel({"x": 1}, drop=False) expected = xr.Dataset({"a": 2}, coords={"b": 6}) assert_identical(actual, expected) actual = ds.isel({"x": 1}, drop=True) expected = xr.Dataset({"a": 2}) assert_identical(actual, expected) actual = ds.isel({"x": DataArray(1)}, drop=False) expected = xr.Dataset({"a": 2}, coords={"b": 6}) assert_identical(actual, expected) actual = ds.isel({"x": DataArray(1)}, drop=True) expected = xr.Dataset({"a": 2}) assert_identical(actual, expected) def test_isel_dataarray(self) -> None: """Test for indexing by DataArray""" data = create_test_data() # indexing with DataArray with same-name coordinates. 
indexing_da = DataArray( np.arange(1, 4), dims=["dim1"], coords={"dim1": np.random.randn(3)} ) actual = data.isel(dim1=indexing_da) assert_identical(indexing_da["dim1"], actual["dim1"]) assert_identical(data["dim2"], actual["dim2"]) # Conflict in the dimension coordinate indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={"dim2": np.random.randn(3)} ) with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): data.isel(dim2=indexing_da) # Also the case for DataArray with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): data["var2"].isel(dim2=indexing_da) with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): data["dim2"].isel(dim2=indexing_da) # same name coordinate which does not conflict indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={"dim2": data["dim2"].values[1:4]} ) actual = data.isel(dim2=indexing_da) assert_identical(actual["dim2"], indexing_da["dim2"]) # Silently drop conflicted (non-dimensional) coordinate of indexer indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={ "dim2": data["dim2"].values[1:4], "numbers": ("dim2", np.arange(2, 5)), }, ) actual = data.isel(dim2=indexing_da) assert_identical(actual["numbers"], data["numbers"]) # boolean data array with coordinate with the same name indexing_da = DataArray( np.arange(1, 10), dims=["dim2"], coords={"dim2": data["dim2"].values} ) indexing_da = indexing_da < 3 actual = data.isel(dim2=indexing_da) assert_identical(actual["dim2"], data["dim2"][:2]) # boolean data array with non-dimensioncoordinate indexing_da = DataArray( np.arange(1, 10), dims=["dim2"], coords={ "dim2": data["dim2"].values, "non_dim": (("dim2",), np.random.randn(9)), "non_dim2": 0, }, ) indexing_da = indexing_da < 3 actual = data.isel(dim2=indexing_da) assert_identical( actual["dim2"].drop_vars("non_dim").drop_vars("non_dim2"), data["dim2"][:2] ) assert_identical(actual["non_dim"], indexing_da["non_dim"][:2]) assert_identical(actual["non_dim2"], indexing_da["non_dim2"]) # non-dimension coordinate will be also attached indexing_da = DataArray( np.arange(1, 4), dims=["dim2"], coords={"non_dim": (("dim2",), np.random.randn(3))}, ) actual = data.isel(dim2=indexing_da) assert "non_dim" in actual assert "non_dim" in actual.coords # Index by a scalar DataArray indexing_da = DataArray(3, dims=[], coords={"station": 2}) actual = data.isel(dim2=indexing_da) assert "station" in actual actual = data.isel(dim2=indexing_da["station"]) assert "station" in actual # indexer generated from coordinates indexing_ds = Dataset({}, coords={"dim2": [0, 1, 2]}) with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"): actual = data.isel(dim2=indexing_ds["dim2"]) def test_isel_fancy_convert_index_variable(self) -> None: # select index variable "x" with a DataArray of dim "z" # -> drop index and convert index variable to base variable ds = xr.Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]}) idxr = xr.DataArray([1], dims="z", name="x") actual = ds.isel(x=idxr) assert "x" not in actual.xindexes assert not isinstance(actual.x.variable, IndexVariable) def test_isel_multicoord_index(self) -> None: # regression test https://github.com/pydata/xarray/issues/10063 # isel on a multi-coordinate index should return a unique index associated # to each coordinate coords = xr.Coordinates(coords={"x": [0, 1], "y": [1, 2]}, indexes={}) ds = xr.Dataset(coords=coords).set_xindex(["x", "y"], XYIndex) ds2 = ds.isel(x=slice(None), y=slice(None)) assert ds2.xindexes["x"] is ds2.xindexes["y"] def 
test_sel(self) -> None: data = create_test_data() int_slicers = {"dim1": slice(None, None, 2), "dim2": slice(2), "dim3": slice(3)} loc_slicers = { "dim1": slice(None, None, 2), "dim2": slice(0, 0.5), "dim3": slice("a", "c"), } assert_equal(data.isel(int_slicers), data.sel(loc_slicers)) data["time"] = ("time", pd.date_range("2000-01-01", periods=20)) assert_equal(data.isel(time=0), data.sel(time="2000-01-01")) assert_equal( data.isel(time=slice(10)), data.sel(time=slice("2000-01-01", "2000-01-10")) ) assert_equal(data, data.sel(time=slice("1999", "2005"))) times = pd.date_range("2000-01-01", periods=3) assert_equal(data.isel(time=slice(3)), data.sel(time=times)) assert_equal( data.isel(time=slice(3)), data.sel(time=(data["time.dayofyear"] <= 3)) ) td = pd.to_timedelta(np.arange(3), unit="days") data = Dataset({"x": ("td", np.arange(3)), "td": td}) assert_equal(data, data.sel(td=td)) assert_equal(data, data.sel(td=slice("3 days"))) assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0 days"))) assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0h"))) assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice("1 days", "2 days"))) def test_sel_dataarray(self) -> None: data = create_test_data() ind = DataArray([0.0, 0.5, 1.0], dims=["dim2"]) actual = data.sel(dim2=ind) assert_equal(actual, data.isel(dim2=[0, 1, 2])) # with different dimension ind = DataArray([0.0, 0.5, 1.0], dims=["new_dim"]) actual = data.sel(dim2=ind) expected = data.isel(dim2=Variable("new_dim", [0, 1, 2])) assert "new_dim" in actual.dims assert_equal(actual, expected) # Multi-dimensional ind = DataArray([[0.0], [0.5], [1.0]], dims=["new_dim", "new_dim2"]) actual = data.sel(dim2=ind) expected = data.isel(dim2=Variable(("new_dim", "new_dim2"), [[0], [1], [2]])) assert "new_dim" in actual.dims assert "new_dim2" in actual.dims assert_equal(actual, expected) # with coordinate ind = DataArray( [0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"]} ) actual = data.sel(dim2=ind) expected = data.isel(dim2=[0, 1, 2]).rename({"dim2": "new_dim"}) assert "new_dim" in actual.dims assert "new_dim" in actual.coords assert_equal( actual.drop_vars("new_dim").drop_vars("dim2"), expected.drop_vars("new_dim") ) assert_equal(actual["new_dim"].drop_vars("dim2"), ind["new_dim"]) # with conflicted coordinate (silently ignored) ind = DataArray( [0.0, 0.5, 1.0], dims=["dim2"], coords={"dim2": ["a", "b", "c"]} ) actual = data.sel(dim2=ind) expected = data.isel(dim2=[0, 1, 2]) assert_equal(actual, expected) # with conflicted coordinate (silently ignored) ind = DataArray( [0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"], "dim2": 3}, ) actual = data.sel(dim2=ind) assert_equal( actual["new_dim"].drop_vars("dim2"), ind["new_dim"].drop_vars("dim2") ) expected = data.isel(dim2=[0, 1, 2]) expected["dim2"] = (("new_dim"), expected["dim2"].values) assert_equal(actual["dim2"].drop_vars("new_dim"), expected["dim2"]) assert actual["var1"].dims == ("dim1", "new_dim") # with non-dimensional coordinate ind = DataArray( [0.0, 0.5, 1.0], dims=["dim2"], coords={ "dim2": ["a", "b", "c"], "numbers": ("dim2", [0, 1, 2]), "new_dim": ("dim2", [1.1, 1.2, 1.3]), }, ) actual = data.sel(dim2=ind) expected = data.isel(dim2=[0, 1, 2]) assert_equal(actual.drop_vars("new_dim"), expected) assert np.allclose(actual["new_dim"].values, ind["new_dim"].values) def test_sel_dataarray_mindex(self) -> None: midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two")) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") 
midx_coords["y"] = range(3) mds = xr.Dataset( {"var": (("x", "y"), np.random.rand(6, 3))}, coords=midx_coords ) actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="x")) actual_sel = mds.sel(x=DataArray(midx[:3], dims="x")) assert actual_isel["x"].dims == ("x",) assert actual_sel["x"].dims == ("x",) assert_identical(actual_isel, actual_sel) actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="z")) actual_sel = mds.sel(x=Variable("z", midx[:3])) assert actual_isel["x"].dims == ("z",) assert actual_sel["x"].dims == ("z",) assert_identical(actual_isel, actual_sel) # with coordinate actual_isel = mds.isel( x=xr.DataArray(np.arange(3), dims="z", coords={"z": [0, 1, 2]}) ) actual_sel = mds.sel( x=xr.DataArray(midx[:3], dims="z", coords={"z": [0, 1, 2]}) ) assert actual_isel["x"].dims == ("z",) assert actual_sel["x"].dims == ("z",) assert_identical(actual_isel, actual_sel) # Vectorized indexing with level-variables raises an error with pytest.raises(ValueError, match=r"Vectorized selection is "): mds.sel(one=["a", "b"]) with pytest.raises( ValueError, match=r"Vectorized selection is not available along coordinate 'x' with a multi-index", ): mds.sel( x=xr.DataArray( [np.array(midx[:2]), np.array(midx[-2:])], dims=["a", "b"] ) ) def test_sel_categorical(self) -> None: ind = pd.Series(["foo", "bar"], dtype="category") df = pd.DataFrame({"ind": ind, "values": [1, 2]}) ds = df.set_index("ind").to_xarray() actual = ds.sel(ind="bar") expected = ds.isel(ind=1) assert_identical(expected, actual) def test_sel_categorical_error(self) -> None: ind = pd.Series(["foo", "bar"], dtype="category") df = pd.DataFrame({"ind": ind, "values": [1, 2]}) ds = df.set_index("ind").to_xarray() with pytest.raises(ValueError): ds.sel(ind="bar", method="nearest") with pytest.raises(ValueError): ds.sel(ind="bar", tolerance="nearest") # type: ignore[arg-type] def test_categorical_index(self) -> None: cat = pd.CategoricalIndex( ["foo", "bar", "foo"], categories=["foo", "bar", "baz", "qux", "quux", "corge"], ) ds = xr.Dataset( {"var": ("cat", np.arange(3))}, coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 1])}, ) # test slice actual1 = ds.sel(cat="foo") expected1 = ds.isel(cat=[0, 2]) assert_identical(expected1, actual1) # make sure the conversion to the array works actual2 = ds.sel(cat="foo")["cat"].values assert (actual2 == np.array(["foo", "foo"])).all() ds = ds.set_index(index=["cat", "c"]) actual3 = ds.unstack("index") assert actual3["var"].shape == (2, 2) def test_categorical_index_reindex(self) -> None: cat = pd.CategoricalIndex( ["foo", "bar", "baz"], categories=["foo", "bar", "baz", "qux", "quux", "corge"], ) ds = xr.Dataset( {"var": ("cat", np.arange(3))}, coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 2])}, ) actual = ds.reindex(cat=["foo"])["cat"].values assert (actual == np.array(["foo"])).all() @pytest.mark.parametrize("fill_value", [np.nan, pd.NA]) def test_extensionarray_negative_reindex(self, fill_value) -> None: cat = pd.Categorical( ["foo", "bar", "baz"], categories=["foo", "bar", "baz", "qux", "quux", "corge"], ) ds = xr.Dataset( {"cat": ("index", cat)}, coords={"index": ("index", np.arange(3))}, ) reindexed_cat = cast( pd.api.extensions.ExtensionArray, ( ds.reindex(index=[-1, 1, 1], fill_value=fill_value)["cat"] .to_pandas() .values ), ) assert reindexed_cat.equals(pd.array([pd.NA, "bar", "bar"], dtype=cat.dtype)) # type: ignore[attr-defined] def test_extension_array_reindex_same(self) -> None: series = pd.Series([1, 2, pd.NA, 3], dtype=pd.Int32Dtype()) test = xr.Dataset({"test": series}) res = 
test.reindex(dim_0=series.index) align(res, test, join="exact") def test_categorical_multiindex(self) -> None: i1 = pd.Series([0, 0]) cat = pd.CategoricalDtype(categories=["foo", "baz", "bar"]) i2 = pd.Series(["baz", "bar"], dtype=cat) df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2]}).set_index( ["i1", "i2"] ) actual = df.to_xarray() assert actual["values"].shape == (1, 2) def test_sel_drop(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]}) expected = Dataset({"foo": 1}) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) expected = Dataset({"foo": 1}, {"x": 0}) selected = data.sel(x=0, drop=False) assert_identical(expected, selected) data = Dataset({"foo": ("x", [1, 2, 3])}) expected = Dataset({"foo": 1}) selected = data.sel(x=0, drop=True) assert_identical(expected, selected) def test_sel_drop_mindex(self) -> None: midx = pd.MultiIndex.from_arrays([["a", "a"], [1, 2]], names=("foo", "bar")) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") data = Dataset(coords=midx_coords) actual = data.sel(foo="a", drop=True) assert "foo" not in actual.coords actual = data.sel(foo="a", drop=False) assert_equal(actual.foo, DataArray("a", coords={"foo": "a"})) def test_isel_drop(self) -> None: data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]}) expected = Dataset({"foo": 1}) selected = data.isel(x=0, drop=True) assert_identical(expected, selected) expected = Dataset({"foo": 1}, {"x": 0}) selected = data.isel(x=0, drop=False) assert_identical(expected, selected) def test_head(self) -> None: data = create_test_data() expected = data.isel(time=slice(5), dim2=slice(6)) actual = data.head(time=5, dim2=6) assert_equal(expected, actual) expected = data.isel(time=slice(0)) actual = data.head(time=0) assert_equal(expected, actual) expected = data.isel({dim: slice(6) for dim in data.dims}) actual = data.head(6) assert_equal(expected, actual) expected = data.isel({dim: slice(5) for dim in data.dims}) actual = data.head() assert_equal(expected, actual) with pytest.raises(TypeError, match=r"either dict-like or a single int"): data.head([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): data.head(dim2=3.1) with pytest.raises(ValueError, match=r"expected positive int"): data.head(time=-3) def test_tail(self) -> None: data = create_test_data() expected = data.isel(time=slice(-5, None), dim2=slice(-6, None)) actual = data.tail(time=5, dim2=6) assert_equal(expected, actual) expected = data.isel(dim1=slice(0)) actual = data.tail(dim1=0) assert_equal(expected, actual) expected = data.isel({dim: slice(-6, None) for dim in data.dims}) actual = data.tail(6) assert_equal(expected, actual) expected = data.isel({dim: slice(-5, None) for dim in data.dims}) actual = data.tail() assert_equal(expected, actual) with pytest.raises(TypeError, match=r"either dict-like or a single int"): data.tail([3]) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"expected integer type"): data.tail(dim2=3.1) with pytest.raises(ValueError, match=r"expected positive int"): data.tail(time=-3) def test_thin(self) -> None: data = create_test_data() expected = data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6)) actual = data.thin(time=5, dim2=6) assert_equal(expected, actual) expected = data.isel({dim: slice(None, None, 6) for dim in data.dims}) actual = data.thin(6) assert_equal(expected, actual) with pytest.raises(TypeError, match=r"either dict-like or a single int"): data.thin([3]) # type: ignore[arg-type] with 
pytest.raises(TypeError, match=r"expected integer type"): data.thin(dim2=3.1) with pytest.raises(ValueError, match=r"cannot be zero"): data.thin(time=0) with pytest.raises(ValueError, match=r"expected positive int"): data.thin(time=-3) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_sel_fancy(self) -> None: data = create_test_data() # add in a range() index data["dim1"] = data.dim1 pdim1 = [1, 2, 3] pdim2 = [4, 5, 1] pdim3 = [1, 2, 3] expected = data.isel( dim1=Variable(("test_coord",), pdim1), dim2=Variable(("test_coord",), pdim2), dim3=Variable(("test_coord"), pdim3), ) actual = data.sel( dim1=Variable(("test_coord",), data.dim1[pdim1]), dim2=Variable(("test_coord",), data.dim2[pdim2]), dim3=Variable(("test_coord",), data.dim3[pdim3]), ) assert_identical(expected, actual) # DataArray Indexer idx_t = DataArray( data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) idx_2 = DataArray( data["dim2"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) idx_3 = DataArray( data["dim3"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3) expected = data.isel( time=Variable(("a",), [3, 2, 1]), dim2=Variable(("a",), [3, 2, 1]), dim3=Variable(("a",), [3, 2, 1]), ) expected = expected.assign_coords(a=idx_t["a"]) assert_identical(expected, actual) idx_t = DataArray( data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]} ) idx_2 = DataArray( data["dim2"][[2, 1, 3]].values, dims=["b"], coords={"b": [0, 1, 2]} ) idx_3 = DataArray( data["dim3"][[1, 2, 1]].values, dims=["c"], coords={"c": [0.0, 1.1, 2.2]} ) actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3) expected = data.isel( time=Variable(("a",), [3, 2, 1]), dim2=Variable(("b",), [2, 1, 3]), dim3=Variable(("c",), [1, 2, 1]), ) expected = expected.assign_coords(a=idx_t["a"], b=idx_2["b"], c=idx_3["c"]) assert_identical(expected, actual) # test from sel_points data = Dataset({"foo": (("x", "y"), np.arange(9).reshape(3, 3))}) data.coords.update({"x": [0, 1, 2], "y": [0, 1, 2]}) expected = Dataset( {"foo": ("points", [0, 4, 8])}, coords={ "x": Variable(("points",), [0, 1, 2]), "y": Variable(("points",), [0, 1, 2]), }, ) actual = data.sel( x=Variable(("points",), [0, 1, 2]), y=Variable(("points",), [0, 1, 2]) ) assert_identical(expected, actual) expected.coords.update({"x": ("points", [0, 1, 2]), "y": ("points", [0, 1, 2])}) actual = data.sel( x=Variable(("points",), [0.1, 1.1, 2.5]), y=Variable(("points",), [0, 1.2, 2.0]), method="pad", ) assert_identical(expected, actual) idx_x = DataArray([0, 1, 2], dims=["a"], coords={"a": ["a", "b", "c"]}) idx_y = DataArray([0, 2, 1], dims=["b"], coords={"b": [0, 3, 6]}) expected_ary = data["foo"][[0, 1, 2], [0, 2, 1]] actual = data.sel(x=idx_x, y=idx_y) assert_array_equal(expected_ary, actual["foo"]) assert_identical(actual["a"].drop_vars("x"), idx_x["a"]) assert_identical(actual["b"].drop_vars("y"), idx_y["b"]) with pytest.raises(KeyError): data.sel(x=[2.5], y=[2.0], method="pad", tolerance=1e-3) def test_sel_method(self) -> None: data = create_test_data() expected = data.sel(dim2=1) actual = data.sel(dim2=0.95, method="nearest") assert_identical(expected, actual) actual = data.sel(dim2=0.95, method="nearest", tolerance=1) assert_identical(expected, actual) with pytest.raises(KeyError): actual = data.sel(dim2=np.pi, method="nearest", tolerance=0) expected = data.sel(dim2=[1.5]) actual = data.sel(dim2=[1.45], method="backfill") assert_identical(expected, actual) with 
pytest.raises(NotImplementedError, match=r"slice objects"): data.sel(dim2=slice(1, 3), method="ffill") with pytest.raises(TypeError, match=r"``method``"): # this should not pass silently data.sel(dim2=1, method=data) # type: ignore[arg-type] # cannot pass method if there is no associated coordinate with pytest.raises(ValueError, match=r"cannot supply"): data.sel(dim1=0, method="nearest") def test_loc(self) -> None: data = create_test_data() expected = data.sel(dim3="a") actual = data.loc[dict(dim3="a")] assert_identical(expected, actual) with pytest.raises(TypeError, match=r"can only lookup dict"): data.loc["a"] # type: ignore[index] def test_selection_multiindex(self) -> None: midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") mdata = Dataset(data_vars={"var": ("x", range(8))}, coords=midx_coords) def test_sel( lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None ) -> None: ds = mdata.sel(x=lab_indexer) expected_ds = mdata.isel(x=pos_indexer) if not replaced_idx: assert_identical(ds, expected_ds) else: if renamed_dim: assert ds["var"].dims[0] == renamed_dim ds = ds.rename({renamed_dim: "x"}) assert_identical(ds["var"].variable, expected_ds["var"].variable) assert not ds["x"].equals(expected_ds["x"]) test_sel(("a", 1, -1), 0) test_sel(("b", 2, -2), -1) test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three") test_sel(("a",), range(4), replaced_idx=True) test_sel("a", range(4), replaced_idx=True) test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7]) test_sel(slice("a", "b"), range(8)) test_sel(slice(("a", 1), ("b", 1)), range(6)) test_sel({"one": "a", "two": 1, "three": -1}, 0) test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three") test_sel({"one": "a"}, range(4), replaced_idx=True) assert_identical(mdata.loc[{"x": {"one": "a"}}], mdata.sel(x={"one": "a"})) assert_identical(mdata.loc[{"x": "a"}], mdata.sel(x="a")) assert_identical(mdata.loc[{"x": ("a", 1)}], mdata.sel(x=("a", 1))) assert_identical(mdata.loc[{"x": ("a", 1, -1)}], mdata.sel(x=("a", 1, -1))) assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1)) def test_broadcast_like(self) -> None: original1 = DataArray( np.random.randn(5), [("x", range(5))], name="a" ).to_dataset() original2 = DataArray(np.random.randn(6), [("y", range(6))], name="b") expected1, expected2 = broadcast(original1, original2) assert_identical( original1.broadcast_like(original2), expected1.transpose("y", "x") ) assert_identical(original2.broadcast_like(original1), expected2) def test_to_pandas(self) -> None: # 0D -> series actual = Dataset({"a": 1, "b": 2}).to_pandas() expected = pd.Series([1, 2], ["a", "b"]) assert_array_equal(actual, expected) # 1D -> dataframe x = np.random.randn(10) y = np.random.randn(10) t = list("abcdefghij") ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)}) actual_df = ds.to_pandas() expected_df = ds.to_dataframe() assert expected_df.equals(actual_df), (expected_df, actual_df) # 2D -> error x2d = np.random.randn(10, 10) y2d = np.random.randn(10, 10) with pytest.raises(ValueError, match=r"cannot convert Datasets"): Dataset({"a": (["t", "r"], x2d), "b": (["t", "r"], y2d)}).to_pandas() def test_reindex_like(self) -> None: data = create_test_data() data["letters"] = ("dim3", 10 * ["a"]) expected = data.isel(dim1=slice(10), time=slice(13)) actual = data.reindex_like(expected) assert_identical(actual, expected) expected = data.copy(deep=True) expected["dim3"] = 
("dim3", list("cdefghijkl")) expected["var3"][:-2] = expected["var3"][2:].values expected["var3"][-2:] = np.nan expected["letters"] = expected["letters"].astype(object) expected["letters"][-2:] = np.nan expected["numbers"] = expected["numbers"].astype(float) expected["numbers"][:-2] = expected["numbers"][2:].values expected["numbers"][-2:] = np.nan actual = data.reindex_like(expected) assert_identical(actual, expected) def test_reindex(self) -> None: data = create_test_data() assert_identical(data, data.reindex()) expected = data.assign_coords(dim1=data["dim1"]) actual = data.reindex(dim1=data["dim1"]) assert_identical(actual, expected) actual = data.reindex(dim1=data["dim1"].values) assert_identical(actual, expected) actual = data.reindex(dim1=data["dim1"].to_index()) assert_identical(actual, expected) with pytest.raises( ValueError, match=r"cannot reindex or align along dimension" ): data.reindex(dim1=data["dim1"][:5]) expected = data.isel(dim2=slice(5)) actual = data.reindex(dim2=data["dim2"][:5]) assert_identical(actual, expected) # test dict-like argument actual = data.reindex({"dim2": data["dim2"]}) expected = data assert_identical(actual, expected) with pytest.raises(ValueError, match=r"cannot specify both"): data.reindex({"x": 0}, x=0) with pytest.raises(ValueError, match=r"dictionary"): data.reindex("foo") # type: ignore[arg-type] # invalid dimension # TODO: (benbovy - explicit indexes): uncomment? # --> from reindex docstrings: "any mismatched dimension is simply ignored" # with pytest.raises(ValueError, match=r"indexer keys.*not correspond.*"): # data.reindex(invalid=0) # out of order expected = data.sel(dim2=data["dim2"][:5:-1]) actual = data.reindex(dim2=data["dim2"][:5:-1]) assert_identical(actual, expected) # multiple fill values expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign( var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)), var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)), ) actual = data.reindex( dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10, "var2": -20} ) assert_identical(actual, expected) # use the default value expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign( var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)), var2=lambda ds: ds.var2.copy( data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1) ), ) actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10}) assert_identical(actual, expected) # regression test for #279 expected = Dataset({"x": ("time", np.random.randn(5))}, {"time": range(5)}) time2 = DataArray(np.arange(5), dims="time2") with pytest.raises(ValueError): actual = expected.reindex(time=time2) # another regression test ds = Dataset( {"foo": (["x", "y"], np.zeros((3, 4)))}, {"x": range(3), "y": range(4)} ) expected = Dataset( {"foo": (["x", "y"], np.zeros((3, 2)))}, {"x": [0, 1, 3], "y": [0, 1]} ) expected["foo"][-1] = np.nan actual = ds.reindex(x=[0, 1, 3], y=[0, 1]) assert_identical(expected, actual) def test_reindex_attrs_encoding(self) -> None: ds = Dataset( {"data": ("x", [1, 2, 3])}, {"x": ("x", [0, 1, 2], {"foo": "bar"}, {"bar": "baz"})}, ) actual = ds.reindex(x=[0, 1]) expected = Dataset( {"data": ("x", [1, 2])}, {"x": ("x", [0, 1], {"foo": "bar"}, {"bar": "baz"})}, ) assert_identical(actual, expected) assert actual.x.encoding == expected.x.encoding def test_reindex_warning(self) -> None: data = create_test_data() with pytest.raises(ValueError): # DataArray with different dimension raises Future warning ind = xr.DataArray([0.0, 1.0], 
dims=["new_dim"], name="ind") data.reindex(dim2=ind) # Should not warn ind = xr.DataArray([0.0, 1.0], dims=["dim2"], name="ind") with warnings.catch_warnings(record=True) as ws: data.reindex(dim2=ind) assert len(ws) == 0 def test_reindex_variables_copied(self) -> None: data = create_test_data() reindexed_data = data.reindex(copy=False) for k in data.variables: assert reindexed_data.variables[k] is not data.variables[k] def test_reindex_method(self) -> None: ds = Dataset({"x": ("y", [10, 20]), "y": [0, 1]}) y = [-0.5, 0.5, 1.5] actual = ds.reindex(y=y, method="backfill") expected = Dataset({"x": ("y", [10, 20, np.nan]), "y": y}) assert_identical(expected, actual) actual = ds.reindex(y=y, method="backfill", tolerance=0.1) expected = Dataset({"x": ("y", 3 * [np.nan]), "y": y}) assert_identical(expected, actual) actual = ds.reindex(y=y, method="backfill", tolerance=[0.1, 0.5, 0.1]) expected = Dataset({"x": ("y", [np.nan, 20, np.nan]), "y": y}) assert_identical(expected, actual) actual = ds.reindex(y=[0.1, 0.1, 1], tolerance=[0, 0.1, 0], method="nearest") expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": [0.1, 0.1, 1]}) assert_identical(expected, actual) actual = ds.reindex(y=y, method="pad") expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": y}) assert_identical(expected, actual) alt = Dataset({"y": y}) actual = ds.reindex_like(alt, method="pad") assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}]) def test_reindex_fill_value(self, fill_value) -> None: ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]}) y = [0, 1, 2] actual = ds.reindex(y=y, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_x = fill_value_z = np.nan elif isinstance(fill_value, dict): fill_value_x = fill_value["x"] fill_value_z = fill_value["z"] else: fill_value_x = fill_value_z = fill_value expected = Dataset( { "x": ("y", [10, 20, fill_value_x]), "z": ("y", [-20, -10, fill_value_z]), "y": y, } ) assert_identical(expected, actual) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}]) def test_reindex_like_fill_value(self, fill_value) -> None: ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]}) y = [0, 1, 2] alt = Dataset({"y": y}) actual = ds.reindex_like(alt, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_x = fill_value_z = np.nan elif isinstance(fill_value, dict): fill_value_x = fill_value["x"] fill_value_z = fill_value["z"] else: fill_value_x = fill_value_z = fill_value expected = Dataset( { "x": ("y", [10, 20, fill_value_x]), "z": ("y", [-20, -10, fill_value_z]), "y": y, } ) assert_identical(expected, actual) @pytest.mark.parametrize("dtype", [str, bytes]) def test_reindex_str_dtype(self, dtype) -> None: data = Dataset({"data": ("x", [1, 2]), "x": np.array(["a", "b"], dtype=dtype)}) actual = data.reindex(x=data.x) expected = data assert_identical(expected, actual) assert actual.x.dtype == expected.x.dtype def test_reindex_with_multiindex_level(self) -> None: # test for https://github.com/pydata/xarray/issues/10347 mindex = pd.MultiIndex.from_product( [[100, 200, 300], [1, 2, 3, 4]], names=["x", "y"] ) y_idx = PandasIndex(mindex.levels[1], "y") ds1 = xr.Dataset(coords={"y": [1, 2, 3]}) ds2 = xr.Dataset(coords=xr.Coordinates.from_xindex(y_idx)) actual = ds1.reindex(y=ds2.y) assert_identical(actual, ds2) 
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": 2, "bar": 1}]) def test_align_fill_value(self, fill_value) -> None: x = Dataset({"foo": DataArray([1, 2], dims=["x"], coords={"x": [1, 2]})}) y = Dataset({"bar": DataArray([1, 2], dims=["x"], coords={"x": [1, 3]})}) x2, y2 = align(x, y, join="outer", fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_foo = fill_value_bar = np.nan elif isinstance(fill_value, dict): fill_value_foo = fill_value["foo"] fill_value_bar = fill_value["bar"] else: fill_value_foo = fill_value_bar = fill_value expected_x2 = Dataset( { "foo": DataArray( [1, 2, fill_value_foo], dims=["x"], coords={"x": [1, 2, 3]} ) } ) expected_y2 = Dataset( { "bar": DataArray( [1, fill_value_bar, 2], dims=["x"], coords={"x": [1, 2, 3]} ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_align(self) -> None: left = create_test_data() right = left.copy(deep=True) right["dim3"] = ("dim3", list("cdefghijkl")) right["var3"][:-2] = right["var3"][2:].values right["var3"][-2:] = np.random.randn(*right["var3"][-2:].shape) right["numbers"][:-2] = right["numbers"][2:].values right["numbers"][-2:] = -10 intersection = list("cdefghij") union = list("abcdefghijkl") left2, right2 = align(left, right, join="inner") assert_array_equal(left2["dim3"], intersection) assert_identical(left2, right2) left2, right2 = align(left, right, join="outer") assert_array_equal(left2["dim3"], union) assert_equal(left2["dim3"].variable, right2["dim3"].variable) assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection)) assert np.isnan(left2["var3"][-2:]).all() assert np.isnan(right2["var3"][:2]).all() left2, right2 = align(left, right, join="left") assert_equal(left2["dim3"].variable, right2["dim3"].variable) assert_equal(left2["dim3"].variable, left["dim3"].variable) assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection)) assert np.isnan(right2["var3"][:2]).all() left2, right2 = align(left, right, join="right") assert_equal(left2["dim3"].variable, right2["dim3"].variable) assert_equal(left2["dim3"].variable, right["dim3"].variable) assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection)) assert np.isnan(left2["var3"][-2:]).all() with pytest.raises(ValueError, match=r"invalid value for join"): align(left, right, join="foobar") # type: ignore[call-overload] with pytest.raises(TypeError): align(left, right, foo="bar") # type: ignore[call-overload] def test_align_exact(self) -> None: left = xr.Dataset(coords={"x": [0, 1]}) right = xr.Dataset(coords={"x": [1, 2]}) left1, left2 = xr.align(left, left, join="exact") assert_identical(left1, left) assert_identical(left2, left) with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"): xr.align(left, right, join="exact") def test_align_override(self) -> None: left = xr.Dataset(coords={"x": [0, 1, 2]}) right = xr.Dataset(coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]}) expected_right = xr.Dataset(coords={"x": [0, 1, 2], "y": [1, 2, 3]}) new_left, new_right = xr.align(left, right, join="override") assert_identical(left, new_left) assert_identical(new_right, expected_right) new_left, new_right = xr.align(left, right, exclude="x", join="override") assert_identical(left, new_left) assert_identical(right, new_right) new_left, new_right = xr.align( left.isel(x=0, drop=True), right, exclude="x", join="override" ) assert_identical(left.isel(x=0, drop=True), new_left) 
assert_identical(right, new_right) with pytest.raises( ValueError, match=r"cannot align.*join.*override.*same size" ): xr.align(left.isel(x=0).expand_dims("x"), right, join="override") def test_align_exclude(self) -> None: x = Dataset( { "foo": DataArray( [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]} ) } ) y = Dataset( { "bar": DataArray( [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 3], "y": [5, 6]} ) } ) x2, y2 = align(x, y, exclude=["y"], join="outer") expected_x2 = Dataset( { "foo": DataArray( [[1, 2], [3, 4], [np.nan, np.nan]], dims=["x", "y"], coords={"x": [1, 2, 3], "y": [3, 4]}, ) } ) expected_y2 = Dataset( { "bar": DataArray( [[1, 2], [np.nan, np.nan], [3, 4]], dims=["x", "y"], coords={"x": [1, 2, 3], "y": [5, 6]}, ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_align_nocopy(self) -> None: x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [1, 2, 3])])}) y = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])}) expected_x2 = x expected_y2 = Dataset( {"foo": DataArray([1, 2, np.nan], coords=[("x", [1, 2, 3])])} ) x2, y2 = align(x, y, copy=False, join="outer") assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) assert source_ndarray(x["foo"].data) is source_ndarray(x2["foo"].data) x2, y2 = align(x, y, copy=True, join="outer") assert source_ndarray(x["foo"].data) is not source_ndarray(x2["foo"].data) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_align_indexes(self) -> None: x = Dataset({"foo": DataArray([1, 2, 3], dims="x", coords=[("x", [1, 2, 3])])}) (x2,) = align(x, indexes={"x": [2, 3, 1]}) expected_x2 = Dataset( {"foo": DataArray([2, 3, 1], dims="x", coords={"x": [2, 3, 1]})} ) assert_identical(expected_x2, x2) def test_align_multiple_indexes_common_dim(self) -> None: a = Dataset(coords={"x": [1, 2], "xb": ("x", [3, 4])}).set_xindex("xb") b = Dataset(coords={"x": [1], "xb": ("x", [3])}).set_xindex("xb") (a2, b2) = align(a, b, join="inner") assert_identical(a2, b, check_default_indexes=False) assert_identical(b2, b, check_default_indexes=False) c = Dataset(coords={"x": [1, 3], "xb": ("x", [2, 4])}).set_xindex("xb") with pytest.raises(AlignmentError, match=".*conflicting re-indexers"): align(a, c) def test_align_conflicting_indexes(self) -> None: class CustomIndex(PandasIndex): ... 
a = Dataset(coords={"xb": ("x", [3, 4])}).set_xindex("xb") b = Dataset(coords={"xb": ("x", [3])}).set_xindex("xb", CustomIndex) with pytest.raises(AlignmentError, match="cannot align.*conflicting indexes"): align(a, b) def test_align_non_unique(self) -> None: x = Dataset({"foo": ("x", [3, 4, 5]), "x": [0, 0, 1]}) x1, x2 = align(x, x) assert_identical(x1, x) assert_identical(x2, x) y = Dataset({"bar": ("x", [6, 7]), "x": [0, 1]}) with pytest.raises(ValueError, match=r"cannot reindex or align"): align(x, y) def test_align_str_dtype(self) -> None: a = Dataset({"foo": ("x", [0, 1])}, coords={"x": ["a", "b"]}) b = Dataset({"foo": ("x", [1, 2])}, coords={"x": ["b", "c"]}) expected_a = Dataset( {"foo": ("x", [0, 1, np.nan])}, coords={"x": ["a", "b", "c"]} ) expected_b = Dataset( {"foo": ("x", [np.nan, 1, 2])}, coords={"x": ["a", "b", "c"]} ) actual_a, actual_b = xr.align(a, b, join="outer") assert_identical(expected_a, actual_a) assert expected_a.x.dtype == actual_a.x.dtype assert_identical(expected_b, actual_b) assert expected_b.x.dtype == actual_b.x.dtype @pytest.mark.parametrize("join", ["left", "override"]) def test_align_index_var_attrs(self, join) -> None: # regression test https://github.com/pydata/xarray/issues/6852 # aligning two objects should have no side effect on their index variable # metadata. ds = Dataset(coords={"x": ("x", [1, 2, 3], {"units": "m"})}) ds_noattr = Dataset(coords={"x": ("x", [1, 2, 3])}) xr.align(ds_noattr, ds, join=join) assert ds.x.attrs == {"units": "m"} assert ds_noattr.x.attrs == {} def test_align_scalar_index(self) -> None: # ensure that indexes associated with scalar coordinates are not ignored # during alignment ds1 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex) ds2 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex) actual = xr.align(ds1, ds2, join="exact") assert_identical(actual[0], ds1, check_default_indexes=False) assert_identical(actual[1], ds2, check_default_indexes=False) ds3 = Dataset(coords={"x": 1}).set_xindex("x", ScalarIndex) with pytest.raises(AlignmentError, match="cannot align objects"): xr.align(ds1, ds3, join="exact") def test_align_multi_dim_index_exclude_dims(self) -> None: ds1 = ( Dataset(coords={"x": [1, 2], "y": [3, 4]}) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) ds2 = ( Dataset(coords={"x": [1, 2], "y": [5, 6]}) .drop_indexes(["x", "y"]) .set_xindex(["x", "y"], XYIndex) ) for join in ("outer", "exact"): actual = xr.align(ds1, ds2, join=join, exclude="y") assert_identical(actual[0], ds1, check_default_indexes=False) assert_identical(actual[1], ds2, check_default_indexes=False) with pytest.raises( AlignmentError, match="cannot align objects.*index.*not equal" ): xr.align(ds1, ds2, join="exact") with pytest.raises(AlignmentError, match="cannot exclude dimension"): xr.align(ds1, ds2, join="override", exclude="y") def test_align_index_equals_future_warning(self) -> None: # TODO: remove this test once the deprecation cycle is completed class DeprecatedEqualsSignatureIndex(PandasIndex): def equals(self, other: Index) -> bool: # type: ignore[override] return super().equals(other, exclude=None) ds = ( Dataset(coords={"x": [1, 2]}) .drop_indexes("x") .set_xindex("x", DeprecatedEqualsSignatureIndex) ) with pytest.warns(FutureWarning, match="signature.*deprecated"): xr.align(ds, ds.copy(), join="exact") def test_broadcast(self) -> None: ds = Dataset( {"foo": 0, "bar": ("x", [1]), "baz": ("y", [2, 3])}, {"c": ("x", [4])} ) expected = Dataset( { "foo": (("x", "y"), [[0, 0]]), "bar": (("x", "y"), [[1, 1]]), "baz": 
(("x", "y"), [[2, 3]]), }, {"c": ("x", [4])}, ) (actual,) = broadcast(ds) assert_identical(expected, actual) ds_x = Dataset({"foo": ("x", [1])}) ds_y = Dataset({"bar": ("y", [2, 3])}) expected_x = Dataset({"foo": (("x", "y"), [[1, 1]])}) expected_y = Dataset({"bar": (("x", "y"), [[2, 3]])}) actual_x, actual_y = broadcast(ds_x, ds_y) assert_identical(expected_x, actual_x) assert_identical(expected_y, actual_y) array_y = ds_y["bar"] expected_y2 = expected_y["bar"] actual_x2, actual_y2 = broadcast(ds_x, array_y) assert_identical(expected_x, actual_x2) assert_identical(expected_y2, actual_y2) def test_broadcast_nocopy(self) -> None: # Test that data is not copied if not needed x = Dataset({"foo": (("x", "y"), [[1, 1]])}) y = Dataset({"bar": ("y", [2, 3])}) (actual_x,) = broadcast(x) assert_identical(x, actual_x) assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data) actual_x, actual_y = broadcast(x, y) assert_identical(x, actual_x) assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data) def test_broadcast_exclude(self) -> None: x = Dataset( { "foo": DataArray( [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]} ), "bar": DataArray(5), } ) y = Dataset( { "foo": DataArray( [[1, 2]], dims=["z", "y"], coords={"z": [1], "y": [5, 6]} ) } ) x2, y2 = broadcast(x, y, exclude=["y"]) expected_x2 = Dataset( { "foo": DataArray( [[[1, 2]], [[3, 4]]], dims=["x", "z", "y"], coords={"z": [1], "x": [1, 2], "y": [3, 4]}, ), "bar": DataArray( [[5], [5]], dims=["x", "z"], coords={"x": [1, 2], "z": [1]} ), } ) expected_y2 = Dataset( { "foo": DataArray( [[[1, 2]], [[1, 2]]], dims=["x", "z", "y"], coords={"z": [1], "x": [1, 2], "y": [5, 6]}, ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_misaligned(self) -> None: x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [-1, -2, -3])])}) y = Dataset( { "bar": DataArray( [[1, 2], [3, 4]], dims=["y", "x"], coords={"y": [1, 2], "x": [10, -3]}, ) } ) x2, y2 = broadcast(x, y) expected_x2 = Dataset( { "foo": DataArray( [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]], dims=["x", "y"], coords={"y": [1, 2], "x": [-3, -2, -1, 10]}, ) } ) expected_y2 = Dataset( { "bar": DataArray( [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]], dims=["x", "y"], coords={"y": [1, 2], "x": [-3, -2, -1, 10]}, ) } ) assert_identical(expected_x2, x2) assert_identical(expected_y2, y2) def test_broadcast_multi_index(self) -> None: # GH6430 ds = Dataset( {"foo": (("x", "y", "z"), np.ones((3, 4, 2)))}, {"x": ["a", "b", "c"], "y": [1, 2, 3, 4]}, ) stacked = ds.stack(space=["x", "y"]) broadcasted, _ = broadcast(stacked, stacked.space) assert broadcasted.xindexes["x"] is broadcasted.xindexes["space"] assert broadcasted.xindexes["y"] is broadcasted.xindexes["space"] def test_variable_indexing(self) -> None: data = create_test_data() v = data["var1"] d1 = data["dim1"] d2 = data["dim2"] assert_equal(v, v[d1.values]) assert_equal(v, v[d1]) assert_equal(v[:3], v[d1 < 3]) assert_equal(v[:, 3:], v[:, d2 >= 1.5]) assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5]) assert_equal(v[:3, :2], v[range(3), range(2)]) assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]]) def test_drop_variables(self) -> None: data = create_test_data() assert_identical(data, data.drop_vars([])) expected = Dataset({k: data[k] for k in data.variables if k != "time"}) actual = data.drop_vars("time") assert_identical(expected, actual) actual = data.drop_vars(["time"]) assert_identical(expected, actual) with pytest.raises( ValueError, match=re.escape( 
"These variables cannot be found in this dataset: ['not_found_here']" ), ): data.drop_vars("not_found_here") actual = data.drop_vars("not_found_here", errors="ignore") assert_identical(data, actual) actual = data.drop_vars(["not_found_here"], errors="ignore") assert_identical(data, actual) actual = data.drop_vars(["time", "not_found_here"], errors="ignore") assert_identical(expected, actual) # deprecated approach with `drop` works (straight copy paste from above) with pytest.warns(DeprecationWarning): actual = data.drop("not_found_here", errors="ignore") assert_identical(data, actual) with pytest.warns(DeprecationWarning): actual = data.drop(["not_found_here"], errors="ignore") assert_identical(data, actual) with pytest.warns(DeprecationWarning): actual = data.drop(["time", "not_found_here"], errors="ignore") assert_identical(expected, actual) with pytest.warns(DeprecationWarning): actual = data.drop({"time", "not_found_here"}, errors="ignore") assert_identical(expected, actual) def test_drop_multiindex_level(self) -> None: data = create_test_multiindex() expected = data.drop_vars(["x", "level_1", "level_2"]) with pytest.warns(DeprecationWarning): actual = data.drop_vars("level_1") assert_identical(expected, actual) def test_drop_index_labels(self) -> None: data = Dataset({"A": (["x", "y"], np.random.randn(2, 3)), "x": ["a", "b"]}) with pytest.warns(DeprecationWarning): actual = data.drop(["a"], dim="x") expected = data.isel(x=[1]) assert_identical(expected, actual) with pytest.warns(DeprecationWarning): actual = data.drop(["a", "b"], dim="x") expected = data.isel(x=slice(0, 0)) assert_identical(expected, actual) with pytest.raises(KeyError): # not contained in axis with pytest.warns(DeprecationWarning): data.drop(["c"], dim="x") with pytest.warns(DeprecationWarning): actual = data.drop(["c"], dim="x", errors="ignore") assert_identical(data, actual) with pytest.raises(ValueError): data.drop(["c"], dim="x", errors="wrong_value") # type: ignore[arg-type] with pytest.warns(DeprecationWarning): actual = data.drop(["a", "b", "c"], "x", errors="ignore") expected = data.isel(x=slice(0, 0)) assert_identical(expected, actual) # DataArrays as labels are a nasty corner case as they are not # Iterable[Hashable] - DataArray.__iter__ yields scalar DataArrays. actual = data.drop_sel(x=DataArray(["a", "b", "c"]), errors="ignore") expected = data.isel(x=slice(0, 0)) assert_identical(expected, actual) with pytest.warns(DeprecationWarning): data.drop(DataArray(["a", "b", "c"]), dim="x", errors="ignore") assert_identical(expected, actual) actual = data.drop_sel(y=[1]) expected = data.isel(y=[0, 2]) assert_identical(expected, actual) with pytest.raises(KeyError, match=r"not found in axis"): data.drop_sel(x=0) def test_drop_labels_by_keyword(self) -> None: data = Dataset( {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)} ) # Basic functionality. 
assert len(data.coords["x"]) == 2 with pytest.warns(DeprecationWarning): ds1 = data.drop(["a"], dim="x") ds2 = data.drop_sel(x="a") ds3 = data.drop_sel(x=["a"]) ds4 = data.drop_sel(x=["a", "b"]) ds5 = data.drop_sel(x=["a", "b"], y=range(0, 6, 2)) arr = DataArray(range(3), dims=["c"]) with pytest.warns(DeprecationWarning): data.drop(arr.coords) with pytest.warns(DeprecationWarning): data.drop(arr.xindexes) assert_array_equal(ds1.coords["x"], ["b"]) assert_array_equal(ds2.coords["x"], ["b"]) assert_array_equal(ds3.coords["x"], ["b"]) assert ds4.coords["x"].size == 0 assert ds5.coords["x"].size == 0 assert_array_equal(ds5.coords["y"], [1, 3, 5]) # Error handling if user tries both approaches. with pytest.raises(ValueError): data.drop(labels=["a"], x="a") with pytest.raises(ValueError): data.drop(labels=["a"], dim="x", x="a") warnings.filterwarnings("ignore", r"\W*drop") with pytest.raises(ValueError): data.drop(dim="x", x="a") def test_drop_labels_by_position(self) -> None: data = Dataset( {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)} ) # Basic functionality. assert len(data.coords["x"]) == 2 actual = data.drop_isel(x=0) expected = data.drop_sel(x="a") assert_identical(expected, actual) actual = data.drop_isel(x=[0]) expected = data.drop_sel(x=["a"]) assert_identical(expected, actual) actual = data.drop_isel(x=[0, 1]) expected = data.drop_sel(x=["a", "b"]) assert_identical(expected, actual) assert actual.coords["x"].size == 0 actual = data.drop_isel(x=[0, 1], y=range(0, 6, 2)) expected = data.drop_sel(x=["a", "b"], y=range(0, 6, 2)) assert_identical(expected, actual) assert actual.coords["x"].size == 0 with pytest.raises(KeyError): data.drop_isel(z=1) def test_drop_indexes(self) -> None: ds = Dataset( coords={ "x": ("x", [0, 1, 2]), "y": ("y", [3, 4, 5]), "foo": ("x", ["a", "a", "b"]), } ) actual = ds.drop_indexes("x") assert "x" not in actual.xindexes assert type(actual.x.variable) is Variable actual = ds.drop_indexes(["x", "y"]) assert "x" not in actual.xindexes assert "y" not in actual.xindexes assert type(actual.x.variable) is Variable assert type(actual.y.variable) is Variable with pytest.raises( ValueError, match=r"The coordinates \('not_a_coord',\) are not found in the dataset coordinates", ): ds.drop_indexes("not_a_coord") with pytest.raises(ValueError, match="those coordinates do not have an index"): ds.drop_indexes("foo") actual = ds.drop_indexes(["foo", "not_a_coord"], errors="ignore") assert_identical(actual, ds) # test index corrupted midx = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["a", "b"]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords=midx_coords) with pytest.raises(ValueError, match=".*would corrupt the following index.*"): ds.drop_indexes("a") def test_drop_dims(self) -> None: data = xr.Dataset( { "A": (["x", "y"], np.random.randn(2, 3)), "B": ("x", np.random.randn(2)), "x": ["a", "b"], "z": np.pi, } ) actual = data.drop_dims("x") expected = data.drop_vars(["A", "B", "x"]) assert_identical(expected, actual) actual = data.drop_dims("y") expected = data.drop_vars("A") assert_identical(expected, actual) actual = data.drop_dims(["x", "y"]) expected = data.drop_vars(["A", "B", "x"]) assert_identical(expected, actual) with pytest.raises((ValueError, KeyError)): data.drop_dims("z") # not a dimension with pytest.raises((ValueError, KeyError)): data.drop_dims(None) # type:ignore[arg-type] actual = data.drop_dims("z", errors="ignore") assert_identical(data, actual) # should this be allowed? 
actual = data.drop_dims(None, errors="ignore") # type:ignore[arg-type] assert_identical(data, actual) with pytest.raises(ValueError): actual = data.drop_dims("z", errors="wrong_value") # type: ignore[arg-type] actual = data.drop_dims(["x", "y", "z"], errors="ignore") expected = data.drop_vars(["A", "B", "x"]) assert_identical(expected, actual) def test_copy(self) -> None: data = create_test_data() data.attrs["Test"] = [1, 2, 3] for copied in [data.copy(deep=False), copy(data)]: assert_identical(data, copied) assert data.encoding == copied.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.indexes.safe_cast_to_index. # Limiting the test to data variables. for k in data.data_vars: v0 = data.variables[k] v1 = copied.variables[k] assert source_ndarray(v0.data) is source_ndarray(v1.data) copied["foo"] = ("z", np.arange(5)) assert "foo" not in data copied.attrs["foo"] = "bar" assert "foo" not in data.attrs assert data.attrs["Test"] is copied.attrs["Test"] for copied in [data.copy(deep=True), deepcopy(data)]: assert_identical(data, copied) for k, v0 in data.variables.items(): v1 = copied.variables[k] assert v0 is not v1 assert data.attrs["Test"] is not copied.attrs["Test"] def test_copy_with_data(self) -> None: orig = create_test_data() new_data = {k: np.random.randn(*v.shape) for k, v in orig.data_vars.items()} actual = orig.copy(data=new_data) expected = orig.copy() for k, v in new_data.items(): expected[k].data = v assert_identical(expected, actual) @pytest.mark.xfail(raises=AssertionError) @pytest.mark.parametrize( "deep, expected_orig", [ [ True, xr.DataArray( xr.IndexVariable("a", np.array([1, 2])), coords={"a": [1, 2]}, dims=["a"], ), ], [ False, xr.DataArray( xr.IndexVariable("a", np.array([999, 2])), coords={"a": [999, 2]}, dims=["a"], ), ], ], ) def test_copy_coords(self, deep, expected_orig) -> None: """The test fails for the shallow copy, and apparently only on Windows for some reason. 
On Windows, coords seem to be immutable unless the dataset is a deep copy of another dataset.""" ds = xr.DataArray( np.ones([2, 2, 2]), coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]}, dims=["a", "b", "c"], name="value", ).to_dataset() ds_cp = ds.copy(deep=deep) new_a = np.array([999, 2]) ds_cp.coords["a"] = ds_cp.a.copy(data=new_a) expected_cp = xr.DataArray( xr.IndexVariable("a", new_a), coords={"a": [999, 2]}, dims=["a"], ) assert_identical(ds_cp.coords["a"], expected_cp) assert_identical(ds.coords["a"], expected_orig) def test_copy_with_data_errors(self) -> None: orig = create_test_data() new_var1 = np.arange(orig["var1"].size).reshape(orig["var1"].shape) with pytest.raises(ValueError, match=r"Data must be dict-like"): orig.copy(data=new_var1) # type: ignore[arg-type] with pytest.raises(ValueError, match=r"only contain variables in original"): orig.copy(data={"not_in_original": new_var1}) with pytest.raises(ValueError, match=r"contain all variables in original"): orig.copy(data={"var1": new_var1}) def test_drop_encoding(self) -> None: orig = create_test_data() vencoding = {"scale_factor": 10} orig.encoding = {"foo": "bar"} for k in orig.variables.keys(): orig[k].encoding = vencoding actual = orig.drop_encoding() assert actual.encoding == {} for v in actual.variables.values(): assert v.encoding == {} assert_equal(actual, orig) def test_rename(self) -> None: data = create_test_data() newnames = { "var1": "renamed_var1", "dim2": "renamed_dim2", } renamed = data.rename(newnames) variables = dict(data.variables) for nk, nv in newnames.items(): variables[nv] = variables.pop(nk) for k, v in variables.items(): dims = list(v.dims) for name, newname in newnames.items(): if name in dims: dims[dims.index(name)] = newname assert_equal( Variable(dims, v.values, v.attrs), renamed[k].variable.to_base_variable(), ) assert v.encoding == renamed[k].encoding assert type(v) is type(renamed.variables[k]) assert "var1" not in renamed assert "dim2" not in renamed with pytest.raises(ValueError, match=r"cannot rename 'not_a_var'"): data.rename({"not_a_var": "nada"}) with pytest.raises(ValueError, match=r"'var1' conflicts"): data.rename({"var2": "var1"}) # verify that we can rename a variable without accessing the data var1 = data["var1"] data["var1"] = (var1.dims, InaccessibleArray(var1.values)) renamed = data.rename(newnames) with pytest.raises(UnexpectedDataAccess): _ = renamed["renamed_var1"].values # https://github.com/python/mypy/issues/10008 renamed_kwargs = data.rename(**newnames) # type: ignore[arg-type] assert_identical(renamed, renamed_kwargs) def test_rename_old_name(self) -> None: # regression test for GH1477 data = create_test_data() with pytest.raises(ValueError, match=r"'samecol' conflicts"): data.rename({"var1": "samecol", "var2": "samecol"}) # This shouldn't cause any problems. 
data.rename({"var1": "var2", "var2": "var1"}) def test_rename_same_name(self) -> None: data = create_test_data() newnames = {"var1": "var1", "dim2": "dim2"} renamed = data.rename(newnames) assert_identical(renamed, data) def test_rename_dims(self) -> None: original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}) expected = Dataset( {"x": ("x_new", [0, 1, 2]), "y": ("x_new", [10, 11, 12]), "z": 42} ) # TODO: (benbovy - explicit indexes) update when set_index supports # setting index for non-dimension variables expected = expected.set_coords("x") actual = original.rename_dims({"x": "x_new"}) assert_identical(expected, actual, check_default_indexes=False) actual_2 = original.rename_dims(x="x_new") assert_identical(expected, actual_2, check_default_indexes=False) # Test to raise ValueError dims_dict_bad = {"x_bad": "x_new"} with pytest.raises(ValueError): original.rename_dims(dims_dict_bad) with pytest.raises(ValueError): original.rename_dims({"x": "z"}) def test_rename_vars(self) -> None: original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}) expected = Dataset( {"x_new": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42} ) # TODO: (benbovy - explicit indexes) update when set_index supports # setting index for non-dimension variables expected = expected.set_coords("x_new") actual = original.rename_vars({"x": "x_new"}) assert_identical(expected, actual, check_default_indexes=False) actual_2 = original.rename_vars(x="x_new") assert_identical(expected, actual_2, check_default_indexes=False) # Test to raise ValueError names_dict_bad = {"x_bad": "x_new"} with pytest.raises(ValueError): original.rename_vars(names_dict_bad) def test_rename_dimension_coord(self) -> None: # rename a dimension coordinate to a non-dimension coordinate # should preserve index original = Dataset(coords={"x": ("x", [0, 1, 2])}) actual = original.rename_vars({"x": "x_new"}) assert "x_new" in actual.xindexes actual_2 = original.rename_dims({"x": "x_new"}) assert "x" in actual_2.xindexes def test_rename_dimension_coord_warnings(self) -> None: # create a dimension coordinate by renaming a dimension or coordinate # should raise a warning (no index created) ds = Dataset(coords={"x": ("y", [0, 1])}) with pytest.warns( UserWarning, match="rename 'x' to 'y' does not create an index.*" ): ds.rename(x="y") ds = Dataset(coords={"y": ("x", [0, 1])}) with pytest.warns( UserWarning, match="rename 'x' to 'y' does not create an index.*" ): ds.rename(x="y") # A no-op rename should not raise a warning ds = Dataset( data_vars={"data": (("x", "y"), np.ones((2, 3)))}, coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])}, ) with warnings.catch_warnings(): warnings.simplefilter("error") ds.rename(x="x") def test_rename_multiindex(self) -> None: midx = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["a", "b"]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") original = Dataset({}, midx_coords) midx_renamed = midx.rename(["a", "c"]) midx_coords_renamed = Coordinates.from_pandas_multiindex(midx_renamed, "x") expected = Dataset({}, midx_coords_renamed) actual = original.rename({"b": "c"}) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"'a' conflicts"): with pytest.warns(UserWarning, match="does not create an index anymore"): original.rename({"x": "a"}) with pytest.raises(ValueError, match=r"'x' conflicts"): with pytest.warns(UserWarning, match="does not create an index anymore"): original.rename({"a": "x"}) with pytest.raises(ValueError, match=r"'b' 
conflicts"): original.rename({"a": "b"}) def test_rename_preserve_attrs_encoding(self) -> None: # test that attrs/encoding are propagated to new variable(s) created from Index object original = Dataset(coords={"x": ("x", [0, 1, 2])}) expected = Dataset(coords={"y": ("y", [0, 1, 2])}) for ds, dim in zip([original, expected], ["x", "y"], strict=True): ds[dim].attrs = {"foo": "bar"} ds[dim].encoding = {"foo": "bar"} actual = original.rename({"x": "y"}) assert_identical(actual, expected) @requires_cftime def test_rename_does_not_change_CFTimeIndex_type(self) -> None: # make sure CFTimeIndex is not converted to DatetimeIndex #3522 time = xr.date_range( start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True ) orig = Dataset(coords={"time": time}) renamed = orig.rename(time="time_new") assert "time_new" in renamed.xindexes # TODO: benbovy - flexible indexes: update when CFTimeIndex # inherits from xarray.Index assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), CFTimeIndex) assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new" # check original has not changed assert "time" in orig.xindexes assert isinstance(orig.xindexes["time"].to_pandas_index(), CFTimeIndex) assert orig.xindexes["time"].to_pandas_index().name == "time" # note: rename_dims(time="time_new") drops "ds.indexes" renamed = orig.rename_dims() assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex) renamed = orig.rename_vars() assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex) def test_rename_does_not_change_DatetimeIndex_type(self) -> None: # make sure DatetimeIndex is preserved on rename time = pd.date_range(start="2000", periods=6, freq="2MS") orig = Dataset(coords={"time": time}) renamed = orig.rename(time="time_new") assert "time_new" in renamed.xindexes # TODO: benbovy - flexible indexes: update when DatetimeIndex # inherits from xarray.Index? 
assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), DatetimeIndex) assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new" # check original has not changed assert "time" in orig.xindexes assert isinstance(orig.xindexes["time"].to_pandas_index(), DatetimeIndex) assert orig.xindexes["time"].to_pandas_index().name == "time" # note: rename_dims(time="time_new") drops "ds.indexes" renamed = orig.rename_dims() assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex) renamed = orig.rename_vars() assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex) def test_swap_dims(self) -> None: original = Dataset({"x": [1, 2, 3], "y": ("x", list("abc")), "z": 42}) expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": list("abc")}) actual = original.swap_dims({"x": "y"}) assert_identical(expected, actual) assert isinstance(actual.variables["y"], IndexVariable) assert isinstance(actual.variables["x"], Variable) assert actual.xindexes["y"].equals(expected.xindexes["y"]) roundtripped = actual.swap_dims({"y": "x"}) assert_identical(original.set_coords("y"), roundtripped) with pytest.raises(ValueError, match=r"cannot swap"): original.swap_dims({"y": "x"}) with pytest.raises(ValueError, match=r"replacement dimension"): original.swap_dims({"x": "z"}) expected = Dataset( {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])} ) actual = original.swap_dims({"x": "u"}) assert_identical(expected, actual) # as kwargs expected = Dataset( {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])} ) actual = original.swap_dims(x="u") assert_identical(expected, actual) # handle multiindex case midx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"]) original = Dataset({"x": [1, 2, 3], "y": ("x", midx), "z": 42}) midx_coords = Coordinates.from_pandas_multiindex(midx, "y") midx_coords["x"] = ("y", [1, 2, 3]) expected = Dataset({"z": 42}, midx_coords) actual = original.swap_dims({"x": "y"}) assert_identical(expected, actual) assert isinstance(actual.variables["y"], IndexVariable) assert isinstance(actual.variables["x"], Variable) assert actual.xindexes["y"].equals(expected.xindexes["y"]) def test_expand_dims_error(self) -> None: original = Dataset( { "x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3)), "z": ("a", np.random.randn(3)), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) with pytest.raises(ValueError, match=r"already exists"): original.expand_dims(dim=["x"]) # Make sure it also raises an error for non-dimension coordinates # that have a dimension. 
original = original.set_coords("z") with pytest.raises(ValueError, match=r"already exists"): original.expand_dims(dim=["z"]) original = Dataset( { "x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3)), "z": ("a", np.random.randn(3)), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) with pytest.raises(TypeError, match=r"value of new dimension"): original.expand_dims({"d": 3.2}) with pytest.raises(ValueError, match=r"both keyword and positional"): original.expand_dims({"d": 4}, e=4) def test_expand_dims_int(self) -> None: original = Dataset( {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))}, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) actual = original.expand_dims(["z"], [1]) expected = Dataset( { "x": original["x"].expand_dims("z", 1), "y": original["y"].expand_dims("z", 1), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) assert_identical(expected, actual) # make sure squeeze restores the original data set. roundtripped = actual.squeeze("z") assert_identical(original, roundtripped) # another test with a negative axis actual = original.expand_dims(["z"], [-1]) expected = Dataset( { "x": original["x"].expand_dims("z", -1), "y": original["y"].expand_dims("z", -1), }, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) assert_identical(expected, actual) # make sure squeeze restores the original data set. roundtripped = actual.squeeze("z") assert_identical(original, roundtripped) def test_expand_dims_coords(self) -> None: original = Dataset({"x": ("a", np.array([1, 2, 3]))}) expected = Dataset( {"x": (("b", "a"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={"b": [1, 2]} ) actual = original.expand_dims(dict(b=[1, 2])) assert_identical(expected, actual) assert "b" not in original._coord_names def test_expand_dims_existing_scalar_coord(self) -> None: original = Dataset({"x": 1}, {"a": 2}) expected = Dataset({"x": (("a",), [1])}, {"a": [2]}) actual = original.expand_dims("a") assert_identical(expected, actual) def test_isel_expand_dims_roundtrip(self) -> None: original = Dataset({"x": (("a",), [1])}, {"a": [2]}) actual = original.isel(a=0).expand_dims("a") assert_identical(actual, original) def test_expand_dims_mixed_int_and_coords(self) -> None: # Test expanding one dimension to have size > 1 that doesn't have # coordinates, and also expanding another dimension to have size > 1 # that DOES have coordinates. 
original = Dataset( {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))}, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, ) actual = original.expand_dims({"d": 4, "e": ["l", "m", "n"]}) expected = Dataset( { "x": xr.DataArray( original["x"].values * np.ones([4, 3, 3]), coords=dict(d=range(4), e=["l", "m", "n"], a=np.linspace(0, 1, 3)), dims=["d", "e", "a"], ).drop_vars("d"), "y": xr.DataArray( original["y"].values * np.ones([4, 3, 4, 3]), coords=dict( d=range(4), e=["l", "m", "n"], b=np.linspace(0, 1, 4), a=np.linspace(0, 1, 3), ), dims=["d", "e", "b", "a"], ).drop_vars("d"), }, coords={"c": np.linspace(0, 1, 5)}, ) assert_identical(actual, expected) def test_expand_dims_kwargs_python36plus(self) -> None: original = Dataset( {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))}, coords={ "a": np.linspace(0, 1, 3), "b": np.linspace(0, 1, 4), "c": np.linspace(0, 1, 5), }, attrs={"key": "entry"}, ) other_way = original.expand_dims(e=["l", "m", "n"]) other_way_expected = Dataset( { "x": xr.DataArray( original["x"].values * np.ones([3, 3]), coords=dict(e=["l", "m", "n"], a=np.linspace(0, 1, 3)), dims=["e", "a"], ), "y": xr.DataArray( original["y"].values * np.ones([3, 4, 3]), coords=dict( e=["l", "m", "n"], b=np.linspace(0, 1, 4), a=np.linspace(0, 1, 3), ), dims=["e", "b", "a"], ), }, coords={"c": np.linspace(0, 1, 5)}, attrs={"key": "entry"}, ) assert_identical(other_way_expected, other_way) @pytest.mark.parametrize("create_index_for_new_dim_flag", [True, False]) def test_expand_dims_create_index_data_variable( self, create_index_for_new_dim_flag ): # data variables should not gain an index ever ds = Dataset({"x": 0}) if create_index_for_new_dim_flag: with pytest.warns(UserWarning, match="No index created"): expanded = ds.expand_dims( "x", create_index_for_new_dim=create_index_for_new_dim_flag ) else: expanded = ds.expand_dims( "x", create_index_for_new_dim=create_index_for_new_dim_flag ) # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959 expected = Dataset({"x": ("x", [0])}).drop_indexes("x").reset_coords("x") assert_identical(expanded, expected, check_default_indexes=False) assert expanded.indexes == {} def test_expand_dims_create_index_coordinate_variable(self): # coordinate variables should gain an index only if create_index_for_new_dim is True (the default) ds = Dataset(coords={"x": 0}) expanded = ds.expand_dims("x") expected = Dataset({"x": ("x", [0])}) assert_identical(expanded, expected) expanded_no_index = ds.expand_dims("x", create_index_for_new_dim=False) # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959 expected = Dataset(coords={"x": ("x", [0])}).drop_indexes("x") assert_identical(expanded_no_index, expected, check_default_indexes=False) assert expanded_no_index.indexes == {} def test_expand_dims_create_index_from_iterable(self): ds = Dataset(coords={"x": 0}) expanded = ds.expand_dims(x=[0, 1]) expected = Dataset({"x": ("x", [0, 1])}) assert_identical(expanded, expected) expanded_no_index = ds.expand_dims(x=[0, 1], create_index_for_new_dim=False) # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959 expected = Dataset(coords={"x": ("x", [0, 1])}).drop_indexes("x") assert_identical(expanded, expected, check_default_indexes=False) assert expanded_no_index.indexes == {} def test_expand_dims_non_nanosecond_conversion(self) -> None: # Regression test for 
https://github.com/pydata/xarray/issues/7493#issuecomment-1953091000 # todo: test still needed? ds = Dataset().expand_dims({"time": [np.datetime64("2018-01-01", "m")]}) assert ds.time.dtype == np.dtype("datetime64[s]") def test_set_index(self) -> None: expected = create_test_multiindex() mindex = expected["x"].to_index() indexes = [mindex.get_level_values(str(n)) for n in mindex.names] coords = {idx.name: ("x", idx) for idx in indexes} ds = Dataset({}, coords=coords) obj = ds.set_index(x=mindex.names) assert_identical(obj, expected) # ensure pre-existing indexes involved are removed # (level_2 should be a coordinate with no index) ds = create_test_multiindex() coords = {"x": coords["level_1"], "level_2": coords["level_2"]} expected = Dataset({}, coords=coords) obj = ds.set_index(x="level_1") assert_identical(obj, expected) # ensure set_index with no existing index and a single data var given # doesn't return multi-index ds = Dataset(data_vars={"x_var": ("x", [0, 1, 2])}) expected = Dataset(coords={"x": [0, 1, 2]}) assert_identical(ds.set_index(x="x_var"), expected) with pytest.raises(ValueError, match=r"bar variable\(s\) do not exist"): ds.set_index(foo="bar") with pytest.raises(ValueError, match=r"dimension mismatch.*"): ds.set_index(y="x_var") ds = Dataset(coords={"x": 1}) with pytest.raises( ValueError, match=r".*cannot set a PandasIndex.*scalar variable.*" ): ds.set_index(x="x") def test_set_index_deindexed_coords(self) -> None: # test de-indexed coordinates are converted to base variable # https://github.com/pydata/xarray/issues/6969 one = ["a", "a", "b", "b"] two = [1, 2, 1, 2] three = ["c", "c", "d", "d"] four = [3, 4, 3, 4] midx_12 = pd.MultiIndex.from_arrays([one, two], names=["one", "two"]) midx_34 = pd.MultiIndex.from_arrays([three, four], names=["three", "four"]) coords = Coordinates.from_pandas_multiindex(midx_12, "x") coords["three"] = ("x", three) coords["four"] = ("x", four) ds = xr.Dataset(coords=coords) actual = ds.set_index(x=["three", "four"]) coords_expected = Coordinates.from_pandas_multiindex(midx_34, "x") coords_expected["one"] = ("x", one) coords_expected["two"] = ("x", two) expected = xr.Dataset(coords=coords_expected) assert_identical(actual, expected) def test_reset_index(self) -> None: ds = create_test_multiindex() mindex = ds["x"].to_index() indexes = [mindex.get_level_values(str(n)) for n in mindex.names] coords = {idx.name: ("x", idx) for idx in indexes} expected = Dataset({}, coords=coords) obj = ds.reset_index("x") assert_identical(obj, expected, check_default_indexes=False) assert len(obj.xindexes) == 0 ds = Dataset(coords={"y": ("x", [1, 2, 3])}) with pytest.raises(ValueError, match=r".*not coordinates with an index"): ds.reset_index("y") def test_reset_index_keep_attrs(self) -> None: coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True}) ds = Dataset({}, {"coord_1": coord_1}) obj = ds.reset_index("coord_1") assert ds.coord_1.attrs == obj.coord_1.attrs assert len(obj.xindexes) == 0 def test_reset_index_drop_dims(self) -> None: ds = Dataset(coords={"x": [1, 2]}) reset = ds.reset_index("x", drop=True) assert len(reset.dims) == 0 @pytest.mark.parametrize( ["arg", "drop", "dropped", "converted", "renamed"], [ ("foo", False, [], [], {"bar": "x"}), ("foo", True, ["foo"], [], {"bar": "x"}), ("x", False, ["x"], ["foo", "bar"], {}), ("x", True, ["x", "foo", "bar"], [], {}), (["foo", "bar"], False, ["x"], ["foo", "bar"], {}), (["foo", "bar"], True, ["x", "foo", "bar"], [], {}), (["x", "foo"], False, ["x"], ["foo", "bar"], {}), (["foo", "x"], True, 
["x", "foo", "bar"], [], {}), ], ) def test_reset_index_drop_convert( self, arg: str | list[str], drop: bool, dropped: list[str], converted: list[str], renamed: dict[str, str], ) -> None: # regressions https://github.com/pydata/xarray/issues/6946 and # https://github.com/pydata/xarray/issues/6989 # check that multi-index dimension or level coordinates are dropped, converted # from IndexVariable to Variable or renamed to dimension as expected midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("foo", "bar")) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = xr.Dataset(coords=midx_coords) reset = ds.reset_index(arg, drop=drop) for name in dropped: assert name not in reset.variables for name in converted: assert_identical(reset[name].variable, ds[name].variable.to_base_variable()) for old_name, new_name in renamed.items(): assert_identical(ds[old_name].variable, reset[new_name].variable) def test_reorder_levels(self) -> None: ds = create_test_multiindex() mindex = ds["x"].to_index() assert isinstance(mindex, pd.MultiIndex) midx = mindex.reorder_levels(["level_2", "level_1"]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") expected = Dataset({}, coords=midx_coords) # check attrs propagated ds["level_1"].attrs["foo"] = "bar" expected["level_1"].attrs["foo"] = "bar" reindexed = ds.reorder_levels(x=["level_2", "level_1"]) assert_identical(reindexed, expected) ds = Dataset({}, coords={"x": [1, 2]}) with pytest.raises(ValueError, match=r"has no MultiIndex"): ds.reorder_levels(x=["level_1", "level_2"]) def test_set_xindex(self) -> None: ds = Dataset( coords={"foo": ("x", ["a", "a", "b", "b"]), "bar": ("x", [0, 1, 2, 3])} ) actual = ds.set_xindex("foo") expected = ds.set_index(x="foo").rename_vars(x="foo") assert_identical(actual, expected, check_default_indexes=False) actual_mindex = ds.set_xindex(["foo", "bar"]) expected_mindex = ds.set_index(x=["foo", "bar"]) assert_identical(actual_mindex, expected_mindex) class NotAnIndex: ... 
with pytest.raises(TypeError, match=".*not a subclass of xarray.Index"): ds.set_xindex("foo", NotAnIndex) # type: ignore[arg-type] with pytest.raises(ValueError, match="those variables don't exist"): ds.set_xindex("not_a_coordinate", PandasIndex) ds["data_var"] = ("x", [1, 2, 3, 4]) with pytest.raises(ValueError, match="those variables are data variables"): ds.set_xindex("data_var", PandasIndex) ds2 = Dataset(coords={"x": ("x", [0, 1, 2, 3])}) with pytest.raises(ValueError, match="those coordinates already have an index"): ds2.set_xindex("x", PandasIndex) def test_set_xindex_options(self) -> None: ds = Dataset(coords={"foo": ("x", ["a", "a", "b", "b"])}) class IndexWithOptions(Index): def __init__(self, opt): self.opt = opt @classmethod def from_variables(cls, variables, options): return cls(options["opt"]) indexed = ds.set_xindex("foo", IndexWithOptions, opt=1) assert indexed.xindexes["foo"].opt == 1 # type: ignore[attr-defined] def test_stack(self) -> None: ds = Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])}, coords={"x": ("x", [0, 1]), "y": ["a", "b"]}, ) midx_expected = pd.MultiIndex.from_product( [[0, 1], ["a", "b"]], names=["x", "y"] ) midx_coords_expected = Coordinates.from_pandas_multiindex(midx_expected, "z") expected = Dataset( data_vars={"b": ("z", [0, 1, 2, 3])}, coords=midx_coords_expected ) # check attrs propagated ds["x"].attrs["foo"] = "bar" expected["x"].attrs["foo"] = "bar" actual = ds.stack(z=["x", "y"]) assert_identical(expected, actual) assert list(actual.xindexes) == ["z", "x", "y"] actual = ds.stack(z=[...]) assert_identical(expected, actual) # non list dims with ellipsis actual = ds.stack(z=(...,)) assert_identical(expected, actual) # ellipsis with given dim actual = ds.stack(z=[..., "y"]) assert_identical(expected, actual) midx_expected = pd.MultiIndex.from_product( [["a", "b"], [0, 1]], names=["y", "x"] ) midx_coords_expected = Coordinates.from_pandas_multiindex(midx_expected, "z") expected = Dataset( data_vars={"b": ("z", [0, 2, 1, 3])}, coords=midx_coords_expected ) expected["x"].attrs["foo"] = "bar" actual = ds.stack(z=["y", "x"]) assert_identical(expected, actual) assert list(actual.xindexes) == ["z", "y", "x"] @pytest.mark.parametrize( "create_index,expected_keys", [ (True, ["z", "x", "y"]), (False, []), (None, ["z", "x", "y"]), ], ) def test_stack_create_index(self, create_index, expected_keys) -> None: ds = Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])}, coords={"x": ("x", [0, 1]), "y": ["a", "b"]}, ) actual = ds.stack(z=["x", "y"], create_index=create_index) assert list(actual.xindexes) == expected_keys # TODO: benbovy (flexible indexes) - test error multiple indexes found # along dimension + create_index=True def test_stack_multi_index(self) -> None: # multi-index on a dimension to stack is discarded too midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("lvl1", "lvl2")) coords = Coordinates.from_pandas_multiindex(midx, "x") coords["y"] = [0, 1] ds = xr.Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3], [4, 5], [6, 7]])}, coords=coords, ) expected = Dataset( data_vars={"b": ("z", [0, 1, 2, 3, 4, 5, 6, 7])}, coords={ "x": ("z", np.repeat(midx.values, 2)), "lvl1": ("z", np.repeat(midx.get_level_values("lvl1"), 2)), "lvl2": ("z", np.repeat(midx.get_level_values("lvl2"), 2)), "y": ("z", [0, 1, 0, 1] * 2), }, ) actual = ds.stack(z=["x", "y"], create_index=False) assert_identical(expected, actual) assert len(actual.xindexes) == 0 with pytest.raises(ValueError, match=r"cannot create.*wraps a multi-index"): ds.stack(z=["x", 
"y"], create_index=True) def test_stack_non_dim_coords(self) -> None: ds = Dataset( data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])}, coords={"x": ("x", [0, 1]), "y": ["a", "b"]}, ).rename_vars(x="xx") exp_index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["xx", "y"]) exp_coords = Coordinates.from_pandas_multiindex(exp_index, "z") expected = Dataset(data_vars={"b": ("z", [0, 1, 2, 3])}, coords=exp_coords) actual = ds.stack(z=["x", "y"]) assert_identical(expected, actual) assert list(actual.xindexes) == ["z", "xx", "y"] def test_unstack(self) -> None: index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"]) coords = Coordinates.from_pandas_multiindex(index, "z") ds = Dataset(data_vars={"b": ("z", [0, 1, 2, 3])}, coords=coords) expected = Dataset( {"b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"]} ) # check attrs propagated ds["x"].attrs["foo"] = "bar" expected["x"].attrs["foo"] = "bar" for dim in ["z", ["z"], None]: actual = ds.unstack(dim) assert_identical(actual, expected) def test_unstack_errors(self) -> None: ds = Dataset({"x": [1, 2, 3]}) with pytest.raises( ValueError, match=re.escape("Dimensions ('foo',) not found in data dimensions ('x',)"), ): ds.unstack("foo") with pytest.raises(ValueError, match=r".*do not have exactly one multi-index"): ds.unstack("x") ds = Dataset({"da": [1, 2]}, coords={"y": ("x", [1, 1]), "z": ("x", [0, 0])}) ds = ds.set_index(x=("y", "z")) with pytest.raises( ValueError, match="Cannot unstack MultiIndex containing duplicates" ): ds.unstack("x") def test_unstack_fill_value(self) -> None: ds = xr.Dataset( {"var": (("x",), np.arange(6)), "other_var": (("x",), np.arange(3, 9))}, coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)}, ) # make ds incomplete ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"]) # test fill_value actual1 = ds.unstack("index", fill_value=-1) expected1 = ds.unstack("index").fillna(-1).astype(int) assert actual1["var"].dtype == int assert_equal(actual1, expected1) actual2 = ds["var"].unstack("index", fill_value=-1) expected2 = ds["var"].unstack("index").fillna(-1).astype(int) assert_equal(actual2, expected2) actual3 = ds.unstack("index", fill_value={"var": -1, "other_var": 1}) expected3 = ds.unstack("index").fillna({"var": -1, "other_var": 1}).astype(int) assert_equal(actual3, expected3) @requires_sparse def test_unstack_sparse(self) -> None: ds = xr.Dataset( {"var": (("x",), np.arange(6))}, coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)}, ) # make ds incomplete ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"]) # test fill_value actual1 = ds.unstack("index", sparse=True) expected1 = ds.unstack("index") assert isinstance(actual1["var"].data, sparse_array_type) assert actual1["var"].variable._to_dense().equals(expected1["var"].variable) assert actual1["var"].data.density < 1.0 actual2 = ds["var"].unstack("index", sparse=True) expected2 = ds["var"].unstack("index") assert isinstance(actual2.data, sparse_array_type) assert actual2.variable._to_dense().equals(expected2.variable) assert actual2.data.density < 1.0 midx = pd.MultiIndex.from_arrays([np.arange(3), np.arange(3)], names=["a", "b"]) coords = Coordinates.from_pandas_multiindex(midx, "z") coords["foo"] = np.arange(4) coords["bar"] = np.arange(5) ds_eye = Dataset( {"var": (("z", "foo", "bar"), np.ones((3, 4, 5)))}, coords=coords ) actual3 = ds_eye.unstack(sparse=True, fill_value=0) assert isinstance(actual3["var"].data, sparse_array_type) expected3 = xr.Dataset( { "var": ( ("foo", "bar", "a", "b"), 
np.broadcast_to(np.eye(3, 3), (4, 5, 3, 3)), ) }, coords={ "foo": np.arange(4), "bar": np.arange(5), "a": np.arange(3), "b": np.arange(3), }, ) actual3["var"].data = actual3["var"].data.todense() assert_equal(expected3, actual3) def test_stack_unstack_fast(self) -> None: ds = Dataset( { "a": ("x", [0, 1]), "b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"], } ) actual = ds.stack(z=["x", "y"]).unstack("z") assert actual.broadcast_equals(ds) actual = ds[["b"]].stack(z=["x", "y"]).unstack("z") assert actual.identical(ds[["b"]]) def test_stack_unstack_slow(self) -> None: ds = Dataset( data_vars={ "a": ("x", [0, 1]), "b": (("x", "y"), [[0, 1], [2, 3]]), }, coords={"x": [0, 1], "y": ["a", "b"]}, ) stacked = ds.stack(z=["x", "y"]) actual = stacked.isel(z=slice(None, None, -1)).unstack("z") assert actual.broadcast_equals(ds) stacked = ds[["b"]].stack(z=["x", "y"]) actual = stacked.isel(z=slice(None, None, -1)).unstack("z") assert actual.identical(ds[["b"]]) def test_to_stacked_array_invalid_sample_dims(self) -> None: data = xr.Dataset( data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])}, coords={"y": ["u", "v", "w"]}, ) with pytest.raises( ValueError, match=r"Variables in the dataset must contain all ``sample_dims`` \(\['y'\]\) but 'b' misses \['y'\]", ): data.to_stacked_array("features", sample_dims=["y"]) def test_to_stacked_array_name(self) -> None: name = "adf9d" # make a two dimensional dataset a, b = create_test_stacked_array() D = xr.Dataset({"a": a, "b": b}) sample_dims = ["x"] y = D.to_stacked_array("features", sample_dims, name=name) assert y.name == name def test_to_stacked_array_dtype_dims(self) -> None: # make a two dimensional dataset a, b = create_test_stacked_array() D = xr.Dataset({"a": a, "b": b}) sample_dims = ["x"] y = D.to_stacked_array("features", sample_dims) mindex = y.xindexes["features"].to_pandas_index() assert isinstance(mindex, pd.MultiIndex) assert mindex.levels[1].dtype == D.y.dtype assert y.dims == ("x", "features") def test_to_stacked_array_to_unstacked_dataset(self) -> None: # single dimension: regression test for GH4049 arr = xr.DataArray(np.arange(3), coords=[("x", [0, 1, 2])]) data = xr.Dataset({"a": arr, "b": arr}) stacked = data.to_stacked_array("y", sample_dims=["x"]) unstacked = stacked.to_unstacked_dataset("y") assert_identical(unstacked, data) # make a two dimensional dataset a, b = create_test_stacked_array() D = xr.Dataset({"a": a, "b": b}) sample_dims = ["x"] y = D.to_stacked_array("features", sample_dims).transpose("x", "features") x = y.to_unstacked_dataset("features") assert_identical(D, x) # test on just one sample x0 = y[0].to_unstacked_dataset("features") d0 = D.isel(x=0) assert_identical(d0, x0) def test_to_stacked_array_to_unstacked_dataset_different_dimension(self) -> None: # test when variables have different dimensionality a, b = create_test_stacked_array() sample_dims = ["x"] D = xr.Dataset({"a": a, "b": b.isel(y=0)}) y = D.to_stacked_array("features", sample_dims) x = y.to_unstacked_dataset("features") assert_identical(D, x) def test_to_stacked_array_preserves_dtype(self) -> None: # regression test for bug found in https://github.com/pydata/xarray/pull/8872#issuecomment-2081218616 ds = xr.Dataset( data_vars={ "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7]), }, coords={"y": ["u", "v", "w"]}, ) stacked = ds.to_stacked_array("z", sample_dims=["x"]) # coordinate created from variables names should be of string dtype data = np.array(["a", "a", "a", "b"], dtype=" None: # test that 
to_stacked_array uses updated dim order after transposition ds = xr.Dataset( data_vars=dict( v1=(["d1", "d2"], np.arange(6).reshape((2, 3))), ), coords=dict( d1=(["d1"], np.arange(2)), d2=(["d2"], np.arange(3)), ), ) da = ds.to_stacked_array( new_dim="new_dim", sample_dims=[], variable_dim="variable", ) dsT = ds.transpose() daT = dsT.to_stacked_array( new_dim="new_dim", sample_dims=[], variable_dim="variable", ) v1 = np.arange(6) v1T = np.arange(6).reshape((2, 3)).T.flatten() np.testing.assert_equal(da.to_numpy(), v1) np.testing.assert_equal(daT.to_numpy(), v1T) def test_update(self) -> None: data = create_test_data(seed=0) expected = data.copy() var2 = Variable("dim1", np.arange(8)) actual = data actual.update({"var2": var2}) expected["var2"] = var2 assert_identical(expected, actual) actual = data.copy() actual.update(data) assert_identical(expected, actual) other = Dataset(attrs={"new": "attr"}) actual = data.copy() actual.update(other) assert_identical(expected, actual) def test_update_overwrite_coords(self) -> None: data = Dataset({"a": ("x", [1, 2])}, {"b": 3}) data.update(Dataset(coords={"b": 4})) expected = Dataset({"a": ("x", [1, 2])}, {"b": 4}) assert_identical(data, expected) data = Dataset({"a": ("x", [1, 2])}, {"b": 3}) data.update(Dataset({"c": 5}, coords={"b": 4})) expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 4}) assert_identical(data, expected) data = Dataset({"a": ("x", [1, 2])}, {"b": 3}) data.update({"c": DataArray(5, coords={"b": 4})}) expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 3}) assert_identical(data, expected) def test_update_multiindex_level(self) -> None: data = create_test_multiindex() with pytest.raises( ValueError, match=r"cannot set or update variable.*corrupt.*index " ): data.update({"level_1": range(4)}) def test_update_auto_align(self) -> None: ds = Dataset({"x": ("t", [3, 4])}, {"t": [0, 1]}) expected1 = Dataset( {"x": ("t", [3, 4]), "y": ("t", [np.nan, 5])}, {"t": [0, 1]} ) actual1 = ds.copy() other1 = {"y": ("t", [5]), "t": [1]} with pytest.raises(ValueError, match=r"conflicting sizes"): actual1.update(other1) actual1.update(Dataset(other1)) assert_identical(expected1, actual1) actual2 = ds.copy() other2 = Dataset({"y": ("t", [5]), "t": [100]}) actual2.update(other2) expected2 = Dataset( {"x": ("t", [3, 4]), "y": ("t", [np.nan] * 2)}, {"t": [0, 1]} ) assert_identical(expected2, actual2) def test_getitem(self) -> None: data = create_test_data() assert isinstance(data["var1"], DataArray) assert_equal(data["var1"].variable, data.variables["var1"]) with pytest.raises(KeyError): data["notfound"] with pytest.raises(KeyError): data[["var1", "notfound"]] with pytest.raises( KeyError, match=r"Hint: use a list to select multiple variables, for example `ds\[\['var1', 'var2'\]\]`", ): data["var1", "var2"] actual1 = data[["var1", "var2"]] expected1 = Dataset({"var1": data["var1"], "var2": data["var2"]}) assert_equal(expected1, actual1) actual2 = data["numbers"] expected2 = DataArray( data["numbers"].variable, {"dim3": data["dim3"], "numbers": data["numbers"]}, dims="dim3", name="numbers", ) assert_identical(expected2, actual2) actual3 = data[dict(dim1=0)] expected3 = data.isel(dim1=0) assert_identical(expected3, actual3) def test_getitem_hashable(self) -> None: data = create_test_data() data[(3, 4)] = data["var1"] + 1 expected = data["var1"] + 1 expected.name = (3, 4) assert_identical(expected, data[(3, 4)]) with pytest.raises(KeyError, match=r"('var1', 'var2')"): data[("var1", "var2")] def test_getitem_multiple_dtype(self) -> None: keys = 
["foo", 1] dataset = Dataset({key: ("dim0", range(1)) for key in keys}) assert_identical(dataset, dataset[keys]) def test_getitem_extra_dim_index_coord(self) -> None: class AnyIndex(Index): def should_add_coord_to_array(self, name, var, dims): return True idx = AnyIndex() coords = Coordinates( coords={ "x": ("x", [1, 2]), "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]), }, indexes={"x": idx, "x_bounds": idx}, ) ds = Dataset({"foo": (("x"), [1.0, 2.0])}, coords=coords) actual = ds["foo"] assert_identical(actual.coords, coords, check_default_indexes=False) assert "x_bnds" not in actual.dims def test_virtual_variables_default_coords(self) -> None: dataset = Dataset({"foo": ("x", range(10))}) expected1 = DataArray(range(10), dims="x", name="x") actual1 = dataset["x"] assert_identical(expected1, actual1) assert isinstance(actual1.variable, IndexVariable) actual2 = dataset[["x", "foo"]] expected2 = dataset.assign_coords(x=range(10)) assert_identical(expected2, actual2) def test_virtual_variables_time(self) -> None: # access virtual variables data = create_test_data() index = data.variables["time"].to_index() assert isinstance(index, pd.DatetimeIndex) assert_array_equal(data["time.month"].values, index.month) assert_array_equal(data["time.season"].values, "DJF") # test virtual variable math assert_array_equal(data["time.dayofyear"] + 1, 2 + np.arange(20)) assert_array_equal(np.sin(data["time.dayofyear"]), np.sin(1 + np.arange(20))) # ensure they become coordinates expected = Dataset({}, {"dayofyear": data["time.dayofyear"]}) actual = data[["time.dayofyear"]] assert_equal(expected, actual) # non-coordinate variables ds = Dataset({"t": ("x", pd.date_range("2000-01-01", periods=3))}) assert (ds["t.year"] == 2000).all() def test_virtual_variable_same_name(self) -> None: # regression test for GH367 times = pd.date_range("2000-01-01", freq="h", periods=5) data = Dataset({"time": times}) actual = data["time.time"] expected = DataArray(times.time, [("time", times)], name="time") assert_identical(actual, expected) def test_time_season(self) -> None: time = xr.date_range("2000-01-01", periods=12, freq="ME", use_cftime=False) ds = Dataset({"t": time}) seas = ["DJF"] * 2 + ["MAM"] * 3 + ["JJA"] * 3 + ["SON"] * 3 + ["DJF"] assert_array_equal(seas, ds["t.season"]) def test_slice_virtual_variable(self) -> None: data = create_test_data() assert_equal( data["time.dayofyear"][:10].variable, Variable(["time"], 1 + np.arange(10)) ) assert_equal(data["time.dayofyear"][0].variable, Variable([], 1)) def test_setitem(self) -> None: # assign a variable var = Variable(["dim1"], np.random.randn(8)) data1 = create_test_data() data1["A"] = var data2 = data1.copy() data2["A"] = var assert_identical(data1, data2) # assign a dataset array dv = 2 * data2["A"] data1["B"] = dv.variable data2["B"] = dv assert_identical(data1, data2) # can't assign an ND array without dimensions with pytest.raises(ValueError, match=r"without explicit dimension names"): data2["C"] = var.values.reshape(2, 4) # but can assign a 1D array data1["C"] = var.values data2["C"] = ("C", var.values) assert_identical(data1, data2) # can assign a scalar data1["scalar"] = 0 data2["scalar"] = ([], 0) assert_identical(data1, data2) # can't use the same dimension name as a scalar var with pytest.raises(ValueError, match=r"already exists as a scalar"): data1["newvar"] = ("scalar", [3, 4, 5]) # can't resize a used dimension with pytest.raises(ValueError, match=r"conflicting dimension sizes"): data1["dim1"] = data1["dim1"][:5] # override an existing value 
data1["A"] = 3 * data2["A"] assert_equal(data1["A"], 3 * data2["A"]) # can't assign a dataset to a single key with pytest.raises(TypeError, match="Cannot assign a Dataset to a single key"): data1["D"] = xr.Dataset() # test assignment with positional and label-based indexing data3 = data1[["var1", "var2"]] data3["var3"] = data3.var1.isel(dim1=0) data4 = data3.copy() err_msg = ( "can only set locations defined by dictionaries from Dataset.loc. Got: a" ) with pytest.raises(TypeError, match=err_msg): data1.loc["a"] = 0 err_msg = r"Variables \['A', 'B', 'scalar'\] in new values not available in original dataset:" with pytest.raises(ValueError, match=err_msg): data4[{"dim2": 1}] = data1[{"dim2": 2}] err_msg = "Variable 'var3': indexer {'dim2': 0} not available" with pytest.raises(ValueError, match=err_msg): data1[{"dim2": 0}] = 0.0 err_msg = "Variable 'var1': indexer {'dim2': 10} not available" with pytest.raises(ValueError, match=err_msg): data4[{"dim2": 10}] = data3[{"dim2": 2}] err_msg = "Variable 'var1': dimension 'dim2' appears in new values" with pytest.raises(KeyError, match=err_msg): data4[{"dim2": 2}] = data3[{"dim2": [2]}] err_msg = ( "Variable 'var2': dimension order differs between original and new data" ) data3["var2"] = data3["var2"].T with pytest.raises(ValueError, match=err_msg): data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3]}] data3["var2"] = data3["var2"].T err_msg = r"cannot align objects.*not equal along these coordinates.*" with pytest.raises(ValueError, match=err_msg): data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3, 4]}] err_msg = "Dataset assignment only accepts DataArrays, Datasets, and scalars." with pytest.raises(TypeError, match=err_msg): data4[{"dim2": [2, 3]}] = data3["var1"][{"dim2": [3, 4]}].values data5 = data4.astype(str) data5["var4"] = data4["var1"] # convert to `np.str_('a')` once `numpy<2.0` has been dropped err_msg = "could not convert string to float: .*'a'.*" with pytest.raises(ValueError, match=err_msg): data5[{"dim2": 1}] = "a" data4[{"dim2": 0}] = 0.0 data4[{"dim2": 1}] = data3[{"dim2": 2}] data4.loc[{"dim2": 1.5}] = 1.0 data4.loc[{"dim2": 2.0}] = data3.loc[{"dim2": 2.5}] for v, dat3 in data3.items(): dat4 = data4[v] assert_array_equal(dat4[{"dim2": 0}], 0.0) assert_array_equal(dat4[{"dim2": 1}], dat3[{"dim2": 2}]) assert_array_equal(dat4.loc[{"dim2": 1.5}], 1.0) assert_array_equal(dat4.loc[{"dim2": 2.0}], dat3.loc[{"dim2": 2.5}]) unchanged = [1.0, 2.5, 3.0, 3.5, 4.0] assert_identical( dat4.loc[{"dim2": unchanged}], dat3.loc[{"dim2": unchanged}] ) def test_setitem_pandas(self) -> None: ds = self.make_example_math_dataset() ds["x"] = np.arange(3) ds_copy = ds.copy() ds_copy["bar"] = ds["bar"].to_pandas() assert_equal(ds, ds_copy) def test_setitem_auto_align(self) -> None: ds = Dataset() ds["x"] = ("y", range(3)) ds["y"] = 1 + np.arange(3) expected = Dataset({"x": ("y", range(3)), "y": 1 + np.arange(3)}) assert_identical(ds, expected) ds["y"] = DataArray(range(3), dims="y") expected = Dataset({"x": ("y", range(3))}, {"y": range(3)}) assert_identical(ds, expected) ds["x"] = DataArray([1, 2], coords=[("y", [0, 1])]) expected = Dataset({"x": ("y", [1, 2, np.nan])}, {"y": range(3)}) assert_identical(ds, expected) ds["x"] = 42 expected = Dataset({"x": 42, "y": range(3)}) assert_identical(ds, expected) ds["x"] = DataArray([4, 5, 6, 7], coords=[("y", [0, 1, 2, 3])]) expected = Dataset({"x": ("y", [4, 5, 6])}, {"y": range(3)}) assert_identical(ds, expected) def test_setitem_dimension_override(self) -> None: # regression test for GH-3377 ds = xr.Dataset({"x": [0, 
1, 2]})
        ds["x"] = ds["x"][:2]
        expected = Dataset({"x": [0, 1]})
        assert_identical(ds, expected)

        ds = xr.Dataset({"x": [0, 1, 2]})
        ds["x"] = np.array([0, 1])
        assert_identical(ds, expected)

        ds = xr.Dataset({"x": [0, 1, 2]})
        ds.coords["x"] = [0, 1]
        assert_identical(ds, expected)

    def test_setitem_with_coords(self) -> None:
        # Regression test for GH:2068
        ds = create_test_data()

        other = DataArray(
            np.arange(10), dims="dim3", coords={"numbers": ("dim3", np.arange(10))}
        )
        expected = ds.copy()
        expected["var3"] = other.drop_vars("numbers")
        actual = ds.copy()
        actual["var3"] = other
        assert_identical(expected, actual)
        assert "numbers" in other.coords  # should not change other

        # with alignment
        other = ds["var3"].isel(dim3=slice(1, -1))
        other["numbers"] = ("dim3", np.arange(8))
        actual = ds.copy()
        actual["var3"] = other
        assert "numbers" in other.coords  # should not change other
        expected = ds.copy()
        expected["var3"] = ds["var3"].isel(dim3=slice(1, -1))
        assert_identical(expected, actual)

        # with non-duplicate coords
        other = ds["var3"].isel(dim3=slice(1, -1))
        other["numbers"] = ("dim3", np.arange(8))
        other["position"] = ("dim3", np.arange(8))
        actual = ds.copy()
        actual["var3"] = other
        assert "position" in actual
        assert "position" in other.coords

        # assigning a coordinate-only dataarray
        actual = ds.copy()
        other = actual["numbers"]
        other[0] = 10
        actual["numbers"] = other
        assert actual["numbers"][0] == 10

        # GH: 2099
        ds = Dataset(
            {"var": ("x", [1, 2, 3])},
            coords={"x": [0, 1, 2], "z1": ("x", [1, 2, 3]), "z2": ("x", [1, 2, 3])},
        )
        ds["var"] = ds["var"] * 2
        assert np.allclose(ds["var"], [2, 4, 6])

    def test_setitem_align_new_indexes(self) -> None:
        ds = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
        ds["bar"] = DataArray([2, 3, 4], [("x", [1, 2, 3])])
        expected = Dataset(
            {"foo": ("x", [1, 2, 3]), "bar": ("x", [np.nan, 2, 3])}, {"x": [0, 1, 2]}
        )
        assert_identical(ds, expected)

    def test_setitem_vectorized(self) -> None:
        # Regression test for GH:7030
        # Positional indexing
        da = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"])
        ds = xr.Dataset({"da": da})
        b = xr.DataArray([[0, 0], [1, 0]], dims=["u", "v"])
        c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"])
        w = xr.DataArray([-1, -2], dims=["u"])
        index = dict(b=b, c=c)
        ds[index] = xr.Dataset({"da": w})
        assert (ds[index]["da"] == w).all()

        # Indexing with coordinates
        da = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"])
        ds = xr.Dataset({"da": da})
        ds.coords["b"] = [2, 4, 6]
        b = xr.DataArray([[2, 2], [4, 2]], dims=["u", "v"])
        c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"])
        w = xr.DataArray([-1, -2], dims=["u"])
        index = dict(b=b, c=c)
        ds.loc[index] = xr.Dataset({"da": w}, coords={"b": ds.coords["b"]})
        assert (ds.loc[index]["da"] == w).all()

    @pytest.mark.parametrize("dtype", [str, bytes])
    def test_setitem_str_dtype(self, dtype) -> None:
        ds = xr.Dataset(coords={"x": np.array(["x", "y"], dtype=dtype)})
        # test Dataset update
        ds["foo"] = xr.DataArray(np.array([0, 0]), dims=["x"])

        assert np.issubdtype(ds.x.dtype, dtype)

    def test_setitem_using_list(self) -> None:
        # assign a list of variables
        var1 = Variable(["dim1"], np.random.randn(8))
        var2 = Variable(["dim1"], np.random.randn(8))
        actual = create_test_data()
        expected = actual.copy()
        expected["A"] = var1
        expected["B"] = var2
        actual[["A", "B"]] = [var1, var2]
        assert_identical(actual, expected)
        # assign a list of dataset arrays
        dv = 2 * expected[["A", "B"]]
        actual[["C", "D"]] = [d.variable for d in dv.data_vars.values()]
        expected[["C", "D"]] = dv
        assert_identical(actual, expected)
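
    # Illustrative sketch (added here for exposition, not part of the upstream
    # suite; the method name is hypothetical): list-style assignment with
    # ``ds[["b", "c"]] = [v1, v2]`` is equivalent to assigning each variable
    # individually.  Only the public Dataset API exercised above is assumed.
    def test_setitem_using_list_equivalence_sketch(self) -> None:
        ds_list = Dataset({"a": ("x", [1.0, 2.0])})
        ds_single = ds_list.copy()
        v1 = Variable(["x"], [10.0, 20.0])
        v2 = Variable(["x"], [30.0, 40.0])
        # assign both variables at once ...
        ds_list[["b", "c"]] = [v1, v2]
        # ... and one at a time; the results should be identical
        ds_single["b"] = v1
        ds_single["c"] = v2
        assert_identical(ds_list, ds_single)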
@pytest.mark.parametrize( "var_list, data, error_regex", [ ( ["A", "B"], [Variable(["dim1"], np.random.randn(8))], r"Different lengths", ), ([], [Variable(["dim1"], np.random.randn(8))], r"Empty list of variables"), (["A", "B"], xr.DataArray([1, 2]), r"assign single DataArray"), ], ) def test_setitem_using_list_errors(self, var_list, data, error_regex) -> None: actual = create_test_data() with pytest.raises(ValueError, match=error_regex): actual[var_list] = data def test_assign(self) -> None: ds = Dataset() actual = ds.assign(x=[0, 1, 2], y=2) expected = Dataset({"x": [0, 1, 2], "y": 2}) assert_identical(actual, expected) assert list(actual.variables) == ["x", "y"] assert_identical(ds, Dataset()) actual = actual.assign(y=lambda ds: ds.x**2) expected = Dataset({"y": ("x", [0, 1, 4]), "x": [0, 1, 2]}) assert_identical(actual, expected) actual = actual.assign_coords(z=2) expected = Dataset({"y": ("x", [0, 1, 4])}, {"z": 2, "x": [0, 1, 2]}) assert_identical(actual, expected) def test_assign_coords(self) -> None: ds = Dataset() actual = ds.assign(x=[0, 1, 2], y=2) actual = actual.assign_coords(x=list("abc")) expected = Dataset({"x": list("abc"), "y": 2}) assert_identical(actual, expected) actual = ds.assign(x=[0, 1, 2], y=[2, 3]) actual = actual.assign_coords({"y": [2.0, 3.0]}) expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0]) assert_identical(actual, expected) def test_assign_attrs(self) -> None: expected = Dataset(attrs=dict(a=1, b=2)) new = Dataset() actual = new.assign_attrs(a=1, b=2) assert_identical(actual, expected) assert new.attrs == {} expected.attrs["c"] = 3 new_actual = actual.assign_attrs({"c": 3}) assert_identical(new_actual, expected) assert actual.attrs == dict(a=1, b=2) def test_drop_attrs(self) -> None: # Simple example ds = Dataset().assign_attrs(a=1, b=2) original = ds.copy() expected = Dataset() result = ds.drop_attrs() assert_identical(result, expected) # Doesn't change original assert_identical(ds, original) # Example with variables and coords with attrs, and a multiindex. (arguably # should have used a canonical dataset with all the features we're should # support...) var = Variable("x", [1, 2, 3], attrs=dict(x=1, y=2)) idx = IndexVariable("y", [1, 2, 3], attrs=dict(c=1, d=2)) mx = xr.Coordinates.from_pandas_multiindex( pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["d", "e"]), "z" ) ds = Dataset(dict(var1=var), coords=dict(y=idx, z=mx)).assign_attrs(a=1, b=2) assert ds.attrs != {} assert ds["var1"].attrs != {} assert ds["y"].attrs != {} assert ds.coords["y"].attrs != {} original = ds.copy(deep=True) result = ds.drop_attrs() assert result.attrs == {} assert result["var1"].attrs == {} assert result["y"].attrs == {} assert list(result.data_vars) == list(ds.data_vars) assert list(result.coords) == list(ds.coords) # Doesn't change original assert_identical(ds, original) # Specifically test that the attrs on the coords are still there. (The index # can't currently contain `attrs`, so we can't test those.) 
assert ds.coords["y"].attrs != {} # Test for deep=False result_shallow = ds.drop_attrs(deep=False) assert result_shallow.attrs == {} assert result_shallow["var1"].attrs != {} assert result_shallow["y"].attrs != {} assert list(result.data_vars) == list(ds.data_vars) assert list(result.coords) == list(ds.coords) def test_assign_multiindex_level(self) -> None: data = create_test_multiindex() with pytest.raises(ValueError, match=r"cannot drop or update.*corrupt.*index "): data.assign(level_1=range(4)) data.assign_coords(level_1=range(4)) def test_assign_new_multiindex(self) -> None: midx = pd.MultiIndex.from_arrays([["a", "a", "b", "b"], [0, 1, 0, 1]]) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") ds = Dataset(coords={"x": [1, 2]}) expected = Dataset(coords=midx_coords) with pytest.warns( FutureWarning, match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): actual = ds.assign(x=midx) assert_identical(actual, expected) @pytest.mark.parametrize("orig_coords", [{}, {"x": range(4)}]) def test_assign_coords_new_multiindex(self, orig_coords) -> None: ds = Dataset(coords=orig_coords) midx = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], [0, 1, 0, 1]], names=("one", "two") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") expected = Dataset(coords=midx_coords) with pytest.warns( FutureWarning, match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*", ): actual = ds.assign_coords({"x": midx}) assert_identical(actual, expected) actual = ds.assign_coords(midx_coords) assert_identical(actual, expected) def test_assign_coords_existing_multiindex(self) -> None: data = create_test_multiindex() with pytest.warns( FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent" ): updated = data.assign_coords(x=range(4)) # https://github.com/pydata/xarray/issues/7097 (coord names updated) assert len(updated.coords) == 1 with pytest.warns( FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent" ): updated = data.assign(x=range(4)) # https://github.com/pydata/xarray/issues/7097 (coord names updated) assert len(updated.coords) == 1 def test_assign_all_multiindex_coords(self) -> None: data = create_test_multiindex() actual = data.assign(x=range(4), level_1=range(4), level_2=range(4)) # no error but multi-index dropped in favor of single indexes for each level assert ( actual.xindexes["x"] is not actual.xindexes["level_1"] is not actual.xindexes["level_2"] ) def test_assign_coords_custom_index_side_effect(self) -> None: # test that assigning new coordinates do not reset other dimension coord indexes # to default (pandas) index (https://github.com/pydata/xarray/issues/7346) class CustomIndex(PandasIndex): pass ds = ( Dataset(coords={"x": [1, 2, 3]}) .drop_indexes("x") .set_xindex("x", CustomIndex) ) actual = ds.assign_coords(y=[4, 5, 6]) assert isinstance(actual.xindexes["x"], CustomIndex) def test_assign_coords_custom_index(self) -> None: class CustomIndex(Index): pass coords = Coordinates( coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()} ) ds = Dataset() actual = ds.assign_coords(coords) assert isinstance(actual.xindexes["x"], CustomIndex) def test_assign_coords_no_default_index(self) -> None: coords = Coordinates({"y": [1, 2, 3]}, indexes={}) ds = Dataset() actual = ds.assign_coords(coords) expected = coords.to_dataset() assert_identical(expected, actual, check_default_indexes=False) assert "y" not in actual.xindexes def test_merge_multiindex_level(self) -> None: data = create_test_multiindex() other = Dataset({"level_1": ("x", 
[0, 1])}) with pytest.raises(ValueError, match=r".*conflicting dimension sizes.*"): data.merge(other) other = Dataset({"level_1": ("x", range(4))}) with pytest.raises( ValueError, match=r"unable to determine.*coordinates or not.*" ): data.merge(other) # `other` Dataset coordinates are ignored (bug or feature?) other = Dataset(coords={"level_1": ("x", range(4))}) assert_identical(data.merge(other), data) def test_setitem_original_non_unique_index(self) -> None: # regression test for GH943 original = Dataset({"data": ("x", np.arange(5))}, coords={"x": [0, 1, 2, 0, 1]}) expected = Dataset({"data": ("x", np.arange(5))}, {"x": range(5)}) actual = original.copy() actual["x"] = list(range(5)) assert_identical(actual, expected) actual = original.copy() actual["x"] = ("x", list(range(5))) assert_identical(actual, expected) actual = original.copy() actual.coords["x"] = list(range(5)) assert_identical(actual, expected) def test_setitem_both_non_unique_index(self) -> None: # regression test for GH956 names = ["joaquin", "manolo", "joaquin"] values = np.random.randint(0, 256, (3, 4, 4)) array = DataArray( values, dims=["name", "row", "column"], coords=[names, range(4), range(4)] ) expected = Dataset({"first": array, "second": array}) actual = array.rename("first").to_dataset() actual["second"] = array assert_identical(expected, actual) def test_setitem_multiindex_level(self) -> None: data = create_test_multiindex() with pytest.raises( ValueError, match=r"cannot set or update variable.*corrupt.*index " ): data["level_1"] = range(4) def test_delitem(self) -> None: data = create_test_data() all_items = set(data.variables) assert set(data.variables) == all_items del data["var1"] assert set(data.variables) == all_items - {"var1"} del data["numbers"] assert set(data.variables) == all_items - {"var1", "numbers"} assert "numbers" not in data.coords expected = Dataset() actual = Dataset({"y": ("x", [1, 2])}) del actual["y"] assert_identical(expected, actual) def test_delitem_multiindex_level(self) -> None: data = create_test_multiindex() with pytest.raises( ValueError, match=r"cannot remove coordinate.*corrupt.*index " ): del data["level_1"] def test_squeeze(self) -> None: data = Dataset({"foo": (["x", "y", "z"], [[[1], [2]]])}) test_args: list[list] = [[], [["x"]], [["x", "z"]]] for args in test_args: def get_args(args, v): return [set(args[0]) & set(v.dims)] if args else [] expected = Dataset( {k: v.squeeze(*get_args(args, v)) for k, v in data.variables.items()} ) expected = expected.set_coords(data.coords) assert_identical(expected, data.squeeze(*args)) # invalid squeeze with pytest.raises(ValueError, match=r"cannot select a dimension"): data.squeeze("y") def test_squeeze_drop(self) -> None: data = Dataset({"foo": ("x", [1])}, {"x": [0]}) expected = Dataset({"foo": 1}) selected = data.squeeze(drop=True) assert_identical(expected, selected) expected = Dataset({"foo": 1}, {"x": 0}) selected = data.squeeze(drop=False) assert_identical(expected, selected) data = Dataset({"foo": (("x", "y"), [[1]])}, {"x": [0], "y": [0]}) expected = Dataset({"foo": 1}) selected = data.squeeze(drop=True) assert_identical(expected, selected) expected = Dataset({"foo": ("x", [1])}, {"x": [0]}) selected = data.squeeze(dim="y", drop=True) assert_identical(expected, selected) data = Dataset({"foo": (("x",), [])}, {"x": []}) selected = data.squeeze(drop=True) assert_identical(data, selected) def test_to_dataarray(self) -> None: ds = Dataset( {"a": 1, "b": ("x", [1, 2, 3])}, coords={"c": 42}, attrs={"Conventions": "None"}, ) data = [[1, 
1, 1], [1, 2, 3]] coords = {"c": 42, "variable": ["a", "b"]} dims = ("variable", "x") expected = DataArray(data, coords, dims, attrs=ds.attrs) actual = ds.to_dataarray() assert_identical(expected, actual) actual = ds.to_dataarray("abc", name="foo") expected = expected.rename({"variable": "abc"}).rename("foo") assert_identical(expected, actual) def test_to_and_from_dataframe(self) -> None: x = np.random.randn(10) y = np.random.randn(10) t = list("abcdefghij") cat = pd.Categorical(["a", "b"] * 5) ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t), "cat": ("t", cat)}) expected = pd.DataFrame( np.array([x, y]).T, columns=["a", "b"], index=pd.Index(t, name="t") ) expected["cat"] = cat actual = ds.to_dataframe() # use the .equals method to check all DataFrame metadata assert expected.equals(actual), (expected, actual) # verify coords are included actual = ds.set_coords("b").to_dataframe() assert expected.equals(actual), (expected, actual) # check roundtrip assert_identical(ds, Dataset.from_dataframe(actual)) assert isinstance(ds["cat"].variable.data.dtype, pd.CategoricalDtype) # test a case with a MultiIndex w = np.random.randn(2, 3) cat = pd.Categorical(["a", "a", "c"]) ds = Dataset({"w": (("x", "y"), w), "cat": ("y", cat)}) ds["y"] = ("y", list("abc")) exp_index = pd.MultiIndex.from_arrays( [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"] ) expected = pd.DataFrame( {"w": w.reshape(-1), "cat": pd.Categorical(["a", "a", "c", "a", "a", "c"])}, index=exp_index, ) actual = ds.to_dataframe() assert expected.equals(actual) # check roundtrip # from_dataframe attempts to broadcast across because it doesn't know better, so cat must be converted ds["cat"] = (("x", "y"), np.stack((ds["cat"].to_numpy(), ds["cat"].to_numpy()))) assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual)) # Check multiindex reordering new_order = ["x", "y"] # revert broadcasting fix above for 1d arrays ds["cat"] = ("y", cat) actual = ds.to_dataframe(dim_order=new_order) assert expected.equals(actual) new_order = ["y", "x"] exp_index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b", "c", "c"], [0, 1, 0, 1, 0, 1]], names=["y", "x"] ) expected = pd.DataFrame( { "w": w.transpose().reshape(-1), "cat": pd.Categorical(["a", "a", "a", "a", "c", "c"]), }, index=exp_index, ) actual = ds.to_dataframe(dim_order=new_order) assert expected.equals(actual) invalid_order = ["x"] with pytest.raises( ValueError, match="does not match the set of dimensions of this" ): ds.to_dataframe(dim_order=invalid_order) invalid_order = ["x", "z"] with pytest.raises( ValueError, match="does not match the set of dimensions of this" ): ds.to_dataframe(dim_order=invalid_order) # check pathological cases df = pd.DataFrame([1]) actual_ds = Dataset.from_dataframe(df) expected_ds = Dataset({0: ("index", [1])}, {"index": [0]}) assert_identical(expected_ds, actual_ds) df = pd.DataFrame() actual_ds = Dataset.from_dataframe(df) expected_ds = Dataset(coords={"index": []}) assert_identical(expected_ds, actual_ds) # GH697 df = pd.DataFrame({"A": []}) actual_ds = Dataset.from_dataframe(df) expected_ds = Dataset({"A": DataArray([], dims=("index",))}, {"index": []}) assert_identical(expected_ds, actual_ds) # regression test for GH278 # use int64 to ensure consistent results for the pandas .equals method # on windows (which requires the same dtype) ds = Dataset({"x": pd.Index(["bar"]), "a": ("y", np.array([1], "int64"))}).isel( x=0 ) # use .loc to ensure consistent results on Python 3 actual = ds.to_dataframe().loc[:, ["a", "x"]] 
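        # after selecting the scalar "x", the remaining "y" dimension becomes the
        # DataFrame index and the scalar coordinate is broadcast into a column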
expected = pd.DataFrame( [[1, "bar"]], index=pd.Index([0], name="y"), columns=["a", "x"] ) assert expected.equals(actual), (expected, actual) ds = Dataset({"x": np.array([0], "int64"), "y": np.array([1], "int64")}) actual = ds.to_dataframe() idx = pd.MultiIndex.from_arrays([[0], [1]], names=["x", "y"]) expected = pd.DataFrame([[]], index=idx) assert expected.equals(actual), (expected, actual) def test_from_dataframe_categorical_dtype_index(self) -> None: cat = pd.CategoricalIndex(list("abcd")) df = pd.DataFrame({"f": [0, 1, 2, 3]}, index=cat) ds = df.to_xarray() restored = ds.to_dataframe() df.index.name = ( "index" # restored gets the name because it has the coord with the name ) pd.testing.assert_frame_equal(df, restored) def test_from_dataframe_categorical_index(self) -> None: cat = pd.CategoricalDtype( categories=["foo", "bar", "baz", "qux", "quux", "corge"] ) i1 = pd.Series(["foo", "bar", "foo"], dtype=cat) i2 = pd.Series(["bar", "bar", "baz"], dtype=cat) df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2, 3]}) ds = df.set_index("i1").to_xarray() assert len(ds["i1"]) == 3 ds = df.set_index(["i1", "i2"]).to_xarray() assert len(ds["i1"]) == 2 assert len(ds["i2"]) == 2 def test_from_dataframe_categorical_index_string_categories(self) -> None: cat = pd.CategoricalIndex( pd.Categorical.from_codes( np.array([1, 1, 0, 2], dtype=np.int64), # type: ignore[arg-type] categories=pd.Index(["foo", "bar", "baz"], dtype="string"), ) ) ser = pd.Series(1, index=cat) ds = ser.to_xarray() assert ds.coords.dtypes["index"] == ser.index.dtype @requires_sparse def test_from_dataframe_sparse(self) -> None: import sparse df_base = pd.DataFrame( {"x": range(10), "y": list("abcdefghij"), "z": np.arange(0, 100, 10)} ) ds_sparse = Dataset.from_dataframe(df_base.set_index("x"), sparse=True) ds_dense = Dataset.from_dataframe(df_base.set_index("x"), sparse=False) assert isinstance(ds_sparse["y"].data, sparse.COO) assert isinstance(ds_sparse["z"].data, sparse.COO) ds_sparse["y"].data = ds_sparse["y"].data.todense() ds_sparse["z"].data = ds_sparse["z"].data.todense() assert_identical(ds_dense, ds_sparse) ds_sparse = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=True) ds_dense = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=False) assert isinstance(ds_sparse["z"].data, sparse.COO) ds_sparse["z"].data = ds_sparse["z"].data.todense() assert_identical(ds_dense, ds_sparse) def test_to_and_from_empty_dataframe(self) -> None: # GH697 expected = pd.DataFrame({"foo": []}) ds = Dataset.from_dataframe(expected) assert len(ds["foo"]) == 0 actual = ds.to_dataframe() assert len(actual) == 0 assert expected.equals(actual) def test_from_dataframe_multiindex(self) -> None: index = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]], names=["x", "y"]) df = pd.DataFrame({"z": np.arange(6)}, index=index) expected = Dataset( {"z": (("x", "y"), [[0, 1, 2], [3, 4, 5]])}, coords={"x": ["a", "b"], "y": [1, 2, 3]}, ) actual = Dataset.from_dataframe(df) assert_identical(actual, expected) df2 = df.iloc[[3, 2, 1, 0, 4, 5], :] actual = Dataset.from_dataframe(df2) assert_identical(actual, expected) df3 = df.iloc[:4, :] expected3 = Dataset( {"z": (("x", "y"), [[0, 1, 2], [3, np.nan, np.nan]])}, coords={"x": ["a", "b"], "y": [1, 2, 3]}, ) actual = Dataset.from_dataframe(df3) assert_identical(actual, expected3) df_nonunique = df.iloc[[0, 0], :] with pytest.raises(ValueError, match=r"non-unique MultiIndex"): Dataset.from_dataframe(df_nonunique) def test_from_dataframe_unsorted_levels(self) -> None: # regression test for 
GH-4186 index = pd.MultiIndex( levels=[["b", "a"], ["foo"]], codes=[[0, 1], [0, 0]], names=["lev1", "lev2"] ) df = pd.DataFrame({"c1": [0, 2], "c2": [1, 3]}, index=index) expected = Dataset( { "c1": (("lev1", "lev2"), [[0], [2]]), "c2": (("lev1", "lev2"), [[1], [3]]), }, coords={"lev1": ["b", "a"], "lev2": ["foo"]}, ) actual = Dataset.from_dataframe(df) assert_identical(actual, expected) def test_from_dataframe_non_unique_columns(self) -> None: # regression test for GH449 df = pd.DataFrame(np.zeros((2, 2))) df.columns = ["foo", "foo"] # type: ignore[assignment,unused-ignore] with pytest.raises(ValueError, match=r"non-unique columns"): Dataset.from_dataframe(df) def test_convert_dataframe_with_many_types_and_multiindex(self) -> None: # regression test for GH737 df = pd.DataFrame( { "a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("u1"), "d": np.arange(4.0, 7.0, dtype="float64"), "e": [True, False, True], "f": pd.Categorical(list("abc")), "g": pd.date_range("20130101", periods=3), "h": pd.date_range("20130101", periods=3, tz="America/New_York"), } ) df.index = pd.MultiIndex.from_product([["a"], range(3)], names=["one", "two"]) roundtripped = Dataset.from_dataframe(df).to_dataframe() # we can't do perfectly, but we should be at least as faithful as # np.asarray expected = df.apply(np.asarray) assert roundtripped.equals(expected) @pytest.mark.parametrize("encoding", [True, False]) @pytest.mark.parametrize("data", [True, "list", "array"]) def test_to_and_from_dict( self, encoding: bool, data: bool | Literal["list", "array"] ) -> None: # # Dimensions: (t: 10) # Coordinates: # * t (t) U1" expected_no_data["coords"]["t"].update({"dtype": endiantype, "shape": (10,)}) expected_no_data["data_vars"]["a"].update({"dtype": "float64", "shape": (10,)}) expected_no_data["data_vars"]["b"].update({"dtype": "float64", "shape": (10,)}) actual_no_data = ds.to_dict(data=False, encoding=encoding) assert expected_no_data == actual_no_data # verify coords are included roundtrip expected_ds = ds.set_coords("b") actual2 = Dataset.from_dict(expected_ds.to_dict(data=data, encoding=encoding)) assert_identical(expected_ds, actual2) if encoding: assert set(expected_ds.variables) == set(actual2.variables) for vv in ds.variables: np.testing.assert_equal(expected_ds[vv].encoding, actual2[vv].encoding) # test some incomplete dicts: # this one has no attrs field, the dims are strings, and x, y are # np.arrays d = { "coords": {"t": {"dims": "t", "data": t}}, "dims": "t", "data_vars": {"a": {"dims": "t", "data": x}, "b": {"dims": "t", "data": y}}, } assert_identical(ds, Dataset.from_dict(d)) # this is kind of a flattened version with no coords, or data_vars d = { "a": {"dims": "t", "data": x}, "t": {"data": t, "dims": "t"}, "b": {"dims": "t", "data": y}, } assert_identical(ds, Dataset.from_dict(d)) # this one is missing some necessary information d = { "a": {"data": x}, "t": {"data": t, "dims": "t"}, "b": {"dims": "t", "data": y}, } with pytest.raises( ValueError, match=r"cannot convert dict without the key 'dims'" ): Dataset.from_dict(d) def test_to_and_from_dict_with_time_dim(self) -> None: x = np.random.randn(10, 3) y = np.random.randn(10, 3) t = pd.date_range("20130101", periods=10) lat = [77.7, 83.2, 76] ds = Dataset( { "a": (["t", "lat"], x), "b": (["t", "lat"], y), "t": ("t", t), "lat": ("lat", lat), } ) roundtripped = Dataset.from_dict(ds.to_dict()) assert_identical(ds, roundtripped) @pytest.mark.parametrize("data", [True, "list", "array"]) def test_to_and_from_dict_with_nan_nat( self, data: bool | 
Literal["list", "array"] ) -> None: x = np.random.randn(10, 3) y = np.random.randn(10, 3) y[2] = np.nan t = pd.Series(pd.date_range("20130101", periods=10)) t[2] = np.nan lat = [77.7, 83.2, 76] ds = Dataset( { "a": (["t", "lat"], x), "b": (["t", "lat"], y), "t": ("t", t), "lat": ("lat", lat), } ) roundtripped = Dataset.from_dict(ds.to_dict(data=data)) assert_identical(ds, roundtripped) def test_to_dict_with_numpy_attrs(self) -> None: # this doesn't need to roundtrip x = np.random.randn(10) y = np.random.randn(10) t = list("abcdefghij") attrs = { "created": np.float64(1998), "coords": np.array([37, -110.1, 100]), "maintainer": "bar", } ds = Dataset({"a": ("t", x, attrs), "b": ("t", y, attrs), "t": ("t", t)}) expected_attrs = { "created": attrs["created"].item(), # type: ignore[attr-defined] "coords": attrs["coords"].tolist(), # type: ignore[attr-defined] "maintainer": "bar", } actual = ds.to_dict() # check that they are identical assert expected_attrs == actual["data_vars"]["a"]["attrs"] def test_pickle(self) -> None: data = create_test_data() roundtripped = pickle.loads(pickle.dumps(data)) assert_identical(data, roundtripped) # regression test for #167: assert data.sizes == roundtripped.sizes def test_lazy_load(self) -> None: store = InaccessibleVariableDataStore() create_test_data().dump_to_store(store) for decode_cf in [True, False]: ds = open_dataset(store, decode_cf=decode_cf) with pytest.raises(UnexpectedDataAccess): ds.load() with pytest.raises(UnexpectedDataAccess): _ = ds["var1"].values # these should not raise UnexpectedDataAccess: ds.isel(time=10) ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1) def test_lazy_load_duck_array(self) -> None: store = AccessibleAsDuckArrayDataStore() create_test_data().dump_to_store(store) for decode_cf in [True, False]: ds = open_dataset(store, decode_cf=decode_cf) with pytest.raises(UnexpectedDataAccess): _ = ds["var1"].values # these should not raise UnexpectedDataAccess: _ = ds.var1.data ds.isel(time=10) ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1) repr(ds) # preserve the duck array type and don't cast to array assert isinstance(ds["var1"].load().data, DuckArrayWrapper) assert isinstance( ds["var1"].isel(dim2=0, dim1=0).load().data, DuckArrayWrapper ) ds.close() def test_dropna(self) -> None: x = np.random.randn(4, 4) x[::2, 0] = np.nan y = np.random.randn(4) y[-1] = np.nan ds = Dataset({"foo": (("a", "b"), x), "bar": (("b", y))}) expected = ds.isel(a=slice(1, None, 2)) actual = ds.dropna("a") assert_identical(actual, expected) expected = ds.isel(b=slice(1, 3)) actual = ds.dropna("b") assert_identical(actual, expected) actual = ds.dropna("b", subset=["foo", "bar"]) assert_identical(actual, expected) expected = ds.isel(b=slice(1, None)) actual = ds.dropna("b", subset=["foo"]) assert_identical(actual, expected) expected = ds.isel(b=slice(3)) actual = ds.dropna("b", subset=["bar"]) assert_identical(actual, expected) actual = ds.dropna("a", subset=[]) assert_identical(actual, ds) actual = ds.dropna("a", subset=["bar"]) assert_identical(actual, ds) actual = ds.dropna("a", how="all") assert_identical(actual, ds) actual = ds.dropna("b", how="all", subset=["bar"]) expected = ds.isel(b=[0, 1, 2]) assert_identical(actual, expected) actual = ds.dropna("b", thresh=1, subset=["bar"]) assert_identical(actual, expected) actual = ds.dropna("b", thresh=2) assert_identical(actual, ds) actual = ds.dropna("b", thresh=4) expected = ds.isel(b=[1, 2, 3]) assert_identical(actual, expected) actual = ds.dropna("a", thresh=3) expected = ds.isel(a=[1, 3]) 
assert_identical(actual, ds) with pytest.raises( ValueError, match=r"'foo' not found in data dimensions \('a', 'b'\)", ): ds.dropna("foo") with pytest.raises(ValueError, match=r"invalid how"): ds.dropna("a", how="somehow") # type: ignore[arg-type] with pytest.raises(TypeError, match=r"must specify how or thresh"): ds.dropna("a", how=None) # type: ignore[arg-type] def test_fillna(self) -> None: ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]}) # fill with -1 actual1 = ds.fillna(-1) expected = Dataset({"a": ("x", [-1, 1, -1, 3])}, {"x": [0, 1, 2, 3]}) assert_identical(expected, actual1) actual2 = ds.fillna({"a": -1}) assert_identical(expected, actual2) other = Dataset({"a": -1}) actual3 = ds.fillna(other) assert_identical(expected, actual3) actual4 = ds.fillna({"a": other.a}) assert_identical(expected, actual4) # fill with range(4) b = DataArray(range(4), coords=[("x", range(4))]) actual5 = ds.fillna(b) expected = b.rename("a").to_dataset() assert_identical(expected, actual5) actual6 = ds.fillna(expected) assert_identical(expected, actual6) actual7 = ds.fillna(np.arange(4)) assert_identical(expected, actual7) actual8 = ds.fillna(b[:3]) assert_identical(expected, actual8) # okay to only include some data variables ds["b"] = np.nan actual9 = ds.fillna({"a": -1}) expected = Dataset( {"a": ("x", [-1, 1, -1, 3]), "b": np.nan}, {"x": [0, 1, 2, 3]} ) assert_identical(expected, actual9) # but new data variables is not okay with pytest.raises(ValueError, match=r"must be contained"): ds.fillna({"x": 0}) # empty argument should be OK result1 = ds.fillna({}) assert_identical(ds, result1) result2 = ds.fillna(Dataset(coords={"c": 42})) expected = ds.assign_coords(c=42) assert_identical(expected, result2) da = DataArray(range(5), name="a", attrs={"attr": "da"}) actual10 = da.fillna(1) assert actual10.name == "a" assert actual10.attrs == da.attrs ds = Dataset({"a": da}, attrs={"attr": "ds"}) actual11 = ds.fillna({"a": 1}) assert actual11.attrs == ds.attrs assert actual11.a.name == "a" assert actual11.a.attrs == ds.a.attrs @pytest.mark.parametrize( "func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs] ) def test_propagate_attrs(self, func) -> None: da = DataArray(range(5), name="a", attrs={"attr": "da"}) ds = Dataset({"a": da}, attrs={"attr": "ds"}) # test defaults assert func(ds).attrs == ds.attrs with set_options(keep_attrs=False): assert func(ds).attrs != ds.attrs assert func(ds).a.attrs != ds.a.attrs with set_options(keep_attrs=False): assert func(ds).attrs != ds.attrs assert func(ds).a.attrs != ds.a.attrs with set_options(keep_attrs=True): assert func(ds).attrs == ds.attrs assert func(ds).a.attrs == ds.a.attrs def test_where(self) -> None: ds = Dataset({"a": ("x", range(5))}) expected1 = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])}) actual1 = ds.where(ds > 1) assert_identical(expected1, actual1) actual2 = ds.where(ds.a > 1) assert_identical(expected1, actual2) actual3 = ds.where(ds.a.values > 1) assert_identical(expected1, actual3) actual4 = ds.where(True) assert_identical(ds, actual4) expected5 = ds.copy(deep=True) expected5["a"].values = np.array([np.nan] * 5) actual5 = ds.where(False) assert_identical(expected5, actual5) # 2d ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])}) expected6 = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])}) actual6 = ds.where(ds > 0) assert_identical(expected6, actual6) # attrs da = DataArray(range(5), name="a", attrs={"attr": "da"}) actual7 = da.where(da.values > 1) assert actual7.name == "a" assert actual7.attrs == 
da.attrs ds = Dataset({"a": da}, attrs={"attr": "ds"}) actual8 = ds.where(ds > 0) assert actual8.attrs == ds.attrs assert actual8.a.name == "a" assert actual8.a.attrs == ds.a.attrs # lambda ds = Dataset({"a": ("x", range(5))}) expected9 = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])}) actual9 = ds.where(lambda x: x > 1) assert_identical(expected9, actual9) def test_where_other(self) -> None: ds = Dataset({"a": ("x", range(5))}, {"x": range(5)}) expected = Dataset({"a": ("x", [-1, -1, 2, 3, 4])}, {"x": range(5)}) actual = ds.where(ds > 1, -1) assert_equal(expected, actual) assert actual.a.dtype == int actual = ds.where(lambda x: x > 1, -1) assert_equal(expected, actual) actual = ds.where(ds > 1, other=-1, drop=True) expected_nodrop = ds.where(ds > 1, -1) _, expected = xr.align(actual, expected_nodrop, join="left") assert_equal(actual, expected) assert actual.a.dtype == int with pytest.raises(ValueError, match=r"cannot align .* are not equal"): ds.where(ds > 1, ds.isel(x=slice(3))) with pytest.raises(ValueError, match=r"exact match required"): ds.where(ds > 1, ds.assign(b=2)) def test_where_drop(self) -> None: # if drop=True # 1d # data array case array = DataArray(range(5), coords=[range(5)], dims=["x"]) expected1 = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=["x"]) actual1 = array.where(array > 1, drop=True) assert_identical(expected1, actual1) # dataset case ds = Dataset({"a": array}) expected2 = Dataset({"a": expected1}) actual2 = ds.where(ds > 1, drop=True) assert_identical(expected2, actual2) actual3 = ds.where(ds.a > 1, drop=True) assert_identical(expected2, actual3) with pytest.raises(TypeError, match=r"must be a"): ds.where(np.arange(5) > 1, drop=True) # 1d with odd coordinates array = DataArray( np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=["x"] ) expected4 = DataArray( np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=["x"] ) actual4 = array.where(array > 2, drop=True) assert_identical(expected4, actual4) # 1d multiple variables ds = Dataset({"a": (("x"), [0, 1, 2, 3]), "b": (("x"), [4, 5, 6, 7])}) expected5 = Dataset( {"a": (("x"), [np.nan, 1, 2, 3]), "b": (("x"), [4, 5, 6, np.nan])} ) actual5 = ds.where((ds > 0) & (ds < 7), drop=True) assert_identical(expected5, actual5) # 2d ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])}) expected6 = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])}) actual6 = ds.where(ds > 0, drop=True) assert_identical(expected6, actual6) # 2d with odd coordinates ds = Dataset( {"a": (("x", "y"), [[0, 1], [2, 3]])}, coords={ "x": [4, 3], "y": [1, 2], "z": (["x", "y"], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]]), }, ) expected7 = Dataset( {"a": (("x", "y"), [[3]])}, coords={"x": [3], "y": [2], "z": (["x", "y"], [[np.pi * 3]])}, ) actual7 = ds.where(ds > 2, drop=True) assert_identical(expected7, actual7) # 2d multiple variables ds = Dataset( {"a": (("x", "y"), [[0, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]])} ) expected8 = Dataset( { "a": (("x", "y"), [[np.nan, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]]), } ) actual8 = ds.where(ds > 0, drop=True) assert_identical(expected8, actual8) # mixed dimensions: PR#6690, Issue#6227 ds = xr.Dataset( { "a": ("x", [1, 2, 3]), "b": ("y", [2, 3, 4]), "c": (("x", "y"), np.arange(9).reshape((3, 3))), } ) expected9 = xr.Dataset( { "a": ("x", [np.nan, 3]), "b": ("y", [np.nan, 3, 4]), "c": (("x", "y"), np.arange(3.0, 9.0).reshape((2, 3))), } ) actual9 = ds.where(ds > 2, drop=True) assert actual9.sizes["x"] == 2 assert_identical(expected9, actual9) def 
test_where_drop_empty(self) -> None: # regression test for GH1341 array = DataArray(np.random.rand(100, 10), dims=["nCells", "nVertLevels"]) mask = DataArray(np.zeros((100,), dtype="bool"), dims="nCells") actual = array.where(mask, drop=True) expected = DataArray(np.zeros((0, 10)), dims=["nCells", "nVertLevels"]) assert_identical(expected, actual) def test_where_drop_no_indexes(self) -> None: ds = Dataset({"foo": ("x", [0.0, 1.0])}) expected = Dataset({"foo": ("x", [1.0])}) actual = ds.where(ds == 1, drop=True) assert_identical(expected, actual) def test_reduce(self) -> None: data = create_test_data() assert len(data.mean().coords) == 0 actual = data.max() expected = Dataset({k: v.max() for k, v in data.data_vars.items()}) assert_equal(expected, actual) assert_equal(data.min(dim=["dim1"]), data.min(dim="dim1")) for reduct, expected_dims in [ ("dim2", ["dim3", "time", "dim1"]), (["dim2", "time"], ["dim3", "dim1"]), (("dim2", "time"), ["dim3", "dim1"]), ((), ["dim2", "dim3", "time", "dim1"]), ]: actual_dims = list(data.min(dim=reduct).dims) assert actual_dims == expected_dims assert_equal(data.mean(dim=[]), data) with pytest.raises(ValueError): data.mean(axis=0) def test_reduce_coords(self) -> None: # regression test for GH1470 data = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"b": 4}) expected = xr.Dataset({"a": 2}, coords={"b": 4}) actual = data.mean("x") assert_identical(actual, expected) # should be consistent actual = data["a"].mean("x").to_dataset() assert_identical(actual, expected) def test_mean_uint_dtype(self) -> None: data = xr.Dataset( { "a": (("x", "y"), np.arange(6).reshape(3, 2).astype("uint")), "b": (("x",), np.array([0.1, 0.2, np.nan])), } ) actual = data.mean("x", skipna=True) expected = xr.Dataset( {"a": data["a"].mean("x"), "b": data["b"].mean("x", skipna=True)} ) assert_identical(actual, expected) def test_reduce_bad_dim(self) -> None: data = create_test_data() with pytest.raises( ValueError, match=re.escape("Dimension(s) 'bad_dim' do not exist"), ): data.mean(dim="bad_dim") def test_reduce_cumsum(self) -> None: data = xr.Dataset( {"a": 1, "b": ("x", [1, 2]), "c": (("x", "y"), [[np.nan, 3], [0, 4]])} ) assert_identical(data.fillna(0), data.cumsum("y")) expected = xr.Dataset( {"a": 1, "b": ("x", [1, 3]), "c": (("x", "y"), [[0, 3], [0, 7]])} ) assert_identical(expected, data.cumsum()) @pytest.mark.parametrize( "reduct, expected", [ ("dim1", ["dim2", "dim3", "time", "dim1"]), ("dim2", ["dim3", "time", "dim1", "dim2"]), ("dim3", ["dim2", "time", "dim1", "dim3"]), ("time", ["dim2", "dim3", "dim1"]), ], ) @pytest.mark.parametrize("func", ["cumsum", "cumprod"]) def test_reduce_cumsum_test_dims(self, reduct, expected, func) -> None: data = create_test_data() with pytest.raises( ValueError, match=re.escape("Dimension(s) 'bad_dim' do not exist"), ): getattr(data, func)(dim="bad_dim") # ensure dimensions are correct actual = getattr(data, func)(dim=reduct).dims assert list(actual) == expected def test_reduce_non_numeric(self) -> None: data1 = create_test_data(seed=44, use_extension_array=True) data2 = create_test_data(seed=44) add_vars = {"var6": ["dim1", "dim2"], "var7": ["dim1"]} for v, dims in sorted(add_vars.items()): size = tuple(data1.sizes[d] for d in dims) data = np.random.randint(0, 100, size=size).astype(np.str_) data1[v] = (dims, data, {"foo": "variable"}) # var4 and var5 are extension arrays and should be dropped assert ( "var4" not in data1.mean() and "var5" not in data1.mean() and "var6" not in data1.mean() and "var7" not in data1.mean() ) assert_equal(data1.mean(), 
data2.mean()) assert_equal(data1.mean(dim="dim1"), data2.mean(dim="dim1")) assert "var6" not in data1.mean(dim="dim2") and "var7" in data1.mean(dim="dim2") @pytest.mark.filterwarnings( "ignore:Once the behaviour of DataArray:DeprecationWarning" ) def test_reduce_strings(self) -> None: expected = Dataset({"x": "a"}) ds = Dataset({"x": ("y", ["a", "b"])}) ds.coords["y"] = [-10, 10] actual = ds.min() assert_identical(expected, actual) expected = Dataset({"x": "b"}) actual = ds.max() assert_identical(expected, actual) expected = Dataset({"x": 0}) actual = ds.argmin() assert_identical(expected, actual) expected = Dataset({"x": 1}) actual = ds.argmax() assert_identical(expected, actual) expected = Dataset({"x": -10}) actual = ds.idxmin() assert_identical(expected, actual) expected = Dataset({"x": 10}) actual = ds.idxmax() assert_identical(expected, actual) expected = Dataset({"x": b"a"}) ds = Dataset({"x": ("y", np.array(["a", "b"], "S1"))}) actual = ds.min() assert_identical(expected, actual) expected = Dataset({"x": "a"}) ds = Dataset({"x": ("y", np.array(["a", "b"], "U1"))}) actual = ds.min() assert_identical(expected, actual) def test_reduce_dtypes(self) -> None: # regression test for GH342 expected = Dataset({"x": 1}) actual = Dataset({"x": True}).sum() assert_identical(expected, actual) # regression test for GH505 expected = Dataset({"x": 3}) actual = Dataset({"x": ("y", np.array([1, 2], "uint16"))}).sum() assert_identical(expected, actual) expected = Dataset({"x": 1 + 1j}) actual = Dataset({"x": ("y", [1, 1j])}).sum() assert_identical(expected, actual) def test_reduce_keep_attrs(self) -> None: data = create_test_data() _attrs = {"attr1": "value1", "attr2": 2929} attrs = dict(_attrs) data.attrs = attrs # Test dropped attrs ds = data.mean() assert ds.attrs == {} for v in ds.data_vars.values(): assert v.attrs == {} # Test kept attrs ds = data.mean(keep_attrs=True) assert ds.attrs == attrs for k, v in ds.data_vars.items(): assert v.attrs == data[k].attrs @pytest.mark.filterwarnings( "ignore:Once the behaviour of DataArray:DeprecationWarning" ) def test_reduce_argmin(self) -> None: # regression test for #205 ds = Dataset({"a": ("x", [0, 1])}) expected = Dataset({"a": ([], 0)}) actual = ds.argmin() assert_identical(expected, actual) actual = ds.argmin("x") assert_identical(expected, actual) def test_reduce_scalars(self) -> None: ds = Dataset({"x": ("a", [2, 2]), "y": 2, "z": ("b", [2])}) expected = Dataset({"x": 0, "y": 0, "z": 0}) actual = ds.var() assert_identical(expected, actual) expected = Dataset({"x": 0, "y": 0, "z": ("b", [0])}) actual = ds.var("a") assert_identical(expected, actual) def test_reduce_only_one_axis(self) -> None: def mean_only_one_axis(x, axis): if not isinstance(axis, integer_types): raise TypeError("non-integer axis") return x.mean(axis) ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])}) expected = Dataset({"a": ("x", [2])}) actual = ds.reduce(mean_only_one_axis, "y") assert_identical(expected, actual) with pytest.raises( TypeError, match=r"missing 1 required positional argument: 'axis'" ): ds.reduce(mean_only_one_axis) def test_reduce_no_axis(self) -> None: def total_sum(x): return np.sum(x.flatten()) ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])}) expected = Dataset({"a": ((), 10)}) actual = ds.reduce(total_sum) assert_identical(expected, actual) with pytest.raises(TypeError, match=r"unexpected keyword argument 'axis'"): ds.reduce(total_sum, dim="x") def test_reduce_keepdims(self) -> None: ds = Dataset( {"a": (["x", "y"], [[0, 1, 2, 3, 4]])}, coords={ "y": [0, 
1, 2, 3, 4], "x": [0], "lat": (["x", "y"], [[0, 1, 2, 3, 4]]), "c": -999.0, }, ) # Shape should match behaviour of numpy reductions with keepdims=True # Coordinates involved in the reduction should be removed actual = ds.mean(keepdims=True) expected = Dataset( {"a": (["x", "y"], np.mean(ds.a, keepdims=True).data)}, coords={"c": ds.c} ) assert_identical(expected, actual) actual = ds.mean("x", keepdims=True) expected = Dataset( {"a": (["x", "y"], np.mean(ds.a, axis=0, keepdims=True).data)}, coords={"y": ds.y, "c": ds.c}, ) assert_identical(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) def test_quantile(self, q, skipna, compute_backend) -> None: ds = create_test_data(seed=123) ds.var1.data[0, 0] = np.nan for dim in [None, "dim1", ["dim1"]]: ds_quantile = ds.quantile(q, dim=dim, skipna=skipna) if is_scalar(q): assert "quantile" not in ds_quantile.dims else: assert "quantile" in ds_quantile.dims for var, dar in ds.data_vars.items(): assert var in ds_quantile assert_identical( ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna) ) dim = ["dim1", "dim2"] ds_quantile = ds.quantile(q, dim=dim, skipna=skipna) assert "dim3" in ds_quantile.dims assert all(d not in ds_quantile.dims for d in dim) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("skipna", [True, False]) def test_quantile_skipna(self, skipna, compute_backend) -> None: q = 0.1 dim = "time" ds = Dataset({"a": ([dim], np.arange(0, 11))}) ds = ds.where(ds >= 1) result = ds.quantile(q=q, dim=dim, skipna=skipna) value = 1.9 if skipna else np.nan expected = Dataset({"a": value}, coords={"quantile": q}) assert_identical(result, expected) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_method(self, method) -> None: ds = create_test_data(seed=123) q = [0.25, 0.5, 0.75] result = ds.quantile(q, method=method) assert_identical(result.var1, ds.var1.quantile(q, method=method)) assert_identical(result.var2, ds.var2.quantile(q, method=method)) assert_identical(result.var3, ds.var3.quantile(q, method=method)) @pytest.mark.filterwarnings( "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning" ) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_interpolation_deprecated(self, method) -> None: ds = create_test_data(seed=123) q = [0.25, 0.5, 0.75] with pytest.warns( FutureWarning, match="`interpolation` argument to quantile was renamed to `method`", ): ds.quantile(q, interpolation=method) with warnings.catch_warnings(record=True): with pytest.raises(TypeError, match="interpolation and method keywords"): ds.quantile(q, method=method, interpolation=method) @requires_bottleneck def test_rank(self) -> None: ds = create_test_data(seed=1234) # only ds.var3 depends on dim3 z = ds.rank("dim3") assert ["var3"] == list(z.data_vars) # same as dataarray version x = z.var3 y = ds.var3.rank("dim3") assert_equal(x, y) # coordinates stick assert list(z.coords) == list(ds.coords) assert list(x.coords) == list(y.coords) # invalid dim with pytest.raises( ValueError, match=re.escape( "Dimension 'invalid_dim' not found in data dimensions ('dim3', 'dim1')" ), ): x.rank("invalid_dim") def test_rank_use_bottleneck(self) -> None: ds = Dataset({"a": ("x", [0, np.nan, 2]), "b": ("y", [4, 6, 3, 4])}) with xr.set_options(use_bottleneck=False): with pytest.raises(RuntimeError): 
ds.rank("x") def test_count(self) -> None: ds = Dataset({"x": ("a", [np.nan, 1]), "y": 0, "z": np.nan}) expected = Dataset({"x": 1, "y": 1, "z": 0}) actual = ds.count() assert_identical(expected, actual) def test_map(self) -> None: data = create_test_data() data.attrs["foo"] = "bar" assert_identical(data.map(np.mean), data.mean()) expected = data.mean(keep_attrs=True) actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True) assert_identical(expected, actual) assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars("time")) def scale(x, multiple=1): return multiple * x actual = data.map(scale, multiple=2) assert_equal(actual["var1"], 2 * data["var1"]) assert_identical(actual["numbers"], data["numbers"]) actual = data.map(np.asarray) expected = data.drop_vars("time") # time is not used on a data var assert_equal(expected, actual) def test_apply_pending_deprecated_map(self) -> None: data = create_test_data() data.attrs["foo"] = "bar" with pytest.warns(PendingDeprecationWarning): assert_identical(data.apply(np.mean), data.mean()) def make_example_math_dataset(self): variables = { "bar": ("x", np.arange(100, 400, 100)), "foo": (("x", "y"), 1.0 * np.arange(12).reshape(3, 4)), } coords = {"abc": ("x", ["a", "b", "c"]), "y": 10 * np.arange(4)} ds = Dataset(variables, coords) ds["foo"][0, 0] = np.nan return ds def test_dataset_number_math(self) -> None: ds = self.make_example_math_dataset() assert_identical(ds, +ds) assert_identical(ds, ds + 0) assert_identical(ds, 0 + ds) assert_identical(ds, ds + np.array(0)) assert_identical(ds, np.array(0) + ds) actual = ds.copy(deep=True) actual += 0 assert_identical(ds, actual) # casting nan warns @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") def test_unary_ops(self) -> None: ds = self.make_example_math_dataset() assert_identical(ds.map(abs), abs(ds)) assert_identical(ds.map(lambda x: x + 4), ds + 4) for func in [ lambda x: x.isnull(), lambda x: x.round(), lambda x: x.astype(int), ]: assert_identical(ds.map(func), func(ds)) assert_identical(ds.isnull(), ~ds.notnull()) # don't actually patch these methods in with pytest.raises(AttributeError): _ = ds.item with pytest.raises(AttributeError): _ = ds.searchsorted def test_dataset_array_math(self) -> None: ds = self.make_example_math_dataset() expected = ds.map(lambda x: x - ds["foo"]) assert_identical(expected, ds - ds["foo"]) assert_identical(expected, -ds["foo"] + ds) assert_identical(expected, ds - ds["foo"].variable) assert_identical(expected, -ds["foo"].variable + ds) actual = ds.copy(deep=True) actual -= ds["foo"] assert_identical(expected, actual) expected = ds.map(lambda x: x + ds["bar"]) assert_identical(expected, ds + ds["bar"]) actual = ds.copy(deep=True) actual += ds["bar"] assert_identical(expected, actual) expected = Dataset({"bar": ds["bar"] + np.arange(3)}) assert_identical(expected, ds[["bar"]] + np.arange(3)) assert_identical(expected, np.arange(3) + ds[["bar"]]) def test_dataset_dataset_math(self) -> None: ds = self.make_example_math_dataset() assert_identical(ds, ds + 0 * ds) assert_identical(ds, ds + {"foo": 0, "bar": 0}) expected = ds.map(lambda x: 2 * x) assert_identical(expected, 2 * ds) assert_identical(expected, ds + ds) assert_identical(expected, ds + ds.data_vars) assert_identical(expected, ds + dict(ds.data_vars)) actual = ds.copy(deep=True) expected_id = id(actual) actual += ds assert_identical(expected, actual) assert expected_id == id(actual) assert_identical(ds == ds, ds.notnull()) subsampled = ds.isel(y=slice(2)) expected = 2 
* subsampled assert_identical(expected, subsampled + ds) assert_identical(expected, ds + subsampled) def test_dataset_math_auto_align(self) -> None: ds = self.make_example_math_dataset() subset = ds.isel(y=[1, 3]) expected = 2 * subset actual = ds + subset assert_identical(expected, actual) actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None)) expected = 2 * ds.drop_sel(y=ds.y) assert_equal(actual, expected) actual = ds + ds[["bar"]] expected = (2 * ds[["bar"]]).merge(ds.coords, compat="override") assert_identical(expected, actual) assert_identical(ds + Dataset(), ds.coords.to_dataset()) assert_identical(Dataset() + Dataset(), Dataset()) ds2 = Dataset(coords={"bar": 42}) assert_identical(ds + ds2, ds.coords.merge(ds2)) # maybe unary arithmetic with empty datasets should raise instead? assert_identical(Dataset() + 1, Dataset()) actual = ds.copy(deep=True) other = ds.isel(y=slice(2)) actual += other expected = ds + other.reindex_like(ds) assert_identical(expected, actual) def test_dataset_math_errors(self) -> None: ds = self.make_example_math_dataset() with pytest.raises(TypeError): ds["foo"] += ds with pytest.raises(TypeError): ds["foo"].variable += ds with pytest.raises(ValueError, match=r"must have the same"): ds += ds[["bar"]] # verify we can rollback in-place operations if something goes wrong # nb. inplace datetime64 math actually will work with an integer array # but not floats thanks to numpy's inconsistent handling other = DataArray(np.datetime64("2000-01-01"), coords={"c": 2}) actual = ds.copy(deep=True) with pytest.raises(TypeError): actual += other assert_identical(actual, ds) def test_dataset_transpose(self) -> None: ds = Dataset( { "a": (("x", "y"), np.random.randn(3, 4)), "b": (("y", "x"), np.random.randn(4, 3)), }, coords={ "x": range(3), "y": range(4), "xy": (("x", "y"), np.random.randn(3, 4)), }, ) actual = ds.transpose() expected = Dataset( {"a": (("y", "x"), ds.a.values.T), "b": (("x", "y"), ds.b.values.T)}, coords={ "x": ds.x.values, "y": ds.y.values, "xy": (("y", "x"), ds.xy.values.T), }, ) assert_identical(expected, actual) actual = ds.transpose(...) expected = ds assert_identical(expected, actual) actual = ds.transpose("x", "y") expected = ds.map(lambda x: x.transpose("x", "y", transpose_coords=True)) assert_identical(expected, actual) ds = create_test_data() actual = ds.transpose() for k in ds.variables: assert actual[k].dims[::-1] == ds[k].dims new_order = ("dim2", "dim3", "dim1", "time") actual = ds.transpose(*new_order) for k in ds.variables: expected_dims = tuple(d for d in new_order if d in ds[k].dims) assert actual[k].dims == expected_dims # same as above but with ellipsis new_order = ("dim2", "dim3", "dim1", "time") actual = ds.transpose("dim2", "dim3", ...) for k in ds.variables: expected_dims = tuple(d for d in new_order if d in ds[k].dims) assert actual[k].dims == expected_dims # test missing dimension, raise error with pytest.raises(ValueError): ds.transpose(..., "not_a_dim") # test missing dimension, ignore error actual = ds.transpose(..., "not_a_dim", missing_dims="ignore") expected_ell = ds.transpose(...) 
assert_identical(expected_ell, actual) # test missing dimension, raise warning with pytest.warns(UserWarning): actual = ds.transpose(..., "not_a_dim", missing_dims="warn") assert_identical(expected_ell, actual) assert "T" not in dir(ds) def test_dataset_ellipsis_transpose_different_ordered_vars(self) -> None: # https://github.com/pydata/xarray/issues/1081#issuecomment-544350457 ds = Dataset( dict( a=(("w", "x", "y", "z"), np.ones((2, 3, 4, 5))), b=(("x", "w", "y", "z"), np.zeros((3, 2, 4, 5))), ) ) result = ds.transpose(..., "z", "y") assert list(result["a"].dims) == list("wxzy") assert list(result["b"].dims) == list("xwzy") def test_dataset_retains_period_index_on_transpose(self) -> None: ds = create_test_data() ds["time"] = pd.period_range("2000-01-01", periods=20) transposed = ds.transpose() assert isinstance(transposed.time.to_index(), pd.PeriodIndex) def test_dataset_diff_n1_simple(self) -> None: ds = Dataset({"foo": ("x", [5, 5, 6, 6])}) actual = ds.diff("x") expected = Dataset({"foo": ("x", [0, 1, 0])}) assert_equal(expected, actual) def test_dataset_diff_n1_label(self) -> None: ds = Dataset({"foo": ("x", [5, 5, 6, 6])}, {"x": [0, 1, 2, 3]}) actual = ds.diff("x", label="lower") expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [0, 1, 2]}) assert_equal(expected, actual) actual = ds.diff("x", label="upper") expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [1, 2, 3]}) assert_equal(expected, actual) def test_dataset_diff_n1(self) -> None: ds = create_test_data(seed=1) actual = ds.diff("dim2") expected_dict = {} expected_dict["var1"] = DataArray( np.diff(ds["var1"].values, axis=1), {"dim2": ds["dim2"].values[1:]}, ["dim1", "dim2"], ) expected_dict["var2"] = DataArray( np.diff(ds["var2"].values, axis=1), {"dim2": ds["dim2"].values[1:]}, ["dim1", "dim2"], ) expected_dict["var3"] = ds["var3"] expected = Dataset(expected_dict, coords={"time": ds["time"].values}) expected.coords["numbers"] = ("dim3", ds["numbers"].values) assert_equal(expected, actual) def test_dataset_diff_n2(self) -> None: ds = create_test_data(seed=1) actual = ds.diff("dim2", n=2) expected_dict = {} expected_dict["var1"] = DataArray( np.diff(ds["var1"].values, axis=1, n=2), {"dim2": ds["dim2"].values[2:]}, ["dim1", "dim2"], ) expected_dict["var2"] = DataArray( np.diff(ds["var2"].values, axis=1, n=2), {"dim2": ds["dim2"].values[2:]}, ["dim1", "dim2"], ) expected_dict["var3"] = ds["var3"] expected = Dataset(expected_dict, coords={"time": ds["time"].values}) expected.coords["numbers"] = ("dim3", ds["numbers"].values) assert_equal(expected, actual) def test_dataset_diff_exception_n_neg(self) -> None: ds = create_test_data(seed=1) with pytest.raises(ValueError, match=r"must be non-negative"): ds.diff("dim2", n=-1) def test_dataset_diff_exception_label_str(self) -> None: ds = create_test_data(seed=1) with pytest.raises(ValueError, match=r"'label' argument has to"): ds.diff("dim2", label="raise_me") # type: ignore[arg-type] @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": -10}]) def test_shift(self, fill_value) -> None: coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]} attrs = {"meta": "data"} ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs) actual = ds.shift(x=1, fill_value=fill_value) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan elif isinstance(fill_value, dict): fill_value = fill_value.get("foo", np.nan) expected = Dataset({"foo": ("x", [fill_value, 1, 2])}, coords, attrs) assert_identical(expected, actual) with 
pytest.raises(ValueError, match=r"dimensions"): ds.shift(foo=123) def test_roll_coords(self) -> None: coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]} attrs = {"meta": "data"} ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs) actual = ds.roll(x=1, roll_coords=True) ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]} expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"dimensions"): ds.roll(foo=123, roll_coords=True) def test_roll_no_coords(self) -> None: coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]} attrs = {"meta": "data"} ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs) actual = ds.roll(x=1) expected = Dataset({"foo": ("x", [3, 1, 2])}, coords, attrs) assert_identical(expected, actual) with pytest.raises(ValueError, match=r"dimensions"): ds.roll(abc=321) def test_roll_multidim(self) -> None: # regression test for 2445 arr = xr.DataArray( [[1, 2, 3], [4, 5, 6]], coords={"x": range(3), "y": range(2)}, dims=("y", "x"), ) actual = arr.roll(x=1, roll_coords=True) expected = xr.DataArray( [[3, 1, 2], [6, 4, 5]], coords=[("y", [0, 1]), ("x", [2, 0, 1])] ) assert_identical(expected, actual) def test_real_and_imag(self) -> None: attrs = {"foo": "bar"} ds = Dataset({"x": ((), 1 + 2j, attrs)}, attrs=attrs) expected_re = Dataset({"x": ((), 1, attrs)}, attrs=attrs) assert_identical(ds.real, expected_re) expected_im = Dataset({"x": ((), 2, attrs)}, attrs=attrs) assert_identical(ds.imag, expected_im) def test_setattr_raises(self) -> None: ds = Dataset({}, coords={"scalar": 1}, attrs={"foo": "bar"}) with pytest.raises(AttributeError, match=r"cannot set attr"): ds.scalar = 2 with pytest.raises(AttributeError, match=r"cannot set attr"): ds.foo = 2 with pytest.raises(AttributeError, match=r"cannot set attr"): ds.other = 2 def test_filter_by_attrs(self) -> None: precip = dict(standard_name="convective_precipitation_flux") temp0 = dict(standard_name="air_potential_temperature", height="0 m") temp10 = dict(standard_name="air_potential_temperature", height="10 m") ds = Dataset( { "temperature_0": (["t"], [0], temp0), "temperature_10": (["t"], [0], temp10), "precipitation": (["t"], [0], precip), }, coords={"time": (["t"], [0], dict(axis="T", long_name="time_in_seconds"))}, ) # Test return empty Dataset. ds.filter_by_attrs(standard_name="invalid_standard_name") new_ds = ds.filter_by_attrs(standard_name="invalid_standard_name") assert not bool(new_ds.data_vars) # Test return one DataArray. new_ds = ds.filter_by_attrs(standard_name="convective_precipitation_flux") assert new_ds["precipitation"].standard_name == "convective_precipitation_flux" assert_equal(new_ds["precipitation"], ds["precipitation"]) # Test filter coordinates new_ds = ds.filter_by_attrs(long_name="time_in_seconds") assert new_ds["time"].long_name == "time_in_seconds" assert not bool(new_ds.data_vars) # Test return more than one DataArray. new_ds = ds.filter_by_attrs(standard_name="air_potential_temperature") assert len(new_ds.data_vars) == 2 for var in new_ds.data_vars: assert new_ds[var].standard_name == "air_potential_temperature" # Test callable. 
new_ds = ds.filter_by_attrs(height=lambda v: v is not None) assert len(new_ds.data_vars) == 2 for var in new_ds.data_vars: assert new_ds[var].standard_name == "air_potential_temperature" new_ds = ds.filter_by_attrs(height="10 m") assert len(new_ds.data_vars) == 1 for var in new_ds.data_vars: assert new_ds[var].height == "10 m" # Test return empty Dataset due to conflicting filters new_ds = ds.filter_by_attrs( standard_name="convective_precipitation_flux", height="0 m" ) assert not bool(new_ds.data_vars) # Test return one DataArray with two filter conditions new_ds = ds.filter_by_attrs( standard_name="air_potential_temperature", height="0 m" ) for var in new_ds.data_vars: assert new_ds[var].standard_name == "air_potential_temperature" assert new_ds[var].height == "0 m" assert new_ds[var].height != "10 m" # Test return empty Dataset due to conflicting callables new_ds = ds.filter_by_attrs( standard_name=lambda v: False, height=lambda v: True ) assert not bool(new_ds.data_vars) def test_binary_op_propagate_indexes(self) -> None: ds = Dataset( {"d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]})} ) expected = ds.xindexes["x"] actual = (ds * 2).xindexes["x"] assert expected is actual def test_binary_op_join_setting(self) -> None: # arithmetic_join applies to data array coordinates missing_2 = xr.Dataset({"x": [0, 1]}) missing_0 = xr.Dataset({"x": [1, 2]}) with xr.set_options(arithmetic_join="outer"): actual = missing_2 + missing_0 expected = xr.Dataset({"x": [0, 1, 2]}) assert_equal(actual, expected) # arithmetic join also applies to data_vars ds1 = xr.Dataset({"foo": 1, "bar": 2}) ds2 = xr.Dataset({"bar": 2, "baz": 3}) expected = xr.Dataset({"bar": 4}) # default is inner joining actual = ds1 + ds2 assert_equal(actual, expected) with xr.set_options(arithmetic_join="outer"): expected = xr.Dataset({"foo": np.nan, "bar": 4, "baz": np.nan}) actual = ds1 + ds2 assert_equal(actual, expected) with xr.set_options(arithmetic_join="left"): expected = xr.Dataset({"foo": np.nan, "bar": 4}) actual = ds1 + ds2 assert_equal(actual, expected) with xr.set_options(arithmetic_join="right"): expected = xr.Dataset({"bar": 4, "baz": np.nan}) actual = ds1 + ds2 assert_equal(actual, expected) @pytest.mark.parametrize( ["keep_attrs", "expected"], ( pytest.param(False, {}, id="False"), pytest.param(True, {"foo": "a", "bar": "b"}, id="True"), ), ) def test_binary_ops_keep_attrs(self, keep_attrs, expected) -> None: ds1 = xr.Dataset({"a": 1}, attrs={"foo": "a", "bar": "b"}) ds2 = xr.Dataset({"a": 1}, attrs={"foo": "a", "baz": "c"}) with xr.set_options(keep_attrs=keep_attrs): ds_result = ds1 + ds2 assert ds_result.attrs == expected def test_full_like(self) -> None: # For more thorough tests, see test_variable.py # Note: testing data_vars with mismatched dtypes ds = Dataset( { "d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}), "d2": DataArray([1.1, 2.2, 3.3], dims=["y"]), }, attrs={"foo": "bar"}, ) actual = full_like(ds, 2) expected = ds.copy(deep=True) # https://github.com/python/mypy/issues/3004 expected["d1"].values = [2, 2, 2] # type: ignore[assignment,unused-ignore] expected["d2"].values = [2.0, 2.0, 2.0] # type: ignore[assignment,unused-ignore] assert expected["d1"].dtype == int assert expected["d2"].dtype == float assert_identical(expected, actual) # override dtype actual = full_like(ds, fill_value=True, dtype=bool) expected = ds.copy(deep=True) expected["d1"].values = [True, True, True] # type: ignore[assignment,unused-ignore] expected["d2"].values = [True, True, True] # type: 
ignore[assignment,unused-ignore] assert expected["d1"].dtype == bool assert expected["d2"].dtype == bool assert_identical(expected, actual) # with multiple fill values actual = full_like(ds, {"d1": 1, "d2": 2.3}) expected = ds.assign(d1=("x", [1, 1, 1]), d2=("y", [2.3, 2.3, 2.3])) assert expected["d1"].dtype == int assert expected["d2"].dtype == float assert_identical(expected, actual) # override multiple dtypes actual = full_like(ds, fill_value={"d1": 1, "d2": 2.3}, dtype={"d1": bool}) expected = ds.assign(d1=("x", [True, True, True]), d2=("y", [2.3, 2.3, 2.3])) assert expected["d1"].dtype == bool assert expected["d2"].dtype == float assert_identical(expected, actual) def test_combine_first(self) -> None: dsx0 = DataArray([0, 0], [("x", ["a", "b"])]).to_dataset(name="dsx0") dsx1 = DataArray([1, 1], [("x", ["b", "c"])]).to_dataset(name="dsx1") actual = dsx0.combine_first(dsx1) expected = Dataset( {"dsx0": ("x", [0, 0, np.nan]), "dsx1": ("x", [np.nan, 1, 1])}, coords={"x": ["a", "b", "c"]}, ) assert_equal(actual, expected) assert_equal(actual, xr.merge([dsx0, dsx1], join="outer")) # works just like xr.merge([self, other]) dsy2 = DataArray([2, 2, 2], [("x", ["b", "c", "d"])]).to_dataset(name="dsy2") actual = dsx0.combine_first(dsy2) expected = xr.merge([dsy2, dsx0], join="outer") assert_equal(actual, expected) def test_sortby(self) -> None: ds = Dataset( { "A": DataArray( [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])] ), "B": DataArray([[5, 6], [7, 8], [9, 10]], dims=["x", "y"]), } ) sorted1d = Dataset( { "A": DataArray( [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])] ), "B": DataArray([[9, 10], [7, 8], [5, 6]], dims=["x", "y"]), } ) sorted2d = Dataset( { "A": DataArray( [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])] ), "B": DataArray([[10, 9], [8, 7], [6, 5]], dims=["x", "y"]), } ) expected = sorted1d dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])]) actual = ds.sortby(dax) assert_equal(actual, expected) # test descending order sort actual = ds.sortby(dax, ascending=False) assert_equal(actual, ds) # test alignment (fills in nan for 'c') dax_short = DataArray([98, 97], [("x", ["b", "a"])]) actual = ds.sortby(dax_short) assert_equal(actual, expected) # test 1-D lexsort # dax0 is sorted first to give indices of [1, 2, 0] # and then dax1 would be used to move index 2 ahead of 1 dax0 = DataArray([100, 95, 95], [("x", ["c", "b", "a"])]) dax1 = DataArray([0, 1, 0], [("x", ["c", "b", "a"])]) actual = ds.sortby([dax0, dax1]) # lexsort underneath gives [2, 1, 0] assert_equal(actual, expected) expected = sorted2d # test multi-dim sort by 1D dataarray values day = DataArray([90, 80], [("y", [1, 0])]) actual = ds.sortby([day, dax]) assert_equal(actual, expected) # test exception-raising with pytest.raises(KeyError): actual = ds.sortby("z") with pytest.raises(ValueError) as excinfo: actual = ds.sortby(ds["A"]) assert "DataArray is not 1-D" in str(excinfo.value) expected = sorted1d actual = ds.sortby("x") assert_equal(actual, expected) # test pandas.MultiIndex indices = (("b", 1), ("b", 0), ("a", 1), ("a", 0)) midx = pd.MultiIndex.from_tuples(indices, names=["one", "two"]) ds_midx = Dataset( { "A": DataArray( [[1, 2], [3, 4], [5, 6], [7, 8]], [("x", midx), ("y", [1, 0])] ), "B": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=["x", "y"]), } ) actual = ds_midx.sortby("x") midx_reversed = pd.MultiIndex.from_tuples( tuple(reversed(indices)), names=["one", "two"] ) expected = Dataset( { "A": DataArray( [[7, 8], [5, 6], [3, 4], [1, 2]], [("x", 
midx_reversed), ("y", [1, 0])], ), "B": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=["x", "y"]), } ) assert_equal(actual, expected) # multi-dim sort by coordinate objects expected = sorted2d actual = ds.sortby(["x", "y"]) assert_equal(actual, expected) # test descending order sort actual = ds.sortby(["x", "y"], ascending=False) assert_equal(actual, ds) def test_attribute_access(self) -> None: ds = create_test_data(seed=1) for key in ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"]: assert_equal(ds[key], getattr(ds, key)) assert key in dir(ds) for key in ["dim3", "dim1", "numbers"]: assert_equal(ds["var3"][key], getattr(ds.var3, key)) assert key in dir(ds["var3"]) # attrs assert ds["var3"].attrs["foo"] == ds.var3.foo assert "foo" in dir(ds["var3"]) def test_ipython_key_completion(self) -> None: ds = create_test_data(seed=1) actual = ds._ipython_key_completions_() expected = ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"] for item in actual: ds[item] # should not raise assert sorted(actual) == sorted(expected) # for dataarray actual = ds["var3"]._ipython_key_completions_() expected = ["dim3", "dim1", "numbers"] for item in actual: ds["var3"][item] # should not raise assert sorted(actual) == sorted(expected) # MultiIndex ds_midx = ds.stack(dim12=["dim2", "dim3"]) actual = ds_midx._ipython_key_completions_() expected = [ "var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers", "dim12", ] for item in actual: ds_midx[item] # should not raise assert sorted(actual) == sorted(expected) # coords actual = ds.coords._ipython_key_completions_() expected = ["time", "dim1", "dim2", "dim3", "numbers"] for item in actual: ds.coords[item] # should not raise assert sorted(actual) == sorted(expected) actual = ds["var3"].coords._ipython_key_completions_() expected = ["dim1", "dim3", "numbers"] for item in actual: ds["var3"].coords[item] # should not raise assert sorted(actual) == sorted(expected) coords = Coordinates(ds.coords) actual = coords._ipython_key_completions_() expected = ["time", "dim2", "dim3", "numbers"] for item in actual: coords[item] # should not raise assert sorted(actual) == sorted(expected) # data_vars actual = ds.data_vars._ipython_key_completions_() expected = ["var1", "var2", "var3", "dim1"] for item in actual: ds.data_vars[item] # should not raise assert sorted(actual) == sorted(expected) def test_polyfit_output(self) -> None: ds = create_test_data(seed=1) out = ds.polyfit("dim2", 2, full=False) assert "var1_polyfit_coefficients" in out out = ds.polyfit("dim1", 2, full=True) assert "var1_polyfit_coefficients" in out assert "dim1_matrix_rank" in out out = ds.polyfit("time", 2) assert len(out.data_vars) == 0 def test_polyfit_weighted(self) -> None: ds = create_test_data(seed=1) ds = ds.broadcast_like(ds) # test more than 2 dimensions (issue #9972) ds_copy = ds.copy(deep=True) expected = ds.polyfit("dim2", 2) actual = ds.polyfit("dim2", 2, w=np.ones(ds.sizes["dim2"])) xr.testing.assert_identical(expected, actual) # Make sure weighted polyfit does not change the original object (issue #5644) xr.testing.assert_identical(ds, ds_copy) def test_polyfit_coord(self) -> None: # Make sure polyfit works when given a non-dimension coordinate. 
ds = create_test_data(seed=1) out = ds.polyfit("numbers", 2, full=False) assert "var3_polyfit_coefficients" in out assert "dim1" in out.dims assert "dim2" not in out assert "dim3" not in out def test_polyfit_coord_output(self) -> None: da = xr.DataArray( [1, 3, 2], dims=["x"], coords=dict(x=["a", "b", "c"], y=("x", [0, 1, 2])) ) out = da.polyfit("y", deg=1)["polyfit_coefficients"] assert out.sel(degree=0).item() == pytest.approx(1.5) assert out.sel(degree=1).item() == pytest.approx(0.5) def test_polyfit_warnings(self) -> None: ds = create_test_data(seed=1) with warnings.catch_warnings(record=True) as ws: ds.var1.polyfit("dim2", 10, full=False) assert len(ws) == 1 assert ws[0].category == RankWarning ds.var1.polyfit("dim2", 10, full=True) assert len(ws) == 1 def test_polyfit_polyval(self) -> None: da = xr.DataArray( np.arange(1, 10).astype(np.float64), dims=["x"], coords=dict(x=np.arange(9)) ) out = da.polyfit("x", 3, full=False) da_fitval = xr.polyval(da.x, out.polyfit_coefficients) # polyval introduces very small errors (1e-16 here) xr.testing.assert_allclose(da_fitval, da) da = da.assign_coords(x=xr.date_range("2001-01-01", periods=9, freq="YS")) out = da.polyfit("x", 3, full=False) da_fitval = xr.polyval(da.x, out.polyfit_coefficients) xr.testing.assert_allclose(da_fitval, da, rtol=1e-3) @requires_cftime def test_polyfit_polyval_cftime(self) -> None: da = xr.DataArray( np.arange(1, 10).astype(np.float64), dims=["x"], coords=dict( x=xr.date_range("2001-01-01", periods=9, freq="YS", calendar="noleap") ), ) out = da.polyfit("x", 3, full=False) da_fitval = xr.polyval(da.x, out.polyfit_coefficients) np.testing.assert_allclose(da_fitval, da) @staticmethod def _test_data_var_interior( original_data_var, padded_data_var, padded_dim_name, expected_pad_values ): np.testing.assert_equal( np.unique(padded_data_var.isel({padded_dim_name: [0, -1]})), expected_pad_values, ) np.testing.assert_array_equal( padded_data_var.isel({padded_dim_name: slice(1, -1)}), original_data_var ) @pytest.mark.parametrize("padded_dim_name", ["dim1", "dim2", "dim3", "time"]) @pytest.mark.parametrize( ["constant_values"], [ pytest.param(None, id="default"), pytest.param(42, id="scalar"), pytest.param((42, 43), id="tuple"), pytest.param({"dim1": 42, "dim2": 43}, id="per dim scalar"), pytest.param({"dim1": (42, 43), "dim2": (43, 44)}, id="per dim tuple"), pytest.param({"var1": 42, "var2": (42, 43)}, id="per var"), pytest.param({"var1": 42, "dim1": (42, 43)}, id="mixed"), ], ) def test_pad(self, padded_dim_name, constant_values) -> None: ds = create_test_data(seed=1) padded = ds.pad({padded_dim_name: (1, 1)}, constant_values=constant_values) # test padded dim values and size for ds_dim_name, ds_dim in ds.sizes.items(): if ds_dim_name == padded_dim_name: np.testing.assert_equal(padded.sizes[ds_dim_name], ds_dim + 2) if ds_dim_name in padded.coords: assert padded[ds_dim_name][[0, -1]].isnull().all() else: np.testing.assert_equal(padded.sizes[ds_dim_name], ds_dim) # check if coord "numbers" with dimension dim3 is padded correctly if padded_dim_name == "dim3": assert padded["numbers"][[0, -1]].isnull().all() # twarning: passes but dtype changes from int to float np.testing.assert_array_equal(padded["numbers"][1:-1], ds["numbers"]) # test if data_vars are paded with correct values for data_var_name, data_var in padded.data_vars.items(): if padded_dim_name in data_var.dims: if utils.is_dict_like(constant_values): if ( expected := constant_values.get(data_var_name, None) ) is not None or ( expected := 
constant_values.get(padded_dim_name, None) ) is not None: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, expected ) else: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, 0 ) elif constant_values: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, constant_values ) else: self._test_data_var_interior( ds[data_var_name], data_var, padded_dim_name, np.nan ) else: assert_array_equal(data_var, ds[data_var_name]) @pytest.mark.parametrize( ["keep_attrs", "attrs", "expected"], [ pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"), pytest.param(False, {"a": 1, "b": 2}, {}, id="False"), pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"), ], ) def test_pad_keep_attrs(self, keep_attrs, attrs, expected) -> None: ds = xr.Dataset( {"a": ("x", [1, 2], attrs), "b": ("y", [1, 2], attrs)}, coords={"c": ("x", [-1, 1], attrs), "d": ("y", [-1, 1], attrs)}, attrs=attrs, ) expected = xr.Dataset( {"a": ("x", [0, 1, 2, 0], expected), "b": ("y", [1, 2], attrs)}, coords={ "c": ("x", [np.nan, -1, 1, np.nan], expected), "d": ("y", [-1, 1], attrs), }, attrs=expected, ) keep_attrs_ = "default" if keep_attrs is None else keep_attrs with set_options(keep_attrs=keep_attrs_): actual = ds.pad({"x": (1, 1)}, mode="constant", constant_values=0) xr.testing.assert_identical(actual, expected) actual = ds.pad( {"x": (1, 1)}, mode="constant", constant_values=0, keep_attrs=keep_attrs ) xr.testing.assert_identical(actual, expected) def test_astype_attrs(self) -> None: data = create_test_data(seed=123) data.attrs["foo"] = "bar" assert data.attrs == data.astype(float).attrs assert data.var1.attrs == data.astype(float).var1.attrs assert not data.astype(float, keep_attrs=False).attrs assert not data.astype(float, keep_attrs=False).var1.attrs @pytest.mark.parametrize("parser", ["pandas", "python"]) @pytest.mark.parametrize( "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])] ) @pytest.mark.parametrize( "backend", ["numpy", pytest.param("dask", marks=[requires_dask])] ) def test_query(self, backend, engine, parser) -> None: """Test querying a dataset.""" # setup test data np.random.seed(42) a = np.arange(0, 10, 1) b = np.random.randint(0, 100, size=10) c = np.linspace(0, 1, 20) d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype( object ) e = np.arange(0, 10 * 20).reshape(10, 20) f = np.random.normal(0, 1, size=(10, 20, 30)) if backend == "numpy": ds = Dataset( { "a": ("x", a), "b": ("x", b), "c": ("y", c), "d": ("z", d), "e": (("x", "y"), e), "f": (("x", "y", "z"), f), }, coords={ "a2": ("x", a), "b2": ("x", b), "c2": ("y", c), "d2": ("z", d), "e2": (("x", "y"), e), "f2": (("x", "y", "z"), f), }, ) elif backend == "dask": ds = Dataset( { "a": ("x", da.from_array(a, chunks=3)), "b": ("x", da.from_array(b, chunks=3)), "c": ("y", da.from_array(c, chunks=7)), "d": ("z", da.from_array(d, chunks=12)), "e": (("x", "y"), da.from_array(e, chunks=(3, 7))), "f": (("x", "y", "z"), da.from_array(f, chunks=(3, 7, 12))), }, coords={ "a2": ("x", a), "b2": ("x", b), "c2": ("y", c), "d2": ("z", d), "e2": (("x", "y"), e), "f2": (("x", "y", "z"), f), }, ) # query single dim, single variable with raise_if_dask_computes(): actual = ds.query(x="a2 > 5", engine=engine, parser=parser) expect = ds.isel(x=(a > 5)) assert_identical(expect, actual) # query single dim, single variable, via dict with raise_if_dask_computes(): actual = ds.query(dict(x="a2 > 5"), engine=engine, parser=parser) expect = 
ds.isel(dict(x=(a > 5))) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = ds.query(x="b2 > 50", engine=engine, parser=parser) expect = ds.isel(x=(b > 50)) assert_identical(expect, actual) # query single dim, single variable with raise_if_dask_computes(): actual = ds.query(y="c2 < .5", engine=engine, parser=parser) expect = ds.isel(y=(c < 0.5)) assert_identical(expect, actual) # query single dim, single string variable if parser == "pandas": # N.B., this query currently only works with the pandas parser # xref https://github.com/pandas-dev/pandas/issues/40436 with raise_if_dask_computes(): actual = ds.query(z='d2 == "bar"', engine=engine, parser=parser) expect = ds.isel(z=(d == "bar")) assert_identical(expect, actual) # query single dim, multiple variables with raise_if_dask_computes(): actual = ds.query(x="(a2 > 5) & (b2 > 50)", engine=engine, parser=parser) expect = ds.isel(x=((a > 5) & (b > 50))) assert_identical(expect, actual) # query single dim, multiple variables with computation with raise_if_dask_computes(): actual = ds.query(x="(a2 * b2) > 250", engine=engine, parser=parser) expect = ds.isel(x=(a * b) > 250) assert_identical(expect, actual) # check pandas query syntax is supported if parser == "pandas": with raise_if_dask_computes(): actual = ds.query( x="(a2 > 5) and (b2 > 50)", engine=engine, parser=parser ) expect = ds.isel(x=((a > 5) & (b > 50))) assert_identical(expect, actual) # query multiple dims via kwargs with raise_if_dask_computes(): actual = ds.query(x="a2 > 5", y="c2 < .5", engine=engine, parser=parser) expect = ds.isel(x=(a > 5), y=(c < 0.5)) assert_identical(expect, actual) # query multiple dims via kwargs if parser == "pandas": with raise_if_dask_computes(): actual = ds.query( x="a2 > 5", y="c2 < .5", z="d2 == 'bar'", engine=engine, parser=parser, ) expect = ds.isel(x=(a > 5), y=(c < 0.5), z=(d == "bar")) assert_identical(expect, actual) # query multiple dims via dict with raise_if_dask_computes(): actual = ds.query( dict(x="a2 > 5", y="c2 < .5"), engine=engine, parser=parser ) expect = ds.isel(dict(x=(a > 5), y=(c < 0.5))) assert_identical(expect, actual) # query multiple dims via dict if parser == "pandas": with raise_if_dask_computes(): actual = ds.query( dict(x="a2 > 5", y="c2 < .5", z="d2 == 'bar'"), engine=engine, parser=parser, ) expect = ds.isel(dict(x=(a > 5), y=(c < 0.5), z=(d == "bar"))) assert_identical(expect, actual) # test error handling with pytest.raises(ValueError): ds.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs with pytest.raises(ValueError): ds.query(x=(a > 5)) with pytest.raises(IndexError): ds.query(y="a > 5") # wrong length dimension with pytest.raises(IndexError): ds.query(x="c < .5") # wrong length dimension with pytest.raises(IndexError): ds.query(x="e > 100") # wrong number of dimensions with pytest.raises(UndefinedVariableError): ds.query(x="spam > 50") # name not present # pytest tests - new tests should go here, rather than in the class. @pytest.mark.parametrize("parser", ["pandas", "python"]) def test_eval(ds, parser) -> None: """Currently much more minimal testing than `query` above, and much of the setup isn't used.
But the risks are fairly low - `query` shares much of the code, and the method is currently experimental.""" actual = ds.eval("z1 + 5", parser=parser) expect = ds["z1"] + 5 assert_identical(expect, actual) # check pandas query syntax is supported if parser == "pandas": actual = ds.eval("(z1 > 5) and (z2 > 0)", parser=parser) expect = (ds["z1"] > 5) & (ds["z2"] > 0) assert_identical(expect, actual) @pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2]))) def test_isin(test_elements, backend) -> None: expected = Dataset( data_vars={ "var1": (("dim1",), [0, 1]), "var2": (("dim1",), [1, 1]), "var3": (("dim1",), [0, 1]), } ).astype("bool") if backend == "dask": expected = expected.chunk() result = Dataset( data_vars={ "var1": (("dim1",), [0, 1]), "var2": (("dim1",), [1, 2]), "var3": (("dim1",), [0, 1]), } ).isin(test_elements) assert_equal(result, expected) def test_isin_dataset() -> None: ds = Dataset({"x": [1, 2]}) with pytest.raises(TypeError): ds.isin(ds) @pytest.mark.parametrize( "unaligned_coords", ( {"x": [2, 1, 0]}, {"x": (["x"], np.asarray([2, 1, 0]))}, {"x": (["x"], np.asarray([1, 2, 0]))}, {"x": pd.Index([2, 1, 0])}, {"x": Variable(dims="x", data=[0, 2, 1])}, {"x": IndexVariable(dims="x", data=[0, 1, 2])}, {"y": 42}, {"y": ("x", [2, 1, 0])}, {"y": ("x", np.asarray([2, 1, 0]))}, {"y": (["x"], np.asarray([2, 1, 0]))}, ), ) @pytest.mark.parametrize("coords", ({"x": ("x", [0, 1, 2])}, {"x": [0, 1, 2]})) def test_dataset_constructor_aligns_to_explicit_coords( unaligned_coords, coords ) -> None: a = xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords) expected = xr.Dataset(coords=coords) expected["a"] = a result = xr.Dataset({"a": a}, coords=coords) assert_equal(expected, result) def test_error_message_on_set_supplied() -> None: with pytest.raises(TypeError, match="has invalid type "): xr.Dataset(dict(date=[1, 2, 3], sec={4})) @pytest.mark.parametrize("unaligned_coords", ({"y": ("b", np.asarray([2, 1, 0]))},)) def test_constructor_raises_with_invalid_coords(unaligned_coords) -> None: with pytest.raises(ValueError, match="not a subset of the DataArray dimensions"): xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords) @pytest.mark.parametrize("ds", [3], indirect=True) def test_dir_expected_attrs(ds) -> None: some_expected_attrs = {"pipe", "mean", "isnull", "var1", "dim2", "numbers"} result = dir(ds) assert set(result) >= some_expected_attrs def test_dir_non_string(ds) -> None: # add a numbered key to ensure this doesn't break dir ds[5] = "foo" result = dir(ds) assert 5 not in result # GH2172 sample_data = np.random.uniform(size=[2, 2000, 10000]) x = xr.Dataset({"sample_data": (sample_data.shape, sample_data)}) x2 = x["sample_data"] dir(x2) def test_dir_unicode(ds) -> None: ds["unicode"] = "uni" result = dir(ds) assert "unicode" in result def test_raise_no_warning_for_nan_in_binary_ops() -> None: with assert_no_warnings(): _ = Dataset(data_vars={"x": ("y", [1, 2, np.nan])}) > 0 @pytest.mark.filterwarnings("error") @pytest.mark.parametrize("ds", (2,), indirect=True) def test_raise_no_warning_assert_close(ds) -> None: assert_allclose(ds, ds) @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize("edge_order", [1, 2]) def test_differentiate(dask, edge_order) -> None: rs = np.random.default_rng(42) coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8] da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.random((8, 6)))}, ) if dask and has_dask: da = da.chunk({"x": 4}) ds =
xr.Dataset({"var": da}) # along x actual = da.differentiate("x", edge_order) expected_x = xr.DataArray( np.gradient(da, da["x"], axis=0, edge_order=edge_order), dims=da.dims, coords=da.coords, ) assert_equal(expected_x, actual) assert_equal( ds["var"].differentiate("x", edge_order=edge_order), ds.differentiate("x", edge_order=edge_order)["var"], ) # coordinate should not change assert_equal(da["x"], actual["x"]) # along y actual = da.differentiate("y", edge_order) expected_y = xr.DataArray( np.gradient(da, da["y"], axis=1, edge_order=edge_order), dims=da.dims, coords=da.coords, ) assert_equal(expected_y, actual) assert_equal(actual, ds.differentiate("y", edge_order=edge_order)["var"]) assert_equal( ds["var"].differentiate("y", edge_order=edge_order), ds.differentiate("y", edge_order=edge_order)["var"], ) with pytest.raises(ValueError): da.differentiate("x2d") @pytest.mark.parametrize("dask", [True, False]) def test_differentiate_datetime(dask) -> None: rs = np.random.default_rng(42) coord = np.array( [ "2004-07-13", "2006-01-13", "2010-08-13", "2010-09-13", "2010-10-11", "2010-12-13", "2011-02-13", "2012-08-13", ], dtype="datetime64", ) da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.random((8, 6)))}, ) if dask and has_dask: da = da.chunk({"x": 4}) # along x actual = da.differentiate("x", edge_order=1, datetime_unit="D") expected_x = xr.DataArray( np.gradient( da, da["x"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1 ), dims=da.dims, coords=da.coords, ) assert_equal(expected_x, actual) actual2 = da.differentiate("x", edge_order=1, datetime_unit="h") assert np.allclose(actual, actual2 * 24) # for datetime variable actual = da["x"].differentiate("x", edge_order=1, datetime_unit="D") assert np.allclose(actual, 1.0) # with different date unit da = xr.DataArray(coord.astype("datetime64[ms]"), dims=["x"], coords={"x": coord}) actual = da.differentiate("x", edge_order=1) assert np.allclose(actual, 1.0) @requires_cftime @pytest.mark.parametrize("dask", [True, False]) def test_differentiate_cftime(dask) -> None: rs = np.random.default_rng(42) coord = xr.date_range("2000", periods=8, freq="2ME", use_cftime=True) da = xr.DataArray( rs.random((8, 6)), coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.random((8, 6)))}, dims=["time", "y"], ) if dask and has_dask: da = da.chunk({"time": 4}) actual = da.differentiate("time", edge_order=1, datetime_unit="D") expected_data = np.gradient( da, da["time"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1 ) expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims) assert_equal(expected, actual) actual2 = da.differentiate("time", edge_order=1, datetime_unit="h") assert_allclose(actual, actual2 * 24) # Test the differentiation of datetimes themselves actual = da["time"].differentiate("time", edge_order=1, datetime_unit="D") assert_allclose(actual, xr.ones_like(da["time"]).astype(float)) @pytest.mark.parametrize("dask", [True, False]) def test_integrate(dask) -> None: rs = np.random.default_rng(42) coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8] da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={ "x": coord, "x2": (("x",), rs.random(8)), "z": 3, "x2d": (("x", "y"), rs.random((8, 6))), }, ) if dask and has_dask: da = da.chunk({"x": 4}) ds = xr.Dataset({"var": da}) # along x actual = da.integrate("x") # coordinate that contains x should be dropped. 
expected_x = xr.DataArray( trapezoid(da.compute(), da["x"], axis=0), dims=["y"], coords={k: v for k, v in da.coords.items() if "x" not in v.dims}, ) assert_allclose(expected_x, actual.compute()) assert_equal(ds["var"].integrate("x"), ds.integrate("x")["var"]) # make sure result is also a dask array (if the source is dask array) assert isinstance(actual.data, type(da.data)) # along y actual = da.integrate("y") expected_y = xr.DataArray( trapezoid(da, da["y"], axis=1), dims=["x"], coords={k: v for k, v in da.coords.items() if "y" not in v.dims}, ) assert_allclose(expected_y, actual.compute()) assert_equal(actual, ds.integrate("y")["var"]) assert_equal(ds["var"].integrate("y"), ds.integrate("y")["var"]) # along x and y actual = da.integrate(("y", "x")) assert actual.ndim == 0 with pytest.raises(ValueError): da.integrate("x2d") @requires_scipy @pytest.mark.parametrize("dask", [True, False]) def test_cumulative_integrate(dask) -> None: rs = np.random.default_rng(43) coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8] da = xr.DataArray( rs.random((8, 6)), dims=["x", "y"], coords={ "x": coord, "x2": (("x",), rs.random(8)), "z": 3, "x2d": (("x", "y"), rs.random((8, 6))), }, ) if dask and has_dask: da = da.chunk({"x": 4}) ds = xr.Dataset({"var": da}) # along x actual = da.cumulative_integrate("x") from scipy.integrate import cumulative_trapezoid expected_x = xr.DataArray( cumulative_trapezoid(da.compute(), da["x"], axis=0, initial=0.0), # type: ignore[call-overload,unused-ignore] dims=["x", "y"], coords=da.coords, ) assert_allclose(expected_x, actual.compute()) assert_equal( ds["var"].cumulative_integrate("x"), ds.cumulative_integrate("x")["var"], ) # make sure result is also a dask array (if the source is dask array) assert isinstance(actual.data, type(da.data)) # along y actual = da.cumulative_integrate("y") expected_y = xr.DataArray( cumulative_trapezoid(da, da["y"], axis=1, initial=0.0), # type: ignore[call-overload,unused-ignore] dims=["x", "y"], coords=da.coords, ) assert_allclose(expected_y, actual.compute()) assert_equal(actual, ds.cumulative_integrate("y")["var"]) assert_equal( ds["var"].cumulative_integrate("y"), ds.cumulative_integrate("y")["var"], ) # along x and y actual = da.cumulative_integrate(("y", "x")) assert actual.ndim == 2 with pytest.raises(ValueError): da.cumulative_integrate("x2d") @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize("which_datetime", ["np", "cftime"]) def test_trapezoid_datetime(dask, which_datetime) -> None: rs = np.random.default_rng(42) coord: ArrayLike if which_datetime == "np": coord = np.array( [ "2004-07-13", "2006-01-13", "2010-08-13", "2010-09-13", "2010-10-11", "2010-12-13", "2011-02-13", "2012-08-13", ], dtype="datetime64", ) else: if not has_cftime: pytest.skip("Test requires cftime.") coord = xr.date_range("2000", periods=8, freq="2D", use_cftime=True) da = xr.DataArray( rs.random((8, 6)), coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.random((8, 6)))}, dims=["time", "y"], ) if dask and has_dask: da = da.chunk({"time": 4}) actual = da.integrate("time", datetime_unit="D") expected_data = trapezoid( da.compute().data, duck_array_ops.datetime_to_numeric(da["time"].data, datetime_unit="D"), axis=0, ) expected = xr.DataArray( expected_data, dims=["y"], coords={k: v for k, v in da.coords.items() if "time" not in v.dims}, ) assert_allclose(expected, actual.compute()) # make sure result is also a dask array (if the source is dask array) assert isinstance(actual.data, type(da.data)) actual2 = da.integrate("time", 
datetime_unit="h") assert_allclose(actual, actual2 / 24.0) def test_no_dict() -> None: d = Dataset() with pytest.raises(AttributeError): _ = d.__dict__ def test_subclass_slots() -> None: """Test that Dataset subclasses must explicitly define ``__slots__``. .. note:: As of 0.13.0, this is actually mitigated into a FutureWarning for any class defined outside of the xarray package. """ with pytest.raises(AttributeError) as e: class MyDS(Dataset): pass assert str(e.value) == "MyDS must explicitly define __slots__" def test_weakref() -> None: """Classes with __slots__ are incompatible with the weakref module unless they explicitly state __weakref__ among their slots """ from weakref import ref ds = Dataset() r = ref(ds) assert r() is ds def test_deepcopy_obj_array() -> None: x0 = Dataset(dict(foo=DataArray(np.array([object()])))) x1 = deepcopy(x0) assert x0["foo"].values[0] is not x1["foo"].values[0] def test_deepcopy_recursive() -> None: # GH:issue:7111 # direct recursion ds = xr.Dataset({"a": (["x"], [1, 2])}) ds.attrs["other"] = ds # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError ds.copy(deep=True) # indirect recursion ds2 = xr.Dataset({"b": (["y"], [3, 4])}) ds.attrs["other"] = ds2 ds2.attrs["other"] = ds # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError ds.copy(deep=True) ds2.copy(deep=True) def test_clip(ds) -> None: result = ds.clip(min=0.5) assert all((result.min(...) >= 0.5).values()) result = ds.clip(max=0.5) assert all((result.max(...) <= 0.5).values()) result = ds.clip(min=0.25, max=0.75) assert all((result.min(...) >= 0.25).values()) assert all((result.max(...) <= 0.75).values()) result = ds.clip(min=ds.mean("y"), max=ds.mean("y")) assert result.sizes == ds.sizes class TestDropDuplicates: @pytest.mark.parametrize("keep", ["first", "last", False]) def test_drop_duplicates_1d(self, keep) -> None: ds = xr.Dataset( {"a": ("time", [0, 5, 6, 7]), "b": ("time", [9, 3, 8, 2])}, coords={"time": [0, 0, 1, 2]}, ) if keep == "first": a = [0, 6, 7] b = [9, 8, 2] time = [0, 1, 2] elif keep == "last": a = [5, 6, 7] b = [3, 8, 2] time = [0, 1, 2] else: a = [6, 7] b = [8, 2] time = [1, 2] expected = xr.Dataset( {"a": ("time", a), "b": ("time", b)}, coords={"time": time} ) result = ds.drop_duplicates("time", keep=keep) assert_equal(expected, result) with pytest.raises( ValueError, match=re.escape( "Dimensions ('space',) not found in data dimensions ('time',)" ), ): ds.drop_duplicates("space", keep=keep) class TestNumpyCoercion: def test_from_numpy(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", [4, 5, 6])}) assert_identical(ds.as_numpy(), ds) @requires_dask def test_from_dask(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", [4, 5, 6])}) ds_chunked = ds.chunk(1) assert_identical(ds_chunked.as_numpy(), ds.compute()) @requires_pint def test_from_pint(self) -> None: from pint import Quantity arr = np.array([1, 2, 3]) ds = xr.Dataset( {"a": ("x", Quantity(arr, units="Pa"))}, coords={"lat": ("x", Quantity(arr + 3, units="m"))}, ) expected = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", arr + 3)}) assert_identical(ds.as_numpy(), expected) @requires_sparse def test_from_sparse(self) -> None: import sparse arr = np.diagflat([1, 2, 3]) sparr = sparse.COO.from_numpy(arr) ds = xr.Dataset( {"a": (["x", "y"], sparr)}, coords={"elev": (("x", "y"), sparr + 3)} ) expected = xr.Dataset( {"a": (["x", 
"y"], arr)}, coords={"elev": (("x", "y"), arr + 3)} ) assert_identical(ds.as_numpy(), expected) @requires_cupy def test_from_cupy(self) -> None: import cupy as cp arr = np.array([1, 2, 3]) ds = xr.Dataset( {"a": ("x", cp.array(arr))}, coords={"lat": ("x", cp.array(arr + 3))} ) expected = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"lat": ("x", arr + 3)}) assert_identical(ds.as_numpy(), expected) @requires_dask @requires_pint def test_from_pint_wrapping_dask(self) -> None: import dask from pint import Quantity arr = np.array([1, 2, 3]) d = dask.array.from_array(arr) ds = xr.Dataset( {"a": ("x", Quantity(d, units="Pa"))}, coords={"lat": ("x", Quantity(d, units="m") * 2)}, ) result = ds.as_numpy() expected = xr.Dataset({"a": ("x", arr)}, coords={"lat": ("x", arr * 2)}) assert_identical(result, expected) def test_string_keys_typing() -> None: """Tests that string keys to `variables` are permitted by mypy""" da = xr.DataArray(np.arange(10), dims=["x"]) ds = xr.Dataset(dict(x=da)) mapping = {"y": da} ds.assign(variables=mapping) def test_transpose_error() -> None: # Transpose dataset with list as argument # Should raise error ds = xr.Dataset({"foo": (("x", "y"), [[21]]), "bar": (("x", "y"), [[12]])}) with pytest.raises( TypeError, match=re.escape( "transpose requires dim to be passed as multiple arguments. Expected `'y', 'x'`. Received `['y', 'x']` instead" ), ): ds.transpose(["y", "x"]) # type: ignore[arg-type] xarray-2025.09.0/xarray/tests/test_dataset_typing.yml000066400000000000000000000175661505620616400226340ustar00rootroot00000000000000- case: test_mypy_pipe_lambda_noarg_return_type main: | from xarray import Dataset ds = Dataset().pipe(lambda data: data) reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" - case: test_mypy_pipe_lambda_posarg_return_type main: | from xarray import Dataset ds = Dataset().pipe(lambda data, arg: arg, "foo") reveal_type(ds) # N: Revealed type is "builtins.str" - case: test_mypy_pipe_lambda_chaining_return_type main: | from xarray import Dataset answer = Dataset().pipe(lambda data, arg: arg, "foo").count("o") reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_lambda_missing_arg main: | from xarray import Dataset # Call to pipe missing argument for lambda parameter `arg` ds = Dataset().pipe(lambda data, arg: data) out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], Any]" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_lambda_extra_arg main: | from xarray import Dataset # Call to pipe with extra argument for lambda ds = Dataset().pipe(lambda data: data, "oops!") out: | main:4: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any], Any]", "str" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_posarg main: | from xarray import Dataset def f(ds: Dataset, arg: int) -> Dataset: return ds # Call to pipe missing argument for function parameter `arg` ds = Dataset().pipe(f) out: | main:7: error: No overload variant of 
"pipe" of "DataWithCoords" matches argument type "Callable[[Dataset, int], Dataset]" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_extra_posarg main: | from xarray import Dataset def f(ds: Dataset, arg: int) -> Dataset: return ds # Call to pipe missing keyword for kwonly parameter `kwonly` ds = Dataset().pipe(f, 42, "oops!") out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Dataset, int], Dataset]", "int", "str" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_kwarg main: | from xarray import Dataset def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset: return ds # Call to pipe missing argument for kwonly parameter `kwonly` ds = Dataset().pipe(f, 42) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Dataset, int, NamedArg(int, 'kwonly')], Dataset]", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_keyword main: | from xarray import Dataset def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset: return ds # Call to pipe missing keyword for kwonly parameter `kwonly` ds = Dataset().pipe(f, 42, 99) out: | main:7: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Dataset, int, NamedArg(int, 'kwonly')], Dataset]", "int", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_unexpected_keyword main: | from xarray import Dataset def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset: return ds # Call to pipe using wrong keyword: `kw` instead of `kwonly` ds = Dataset().pipe(f, 42, kw=99) out: | main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords" [call-arg] - case: test_mypy_pipe_tuple_return_type_dataset main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> Dataset: return ds ds = Dataset().pipe((f, "ds"), 42) reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" - case: test_mypy_pipe_tuple_return_type_other main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> int: return arg answer = Dataset().pipe((f, "ds"), 42) reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_tuple_missing_arg main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> Dataset: return ds # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are missing an argument for parameter `arg`, so we get no error here. 
ds = Dataset().pipe((f, "ds")) reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we failed to pass an argument for `arg`. ds = Dataset().pipe(lambda data, arg: f(arg, data)) out: | main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument type "Callable[[Any, Any], Dataset]" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_tuple_extra_arg main: | from xarray import Dataset def f(arg: int, ds: Dataset) -> Dataset: return ds # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are providing too many args for `f`, so we get no error here. ds = Dataset().pipe((f, "ds"), 42, "foo") reveal_type(ds) # N: Revealed type is "xarray.core.dataset.Dataset" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we passed too many arguments. ds = Dataset().pipe(lambda data, arg: f(arg, data), 42, "foo") out: | main:17: error: No overload variant of "pipe" of "DataWithCoords" matches argument types "Callable[[Any, Any], Dataset]", "int", "str" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T xarray-2025.09.0/xarray/tests/test_datatree.py000066400000000000000000002611711505620616400212260ustar00rootroot00000000000000import re import sys import typing from collections.abc import Callable, Mapping from copy import copy, deepcopy from textwrap import dedent import numpy as np import pytest import xarray as xr from xarray import DataArray, Dataset from xarray.core.coordinates import DataTreeCoordinates from xarray.core.datatree import DataTree from xarray.core.treenode import NotFoundInTreeError from xarray.testing import assert_equal, assert_identical from xarray.tests import ( assert_array_equal, create_test_data, requires_dask, source_ndarray, ) ON_WINDOWS = sys.platform == "win32" class TestTreeCreation: def test_empty(self) -> None: dt = DataTree(name="root") assert dt.name == "root" assert dt.parent is None assert dt.children == {} assert_identical(dt.to_dataset(), xr.Dataset()) def test_name(self) -> None: dt = DataTree() assert dt.name is None dt = DataTree(name="foo") assert dt.name == "foo" dt.name = "bar" assert dt.name == "bar" dt = DataTree(children={"foo": DataTree()}) assert dt["/foo"].name == "foo" with pytest.raises( ValueError, match="cannot set the name of a node which already has a parent" ): dt["/foo"].name = "bar" detached = dt["/foo"].copy() assert detached.name == "foo" detached.name = "bar" assert detached.name == "bar" def test_bad_names(self) -> None: with pytest.raises(TypeError): DataTree(name=5) # type: ignore[arg-type] with pytest.raises(ValueError): DataTree(name="folder/data") def test_data_arg(self) -> None: ds = xr.Dataset({"foo": 42}) tree: DataTree = DataTree(dataset=ds) 
assert_identical(tree.to_dataset(), ds) with pytest.raises(TypeError): DataTree(dataset=xr.DataArray(42, name="foo")) # type: ignore[arg-type] def test_child_data_not_copied(self) -> None: # regression test for https://github.com/pydata/xarray/issues/9683 class NoDeepCopy: def __deepcopy__(self, memo): raise TypeError("class can't be deepcopied") da = xr.DataArray(NoDeepCopy()) ds = xr.Dataset({"var": da}) dt1 = xr.DataTree(ds) dt2 = xr.DataTree(ds, children={"child": dt1}) dt3 = xr.DataTree.from_dict({"/": ds, "child": ds}) assert_identical(dt2, dt3) class TestFamilyTree: def test_dont_modify_children_inplace(self) -> None: # GH issue 9196 child = DataTree() DataTree(children={"child": child}) assert child.parent is None def test_create_two_children(self) -> None: root_data = xr.Dataset({"a": ("y", [6, 7, 8]), "set0": ("x", [9, 10])}) set1_data = xr.Dataset({"a": 0, "b": 1}) root = DataTree.from_dict( {"/": root_data, "/set1": set1_data, "/set1/set2": None} ) assert root["/set1"].name == "set1" assert root["/set1/set2"].name == "set2" def test_create_full_tree(self, simple_datatree) -> None: d = simple_datatree.to_dict() d_keys = list(d.keys()) expected_keys = [ "/", "/set1", "/set2", "/set3", "/set1/set1", "/set1/set2", "/set2/set1", ] assert d_keys == expected_keys class TestNames: def test_child_gets_named_on_attach(self) -> None: sue = DataTree() mary = DataTree(children={"Sue": sue}) assert mary.children["Sue"].name == "Sue" def test_dataset_containing_slashes(self) -> None: xda: xr.DataArray = xr.DataArray( [[1, 2]], coords={"label": ["a"], "R30m/y": [30, 60]}, ) xds: xr.Dataset = xr.Dataset({"group/subgroup/my_variable": xda}) with pytest.raises( ValueError, match=re.escape( "Given variables have names containing the '/' character: " "['R30m/y', 'group/subgroup/my_variable']. " "Variables stored in DataTree objects cannot have names containing '/' characters, " "as this would make path-like access to variables ambiguous." ), ): DataTree(xds) class TestPaths: def test_path_property(self) -> None: john = DataTree.from_dict( { "/Mary/Sue": DataTree(), } ) assert john["/Mary/Sue"].path == "/Mary/Sue" assert john.path == "/" def test_path_roundtrip(self) -> None: john = DataTree.from_dict( { "/Mary/Sue": DataTree(), } ) assert john["/Mary/Sue"].name == "Sue" def test_same_tree(self) -> None: john = DataTree.from_dict( { "/Mary": DataTree(), "/Kate": DataTree(), } ) mary = john.children["Mary"] kate = john.children["Kate"] assert mary.same_tree(kate) def test_relative_paths(self) -> None: john = DataTree.from_dict( { "/Mary/Sue": DataTree(), "/Annie": DataTree(), } ) sue = john.children["Mary"].children["Sue"] annie = john.children["Annie"] assert sue.relative_to(john) == "Mary/Sue" assert john.relative_to(sue) == "../.." assert annie.relative_to(sue) == "../../Annie" assert sue.relative_to(annie) == "../Mary/Sue" assert sue.relative_to(sue) == "." 
evil_kate = DataTree() with pytest.raises( NotFoundInTreeError, match="nodes do not lie within the same tree" ): sue.relative_to(evil_kate) class TestStoreDatasets: def test_create_with_data(self) -> None: dat = xr.Dataset({"a": 0}) john = DataTree(name="john", dataset=dat) assert_identical(john.to_dataset(), dat) with pytest.raises(TypeError): DataTree(name="mary", dataset="junk") # type: ignore[arg-type] def test_set_data(self) -> None: john = DataTree(name="john") dat = xr.Dataset({"a": 0}) john.dataset = dat # type: ignore[assignment,unused-ignore] assert_identical(john.to_dataset(), dat) with pytest.raises(TypeError): john.dataset = "junk" # type: ignore[assignment] def test_has_data(self) -> None: john = DataTree(name="john", dataset=xr.Dataset({"a": 0})) assert john.has_data john_no_data = DataTree(name="john", dataset=None) assert not john_no_data.has_data def test_is_hollow(self) -> None: john = DataTree(dataset=xr.Dataset({"a": 0})) assert john.is_hollow eve = DataTree(children={"john": john}) assert eve.is_hollow eve.dataset = xr.Dataset({"a": 1}) # type: ignore[assignment,unused-ignore] assert not eve.is_hollow class TestToDataset: def test_to_dataset_inherited(self) -> None: base = xr.Dataset(coords={"a": [1], "b": 2}) sub = xr.Dataset(coords={"c": [3]}) tree = DataTree.from_dict({"/": base, "/sub": sub}) subtree = typing.cast(DataTree, tree["sub"]) assert_identical(tree.to_dataset(inherit=False), base) assert_identical(subtree.to_dataset(inherit=False), sub) sub_and_base = xr.Dataset(coords={"a": [1], "c": [3]}) # no "b" assert_identical(tree.to_dataset(inherit=True), base) assert_identical(subtree.to_dataset(inherit=True), sub_and_base) class TestVariablesChildrenNameCollisions: def test_parent_already_has_variable_with_childs_name(self) -> None: with pytest.raises(KeyError, match="already contains a variable named a"): DataTree.from_dict({"/": xr.Dataset({"a": [0], "b": 1}), "/a": None}) def test_parent_already_has_variable_with_childs_name_update(self) -> None: dt = DataTree(dataset=xr.Dataset({"a": [0], "b": 1})) with pytest.raises(ValueError, match="already contains a variable named a"): dt.update({"a": DataTree()}) def test_assign_when_already_child_with_variables_name(self) -> None: dt = DataTree.from_dict( { "/a": DataTree(), } ) with pytest.raises(ValueError, match="node already contains a variable"): dt.dataset = xr.Dataset({"a": 0}) # type: ignore[assignment,unused-ignore] dt.dataset = xr.Dataset() # type: ignore[assignment,unused-ignore] new_ds = dt.to_dataset().assign(a=xr.DataArray(0)) with pytest.raises(ValueError, match="node already contains a variable"): dt.dataset = new_ds # type: ignore[assignment,unused-ignore] class TestGet: ... 
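# A brief usage sketch of the path-based indexing that the TestGetItem cases
# below exercise (illustrative only, mirroring the assertions in that class;
# the variable names here are not part of the test suite):
#
#     tree = DataTree.from_dict({"/results/highres": xr.Dataset({"temp": [0, 50]})})
#     node = tree["results/highres"]         # look up a child node by path
#     temp = tree["results/highres/temp"]    # look up a variable through the same path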
class TestGetItem: def test_getitem_node(self) -> None: folder1 = DataTree.from_dict( { "/results/highres": DataTree(), } ) assert folder1["results"].name == "results" assert folder1["results/highres"].name == "highres" def test_getitem_self(self) -> None: dt = DataTree() assert dt["."] is dt def test_getitem_single_data_variable(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=data) assert_identical(results["temp"], data["temp"]) def test_getitem_single_data_variable_from_node(self) -> None: data = xr.Dataset({"temp": [0, 50]}) folder1 = DataTree.from_dict( { "/results/highres": data, } ) assert_identical(folder1["results/highres/temp"], data["temp"]) def test_getitem_nonexistent_node(self) -> None: folder1 = DataTree.from_dict({"/results": DataTree()}, name="folder1") with pytest.raises(KeyError): folder1["results/highres"] def test_getitem_nonexistent_variable(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=data) with pytest.raises(KeyError): results["pressure"] @pytest.mark.xfail(reason="Should be deprecated in favour of .subset") def test_getitem_multiple_data_variables(self) -> None: data = xr.Dataset({"temp": [0, 50], "p": [5, 8, 7]}) results = DataTree(name="results", dataset=data) assert_identical(results[["temp", "p"]], data[["temp", "p"]]) # type: ignore[index] @pytest.mark.xfail( reason="Indexing needs to return whole tree (GH https://github.com/xarray-contrib/datatree/issues/77)" ) def test_getitem_dict_like_selection_access_to_dataset(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=data) assert_identical(results[{"temp": 1}], data[{"temp": 1}]) # type: ignore[index] class TestUpdate: def test_update(self) -> None: dt = DataTree() dt.update({"foo": xr.DataArray(0), "a": DataTree()}) expected = DataTree.from_dict({"/": xr.Dataset({"foo": 0}), "a": None}) assert_equal(dt, expected) assert dt.groups == ("/", "/a") def test_update_new_named_dataarray(self) -> None: da = xr.DataArray(name="temp", data=[0, 50]) folder1 = DataTree(name="folder1") folder1.update({"results": da}) expected = da.rename("results") assert_equal(folder1["results"], expected) def test_update_doesnt_alter_child_name(self) -> None: dt = DataTree() dt.update({"foo": xr.DataArray(0), "a": DataTree(name="b")}) assert "a" in dt.children child = dt["a"] assert child.name == "a" def test_update_overwrite(self) -> None: actual = DataTree.from_dict({"a": DataTree(xr.Dataset({"x": 1}))}) actual.update({"a": DataTree(xr.Dataset({"x": 2}))}) expected = DataTree.from_dict({"a": DataTree(xr.Dataset({"x": 2}))}) assert_equal(actual, expected) def test_update_coordinates(self) -> None: expected = DataTree.from_dict({"/": xr.Dataset(coords={"a": 1})}) actual = DataTree.from_dict({"/": xr.Dataset()}) actual.update(xr.Dataset(coords={"a": 1})) assert_equal(actual, expected) def test_update_inherited_coords(self) -> None: expected = DataTree.from_dict( { "/": xr.Dataset(coords={"a": 1}), "/b": xr.Dataset(coords={"c": 1}), } ) actual = DataTree.from_dict( { "/": xr.Dataset(coords={"a": 1}), "/b": xr.Dataset(), } ) actual["/b"].update(xr.Dataset(coords={"c": 1})) assert_identical(actual, expected) # DataTree.identical() currently does not require that non-inherited # coordinates are defined identically, so we need to check this # explicitly actual_node = actual.children["b"].to_dataset(inherit=False) expected_node = expected.children["b"].to_dataset(inherit=False) assert_identical(actual_node, 
expected_node) class TestCopy: def test_copy(self, create_test_datatree) -> None: dt = create_test_datatree() for node in dt.root.subtree: node.attrs["Test"] = [1, 2, 3] for copied in [dt.copy(deep=False), copy(dt)]: assert_identical(dt, copied) for node, copied_node in zip( dt.root.subtree, copied.root.subtree, strict=True ): assert node.encoding == copied_node.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.util.safe_cast_to_index. # Limiting the test to data variables. for k in node.data_vars: v0 = node.variables[k] v1 = copied_node.variables[k] assert source_ndarray(v0.data) is source_ndarray(v1.data) copied_node["foo"] = xr.DataArray(data=np.arange(5), dims="z") assert "foo" not in node copied_node.attrs["foo"] = "bar" assert "foo" not in node.attrs assert node.attrs["Test"] is copied_node.attrs["Test"] def test_copy_subtree(self) -> None: dt = DataTree.from_dict({"/level1/level2/level3": xr.Dataset()}) actual = dt["/level1/level2"].copy() expected = DataTree.from_dict({"/level3": xr.Dataset()}, name="level2") assert_identical(actual, expected) def test_copy_coord_inheritance(self) -> None: tree = DataTree.from_dict( {"/": xr.Dataset(coords={"x": [0, 1]}), "/c": DataTree()} ) actual = tree.copy() node_ds = actual.children["c"].to_dataset(inherit=False) assert_identical(node_ds, xr.Dataset()) actual = tree.children["c"].copy() expected = DataTree(Dataset(coords={"x": [0, 1]}), name="c") assert_identical(expected, actual) actual = tree.children["c"].copy(inherit=False) expected = DataTree(name="c") assert_identical(expected, actual) def test_deepcopy(self, create_test_datatree) -> None: dt = create_test_datatree() for node in dt.root.subtree: node.attrs["Test"] = [1, 2, 3] for copied in [dt.copy(deep=True), deepcopy(dt)]: assert_identical(dt, copied) for node, copied_node in zip( dt.root.subtree, copied.root.subtree, strict=True ): assert node.encoding == copied_node.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.util.safe_cast_to_index. # Limiting the test to data variables. for k in node.data_vars: v0 = node.variables[k] v1 = copied_node.variables[k] assert source_ndarray(v0.data) is not source_ndarray(v1.data) copied_node["foo"] = xr.DataArray(data=np.arange(5), dims="z") assert "foo" not in node copied_node.attrs["foo"] = "bar" assert "foo" not in node.attrs assert node.attrs["Test"] is not copied_node.attrs["Test"] @pytest.mark.xfail(reason="data argument not yet implemented") def test_copy_with_data(self, create_test_datatree) -> None: orig = create_test_datatree() # TODO use .data_vars once that property is available data_vars = { k: v for k, v in orig.variables.items() if k not in orig._coord_names } new_data = {k: np.random.randn(*v.shape) for k, v in data_vars.items()} actual = orig.copy(data=new_data) expected = orig.copy() for k, v in new_data.items(): expected[k].data = v assert_identical(expected, actual) # TODO test parents and children? 
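# The TestCopy cases above rely on the shallow-vs-deep copy contract: a
# shallow copy shares the underlying data buffers of data variables, while a
# deep copy does not. A minimal sketch of that contract (illustrative only,
# restating the assertions above; `source_ndarray` is the helper imported at
# the top of this module):
#
#     dt = DataTree(xr.Dataset({"a": ("x", np.arange(3))}))
#     shallow, deep = dt.copy(deep=False), dt.copy(deep=True)
#     assert source_ndarray(shallow["a"].data) is source_ndarray(dt["a"].data)
#     assert source_ndarray(deep["a"].data) is not source_ndarray(dt["a"].data)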
class TestSetItem: def test_setitem_new_child_node(self) -> None: john = DataTree(name="john") mary = DataTree(name="mary") john["mary"] = mary grafted_mary = john["mary"] assert grafted_mary.parent is john assert grafted_mary.name == "mary" def test_setitem_unnamed_child_node_becomes_named(self) -> None: john2 = DataTree(name="john2") john2["sonny"] = DataTree() assert john2["sonny"].name == "sonny" def test_setitem_new_grandchild_node(self) -> None: john = DataTree.from_dict({"/Mary/Rose": DataTree()}) new_rose = DataTree(dataset=xr.Dataset({"x": 0})) john["Mary/Rose"] = new_rose grafted_rose = john["Mary/Rose"] assert grafted_rose.parent is john["/Mary"] assert grafted_rose.name == "Rose" def test_grafted_subtree_retains_name(self) -> None: subtree = DataTree(name="original_subtree_name") root = DataTree(name="root") root["new_subtree_name"] = subtree assert subtree.name == "original_subtree_name" def test_setitem_new_empty_node(self) -> None: john = DataTree(name="john") john["mary"] = DataTree() mary = john["mary"] assert isinstance(mary, DataTree) assert_identical(mary.to_dataset(), xr.Dataset()) def test_setitem_overwrite_data_in_node_with_none(self) -> None: john = DataTree.from_dict({"/mary": xr.Dataset()}, name="john") john["mary"] = DataTree() assert_identical(john["mary"].to_dataset(), xr.Dataset()) john.dataset = xr.Dataset() # type: ignore[assignment,unused-ignore] with pytest.raises(ValueError, match="has no name"): john["."] = DataTree() @pytest.mark.xfail(reason="assigning Datasets doesn't yet create new nodes") def test_setitem_dataset_on_this_node(self) -> None: data = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results") results["."] = data assert_identical(results.to_dataset(), data) def test_setitem_dataset_as_new_node(self) -> None: data = xr.Dataset({"temp": [0, 50]}) folder1 = DataTree(name="folder1") folder1["results"] = data assert_identical(folder1["results"].to_dataset(), data) def test_setitem_dataset_as_new_node_requiring_intermediate_nodes(self) -> None: data = xr.Dataset({"temp": [0, 50]}) folder1 = DataTree(name="folder1") folder1["results/highres"] = data assert_identical(folder1["results/highres"].to_dataset(), data) def test_setitem_named_dataarray(self) -> None: da = xr.DataArray(name="temp", data=[0, 50]) folder1 = DataTree(name="folder1") folder1["results"] = da expected = da.rename("results") assert_equal(folder1["results"], expected) def test_setitem_unnamed_dataarray(self) -> None: data = xr.DataArray([0, 50]) folder1 = DataTree(name="folder1") folder1["results"] = data assert_equal(folder1["results"], data) def test_setitem_variable(self) -> None: var = xr.Variable(data=[0, 50], dims="x") folder1 = DataTree(name="folder1") folder1["results"] = var assert_equal(folder1["results"], xr.DataArray(var)) def test_setitem_coerce_to_dataarray(self) -> None: folder1 = DataTree(name="folder1") folder1["results"] = 0 assert_equal(folder1["results"], xr.DataArray(0)) def test_setitem_add_new_variable_to_empty_node(self) -> None: results = DataTree(name="results") results["pressure"] = xr.DataArray(data=[2, 3]) assert "pressure" in results.dataset results["temp"] = xr.Variable(data=[10, 11], dims=["x"]) assert "temp" in results.dataset # What if there is a path to traverse first? 
results_with_path = DataTree(name="results") results_with_path["highres/pressure"] = xr.DataArray(data=[2, 3]) assert "pressure" in results_with_path["highres"].dataset results_with_path["highres/temp"] = xr.Variable(data=[10, 11], dims=["x"]) assert "temp" in results_with_path["highres"].dataset def test_setitem_dataarray_replace_existing_node(self) -> None: t = xr.Dataset({"temp": [0, 50]}) results = DataTree(name="results", dataset=t) p = xr.DataArray(data=[2, 3]) results["pressure"] = p expected = t.assign(pressure=p) assert_identical(results.to_dataset(), expected) class TestCoords: def test_properties(self) -> None: # use int64 for repr consistency on windows ds = Dataset( data_vars={ "foo": (["x", "y"], np.random.randn(2, 3)), }, coords={ "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10), }, ) dt = DataTree(dataset=ds) dt["child"] = DataTree() coords = dt.coords assert isinstance(coords, DataTreeCoordinates) # len assert len(coords) == 4 # iter assert list(coords) == ["x", "y", "a", "b"] assert_identical(coords["x"].variable, dt["x"].variable) assert_identical(coords["y"].variable, dt["y"].variable) assert "x" in coords assert "a" in coords assert 0 not in coords assert "foo" not in coords assert "child" not in coords with pytest.raises(KeyError): coords["foo"] # TODO this currently raises a ValueError instead of a KeyError # with pytest.raises(KeyError): # coords[0] # repr expected = dedent( """\ Coordinates: * x (x) int64 16B -1 -2 * y (y) int64 24B 0 1 2 a (x) int64 16B 4 5 b int64 8B -10""" ) actual = repr(coords) assert expected == actual # dims assert coords.sizes == {"x": 2, "y": 3} # dtypes assert coords.dtypes == { "x": np.dtype("int64"), "y": np.dtype("int64"), "a": np.dtype("int64"), "b": np.dtype("int64"), } def test_modify(self) -> None: ds = Dataset( data_vars={ "foo": (["x", "y"], np.random.randn(2, 3)), }, coords={ "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10), }, ) dt = DataTree(dataset=ds) dt["child"] = DataTree() actual = dt.copy(deep=True) actual.coords["x"] = ("x", ["a", "b"]) assert_array_equal(actual["x"], ["a", "b"]) actual = dt.copy(deep=True) actual.coords["z"] = ("z", ["a", "b"]) assert_array_equal(actual["z"], ["a", "b"]) actual = dt.copy(deep=True) with pytest.raises(ValueError, match=r"conflicting dimension sizes"): actual.coords["x"] = ("x", [-1]) assert_identical(actual, dt) # should not be modified # TODO: re-enable after implementing reset_coords() # actual = dt.copy() # del actual.coords["b"] # expected = dt.reset_coords("b", drop=True) # assert_identical(expected, actual) with pytest.raises(KeyError): del dt.coords["not_found"] with pytest.raises(KeyError): del dt.coords["foo"] # TODO: re-enable after implementing assign_coords() # actual = dt.copy(deep=True) # actual.coords.update({"c": 11}) # expected = dt.assign_coords({"c": 11}) # assert_identical(expected, actual) # # regression test for GH3746 # del actual.coords["x"] # assert "x" not in actual.xindexes # test that constructors can also handle the `DataTreeCoordinates` object ds2 = Dataset(coords=dt.coords) assert_identical(ds2.coords, dt.coords) da = DataArray(coords=dt.coords) assert_identical(da.coords, dt.coords) # DataTree constructor doesn't accept coords= but should still be able to handle DatasetCoordinates dt2 = DataTree(dataset=dt.coords) assert_identical(dt2.coords, dt.coords) def 
test_inherited(self) -> None: ds = Dataset( data_vars={ "foo": (["x", "y"], np.random.randn(2, 3)), }, coords={ "x": ("x", np.array([-1, -2], "int64")), "y": ("y", np.array([0, 1, 2], "int64")), "a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10), }, ) dt = DataTree(dataset=ds) dt["child"] = DataTree() child = dt["child"] assert set(dt.coords) == {"x", "y", "a", "b"} assert set(child.coords) == {"x", "y"} actual = child.copy(deep=True) actual.coords["x"] = ("x", ["a", "b"]) assert_array_equal(actual["x"], ["a", "b"]) actual = child.copy(deep=True) actual.coords.update({"c": 11}) expected = child.copy(deep=True) expected.coords["c"] = 11 # check we have only altered the child node assert_identical(expected.root, actual.root) with pytest.raises(KeyError): # cannot delete inherited coordinate from child node del child["x"] # TODO requires a fix for #9472 # actual = child.copy(deep=True) # actual.coords.update({"c": 11}) # expected = child.assign_coords({"c": 11}) # assert_identical(expected, actual) def test_delitem() -> None: ds = Dataset({"a": 0}, coords={"x": ("x", [1, 2]), "z": "a"}) dt = DataTree(ds, children={"c": DataTree()}) with pytest.raises(KeyError): del dt["foo"] # test delete children del dt["c"] assert dt.children == {} assert set(dt.variables) == {"x", "z", "a"} with pytest.raises(KeyError): del dt["c"] # test delete variables del dt["a"] assert set(dt.coords) == {"x", "z"} with pytest.raises(KeyError): del dt["a"] # test delete coordinates del dt["z"] assert set(dt.coords) == {"x"} with pytest.raises(KeyError): del dt["z"] # test delete indexed coordinates del dt["x"] assert dt.variables == {} assert dt.coords == {} assert dt.indexes == {} with pytest.raises(KeyError): del dt["x"] class TestTreeFromDict: def test_data_in_root(self) -> None: dat = xr.Dataset() dt = DataTree.from_dict({"/": dat}) assert dt.name is None assert dt.parent is None assert dt.children == {} assert_identical(dt.to_dataset(), dat) def test_one_layer(self) -> None: dat1, dat2 = xr.Dataset({"a": 1}), xr.Dataset({"b": 2}) dt = DataTree.from_dict({"run1": dat1, "run2": dat2}) assert_identical(dt.to_dataset(), xr.Dataset()) assert dt.name is None assert_identical(dt["run1"].to_dataset(), dat1) assert dt["run1"].children == {} assert_identical(dt["run2"].to_dataset(), dat2) assert dt["run2"].children == {} def test_two_layers(self) -> None: dat1, dat2 = xr.Dataset({"a": 1}), xr.Dataset({"a": [1, 2]}) dt = DataTree.from_dict({"highres/run": dat1, "lowres/run": dat2}) assert "highres" in dt.children assert "lowres" in dt.children highres_run = dt["highres/run"] assert_identical(highres_run.to_dataset(), dat1) def test_nones(self) -> None: dt = DataTree.from_dict({"d": None, "d/e": None}) assert [node.name for node in dt.subtree] == [None, "d", "e"] assert [node.path for node in dt.subtree] == ["/", "/d", "/d/e"] assert_identical(dt["d/e"].to_dataset(), xr.Dataset()) def test_full(self, simple_datatree) -> None: dt = simple_datatree paths = [node.path for node in dt.subtree] assert paths == [ "/", "/set1", "/set2", "/set3", "/set1/set1", "/set1/set2", "/set2/set1", ] def test_datatree_values(self) -> None: dat1 = DataTree(dataset=xr.Dataset({"a": 1})) expected = DataTree() expected["a"] = dat1 actual = DataTree.from_dict({"a": dat1}) assert_identical(actual, expected) def test_roundtrip_to_dict(self, simple_datatree) -> None: tree = simple_datatree roundtrip = DataTree.from_dict(tree.to_dict()) assert_identical(tree, roundtrip) def test_to_dict(self): tree = DataTree.from_dict({"/a/b/c": None}) roundtrip 
= DataTree.from_dict(tree.to_dict()) assert_identical(tree, roundtrip) roundtrip = DataTree.from_dict(tree.to_dict(relative=True)) assert_identical(tree, roundtrip) roundtrip = DataTree.from_dict(tree.children["a"].to_dict(relative=False)) assert_identical(tree, roundtrip) expected = DataTree.from_dict({"b/c": None}) actual = DataTree.from_dict(tree.children["a"].to_dict(relative=True)) assert_identical(expected, actual) def test_roundtrip_unnamed_root(self, simple_datatree) -> None: # See GH81 dt = simple_datatree dt.name = "root" roundtrip = DataTree.from_dict(dt.to_dict()) assert roundtrip.equals(dt) def test_insertion_order(self) -> None: # regression test for GH issue #9276 reversed = DataTree.from_dict( { "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer": xr.Dataset({"age": 39}), "/": xr.Dataset({"age": 83}), } ) expected = DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Bart": xr.Dataset({"age": 10}), } ) assert reversed.equals(expected) # Check that Bart and Lisa's order is still preserved within the group, # despite 'Bart' coming before 'Lisa' when sorted alphabetically assert list(reversed["Homer"].children.keys()) == ["Lisa", "Bart"] def test_array_values(self) -> None: data = {"foo": xr.DataArray(1, name="bar")} with pytest.raises(TypeError): DataTree.from_dict(data) # type: ignore[arg-type] def test_relative_paths(self) -> None: tree = DataTree.from_dict({".": None, "foo": None, "./bar": None, "x/y": None}) paths = [node.path for node in tree.subtree] assert paths == [ "/", "/foo", "/bar", "/x", "/x/y", ] def test_root_keys(self): ds = Dataset({"x": 1}) expected = DataTree(dataset=ds) actual = DataTree.from_dict({"": ds}) assert_identical(actual, expected) actual = DataTree.from_dict({".": ds}) assert_identical(actual, expected) actual = DataTree.from_dict({"/": ds}) assert_identical(actual, expected) actual = DataTree.from_dict({"./": ds}) assert_identical(actual, expected) with pytest.raises( ValueError, match="multiple entries found corresponding to the root node" ): DataTree.from_dict({"": ds, "/": ds}) def test_name(self): tree = DataTree.from_dict({"/": None}, name="foo") assert tree.name == "foo" tree = DataTree.from_dict({"/": DataTree()}, name="foo") assert tree.name == "foo" tree = DataTree.from_dict({"/": DataTree(name="bar")}, name="foo") assert tree.name == "foo" class TestDatasetView: def test_view_contents(self) -> None: ds = create_test_data() dt = DataTree(dataset=ds) assert ds.identical( dt.dataset ) # this only works because Dataset.identical doesn't check types assert isinstance(dt.dataset, xr.Dataset) def test_immutability(self) -> None: # See issue https://github.com/xarray-contrib/datatree/issues/38 dt = DataTree.from_dict( { "/": None, "/a": None, }, name="root", ) with pytest.raises( AttributeError, match="Mutation of the DatasetView is not allowed" ): dt.dataset["a"] = xr.DataArray(0) with pytest.raises( AttributeError, match="Mutation of the DatasetView is not allowed" ): dt.dataset.update({"a": 0}) # TODO are there any other ways you can normally modify state (in-place)? 
# (not attribute-like assignment because that doesn't work on Dataset anyway) def test_methods(self) -> None: ds = create_test_data() dt = DataTree(dataset=ds) assert ds.mean().identical(dt.dataset.mean()) assert isinstance(dt.dataset.mean(), xr.Dataset) def test_arithmetic(self, create_test_datatree) -> None: dt = create_test_datatree() expected = create_test_datatree(modify=lambda ds: 10.0 * ds)[ "set1" ].to_dataset() result = 10.0 * dt["set1"].dataset assert result.identical(expected) def test_init_via_type(self) -> None: # from datatree GH issue https://github.com/xarray-contrib/datatree/issues/188 # xarray's .weighted is unusual because it uses type() to create a Dataset/DataArray a = xr.DataArray( np.random.rand(3, 4, 10), dims=["x", "y", "time"], coords={"area": (["x", "y"], np.random.rand(3, 4))}, ).to_dataset(name="data") dt = DataTree(dataset=a) def weighted_mean(ds): return ds.weighted(ds.area).mean(["x", "y"]) weighted_mean(dt.dataset) def test_map_keep_attrs(self) -> None: # test DatasetView.map(..., keep_attrs=...) data = xr.DataArray([1, 2, 3], dims="x", attrs={"da": "attrs"}) ds = xr.Dataset({"data": data}, attrs={"ds": "attrs"}) dt = DataTree(ds) def func_keep(ds): # x.mean() removes the attrs of the data_vars return ds.map(lambda x: x.mean(), keep_attrs=True) result = xr.map_over_datasets(func_keep, dt) expected = dt.mean(keep_attrs=True) xr.testing.assert_identical(result, expected) # per default DatasetView.map does not keep attrs def func(ds): # x.mean() removes the attrs of the data_vars return ds.map(lambda x: x.mean()) result = xr.map_over_datasets(func, dt) expected = dt.mean() xr.testing.assert_identical(result, expected.mean()) class TestAccess: def test_attribute_access(self, create_test_datatree) -> None: dt = create_test_datatree() # vars / coords for key in ["a", "set0"]: assert_equal(dt[key], getattr(dt, key)) assert key in dir(dt) # dims assert_equal(dt["a"]["y"], dt.a.y) assert "y" in dir(dt["a"]) # children for key in ["set1", "set2", "set3"]: assert_equal(dt[key], getattr(dt, key)) assert key in dir(dt) # attrs dt.attrs["meta"] = "NASA" assert dt.attrs["meta"] == "NASA" assert "meta" in dir(dt) def test_ipython_key_completions_complex(self, create_test_datatree) -> None: dt = create_test_datatree() key_completions = dt._ipython_key_completions_() node_keys = [node.path[1:] for node in dt.descendants] assert all(node_key in key_completions for node_key in node_keys) var_keys = list(dt.variables.keys()) assert all(var_key in key_completions for var_key in var_keys) def test_ipython_key_completitions_subnode(self) -> None: tree = xr.DataTree.from_dict({"/": None, "/a": None, "/a/b/": None}) expected = ["b"] actual = tree["a"]._ipython_key_completions_() assert expected == actual def test_operation_with_attrs_but_no_data(self) -> None: # tests bug from xarray-datatree GH262 xs = xr.Dataset({"testvar": xr.DataArray(np.ones((2, 3)))}) dt = DataTree.from_dict({"node1": xs, "node2": xs}) dt.attrs["test_key"] = 1 # sel works fine without this line dt.sel(dim_0=0) class TestRepr: def test_repr_four_nodes(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset( {"e": (("x",), [1.0, 2.0])}, coords={"x": [2.0, 3.0]}, ), "/b": xr.Dataset({"f": (("y",), [3.0])}), "/b/c": xr.Dataset(), "/b/d": xr.Dataset({"g": 4.0}), } ) result = repr(dt) expected = dedent( """ Group: / β”‚ Dimensions: (x: 2) β”‚ Coordinates: β”‚ * x (x) float64 16B 2.0 3.0 β”‚ Data variables: β”‚ e (x) float64 16B 1.0 2.0 └── Group: /b β”‚ Dimensions: (y: 1) β”‚ Dimensions without coordinates: y β”‚ 
Data variables: β”‚ f (y) float64 8B 3.0 β”œβ”€β”€ Group: /b/c └── Group: /b/d Dimensions: () Data variables: g float64 8B 4.0 """ ).strip() assert result == expected result = repr(dt.b) expected = dedent( """ Group: /b β”‚ Dimensions: (x: 2, y: 1) β”‚ Inherited coordinates: β”‚ * x (x) float64 16B 2.0 3.0 β”‚ Dimensions without coordinates: y β”‚ Data variables: β”‚ f (y) float64 8B 3.0 β”œβ”€β”€ Group: /b/c └── Group: /b/d Dimensions: () Data variables: g float64 8B 4.0 """ ).strip() assert result == expected result = repr(dt.b.d) expected = dedent( """ Group: /b/d Dimensions: (x: 2, y: 1) Inherited coordinates: * x (x) float64 16B 2.0 3.0 Dimensions without coordinates: y Data variables: g float64 8B 4.0 """ ).strip() assert result == expected def test_repr_two_children(self) -> None: tree = DataTree.from_dict( { "/": Dataset(coords={"x": [1.0]}), "/first_child": None, "/second_child": Dataset({"foo": ("x", [0.0])}, coords={"z": 1.0}), } ) result = repr(tree) expected = dedent( """ Group: / β”‚ Dimensions: (x: 1) β”‚ Coordinates: β”‚ * x (x) float64 8B 1.0 β”œβ”€β”€ Group: /first_child └── Group: /second_child Dimensions: (x: 1) Coordinates: z float64 8B 1.0 Data variables: foo (x) float64 8B 0.0 """ ).strip() assert result == expected result = repr(tree["first_child"]) expected = dedent( """ Group: /first_child Dimensions: (x: 1) Inherited coordinates: * x (x) float64 8B 1.0 """ ).strip() assert result == expected result = repr(tree["second_child"]) expected = dedent( """ Group: /second_child Dimensions: (x: 1) Coordinates: z float64 8B 1.0 Inherited coordinates: * x (x) float64 8B 1.0 Data variables: foo (x) float64 8B 0.0 """ ).strip() assert result == expected def test_repr_truncates_nodes(self) -> None: # construct a datatree with 50 nodes number_of_files = 10 number_of_groups = 5 tree_dict = {} for f in range(number_of_files): for g in range(number_of_groups): tree_dict[f"file_{f}/group_{g}"] = Dataset({"g": f * g}) tree = DataTree.from_dict(tree_dict) with xr.set_options(display_max_children=3): result = repr(tree) expected = dedent( """ Group: / β”œβ”€β”€ Group: /file_0 β”‚ β”œβ”€β”€ Group: /file_0/group_0 β”‚ β”‚ Dimensions: () β”‚ β”‚ Data variables: β”‚ β”‚ g int64 8B 0 β”‚ β”œβ”€β”€ Group: /file_0/group_1 β”‚ β”‚ Dimensions: () β”‚ β”‚ Data variables: β”‚ β”‚ g int64 8B 0 β”‚ ... β”‚ └── Group: /file_0/group_4 β”‚ Dimensions: () β”‚ Data variables: β”‚ g int64 8B 0 β”œβ”€β”€ Group: /file_1 β”‚ β”œβ”€β”€ Group: /file_1/group_0 β”‚ β”‚ Dimensions: () β”‚ β”‚ Data variables: β”‚ β”‚ g int64 8B 0 β”‚ β”œβ”€β”€ Group: /file_1/group_1 β”‚ β”‚ Dimensions: () β”‚ β”‚ Data variables: β”‚ β”‚ g int64 8B 1 β”‚ ... β”‚ └── Group: /file_1/group_4 β”‚ Dimensions: () β”‚ Data variables: β”‚ g int64 8B 4 ... └── Group: /file_9 β”œβ”€β”€ Group: /file_9/group_0 β”‚ Dimensions: () β”‚ Data variables: β”‚ g int64 8B 0 β”œβ”€β”€ Group: /file_9/group_1 β”‚ Dimensions: () β”‚ Data variables: β”‚ g int64 8B 9 ... 
└── Group: /file_9/group_4 Dimensions: () Data variables: g int64 8B 36 """ ).strip() assert expected == result with xr.set_options(display_max_children=10): result = repr(tree) for key in tree_dict: assert key in result def test_repr_inherited_dims(self) -> None: tree = DataTree.from_dict( { "/": Dataset({"foo": ("x", [1.0])}), "/child": Dataset({"bar": ("y", [2.0])}), } ) result = repr(tree) expected = dedent( """ Group: / β”‚ Dimensions: (x: 1) β”‚ Dimensions without coordinates: x β”‚ Data variables: β”‚ foo (x) float64 8B 1.0 └── Group: /child Dimensions: (y: 1) Dimensions without coordinates: y Data variables: bar (y) float64 8B 2.0 """ ).strip() assert result == expected result = repr(tree["child"]) expected = dedent( """ Group: /child Dimensions: (x: 1, y: 1) Dimensions without coordinates: x, y Data variables: bar (y) float64 8B 2.0 """ ).strip() assert result == expected @pytest.mark.skipif( ON_WINDOWS, reason="windows (pre NumPy2) uses int32 instead of int64" ) def test_doc_example(self) -> None: # regression test for https://github.com/pydata/xarray/issues/9499 time = xr.DataArray( data=np.array(["2022-01", "2023-01"], dtype=" Group: / β”‚ Dimensions: (time: 2) β”‚ Coordinates: β”‚ * time (time) Group: /weather β”‚ Dimensions: (time: 2, station: 6) β”‚ Coordinates: β”‚ * station (station) str: return re.escape(dedent(message).strip()) class TestInheritance: def test_inherited_dims(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset({"d": (("x",), [1, 2])}), "/b": xr.Dataset({"e": (("y",), [3])}), "/c": xr.Dataset({"f": (("y",), [3, 4, 5])}), } ) assert dt.sizes == {"x": 2} # nodes should include inherited dimensions assert dt.b.sizes == {"x": 2, "y": 1} assert dt.c.sizes == {"x": 2, "y": 3} # dataset objects created from nodes should not assert dt.b.dataset.sizes == {"y": 1} assert dt.b.to_dataset(inherit=True).sizes == {"y": 1} assert dt.b.to_dataset(inherit=False).sizes == {"y": 1} def test_inherited_coords_index(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset({"d": (("x",), [1, 2])}, coords={"x": [2, 3]}), "/b": xr.Dataset({"e": (("y",), [3])}), } ) assert "x" in dt["/b"].indexes assert "x" in dt["/b"].coords xr.testing.assert_identical(dt["/x"], dt["/b/x"]) def test_inherit_only_index_coords(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1], "y": 2}), "/b": xr.Dataset(coords={"z": 3}), } ) assert dt.coords.keys() == {"x", "y"} xr.testing.assert_equal( dt["/x"], xr.DataArray([1], dims=["x"], coords={"x": [1], "y": 2}) ) xr.testing.assert_equal(dt["/y"], xr.DataArray(2, coords={"y": 2})) assert dt["/b"].coords.keys() == {"x", "z"} xr.testing.assert_equal( dt["/b/x"], xr.DataArray([1], dims=["x"], coords={"x": [1], "z": 3}) ) xr.testing.assert_equal(dt["/b/z"], xr.DataArray(3, coords={"z": 3})) def test_inherited_coords_with_index_are_deduplicated(self) -> None: dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2]}), "/b": xr.Dataset(coords={"x": [1, 2]}), } ) child_dataset = dt.children["b"].to_dataset(inherit=False) expected = xr.Dataset() assert_identical(child_dataset, expected) dt["/c"] = xr.Dataset({"foo": ("x", [4, 5])}, coords={"x": [1, 2]}) child_dataset = dt.children["c"].to_dataset(inherit=False) expected = xr.Dataset({"foo": ("x", [4, 5])}) assert_identical(child_dataset, expected) def test_deduplicated_after_setitem(self) -> None: # regression test for GH #9601 dt = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2]}), "/b": None, } ) dt["b/x"] = dt["x"] child_dataset = 
dt.children["b"].to_dataset(inherit=False) expected = xr.Dataset() assert_identical(child_dataset, expected) def test_inconsistent_dims(self) -> None: expected_msg = _exact_match( """ group '/b' is not aligned with its parents: Group: Dimensions: (x: 1) Dimensions without coordinates: x Data variables: c (x) float64 8B 3.0 From parents: Dimensions: (x: 2) Dimensions without coordinates: x """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1.0, 2.0])}), "/b": xr.Dataset({"c": (("x",), [3.0])}), } ) dt = DataTree() dt["/a"] = xr.DataArray([1.0, 2.0], dims=["x"]) with pytest.raises(ValueError, match=expected_msg): dt["/b/c"] = xr.DataArray([3.0], dims=["x"]) b = DataTree(dataset=xr.Dataset({"c": (("x",), [3.0])})) with pytest.raises(ValueError, match=expected_msg): DataTree( dataset=xr.Dataset({"a": (("x",), [1.0, 2.0])}), children={"b": b}, ) def test_inconsistent_child_indexes(self) -> None: expected_msg = _exact_match( """ group '/b' is not aligned with its parents: Group: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 2.0 Data variables: *empty* From parents: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 1.0 """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1.0]}), "/b": xr.Dataset(coords={"x": [2.0]}), } ) dt = DataTree() dt.dataset = xr.Dataset(coords={"x": [1.0]}) # type: ignore[assignment,unused-ignore] dt["/b"] = DataTree() with pytest.raises(ValueError, match=expected_msg): dt["/b"].dataset = xr.Dataset(coords={"x": [2.0]}) b = DataTree(xr.Dataset(coords={"x": [2.0]})) with pytest.raises(ValueError, match=expected_msg): DataTree(dataset=xr.Dataset(coords={"x": [1.0]}), children={"b": b}) def test_inconsistent_grandchild_indexes(self) -> None: expected_msg = _exact_match( """ group '/b/c' is not aligned with its parents: Group: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 2.0 Data variables: *empty* From parents: Dimensions: (x: 1) Coordinates: * x (x) float64 8B 1.0 """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1.0]}), "/b/c": xr.Dataset(coords={"x": [2.0]}), } ) dt = DataTree() dt.dataset = xr.Dataset(coords={"x": [1.0]}) # type: ignore[assignment,unused-ignore] dt["/b/c"] = DataTree() with pytest.raises(ValueError, match=expected_msg): dt["/b/c"].dataset = xr.Dataset(coords={"x": [2.0]}) c = DataTree(xr.Dataset(coords={"x": [2.0]})) b = DataTree(children={"c": c}) with pytest.raises(ValueError, match=expected_msg): DataTree(dataset=xr.Dataset(coords={"x": [1.0]}), children={"b": b}) def test_inconsistent_grandchild_dims(self) -> None: expected_msg = _exact_match( """ group '/b/c' is not aligned with its parents: Group: Dimensions: (x: 1) Dimensions without coordinates: x Data variables: d (x) float64 8B 3.0 From parents: Dimensions: (x: 2) Dimensions without coordinates: x """ ) with pytest.raises(ValueError, match=expected_msg): DataTree.from_dict( { "/": xr.Dataset({"a": (("x",), [1.0, 2.0])}), "/b/c": xr.Dataset({"d": (("x",), [3.0])}), } ) dt = DataTree() dt["/a"] = xr.DataArray([1.0, 2.0], dims=["x"]) with pytest.raises(ValueError, match=expected_msg): dt["/b/c/d"] = xr.DataArray([3.0], dims=["x"]) class TestRestructuring: def test_drop_nodes(self) -> None: sue = DataTree.from_dict({"Mary": None, "Kate": None, "Ashley": None}) # test drop just one node dropped_one = sue.drop_nodes(names="Mary") assert "Mary" not in dropped_one.children # test drop multiple nodes dropped = 
sue.drop_nodes(names=["Mary", "Kate"]) assert not {"Mary", "Kate"}.intersection(set(dropped.children)) assert "Ashley" in dropped.children # test raise with pytest.raises(KeyError, match="nodes {'Mary'} not present"): dropped.drop_nodes(names=["Mary", "Ashley"]) # test ignore childless = dropped.drop_nodes(names=["Mary", "Ashley"], errors="ignore") assert childless.children == {} def test_assign(self) -> None: dt = DataTree() expected = DataTree.from_dict({"/": xr.Dataset({"foo": 0}), "/a": None}) # kwargs form result = dt.assign(foo=xr.DataArray(0), a=DataTree()) assert_equal(result, expected) # dict form result = dt.assign({"foo": xr.DataArray(0), "a": DataTree()}) assert_equal(result, expected) def test_filter_like(self) -> None: flower_tree = DataTree.from_dict( {"root": None, "trunk": None, "leaves": None, "flowers": None} ) fruit_tree = DataTree.from_dict( {"root": None, "trunk": None, "leaves": None, "fruit": None} ) barren_tree = DataTree.from_dict({"root": None, "trunk": None}) # test filter_like tree filtered_tree = flower_tree.filter_like(barren_tree) assert filtered_tree.equals(barren_tree) assert "flowers" not in filtered_tree.children # test symmetrical pruning results in isomorphic trees assert flower_tree.filter_like(fruit_tree).isomorphic( fruit_tree.filter_like(flower_tree) ) # test "deep" pruning dt = DataTree.from_dict( {"/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None} ) other = DataTree.from_dict({"/a/A": None, "/b/A": None}) filtered = dt.filter_like(other) assert filtered.equals(other) class TestPipe: def test_noop(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() actual = dt.pipe(lambda tree: tree) assert actual.identical(dt) def test_args(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(tree: DataTree, x: int, y: int) -> DataTree: return tree.assign( arr_with_attrs=xr.Variable("dim0", [], attrs=dict(x=x, y=y)) ) actual = dt.pipe(f, 1, 2) assert actual["arr_with_attrs"].attrs == dict(x=1, y=2) def test_kwargs(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(tree: DataTree, *, x: int, y: int, z: int) -> DataTree: return tree.assign( arr_with_attrs=xr.Variable("dim0", [], attrs=dict(x=x, y=y, z=z)) ) attrs = {"x": 1, "y": 2, "z": 3} actual = dt.pipe(f, **attrs) assert actual["arr_with_attrs"].attrs == attrs def test_args_kwargs(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(tree: DataTree, x: int, *, y: int, z: int) -> DataTree: return tree.assign( arr_with_attrs=xr.Variable("dim0", [], attrs=dict(x=x, y=y, z=z)) ) attrs = {"x": 1, "y": 2, "z": 3} actual = dt.pipe(f, attrs["x"], y=attrs["y"], z=attrs["z"]) assert actual["arr_with_attrs"].attrs == attrs def test_named_self(self, create_test_datatree: Callable[[], DataTree]) -> None: dt = create_test_datatree() def f(x: int, tree: DataTree, y: int): tree.attrs.update({"x": x, "y": y}) return tree attrs = {"x": 1, "y": 2} actual = dt.pipe((f, "tree"), **attrs) assert actual is dt and actual.attrs == attrs class TestIsomorphicEqualsAndIdentical: def test_isomorphic(self): tree = DataTree.from_dict({"/a": None, "/a/b": None, "/c": None}) diff_data = DataTree.from_dict( {"/a": None, "/a/b": None, "/c": xr.Dataset({"foo": 1})} ) assert tree.isomorphic(diff_data) diff_order = DataTree.from_dict({"/c": None, "/a": None, "/a/b": None}) assert tree.isomorphic(diff_order) diff_nodes = DataTree.from_dict({"/a": None, "/a/b": None, "/d": None}) 
assert not tree.isomorphic(diff_nodes) more_nodes = DataTree.from_dict( {"/a": None, "/a/b": None, "/c": None, "/d": None} ) assert not tree.isomorphic(more_nodes) def test_minimal_variations(self): tree = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 2}), } ) assert tree.equals(tree) assert tree.identical(tree) child = tree.children["child"] assert child.equals(child) assert child.identical(child) new_child = DataTree(dataset=Dataset({"x": 2}), name="child") assert child.equals(new_child) assert child.identical(new_child) anonymous_child = DataTree(dataset=Dataset({"x": 2})) # TODO: re-enable this after fixing .equals() not to require matching # names on the root node (i.e., after switching to use zip_subtrees) # assert child.equals(anonymous_child) assert not child.identical(anonymous_child) different_variables = DataTree.from_dict( { "/": Dataset(), "/other": Dataset({"x": 2}), } ) assert not tree.equals(different_variables) assert not tree.identical(different_variables) different_root_data = DataTree.from_dict( { "/": Dataset({"x": 4}), "/child": Dataset({"x": 2}), } ) assert not tree.equals(different_root_data) assert not tree.identical(different_root_data) different_child_data = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 3}), } ) assert not tree.equals(different_child_data) assert not tree.identical(different_child_data) different_child_node_attrs = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 2}, attrs={"foo": "bar"}), } ) assert tree.equals(different_child_node_attrs) assert not tree.identical(different_child_node_attrs) different_child_variable_attrs = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": ((), 2, {"foo": "bar"})}), } ) assert tree.equals(different_child_variable_attrs) assert not tree.identical(different_child_variable_attrs) different_name = DataTree.from_dict( { "/": Dataset({"x": 1}), "/child": Dataset({"x": 2}), }, name="different", ) # TODO: re-enable this after fixing .equals() not to require matching # names on the root node (i.e., after switching to use zip_subtrees) # assert tree.equals(different_name) assert not tree.identical(different_name) def test_differently_inherited_coordinates(self): root = DataTree.from_dict( { "/": Dataset(coords={"x": [1, 2]}), "/child": Dataset(), } ) child = root.children["child"] assert child.equals(child) assert child.identical(child) new_child = DataTree(dataset=Dataset(coords={"x": [1, 2]}), name="child") assert child.equals(new_child) assert not child.identical(new_child) deeper_root = DataTree(children={"root": root}) grandchild = deeper_root.children["root"].children["child"] assert child.equals(grandchild) assert child.identical(grandchild) class TestSubset: def test_match(self) -> None: # TODO is this example going to cause problems with case sensitivity? 
dt = DataTree.from_dict( { "/a/A": None, "/a/B": None, "/b/A": None, "/b/B": None, } ) result = dt.match("*/B") expected = DataTree.from_dict( { "/a/B": None, "/b/B": None, } ) assert_identical(result, expected) result = dt.children["a"].match("B") expected = DataTree.from_dict({"/B": None}, name="a") assert_identical(result, expected) def test_filter(self) -> None: simpsons = DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Maggie": xr.Dataset({"age": 1}), }, name="Abe", ) expected = DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), }, name="Abe", ) elders = simpsons.filter(lambda node: node["age"].item() > 18) assert_identical(elders, expected) expected = DataTree.from_dict({"/Bart": xr.Dataset({"age": 10})}, name="Homer") actual = simpsons.children["Homer"].filter( lambda node: node["age"].item() == 10 ) assert_identical(actual, expected) def test_prune_basic(self) -> None: tree = DataTree.from_dict( {"/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset()} ) pruned = tree.prune() assert "a" in pruned.children assert "b" not in pruned.children assert_identical( pruned.children["a"].to_dataset(), tree.children["a"].to_dataset() ) def test_prune_with_zero_size_vars(self) -> None: tree = DataTree.from_dict( { "/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset({"empty": ("dim", [])}), "/c": xr.Dataset(), } ) pruned_default = tree.prune() expected_default = DataTree.from_dict( { "/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset({"empty": ("dim", [])}), } ) assert_identical(pruned_default, expected_default) pruned_strict = tree.prune(drop_size_zero_vars=True) expected_strict = DataTree.from_dict( { "/a": xr.Dataset({"foo": ("x", [1, 2])}), } ) assert_identical(pruned_strict, expected_strict) def test_prune_with_intermediate_nodes(self) -> None: tree = DataTree.from_dict( { "/": xr.Dataset(), "/group1": xr.Dataset(), "/group1/subA": xr.Dataset({"temp": ("x", [1, 2])}), "/group1/subB": xr.Dataset(), "/group2": xr.Dataset({"empty": ("dim", [])}), } ) pruned = tree.prune() expected_tree = DataTree.from_dict( { "/group1/subA": xr.Dataset({"temp": ("x", [1, 2])}), "/group2": xr.Dataset({"empty": ("dim", [])}), } ) assert_identical(pruned, expected_tree) def test_prune_after_filtering(self) -> None: from pandas import date_range ds1 = xr.Dataset( {"foo": ("time", [1, 2, 3, 4, 5])}, coords={"time": date_range("2023-01-01", periods=5, freq="D")}, ) ds2 = xr.Dataset( {"var": ("time", [1, 2, 3, 4, 5])}, coords={"time": date_range("2023-01-04", periods=5, freq="D")}, ) tree = DataTree.from_dict({"a": ds1, "b": ds2}) filtered = tree.sel(time=slice("2023-01-01", "2023-01-03")) pruned = filtered.prune(drop_size_zero_vars=True) expected_tree = DataTree.from_dict( {"a": ds1.sel(time=slice("2023-01-01", "2023-01-03"))} ) assert_identical(pruned, expected_tree) class TestIndexing: def test_isel_siblings(self) -> None: tree = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1, 2])}), "/second": xr.Dataset({"b": ("x", [1, 2, 3])}), } ) expected = DataTree.from_dict( { "/first": xr.Dataset({"a": 2}), "/second": xr.Dataset({"b": 3}), } ) actual = tree.isel(x=-1) assert_identical(actual, expected) expected = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1])}), "/second": xr.Dataset({"b": ("x", [1])}), } ) actual = tree.isel(x=slice(1)) 
assert_identical(actual, expected) actual = tree.isel(x=[0]) assert_identical(actual, expected) actual = tree.isel(x=slice(None)) assert_identical(actual, tree) def test_isel_inherited(self) -> None: tree = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1, 2]}), "/child": xr.Dataset({"foo": ("x", [3, 4])}), } ) expected = DataTree.from_dict( { "/": xr.Dataset(coords={"x": 2}), "/child": xr.Dataset({"foo": 4}), } ) actual = tree.isel(x=-1) assert_identical(actual, expected) expected = DataTree.from_dict( { "/child": xr.Dataset({"foo": 4}), } ) actual = tree.isel(x=-1, drop=True) assert_identical(actual, expected) expected = DataTree.from_dict( { "/": xr.Dataset(coords={"x": [1]}), "/child": xr.Dataset({"foo": ("x", [3])}), } ) actual = tree.isel(x=[0]) assert_identical(actual, expected) actual = tree.isel(x=slice(None)) # TODO: re-enable after the fix to copy() from #9628 is submitted # actual = tree.children["child"].isel(x=slice(None)) # expected = tree.children["child"].copy() # assert_identical(actual, expected) actual = tree.children["child"].isel(x=0) expected = DataTree( dataset=xr.Dataset({"foo": 3}, coords={"x": 1}), name="child", ) assert_identical(actual, expected) def test_sel(self) -> None: tree = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}), "/second": xr.Dataset({"b": ("x", [4, 5])}, coords={"x": [2, 3]}), } ) expected = DataTree.from_dict( { "/first": xr.Dataset({"a": 2}, coords={"x": 2}), "/second": xr.Dataset({"b": 4}, coords={"x": 2}), } ) actual = tree.sel(x=2) assert_identical(actual, expected) actual = tree.children["first"].sel(x=2) expected = DataTree( dataset=xr.Dataset({"a": 2}, coords={"x": 2}), name="first", ) assert_identical(actual, expected) def test_sel_isel_error_has_node_info(self) -> None: tree = DataTree.from_dict( { "/first": xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}), "/second": xr.Dataset({"b": ("x", [4, 5])}, coords={"x": [2, 3]}), } ) with pytest.raises( KeyError, match="Raised whilst mapping function over node with path 'second'", ): tree.sel(x=1) with pytest.raises( IndexError, match="Raised whilst mapping function over node with path 'first'", ): tree.isel(x=4) class TestAggregations: def test_reduce_method(self) -> None: ds = xr.Dataset({"a": ("x", [False, True, False])}) dt = DataTree.from_dict({"/": ds, "/results": ds}) expected = DataTree.from_dict({"/": ds.any(), "/results": ds.any()}) result = dt.any() assert_equal(result, expected) def test_nan_reduce_method(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}) dt = DataTree.from_dict({"/": ds, "/results": ds}) expected = DataTree.from_dict({"/": ds.mean(), "/results": ds.mean()}) result = dt.mean() assert_equal(result, expected) def test_cum_method(self) -> None: ds = xr.Dataset({"a": ("x", [1, 2, 3])}) dt = DataTree.from_dict({"/": ds, "/results": ds}) expected = DataTree.from_dict( { "/": ds.cumsum(), "/results": ds.cumsum(), } ) result = dt.cumsum() assert_equal(result, expected) def test_dim_argument(self) -> None: dt = DataTree.from_dict( { "/a": xr.Dataset({"A": ("x", [1, 2])}), "/b": xr.Dataset({"B": ("y", [1, 2])}), } ) expected = DataTree.from_dict( { "/a": xr.Dataset({"A": 1.5}), "/b": xr.Dataset({"B": 1.5}), } ) actual = dt.mean() assert_equal(expected, actual) actual = dt.mean(dim=...) 
assert_equal(expected, actual) expected = DataTree.from_dict( { "/a": xr.Dataset({"A": 1.5}), "/b": xr.Dataset({"B": ("y", [1.0, 2.0])}), } ) actual = dt.mean("x") assert_equal(expected, actual) with pytest.raises( ValueError, match=re.escape("Dimension(s) 'invalid' do not exist."), ): dt.mean("invalid") def test_subtree(self) -> None: tree = DataTree.from_dict( { "/child": Dataset({"a": ("x", [1, 2])}), } ) expected = DataTree(dataset=Dataset({"a": 1.5}), name="child") actual = tree.children["child"].mean() assert_identical(expected, actual) class TestOps: def test_unary_op(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": (-ds1), "/subnode": (-ds2)}) result = -dt assert_equal(result, expected) def test_unary_op_inherited_coords(self) -> None: tree = DataTree(xr.Dataset(coords={"x": [1, 2, 3]})) tree["/foo"] = DataTree(xr.Dataset({"bar": ("x", [4, 5, 6])})) actual = -tree actual_dataset = actual.children["foo"].to_dataset(inherit=False) assert "x" not in actual_dataset.coords expected = tree.copy() # unary ops are not applied to coordinate variables, only data variables expected["/foo/bar"].data = np.array([-4, -5, -6]) assert_identical(actual, expected) def test_binary_op_on_int(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": ds1 * 5, "/subnode": ds2 * 5}) result = dt * 5 assert_equal(result, expected) def test_binary_op_on_dataarray(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict( { "/": ds1, "/subnode": ds2, } ) other_da = xr.DataArray(name="z", data=[0.1, 0.2], dims="z") expected = DataTree.from_dict( { "/": ds1 * other_da, "/subnode": ds2 * other_da, } ) result = dt * other_da assert_equal(result, expected) def test_binary_op_on_dataset(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict( { "/": ds1, "/subnode": ds2, } ) other_ds = xr.Dataset({"z": ("z", [0.1, 0.2])}) expected = DataTree.from_dict( { "/": ds1 * other_ds, "/subnode": ds2 * other_ds, } ) result = dt * other_ds assert_equal(result, expected) def test_binary_op_on_datatree(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": ds1 * ds1, "/subnode": ds2 * ds2}) result = dt * dt assert_equal(result, expected) def test_binary_op_order_invariant(self) -> None: tree_ab = DataTree.from_dict({"/a": Dataset({"a": 1}), "/b": Dataset({"b": 2})}) tree_ba = DataTree.from_dict({"/b": Dataset({"b": 2}), "/a": Dataset({"a": 1})}) expected = DataTree.from_dict( {"/a": Dataset({"a": 2}), "/b": Dataset({"b": 4})} ) actual = tree_ab + tree_ba assert_identical(expected, actual) def test_arithmetic_inherited_coords(self) -> None: tree = DataTree(xr.Dataset(coords={"x": [1, 2, 3]})) tree["/foo"] = DataTree(xr.Dataset({"bar": ("x", [4, 5, 6])})) actual = 2 * tree actual_dataset = actual.children["foo"].to_dataset(inherit=False) assert "x" not in actual_dataset.coords expected = tree.copy() expected["/foo/bar"].data = np.array([8, 10, 12]) assert_identical(actual, expected) def test_binary_op_commutativity_with_dataset(self) -> None: # regression test for #9365 ds1 = 
xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict( { "/": ds1, "/subnode": ds2, } ) other_ds = xr.Dataset({"z": ("z", [0.1, 0.2])}) expected = DataTree.from_dict( { "/": ds1 * other_ds, "/subnode": ds2 * other_ds, } ) result = other_ds * dt assert_equal(result, expected) def test_inplace_binary_op(self) -> None: ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) expected = DataTree.from_dict({"/": ds1 + 1, "/subnode": ds2 + 1}) dt += 1 assert_equal(dt, expected) def test_dont_broadcast_single_node_tree(self) -> None: # regression test for https://github.com/pydata/xarray/issues/9365#issuecomment-2291622577 ds1 = xr.Dataset({"a": [5], "b": [3]}) ds2 = xr.Dataset({"x": [0.1, 0.2], "y": [10, 20]}) dt = DataTree.from_dict({"/": ds1, "/subnode": ds2}) node = dt["/subnode"] with pytest.raises( xr.TreeIsomorphismError, match=re.escape(r"children at root node do not match: ['subnode'] vs []"), ): dt * node class TestUFuncs: @pytest.mark.xfail(reason="__array_ufunc__ not implemented yet") def test_tree(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=np.sin) result_tree = np.sin(dt) assert_equal(result_tree, expected) class Closer: def __init__(self): self.closed = False def close(self): if self.closed: raise RuntimeError("already closed") self.closed = True @pytest.fixture def tree_and_closers(): tree = DataTree.from_dict({"/child/grandchild": None}) closers = { "/": Closer(), "/child": Closer(), "/child/grandchild": Closer(), } for path, closer in closers.items(): tree[path].set_close(closer.close) return tree, closers class TestClose: def test_close(self, tree_and_closers): tree, closers = tree_and_closers assert not any(closer.closed for closer in closers.values()) tree.close() assert all(closer.closed for closer in closers.values()) tree.close() # should not error def test_context_manager(self, tree_and_closers): tree, closers = tree_and_closers assert not any(closer.closed for closer in closers.values()) with tree: pass assert all(closer.closed for closer in closers.values()) def test_close_child(self, tree_and_closers): tree, closers = tree_and_closers assert not any(closer.closed for closer in closers.values()) tree["child"].close() # should only close descendants assert not closers["/"].closed assert closers["/child"].closed assert closers["/child/grandchild"].closed def test_close_datasetview(self, tree_and_closers): tree, _ = tree_and_closers with pytest.raises( AttributeError, match=re.escape( r"cannot close a DatasetView(). 
Close the associated DataTree node instead" ), ): tree.dataset.close() with pytest.raises( AttributeError, match=re.escape(r"cannot modify a DatasetView()") ): tree.dataset.set_close(None) def test_close_dataset(self, tree_and_closers): tree, closers = tree_and_closers ds = tree.to_dataset() # should discard closers ds.close() assert not closers["/"].closed # with tree: # pass @requires_dask class TestDask: def test_chunksizes(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) groups = { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } tree = xr.DataTree.from_dict(groups) expected_chunksizes = {path: node.chunksizes for path, node in groups.items()} assert tree.chunksizes == expected_chunksizes def test_load(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) groups = {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4} expected = xr.DataTree.from_dict(groups) tree = xr.DataTree.from_dict( { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } ) expected_chunksizes: Mapping[str, Mapping] expected_chunksizes = {node.path: {} for node in tree.subtree} actual = tree.load() assert_identical(actual, expected) assert tree.chunksizes == expected_chunksizes assert actual.chunksizes == expected_chunksizes tree = xr.DataTree.from_dict(groups) actual = tree.load() assert_identical(actual, expected) assert actual.chunksizes == expected_chunksizes def test_compute(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) expected = xr.DataTree.from_dict( {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4} ) tree = xr.DataTree.from_dict( { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } ) original_chunksizes = tree.chunksizes expected_chunksizes: Mapping[str, Mapping] expected_chunksizes = {node.path: {} for node in tree.subtree} actual = tree.compute() assert_identical(actual, expected) assert actual.chunksizes == expected_chunksizes, "mismatching chunksizes" assert tree.chunksizes == original_chunksizes, "original tree was modified" def test_persist(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) def fn(x): return 2 * x expected = xr.DataTree.from_dict( { "/": fn(ds1).chunk({"x": 5}), "/group1": fn(ds2).chunk({"y": 3}), "/group2": fn(ds3).chunk({"z": 2}), "/group1/subgroup1": fn(ds4).chunk({"x": 5}), } ) # Add trivial second layer to the task graph, persist should reduce to one tree = xr.DataTree.from_dict( { "/": fn(ds1.chunk({"x": 5})), "/group1": fn(ds2.chunk({"y": 3})), "/group2": fn(ds3.chunk({"z": 2})), "/group1/subgroup1": fn(ds4.chunk({"x": 5})), } ) original_chunksizes = tree.chunksizes original_hlg_depths = { node.path: len(node.dataset.__dask_graph__().layers) for node in tree.subtree } actual = tree.persist() actual_hlg_depths = { node.path: 
len(node.dataset.__dask_graph__().layers) for node in actual.subtree } assert_identical(actual, expected) assert actual.chunksizes == original_chunksizes, "chunksizes were modified" assert tree.chunksizes == original_chunksizes, ( "original chunksizes were modified" ) assert all(d == 1 for d in actual_hlg_depths.values()), ( "unexpected dask graph depth" ) assert all(d == 2 for d in original_hlg_depths.values()), ( "original dask graph was modified" ) def test_chunk(self): ds1 = xr.Dataset({"a": ("x", np.arange(10))}) ds2 = xr.Dataset({"b": ("y", np.arange(5))}) ds3 = xr.Dataset({"c": ("z", np.arange(4))}) ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))}) expected = xr.DataTree.from_dict( { "/": ds1.chunk({"x": 5}), "/group1": ds2.chunk({"y": 3}), "/group2": ds3.chunk({"z": 2}), "/group1/subgroup1": ds4.chunk({"x": 5}), } ) tree = xr.DataTree.from_dict( {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4} ) actual = tree.chunk({"x": 5, "y": 3, "z": 2}) assert_identical(actual, expected) assert actual.chunksizes == expected.chunksizes with pytest.raises(TypeError, match="invalid type"): tree.chunk(None) with pytest.raises(TypeError, match="invalid type"): tree.chunk((1, 2)) with pytest.raises(ValueError, match="not found in data dimensions"): tree.chunk({"u": 2}) xarray-2025.09.0/xarray/tests/test_datatree_mapping.py000066400000000000000000000226341505620616400227400ustar00rootroot00000000000000import re import numpy as np import pytest import xarray as xr from xarray.core.datatree_mapping import map_over_datasets from xarray.core.treenode import TreeIsomorphismError from xarray.testing import assert_equal, assert_identical empty = xr.Dataset() class TestMapOverSubTree: def test_no_trees_passed(self): with pytest.raises(TypeError, match="must pass at least one tree object"): map_over_datasets(lambda x: x, "dt") def test_not_isomorphic(self, create_test_datatree): dt1 = create_test_datatree() dt2 = create_test_datatree() dt2["set1/set2/extra"] = xr.DataTree(name="extra") with pytest.raises( TreeIsomorphismError, match=re.escape( r"children at node 'set1/set2' do not match: [] vs ['extra']" ), ): map_over_datasets(lambda x, y: None, dt1, dt2) def test_no_trees_returned(self, create_test_datatree): dt1 = create_test_datatree() dt2 = create_test_datatree() expected = xr.DataTree.from_dict(dict.fromkeys(dt1.to_dict())) actual = map_over_datasets(lambda x, y: None, dt1, dt2) assert_equal(expected, actual) def test_single_tree_arg(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=lambda x: 10.0 * x) result_tree = map_over_datasets(lambda x: 10 * x, dt) assert_equal(result_tree, expected) def test_single_tree_arg_plus_arg(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=lambda ds: (10.0 * ds)) result_tree = map_over_datasets(lambda x, y: x * y, dt, 10.0) assert_equal(result_tree, expected) result_tree = map_over_datasets(lambda x, y: x * y, 10.0, dt) assert_equal(result_tree, expected) def test_single_tree_arg_plus_kwarg(self, create_test_datatree): dt = create_test_datatree() expected = create_test_datatree(modify=lambda ds: (10.0 * ds)) def multiply_by_kwarg(ds, **kwargs): ds = ds * kwargs.pop("multiplier") return ds result_tree = map_over_datasets( multiply_by_kwarg, dt, kwargs=dict(multiplier=10.0) ) assert_equal(result_tree, expected) def test_multiple_tree_args(self, create_test_datatree): dt1 = create_test_datatree() dt2 = create_test_datatree() expected = 
create_test_datatree(modify=lambda ds: 2.0 * ds) result = map_over_datasets(lambda x, y: x + y, dt1, dt2) assert_equal(result, expected) def test_return_multiple_trees(self, create_test_datatree): dt = create_test_datatree() dt_min, dt_max = map_over_datasets(lambda x: (x.min(), x.max()), dt) expected_min = create_test_datatree(modify=lambda ds: ds.min()) assert_equal(dt_min, expected_min) expected_max = create_test_datatree(modify=lambda ds: ds.max()) assert_equal(dt_max, expected_max) def test_return_wrong_type(self, simple_datatree): dt1 = simple_datatree with pytest.raises( TypeError, match=re.escape( "the result of calling func on the node at position '.' is not a " "Dataset or None or a tuple of such types" ), ): map_over_datasets(lambda x: "string", dt1) # type: ignore[arg-type,return-value] def test_return_tuple_of_wrong_types(self, simple_datatree): dt1 = simple_datatree with pytest.raises( TypeError, match=re.escape( "the result of calling func on the node at position '.' is not a " "Dataset or None or a tuple of such types" ), ): map_over_datasets(lambda x: (x, "string"), dt1) # type: ignore[arg-type,return-value] def test_return_inconsistent_number_of_results(self, simple_datatree): dt1 = simple_datatree with pytest.raises( TypeError, match=re.escape( r"Calling func on the nodes at position set1 returns a tuple " "of 0 datasets, whereas calling func on the nodes at position " ". instead returns a tuple of 2 datasets." ), ): # Datasets in simple_datatree have different numbers of dims map_over_datasets(lambda ds: tuple((None,) * len(ds.dims)), dt1) def test_wrong_number_of_arguments_for_func(self, simple_datatree): dt = simple_datatree with pytest.raises( TypeError, match="takes 1 positional argument but 2 were given" ): map_over_datasets(lambda x: 10 * x, dt, dt) def test_map_single_dataset_against_whole_tree(self, create_test_datatree): dt = create_test_datatree() def nodewise_merge(node_ds, fixed_ds): return xr.merge([node_ds, fixed_ds]) other_ds = xr.Dataset({"z": ("z", [0])}) expected = create_test_datatree(modify=lambda ds: xr.merge([ds, other_ds])) result_tree = map_over_datasets(nodewise_merge, dt, other_ds) assert_equal(result_tree, expected) @pytest.mark.xfail def test_trees_with_different_node_names(self): # TODO test this after I've got good tests for renaming nodes raise NotImplementedError def test_tree_method(self, create_test_datatree): dt = create_test_datatree() def multiply(ds, times): return times * ds expected = create_test_datatree(modify=lambda ds: 10.0 * ds) result_tree = dt.map_over_datasets(multiply, 10.0) assert_equal(result_tree, expected) def test_tree_method_with_kwarg(self, create_test_datatree): dt = create_test_datatree() def multiply(ds, **kwargs): return kwargs.pop("times") * ds expected = create_test_datatree(modify=lambda ds: 10.0 * ds) result_tree = dt.map_over_datasets(multiply, kwargs=dict(times=10.0)) assert_equal(result_tree, expected) def test_discard_ancestry(self, create_test_datatree): # Check for datatree GH issue https://github.com/xarray-contrib/datatree/issues/48 dt = create_test_datatree() subtree = dt["set1"] expected = create_test_datatree(modify=lambda ds: 10.0 * ds)["set1"] result_tree = map_over_datasets(lambda x: 10.0 * x, subtree) assert_equal(result_tree, expected) def test_keep_attrs_on_empty_nodes(self, create_test_datatree): # GH278 dt = create_test_datatree() dt["set1/set2"].attrs["foo"] = "bar" def empty_func(ds): return ds result = dt.map_over_datasets(empty_func) assert result["set1/set2"].attrs == 
dt["set1/set2"].attrs def test_error_contains_path_of_offending_node(self, create_test_datatree): dt = create_test_datatree() dt["set1"]["bad_var"] = 0 print(dt) def fail_on_specific_node(ds): if "bad_var" in ds: raise ValueError("Failed because 'bar_var' present in dataset") with pytest.raises( ValueError, match=re.escape( r"Raised whilst mapping function over node with path 'set1'" ), ): dt.map_over_datasets(fail_on_specific_node) def test_inherited_coordinates_with_index(self): root = xr.Dataset(coords={"x": [1, 2]}) child = xr.Dataset({"foo": ("x", [0, 1])}) # no coordinates tree = xr.DataTree.from_dict({"/": root, "/child": child}) actual = tree.map_over_datasets(lambda ds: ds) # identity assert isinstance(actual, xr.DataTree) assert_identical(tree, actual) actual_child = actual.children["child"].to_dataset(inherit=False) assert_identical(actual_child, child) class TestMutableOperations: def test_construct_using_type(self): # from datatree GH issue https://github.com/xarray-contrib/datatree/issues/188 # xarray's .weighted is unusual because it uses type() to create a Dataset/DataArray a = xr.DataArray( np.random.rand(3, 4, 10), dims=["x", "y", "time"], coords={"area": (["x", "y"], np.random.rand(3, 4))}, ).to_dataset(name="data") b = xr.DataArray( np.random.rand(2, 6, 14), dims=["x", "y", "time"], coords={"area": (["x", "y"], np.random.rand(2, 6))}, ).to_dataset(name="data") dt = xr.DataTree.from_dict({"a": a, "b": b}) def weighted_mean(ds): if "area" not in ds.coords: return None return ds.weighted(ds.area).mean(["x", "y"]) dt.map_over_datasets(weighted_mean) def test_alter_inplace_forbidden(self): simpsons = xr.DataTree.from_dict( { "/": xr.Dataset({"age": 83}), "/Herbert": xr.Dataset({"age": 40}), "/Homer": xr.Dataset({"age": 39}), "/Homer/Bart": xr.Dataset({"age": 10}), "/Homer/Lisa": xr.Dataset({"age": 8}), "/Homer/Maggie": xr.Dataset({"age": 1}), }, name="Abe", ) def fast_forward(ds: xr.Dataset, years: float) -> xr.Dataset: """Add some years to the age, but by altering the given dataset""" ds["age"] = ds["age"] + years return ds with pytest.raises(AttributeError): simpsons.map_over_datasets(fast_forward, 10) xarray-2025.09.0/xarray/tests/test_datatree_typing.yml000066400000000000000000000176121505620616400227700ustar00rootroot00000000000000- case: test_mypy_pipe_lambda_noarg_return_type main: | from xarray import DataTree dt = DataTree().pipe(lambda data: data) reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" - case: test_mypy_pipe_lambda_posarg_return_type main: | from xarray import DataTree dt = DataTree().pipe(lambda data, arg: arg, "foo") reveal_type(dt) # N: Revealed type is "builtins.str" - case: test_mypy_pipe_lambda_chaining_return_type main: | from xarray import DataTree answer = DataTree().pipe(lambda data, arg: arg, "foo").count("o") reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_lambda_missing_arg main: | from xarray import DataTree # Call to pipe missing argument for lambda parameter `arg` dt = DataTree().pipe(lambda data, arg: data) out: | main:4: error: No overload variant of "pipe" of "DataTree" matches argument type "Callable[[Any, Any], Any]" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_lambda_extra_arg main: | from xarray import DataTree # Call to pipe with 
extra argument for lambda dt = DataTree().pipe(lambda data: data, "oops!") out: | main:4: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[Any], Any]", "str" [call-overload] main:4: note: Possible overload variants: main:4: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:4: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_posarg main: | from xarray import DataTree def f(dt: DataTree, arg: int) -> DataTree: return dt # Call to pipe missing argument for function parameter `arg` dt = DataTree().pipe(f) out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument type "Callable[[DataTree, int], DataTree]" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_extra_posarg main: | from xarray import DataTree def f(dt: DataTree, arg: int) -> DataTree: return dt # Call to pipe missing keyword for kwonly parameter `kwonly` dt = DataTree().pipe(f, 42, "oops!") out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[DataTree, int], DataTree]", "int", "str" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_kwarg main: | from xarray import DataTree def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree: return dt # Call to pipe missing argument for kwonly parameter `kwonly` dt = DataTree().pipe(f, 42) out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[DataTree, int, NamedArg(int, 'kwonly')], DataTree]", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_missing_keyword main: | from xarray import DataTree def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree: return dt # Call to pipe missing keyword for kwonly parameter `kwonly` dt = DataTree().pipe(f, 42, 99) out: | main:7: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[DataTree, int, NamedArg(int, 'kwonly')], DataTree]", "int", "int" [call-overload] main:7: note: Possible overload variants: main:7: note: def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:7: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_function_unexpected_keyword main: | from xarray import DataTree def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree: return dt # Call to pipe using wrong keyword: `kw` instead of `kwonly` dt = DataTree().pipe(f, 42, kw=99) out: | main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataTree" [call-arg] - case: test_mypy_pipe_tuple_return_type_datatree main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> 
DataTree: return dt dt = DataTree().pipe((f, "dt"), 42) reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" - case: test_mypy_pipe_tuple_return_type_other main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> int: return arg answer = DataTree().pipe((f, "dt"), 42) reveal_type(answer) # N: Revealed type is "builtins.int" - case: test_mypy_pipe_tuple_missing_arg main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> DataTree: return dt # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are missing an argument for parameter `arg`, so we get no error here. dt = DataTree().pipe((f, "dt")) reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we failed to pass an argument for `arg`. dt = DataTree().pipe(lambda data, arg: f(arg, data)) out: | main:17: error: No overload variant of "pipe" of "DataTree" matches argument type "Callable[[Any, Any], DataTree]" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T - case: test_mypy_pipe_tuple_extra_arg main: | from xarray import DataTree def f(arg: int, dt: DataTree) -> DataTree: return dt # Since we cannot provide a precise type annotation when passing a tuple to # pipe, there's not enough information for type analysis to indicate that # we are providing too many args for `f`, so we get no error here. dt = DataTree().pipe((f, "dt"), 42, "foo") reveal_type(dt) # N: Revealed type is "xarray.core.datatree.DataTree" # Rather than passing a tuple, passing a lambda that calls `f` with args in # the correct order allows for proper type analysis, indicating (perhaps # somewhat cryptically) that we passed too many arguments. 
dt = DataTree().pipe(lambda data, arg: f(arg, data), 42, "foo") out: | main:17: error: No overload variant of "pipe" of "DataTree" matches argument types "Callable[[Any, Any], DataTree]", "int", "str" [call-overload] main:17: note: Possible overload variants: main:17: note: def [P`9, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T main:17: note: def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T xarray-2025.09.0/xarray/tests/test_deprecation_helpers.py000066400000000000000000000110261505620616400234440ustar00rootroot00000000000000import pytest from xarray.util.deprecation_helpers import _deprecate_positional_args def test_deprecate_positional_args_warns_for_function(): @_deprecate_positional_args("v0.1") def f1(a, b, *, c="c", d="d"): return a, b, c, d result = f1(1, 2) assert result == (1, 2, "c", "d") result = f1(1, 2, c=3, d=4) assert result == (1, 2, 3, 4) with pytest.warns(FutureWarning, match=r".*v0.1"): result = f1(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c' as positional"): result = f1(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c, d' as positional"): result = f1(1, 2, 3, 4) # type: ignore[misc] assert result == (1, 2, 3, 4) @_deprecate_positional_args("v0.1") def f2(a="a", *, b="b", c="c", d="d"): return a, b, c, d with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f2(1, 2) # type: ignore[misc] assert result == (1, 2, "c", "d") @_deprecate_positional_args("v0.1") def f3(a, *, b="b", **kwargs): return a, b, kwargs with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f3(1, 2) # type: ignore[misc] assert result == (1, 2, {}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f3(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) @_deprecate_positional_args("v0.1") def f4(a, /, *, b="b", **kwargs): return a, b, kwargs result = f4(1) assert result == (1, "b", {}) result = f4(1, b=2, f="f") assert result == (1, 2, {"f": "f"}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = f4(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) with pytest.raises(TypeError, match=r"Keyword-only param without default"): @_deprecate_positional_args("v0.1") def f5(a, *, b, c=3, **kwargs): pass def test_deprecate_positional_args_warns_for_class(): class A1: @_deprecate_positional_args("v0.1") def method(self, a, b, *, c="c", d="d"): return a, b, c, d result = A1().method(1, 2) assert result == (1, 2, "c", "d") result = A1().method(1, 2, c=3, d=4) assert result == (1, 2, 3, 4) with pytest.warns(FutureWarning, match=r".*v0.1"): result = A1().method(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c' as positional"): result = A1().method(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c, d' as positional"): result = A1().method(1, 2, 3, 4) # type: ignore[misc] assert result == (1, 2, 3, 4) class A2: @_deprecate_positional_args("v0.1") def method(self, a=1, b=1, *, c="c", d="d"): return a, b, c, d with pytest.warns(FutureWarning, match=r"Passing 'c' as positional"): result = A2().method(1, 2, 3) # type: ignore[misc] assert result == (1, 2, 3, "d") with pytest.warns(FutureWarning, match=r"Passing 'c, d' as positional"): result = 
A2().method(1, 2, 3, 4) # type: ignore[misc] assert result == (1, 2, 3, 4) class A3: @_deprecate_positional_args("v0.1") def method(self, a, *, b="b", **kwargs): return a, b, kwargs with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = A3().method(1, 2) # type: ignore[misc] assert result == (1, 2, {}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = A3().method(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) class A4: @_deprecate_positional_args("v0.1") def method(self, a, /, *, b="b", **kwargs): return a, b, kwargs result = A4().method(1) assert result == (1, "b", {}) result = A4().method(1, b=2, f="f") assert result == (1, 2, {"f": "f"}) with pytest.warns(FutureWarning, match=r"Passing 'b' as positional"): result = A4().method(1, 2, f="f") # type: ignore[misc] assert result == (1, 2, {"f": "f"}) with pytest.raises(TypeError, match=r"Keyword-only param without default"): class A5: @_deprecate_positional_args("v0.1") def __init__(self, a, *, b, c=3, **kwargs): pass xarray-2025.09.0/xarray/tests/test_distributed.py000066400000000000000000000240651505620616400217560ustar00rootroot00000000000000"""isort:skip_file""" from __future__ import annotations import pickle from typing import TYPE_CHECKING, Any import numpy as np import pytest if TYPE_CHECKING: import dask import dask.array as da import distributed else: dask = pytest.importorskip("dask") da = pytest.importorskip("dask.array") distributed = pytest.importorskip("distributed") import contextlib from dask.distributed import Client, Lock from distributed.client import futures_of from distributed.utils_test import ( cleanup, # noqa: F401 client, # noqa: F401 cluster, cluster_fixture, # noqa: F401 gen_cluster, loop, # noqa: F401 loop_in_thread, # noqa: F401 ) import xarray as xr from xarray.backends.locks import HDF5_LOCK, CombinedLock, SerializableLock from xarray.tests import ( assert_allclose, assert_identical, has_h5netcdf, has_netCDF4, has_scipy, requires_cftime, requires_netCDF4, requires_zarr, ) from xarray.tests.test_backends import ( ON_WINDOWS, create_tmp_file, ) from xarray.tests.test_dataset import create_test_data @pytest.fixture def tmp_netcdf_filename(tmpdir): return str(tmpdir.join("testfile.nc")) ENGINES = [] if has_scipy: ENGINES.append("scipy") if has_netCDF4: ENGINES.append("netcdf4") if has_h5netcdf: ENGINES.append("h5netcdf") NC_FORMATS = { "netcdf4": [ "NETCDF3_CLASSIC", "NETCDF3_64BIT_OFFSET", "NETCDF3_64BIT_DATA", "NETCDF4_CLASSIC", "NETCDF4", ], "scipy": ["NETCDF3_CLASSIC", "NETCDF3_64BIT"], "h5netcdf": ["NETCDF4"], } ENGINES_AND_FORMATS = [ ("netcdf4", "NETCDF3_CLASSIC"), ("netcdf4", "NETCDF4_CLASSIC"), ("netcdf4", "NETCDF4"), ("h5netcdf", "NETCDF4"), ("scipy", "NETCDF3_64BIT"), ] @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) def test_dask_distributed_netcdf_roundtrip( loop, # noqa: F811 tmp_netcdf_filename, engine, nc_format, ): if engine not in ENGINES: pytest.skip("engine not available") chunks = {"dim1": 4, "dim2": 3, "dim3": 6} with cluster() as (s, [a, b]): with Client(s["address"], loop=loop): original = create_test_data().chunk(chunks) if engine == "scipy": with pytest.raises(NotImplementedError): original.to_netcdf( tmp_netcdf_filename, engine=engine, format=nc_format ) return original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format) with xr.open_dataset( tmp_netcdf_filename, chunks=chunks, engine=engine ) as restored: assert isinstance(restored.var1.data, da.Array) computed = restored.compute() 
assert_allclose(original, computed) @requires_netCDF4 def test_dask_distributed_write_netcdf_with_dimensionless_variables( loop, # noqa: F811 tmp_netcdf_filename, ): with cluster() as (s, [a, b]): with Client(s["address"], loop=loop): original = xr.Dataset({"x": da.zeros(())}) original.to_netcdf(tmp_netcdf_filename) with xr.open_dataset(tmp_netcdf_filename) as actual: assert actual.x.shape == () @requires_cftime @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_can_open_files_with_cftime_index(parallel, tmp_path): T = xr.date_range("20010101", "20010501", calendar="360_day", use_cftime=True) Lon = np.arange(100) data = np.random.random((T.size, Lon.size)) da = xr.DataArray(data, coords={"time": T, "Lon": Lon}, name="test") file_path = tmp_path / "test.nc" da.to_netcdf(file_path) with cluster() as (s, [a, b]): with Client(s["address"]): with xr.open_mfdataset(file_path, parallel=parallel) as tf: assert_identical(tf["test"], da) @requires_cftime @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_multiple_files_parallel_distributed(parallel, tmp_path): lon = np.arange(100) time = xr.date_range("20010101", periods=100, calendar="360_day", use_cftime=True) data = np.random.random((time.size, lon.size)) da = xr.DataArray(data, coords={"time": time, "lon": lon}, name="test") fnames = [] for i in range(0, 100, 10): fname = tmp_path / f"test_{i}.nc" da.isel(time=slice(i, i + 10)).to_netcdf(fname) fnames.append(fname) with cluster() as (s, [a, b]): with Client(s["address"]): with xr.open_mfdataset( fnames, parallel=parallel, concat_dim="time", combine="nested" ) as tf: assert_identical(tf["test"], da) # TODO: move this to test_backends.py @requires_cftime @requires_netCDF4 @pytest.mark.parametrize("parallel", (True, False)) def test_open_mfdataset_multiple_files_parallel(parallel, tmp_path): if parallel: pytest.skip( "Flaky in CI. Would be a welcome contribution to make a similar test reliable." 
) lon = np.arange(100) time = xr.date_range("20010101", periods=100, calendar="360_day", use_cftime=True) data = np.random.random((time.size, lon.size)) da = xr.DataArray(data, coords={"time": time, "lon": lon}, name="test") fnames = [] for i in range(0, 100, 10): fname = tmp_path / f"test_{i}.nc" da.isel(time=slice(i, i + 10)).to_netcdf(fname) fnames.append(fname) for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]: with dask.config.set(scheduler=get): with xr.open_mfdataset( fnames, parallel=parallel, concat_dim="time", combine="nested" ) as tf: assert_identical(tf["test"], da) @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS) def test_dask_distributed_read_netcdf_integration_test( loop, # noqa: F811 tmp_netcdf_filename, engine, nc_format, ): if engine not in ENGINES: pytest.skip("engine not available") chunks = {"dim1": 4, "dim2": 3, "dim3": 6} with cluster() as (s, [a, b]): with Client(s["address"], loop=loop): original = create_test_data() original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format) with xr.open_dataset( tmp_netcdf_filename, chunks=chunks, engine=engine ) as restored: assert isinstance(restored.var1.data, da.Array) computed = restored.compute() assert_allclose(original, computed) # fixture vendored from dask # heads-up, this is using quite private zarr API # https://github.com/dask/dask/blob/e04734b4d8959ba259801f2e2a490cb4ee8d891f/dask/tests/test_distributed.py#L338-L358 @pytest.fixture def zarr(client): # noqa: F811 zarr_lib = pytest.importorskip("zarr") # Zarr-Python 3 lazily allocates a dedicated thread/IO loop # for to execute async tasks. To avoid having this thread # be picked up as a "leaked thread", we manually trigger it's # creation before using zarr try: _ = zarr_lib.core.sync._get_loop() _ = zarr_lib.core.sync._get_executor() yield zarr_lib except AttributeError: yield zarr_lib finally: # Zarr-Python 3 lazily allocates a IO thread, a thread pool executor, and # an IO loop. Here we clean up these resources to avoid leaking threads # In normal operations, this is done as by an atexit handler when Zarr # is shutting down. with contextlib.suppress(AttributeError): zarr_lib.core.sync.cleanup_resources() @requires_zarr @pytest.mark.parametrize("consolidated", [True, False]) @pytest.mark.parametrize("compute", [True, False]) def test_dask_distributed_zarr_integration_test( client, # noqa: F811 zarr, consolidated: bool, compute: bool, ) -> None: if consolidated: write_kwargs: dict[str, Any] = {"consolidated": True} read_kwargs: dict[str, Any] = {"backend_kwargs": {"consolidated": True}} else: write_kwargs = read_kwargs = {} chunks = {"dim1": 4, "dim2": 3, "dim3": 5} original = create_test_data().chunk(chunks) with create_tmp_file(allow_cleanup_failure=ON_WINDOWS, suffix=".zarrc") as filename: maybe_futures = original.to_zarr( # type: ignore[call-overload] #mypy bug? 
filename, compute=compute, **write_kwargs ) if not compute: maybe_futures.compute() with xr.open_dataset( filename, chunks="auto", engine="zarr", **read_kwargs ) as restored: assert isinstance(restored.var1.data, da.Array) computed = restored.compute() assert_allclose(original, computed) @gen_cluster(client=True) async def test_async(c, s, a, b) -> None: x = create_test_data() assert not dask.is_dask_collection(x) y = x.chunk({"dim2": 4}) + 10 assert dask.is_dask_collection(y) assert dask.is_dask_collection(y.var1) assert dask.is_dask_collection(y.var2) z = c.persist(y) assert str(z) assert dask.is_dask_collection(z) assert dask.is_dask_collection(z.var1) assert dask.is_dask_collection(z.var2) assert len(y.__dask_graph__()) > len(z.__dask_graph__()) assert not futures_of(y) assert futures_of(z) future = c.compute(z) w = await future assert not dask.is_dask_collection(w) assert_allclose(x + 10, w) assert s.tasks def test_hdf5_lock() -> None: assert isinstance(HDF5_LOCK, SerializableLock) @gen_cluster(client=True) async def test_serializable_locks(c, s, a, b) -> None: def f(x, lock=None): with lock: return x + 1 # note, the creation of Lock needs to be done inside a cluster for lock in [ HDF5_LOCK, Lock(), Lock("filename.nc"), CombinedLock([HDF5_LOCK]), CombinedLock([HDF5_LOCK, Lock("filename.nc")]), ]: futures = c.map(f, list(range(10)), lock=lock) await c.gather(futures) lock2 = pickle.loads(pickle.dumps(lock)) assert type(lock) is type(lock2) xarray-2025.09.0/xarray/tests/test_dtypes.py000066400000000000000000000137331505620616400207440ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pytest from xarray.core import dtypes from xarray.tests import requires_array_api_strict try: import array_api_strict except ImportError: class DummyArrayAPINamespace: bool = None # type: ignore[unused-ignore,var-annotated] int32 = None # type: ignore[unused-ignore,var-annotated] float64 = None # type: ignore[unused-ignore,var-annotated] array_api_strict = DummyArrayAPINamespace @pytest.mark.parametrize( "args, expected", [ ([bool], bool), ([bool, np.bytes_], np.object_), ([np.float32, np.float64], np.float64), ([np.float32, np.bytes_], np.object_), ([np.str_, np.int64], np.object_), ([np.str_, np.str_], np.str_), ([np.bytes_, np.str_], np.object_), ([np.dtype(" None: actual = dtypes.result_type(*args) assert actual == expected @pytest.mark.parametrize( ["values", "expected"], ( ([np.arange(3, dtype="float32"), np.nan], np.float32), ([np.arange(3, dtype="int8"), 1], np.int8), ([np.array(["a", "b"], dtype=str), np.nan], object), ([np.array([b"a", b"b"], dtype=bytes), True], object), ([np.array([b"a", b"b"], dtype=bytes), "c"], object), ([np.array(["a", "b"], dtype=str), "c"], np.dtype(str)), ([np.array(["a", "b"], dtype=str), None], object), ([0, 1], np.dtype("int")), ), ) def test_result_type_scalars(values, expected) -> None: actual = dtypes.result_type(*values) assert np.issubdtype(actual, expected) def test_result_type_dask_array() -> None: # verify it works without evaluating dask arrays da = pytest.importorskip("dask.array") dask = pytest.importorskip("dask") def error(): raise RuntimeError array = da.from_delayed(dask.delayed(error)(), (), np.float64) with pytest.raises(RuntimeError): array.compute() actual = dtypes.result_type(array) assert actual == np.float64 # note that this differs from the behavior for scalar numpy arrays, which # would get promoted to float32 actual = dtypes.result_type(array, np.array([0.5, 1.0], dtype=np.float32)) assert actual == 
np.float64 @pytest.mark.parametrize("obj", [1.0, np.inf, "ab", 1.0 + 1.0j, True]) def test_inf(obj) -> None: assert dtypes.INF > obj assert dtypes.NINF < obj @pytest.mark.parametrize( "kind, expected", [ ("b", (np.float32, "nan")), # dtype('int8') ("B", (np.float32, "nan")), # dtype('uint8') ("c", (np.dtype("O"), "nan")), # dtype('S1') ("D", (np.complex128, "(nan+nanj)")), # dtype('complex128') ("d", (np.float64, "nan")), # dtype('float64') ("e", (np.float16, "nan")), # dtype('float16') ("F", (np.complex64, "(nan+nanj)")), # dtype('complex64') ("f", (np.float32, "nan")), # dtype('float32') ("h", (np.float32, "nan")), # dtype('int16') ("H", (np.float32, "nan")), # dtype('uint16') ("i", (np.float64, "nan")), # dtype('int32') ("I", (np.float64, "nan")), # dtype('uint32') ("l", (np.float64, "nan")), # dtype('int64') ("L", (np.float64, "nan")), # dtype('uint64') ("m", (np.timedelta64, "NaT")), # dtype(' None: # 'g': np.float128 is not tested : not available on all platforms # 'G': np.complex256 is not tested : not available on all platforms actual = dtypes.maybe_promote(np.dtype(kind)) assert actual[0] == expected[0] assert str(actual[1]) == expected[1] def test_nat_types_membership() -> None: assert np.datetime64("NaT").dtype in dtypes.NAT_TYPES assert np.timedelta64("NaT").dtype in dtypes.NAT_TYPES assert np.float64 not in dtypes.NAT_TYPES @pytest.mark.parametrize( ["dtype", "kinds", "xp", "expected"], ( (np.dtype("int32"), "integral", np, True), (np.dtype("float16"), "real floating", np, True), (np.dtype("complex128"), "complex floating", np, True), (np.dtype("U"), "numeric", np, False), pytest.param( array_api_strict.int32, "integral", array_api_strict, True, marks=requires_array_api_strict, id="array_api-int", ), pytest.param( array_api_strict.float64, "real floating", array_api_strict, True, marks=requires_array_api_strict, id="array_api-float", ), pytest.param( array_api_strict.bool, "numeric", array_api_strict, False, marks=requires_array_api_strict, id="array_api-bool", ), ), ) def test_isdtype(dtype, kinds, xp, expected) -> None: actual = dtypes.isdtype(dtype, kinds, xp=xp) assert actual == expected @pytest.mark.parametrize( ["dtype", "kinds", "xp", "error", "pattern"], ( (np.dtype("int32"), "foo", np, (TypeError, ValueError), "kind"), (np.dtype("int32"), np.signedinteger, np, TypeError, "kind"), (np.dtype("float16"), 1, np, TypeError, "kind"), ), ) def test_isdtype_error(dtype, kinds, xp, error, pattern): with pytest.raises(error, match=pattern): dtypes.isdtype(dtype, kinds, xp=xp) xarray-2025.09.0/xarray/tests/test_duck_array_ops.py000066400000000000000000001122571505620616400224420ustar00rootroot00000000000000from __future__ import annotations import copy import datetime as dt import pickle import warnings import numpy as np import pandas as pd import pytest from numpy import array, nan from xarray import DataArray, Dataset, concat, date_range from xarray.coding.times import _NS_PER_TIME_DELTA from xarray.core import dtypes, duck_array_ops from xarray.core.duck_array_ops import ( array_notnull_equiv, concatenate, count, first, gradient, last, least_squares, mean, np_timedelta64_to_float, pd_timedelta_to_float, push, py_timedelta_to_float, stack, timedelta_to_numeric, where, ) from xarray.core.extension_array import PandasExtensionArray from xarray.core.types import NPDatetimeUnitOptions, PDDatetimeUnitOptions from xarray.namedarray.pycompat import array_type from xarray.testing import assert_allclose, assert_equal, assert_identical from xarray.tests import ( arm_xfail, 
assert_array_equal, has_dask, has_scipy, raise_if_dask_computes, requires_bottleneck, requires_cftime, requires_dask, requires_pyarrow, ) dask_array_type = array_type("dask") @pytest.fixture def categorical1(): return pd.Categorical(["cat1", "cat2", "cat2", "cat1", "cat2"]) @pytest.fixture def categorical2(): return pd.Categorical(["cat2", "cat1", "cat2", "cat3", "cat1"]) try: import pyarrow as pa @pytest.fixture def arrow1(): return pd.arrays.ArrowExtensionArray( pa.array([{"x": 1, "y": True}, {"x": 2, "y": False}]) ) @pytest.fixture def arrow2(): return pd.arrays.ArrowExtensionArray( pa.array([{"x": 3, "y": False}, {"x": 4, "y": True}]) ) except ImportError: pass @pytest.fixture def int1(): return pd.arrays.IntegerArray( np.array([1, 2, 3, 4, 5]), np.array([True, False, False, True, True]) ) @pytest.fixture def int2(): return pd.arrays.IntegerArray( np.array([6, 7, 8, 9, 10]), np.array([True, True, False, True, False]) ) class TestOps: @pytest.fixture(autouse=True) def setUp(self): self.x = array( [ [ [nan, nan, 2.0, nan], [nan, 5.0, 6.0, nan], [8.0, 9.0, 10.0, nan], ], [ [nan, 13.0, 14.0, 15.0], [nan, 17.0, 18.0, nan], [nan, 21.0, nan, nan], ], ] ) def test_first(self): expected_results = [ array([[nan, 13, 2, 15], [nan, 5, 6, nan], [8, 9, 10, nan]]), array([[8, 5, 2, nan], [nan, 13, 14, 15]]), array([[2, 5, 8], [13, 17, 21]]), ] for axis, expected in zip( [0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True ): actual = first(self.x, axis) assert_array_equal(expected, actual) expected = self.x[0] actual = first(self.x, axis=0, skipna=False) assert_array_equal(expected, actual) expected = self.x[..., 0] actual = first(self.x, axis=-1, skipna=False) assert_array_equal(expected, actual) with pytest.raises(IndexError, match=r"out of bounds"): first(self.x, 3) def test_last(self): expected_results = [ array([[nan, 13, 14, 15], [nan, 17, 18, nan], [8, 21, 10, nan]]), array([[8, 9, 10, nan], [nan, 21, 18, 15]]), array([[2, 6, 10], [15, 18, 21]]), ] for axis, expected in zip( [0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True ): actual = last(self.x, axis) assert_array_equal(expected, actual) expected = self.x[-1] actual = last(self.x, axis=0, skipna=False) assert_array_equal(expected, actual) expected = self.x[..., -1] actual = last(self.x, axis=-1, skipna=False) assert_array_equal(expected, actual) with pytest.raises(IndexError, match=r"out of bounds"): last(self.x, 3) def test_count(self): assert 12 == count(self.x) expected = array([[1, 2, 3], [3, 2, 1]]) assert_array_equal(expected, count(self.x, axis=-1)) assert 1 == count(np.datetime64("2000-01-01")) def test_where_type_promotion(self): result = where(np.array([True, False]), np.array([1, 2]), np.array(["a", "b"])) assert_array_equal(result, np.array([1, "b"], dtype=object)) result = where([True, False], np.array([1, 2], np.float32), np.nan) assert result.dtype == np.float32 assert_array_equal(result, np.array([1, np.nan], dtype=np.float32)) def test_where_extension_duck_array(self, categorical1, categorical2): where_res = where( np.array([True, False, True, False, False]), PandasExtensionArray(categorical1), PandasExtensionArray(categorical2), ) assert isinstance(where_res, PandasExtensionArray) assert ( where_res == pd.Categorical(["cat1", "cat1", "cat2", "cat3", "cat1"]) ).all() def test_concatenate_extension_duck_array(self, categorical1, categorical2): concate_res = concatenate( [PandasExtensionArray(categorical1), PandasExtensionArray(categorical2)] ) assert isinstance(concate_res, PandasExtensionArray) assert ( 
concate_res == type(categorical1)._concat_same_type((categorical1, categorical2)) ).all() @requires_pyarrow def test_extension_array_pyarrow_concatenate(self, arrow1, arrow2): concatenated = concatenate( (PandasExtensionArray(arrow1), PandasExtensionArray(arrow2)) ) assert concatenated[2].array[0]["x"] == 3 assert concatenated[3].array[0]["y"] @requires_pyarrow def test_extension_array_copy_arrow_type(self): arr = pd.array([pd.NA, 1, 2], dtype="int64[pyarrow]") # Relying on the `__getattr__` of `PandasExtensionArray` to do the deep copy # recursively only fails for `int64[pyarrow]` and similar types so this # test ensures that copying still works there. assert isinstance( copy.deepcopy(PandasExtensionArray(arr), memo=None).array, type(arr) ) def test___getitem__extension_duck_array(self, categorical1): extension_duck_array = PandasExtensionArray(categorical1) assert (extension_duck_array[0:2] == categorical1[0:2]).all() assert isinstance(extension_duck_array[0:2], PandasExtensionArray) assert extension_duck_array[0] == categorical1[0] assert isinstance(extension_duck_array[0], PandasExtensionArray) mask = [True, False, True, False, True] assert (extension_duck_array[mask] == categorical1[mask]).all() def test__setitem__extension_duck_array(self, categorical1): extension_duck_array = PandasExtensionArray(categorical1) extension_duck_array[2] = "cat1" # already existing category assert extension_duck_array[2] == "cat1" with pytest.raises(TypeError, match="Cannot setitem on a Categorical"): extension_duck_array[2] = "cat4" # new category def test_stack_type_promotion(self): result = stack([1, "b"]) assert_array_equal(result, np.array([1, "b"], dtype=object)) def test_concatenate_type_promotion(self): result = concatenate([np.array([1]), np.array(["b"])]) assert_array_equal(result, np.array([1, "b"], dtype=object)) @pytest.mark.filterwarnings("error") def test_all_nan_arrays(self): assert np.isnan(mean([np.nan, np.nan])) @requires_dask class TestDaskOps(TestOps): @pytest.fixture(autouse=True) def setUp(self): import dask.array self.x = dask.array.from_array( [ [ [nan, nan, 2.0, nan], [nan, 5.0, 6.0, nan], [8.0, 9.0, 10.0, nan], ], [ [nan, 13.0, 14.0, 15.0], [nan, 17.0, 18.0, nan], [nan, 21.0, nan, nan], ], ], chunks=(2, 1, 2), ) def test_cumsum_1d(): inputs = np.array([0, 1, 2, 3]) expected = np.array([0, 1, 3, 6]) actual = duck_array_ops.cumsum(inputs) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=0) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=-1) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=(0,)) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=()) assert_array_equal(inputs, actual) def test_cumsum_2d(): inputs = np.array([[1, 2], [3, 4]]) expected = np.array([[1, 3], [4, 10]]) actual = duck_array_ops.cumsum(inputs) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=(0, 1)) assert_array_equal(expected, actual) actual = duck_array_ops.cumsum(inputs, axis=()) assert_array_equal(inputs, actual) def test_cumprod_2d(): inputs = np.array([[1, 2], [3, 4]]) expected = np.array([[1, 2], [3, 2 * 3 * 4]]) actual = duck_array_ops.cumprod(inputs) assert_array_equal(expected, actual) actual = duck_array_ops.cumprod(inputs, axis=(0, 1)) assert_array_equal(expected, actual) actual = duck_array_ops.cumprod(inputs, axis=()) assert_array_equal(inputs, actual) class TestArrayNotNullEquiv: @pytest.mark.parametrize( "arr1, arr2", [ (np.array([1, 
2, 3]), np.array([1, 2, 3])), (np.array([1, 2, np.nan]), np.array([1, np.nan, 3])), (np.array([np.nan, 2, np.nan]), np.array([1, np.nan, np.nan])), ], ) def test_equal(self, arr1, arr2): assert array_notnull_equiv(arr1, arr2) def test_some_not_equal(self): a = np.array([1, 2, 4]) b = np.array([1, np.nan, 3]) assert not array_notnull_equiv(a, b) def test_wrong_shape(self): a = np.array([[1, np.nan, np.nan, 4]]) b = np.array([[1, 2], [np.nan, 4]]) assert not array_notnull_equiv(a, b) @pytest.mark.parametrize( "val1, val2, val3, null", [ ( np.datetime64("2000"), np.datetime64("2001"), np.datetime64("2002"), np.datetime64("NaT"), ), (1.0, 2.0, 3.0, np.nan), ("foo", "bar", "baz", None), ("foo", "bar", "baz", np.nan), ], ) def test_types(self, val1, val2, val3, null): dtype = object if isinstance(val1, str) else None arr1 = np.array([val1, null, val3, null], dtype=dtype) arr2 = np.array([val1, val2, null, null], dtype=dtype) assert array_notnull_equiv(arr1, arr2) def construct_dataarray(dim_num, dtype, contains_nan, dask): # dimnum <= 3 rng = np.random.default_rng(0) shapes = [16, 8, 4][:dim_num] dims = ("x", "y", "z")[:dim_num] if np.issubdtype(dtype, np.floating): array = rng.random(shapes).astype(dtype) elif np.issubdtype(dtype, np.integer): array = rng.integers(0, 10, size=shapes).astype(dtype) elif np.issubdtype(dtype, np.bool_): array = rng.integers(0, 1, size=shapes).astype(dtype) elif dtype is str: array = rng.choice(["a", "b", "c", "d"], size=shapes) else: raise ValueError if contains_nan: inds = rng.choice(range(array.size), int(array.size * 0.2)) dtype, fill_value = dtypes.maybe_promote(array.dtype) array = array.astype(dtype) array.flat[inds] = fill_value da = DataArray(array, dims=dims, coords={"x": np.arange(16)}, name="da") if dask and has_dask: chunks = dict.fromkeys(dims, 4) da = da.chunk(chunks) return da def from_series_or_scalar(se): if isinstance(se, pd.Series): return DataArray.from_series(se) else: # scalar case return DataArray(se) def series_reduce(da, func, dim, **kwargs): """convert DataArray to pd.Series, apply pd.func, then convert back to a DataArray. 
Multiple dims cannot be specified.""" # pd no longer accepts skipna=None https://github.com/pandas-dev/pandas/issues/44178 if kwargs.get("skipna", True) is None: kwargs["skipna"] = True if dim is None or da.ndim == 1: se = da.to_series() return from_series_or_scalar(getattr(se, func)(**kwargs)) else: dims = list(da.dims) dims.remove(dim) d = dims[0] da1 = [ series_reduce(da.isel(**{d: i}), func, dim, **kwargs) for i in range(len(da[d])) ] if d in da.coords: return concat(da1, dim=da[d]) return concat(da1, dim=d) def assert_dask_array(da, dask): if dask and da.ndim > 0: assert isinstance(da.data, dask_array_type) @arm_xfail @pytest.mark.filterwarnings("ignore:All-NaN .* encountered:RuntimeWarning") @pytest.mark.parametrize("dask", [False, True] if has_dask else [False]) def test_datetime_mean(dask: bool, time_unit: PDDatetimeUnitOptions) -> None: # Note: only testing numpy, as dask is broken upstream dtype = f"M8[{time_unit}]" da = DataArray( np.array(["2010-01-01", "NaT", "2010-01-03", "NaT", "NaT"], dtype=dtype), dims=["time"], ) if dask: # Trigger use case where a chunk is full of NaT da = da.chunk({"time": 3}) expect = DataArray(np.array("2010-01-02", dtype="M8[ns]")) expect_nat = DataArray(np.array("NaT", dtype="M8[ns]")) actual = da.mean() if dask: assert actual.chunks is not None assert_equal(actual, expect) actual = da.mean(skipna=False) if dask: assert actual.chunks is not None assert_equal(actual, expect_nat) # tests for 1d array full of NaT assert_equal(da[[1]].mean(), expect_nat) assert_equal(da[[1]].mean(skipna=False), expect_nat) # tests for a 0d array assert_equal(da[0].mean(), da[0]) assert_equal(da[0].mean(skipna=False), da[0]) assert_equal(da[1].mean(), expect_nat) assert_equal(da[1].mean(skipna=False), expect_nat) @requires_cftime @pytest.mark.parametrize("dask", [False, True]) def test_cftime_datetime_mean(dask): if dask and not has_dask: pytest.skip("requires dask") times = date_range("2000", periods=4, use_cftime=True) da = DataArray(times, dims=["time"]) da_2d = DataArray(times.values.reshape(2, 2)) if dask: da = da.chunk({"time": 2}) da_2d = da_2d.chunk({"dim_0": 2}) expected = da.isel(time=0) # one compute needed to check the array contains cftime datetimes with raise_if_dask_computes(max_computes=1): result = da.isel(time=0).mean() assert_dask_array(result, dask) assert_equal(result, expected) expected = DataArray(times.date_type(2000, 1, 2, 12)) with raise_if_dask_computes(max_computes=1): result = da.mean() assert_dask_array(result, dask) assert_equal(result, expected) with raise_if_dask_computes(max_computes=1): result = da_2d.mean() assert_dask_array(result, dask) assert_equal(result, expected) @pytest.mark.parametrize("dask", [False, True]) def test_mean_over_long_spanning_datetime64(dask) -> None: if dask and not has_dask: pytest.skip("requires dask") array = np.array(["1678-01-01", "NaT", "2260-01-01"], dtype="datetime64[ns]") da = DataArray(array, dims=["time"]) if dask: da = da.chunk({"time": 2}) expected = DataArray(np.array("1969-01-01", dtype="datetime64[ns]")) result = da.mean() assert_equal(result, expected) @requires_cftime @requires_dask def test_mean_over_non_time_dim_of_dataset_with_dask_backed_cftime_data(): # Regression test for part two of GH issue 5897: averaging over a non-time # dimension still fails if the time variable is dask-backed. 
ds = Dataset( { "var1": ( ("time",), date_range("2021-10-31", periods=10, freq="D", use_cftime=True), ), "var2": (("x",), list(range(10))), } ) expected = ds.mean("x") result = ds.chunk({}).mean("x") assert_equal(result, expected) @requires_cftime def test_cftime_datetime_mean_long_time_period(): import cftime times = np.array( [ [ cftime.DatetimeNoLeap(400, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(520, 12, 31, 0, 0, 0, 0), ], [ cftime.DatetimeNoLeap(520, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(640, 12, 31, 0, 0, 0, 0), ], [ cftime.DatetimeNoLeap(640, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(760, 12, 31, 0, 0, 0, 0), ], ] ) da = DataArray(times, dims=["time", "d2"]) result = da.mean("d2") expected = DataArray( [ cftime.DatetimeNoLeap(460, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(580, 12, 31, 0, 0, 0, 0), cftime.DatetimeNoLeap(700, 12, 31, 0, 0, 0, 0), ], dims=["time"], ) assert_equal(result, expected) def test_empty_axis_dtype(): ds = Dataset() ds["pos"] = [1, 2, 3] ds["data"] = ("pos", "time"), [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]] ds["var"] = "pos", [2, 3, 4] assert_identical(ds.mean(dim="time")["var"], ds["var"]) assert_identical(ds.max(dim="time")["var"], ds["var"]) assert_identical(ds.min(dim="time")["var"], ds["var"]) assert_identical(ds.sum(dim="time")["var"], ds["var"]) @pytest.mark.parametrize("dim_num", [1, 2]) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "min", "max", "mean", "var"]) # TODO test cumsum, cumprod @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize("aggdim", [None, "x"]) def test_reduce(dim_num, dtype, dask, func, skipna, aggdim): if aggdim == "y" and dim_num < 2: pytest.skip("dim not in this test") if dtype == np.bool_ and func == "mean": pytest.skip("numpy does not support this") if dask and not has_dask: pytest.skip("requires dask") if dask and skipna is False and dtype == np.bool_: pytest.skip("dask does not compute object-typed array") rtol = 1e-04 if dtype == np.float32 else 1e-05 da = construct_dataarray(dim_num, dtype, contains_nan=True, dask=dask) axis = None if aggdim is None else da.get_axis_num(aggdim) # TODO: remove these after resolving # https://github.com/dask/dask/issues/3245 with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Mean of empty slice") warnings.filterwarnings("ignore", "All-NaN slice") warnings.filterwarnings("ignore", "invalid value encountered in") if da.dtype.kind == "O" and skipna: # Numpy < 1.13 does not handle object-type array. try: if skipna: expected = getattr(np, f"nan{func}")(da.values, axis=axis) else: expected = getattr(np, func)(da.values, axis=axis) actual = getattr(da, func)(skipna=skipna, dim=aggdim) assert_dask_array(actual, dask) np.testing.assert_allclose( actual.values, np.array(expected), rtol=1.0e-4, equal_nan=True ) except (TypeError, AttributeError, ZeroDivisionError): # TODO currently, numpy does not support some methods such as # nanmean for object dtype pass actual = getattr(da, func)(skipna=skipna, dim=aggdim) # for dask case, make sure the result is the same for numpy backend expected = getattr(da.compute(), func)(skipna=skipna, dim=aggdim) assert_allclose(actual, expected, rtol=rtol) # make sure the compatibility with pandas' results. 
if func in ["var", "std"]: expected = series_reduce(da, func, skipna=skipna, dim=aggdim, ddof=0) assert_allclose(actual, expected, rtol=rtol) # also check ddof!=0 case actual = getattr(da, func)(skipna=skipna, dim=aggdim, ddof=5) if dask: assert isinstance(da.data, dask_array_type) expected = series_reduce(da, func, skipna=skipna, dim=aggdim, ddof=5) assert_allclose(actual, expected, rtol=rtol) else: expected = series_reduce(da, func, skipna=skipna, dim=aggdim) assert_allclose(actual, expected, rtol=rtol) # make sure the dtype argument if func not in ["max", "min"]: actual = getattr(da, func)(skipna=skipna, dim=aggdim, dtype=float) assert_dask_array(actual, dask) assert actual.dtype == float # without nan da = construct_dataarray(dim_num, dtype, contains_nan=False, dask=dask) actual = getattr(da, func)(skipna=skipna) if dask: assert isinstance(da.data, dask_array_type) expected = getattr(np, f"nan{func}")(da.values) if actual.dtype == object: assert actual.values == np.array(expected) else: assert np.allclose(actual.values, np.array(expected), rtol=rtol) @pytest.mark.parametrize("dim_num", [1, 2]) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_, str]) @pytest.mark.parametrize("contains_nan", [True, False]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["min", "max"]) @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize("aggdim", ["x", "y"]) def test_argmin_max(dim_num, dtype, contains_nan, dask, func, skipna, aggdim): # pandas-dev/pandas#16830, we do not check consistency with pandas but # just make sure da[da.argmin()] == da.min() if aggdim == "y" and dim_num < 2: pytest.skip("dim not in this test") if dask and not has_dask: pytest.skip("requires dask") if contains_nan: if not skipna: pytest.skip("numpy's argmin (not nanargmin) does not handle object-dtype") if skipna and np.dtype(dtype).kind in "iufc": pytest.skip("numpy's nanargmin raises ValueError for all nan axis") da = construct_dataarray(dim_num, dtype, contains_nan=contains_nan, dask=dask) with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN slice") actual = da.isel( **{aggdim: getattr(da, "arg" + func)(dim=aggdim, skipna=skipna).compute()} ) expected = getattr(da, func)(dim=aggdim, skipna=skipna) assert_allclose( actual.drop_vars(list(actual.coords)), expected.drop_vars(list(expected.coords)), ) def test_argmin_max_error(): da = construct_dataarray(2, np.bool_, contains_nan=True, dask=False) da[0] = np.nan with pytest.raises(ValueError): da.argmin(dim="y") @pytest.mark.parametrize( ["array", "expected"], [ ( np.array([np.datetime64("2000-01-01"), np.datetime64("NaT")]), np.array([False, True]), ), ( np.array([np.timedelta64(1, "h"), np.timedelta64("NaT")]), np.array([False, True]), ), ( np.array([0.0, np.nan]), np.array([False, True]), ), ( np.array([1j, np.nan]), np.array([False, True]), ), ( np.array(["foo", np.nan], dtype=object), np.array([False, True]), ), ( np.array([1, 2], dtype=int), np.array([False, False]), ), ( np.array([True, False], dtype=bool), np.array([False, False]), ), ], ) def test_isnull(array, expected): actual = duck_array_ops.isnull(array) np.testing.assert_equal(expected, actual) @requires_dask def test_isnull_with_dask(): da = construct_dataarray(2, np.float32, contains_nan=True, dask=True) assert isinstance(da.isnull().data, dask_array_type) assert_equal(da.isnull().load(), da.load().isnull()) @pytest.mark.skipif(not has_dask, reason="This is for dask.") @pytest.mark.parametrize("axis", [0, -1, 1]) 
@pytest.mark.parametrize("edge_order", [1, 2]) def test_dask_gradient(axis, edge_order): import dask.array as da array = np.array(np.random.randn(100, 5, 40)) x = np.exp(np.linspace(0, 1, array.shape[axis])) darray = da.from_array(array, chunks=[(6, 30, 30, 20, 14), 5, 8]) expected = gradient(array, x, axis=axis, edge_order=edge_order) actual = gradient(darray, x, axis=axis, edge_order=edge_order) assert isinstance(actual, da.Array) assert_array_equal(actual, expected) @pytest.mark.parametrize("dim_num", [1, 2]) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) @pytest.mark.parametrize("aggdim", [None, "x"]) @pytest.mark.parametrize("contains_nan", [True, False]) @pytest.mark.parametrize("skipna", [True, False, None]) def test_min_count(dim_num, dtype, dask, func, aggdim, contains_nan, skipna): if dask and not has_dask: pytest.skip("requires dask") da = construct_dataarray(dim_num, dtype, contains_nan=contains_nan, dask=dask) min_count = 3 # If using Dask, the function call should be lazy. with raise_if_dask_computes(): actual = getattr(da, func)(dim=aggdim, skipna=skipna, min_count=min_count) expected = series_reduce(da, func, skipna=skipna, dim=aggdim, min_count=min_count) assert_allclose(actual, expected) assert_dask_array(actual, dask) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) def test_min_count_nd(dtype, dask, func): if dask and not has_dask: pytest.skip("requires dask") min_count = 3 dim_num = 3 da = construct_dataarray(dim_num, dtype, contains_nan=True, dask=dask) # If using Dask, the function call should be lazy. with raise_if_dask_computes(): actual = getattr(da, func)( dim=["x", "y", "z"], skipna=True, min_count=min_count ) # Supplying all dims is equivalent to supplying `...` or `None` expected = getattr(da, func)(dim=..., skipna=True, min_count=min_count) assert_allclose(actual, expected) assert_dask_array(actual, dask) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) @pytest.mark.parametrize("dim", [None, "a", "b"]) def test_min_count_specific(dask, func, dim): if dask and not has_dask: pytest.skip("requires dask") # Simple array with four non-NaN values. da = DataArray(np.ones((6, 6), dtype=np.float64) * np.nan, dims=("a", "b")) da[0][0] = 2 da[0][3] = 2 da[3][0] = 2 da[3][3] = 2 if dask: da = da.chunk({"a": 3, "b": 3}) # Expected result if we set min_count to the number of non-NaNs in a # row/column/the entire array. if dim: min_count = 2 expected = DataArray( [4.0, np.nan, np.nan] * 2, dims=("a" if dim == "b" else "b",) ) else: min_count = 4 expected = DataArray(8.0 if func == "sum" else 16.0) # Check for that min_count. with raise_if_dask_computes(): actual = getattr(da, func)(dim, skipna=True, min_count=min_count) assert_dask_array(actual, dask) assert_allclose(actual, expected) # With min_count being one higher, should get all NaN. 
min_count += 1 expected *= np.nan with raise_if_dask_computes(): actual = getattr(da, func)(dim, skipna=True, min_count=min_count) assert_dask_array(actual, dask) assert_allclose(actual, expected) @pytest.mark.parametrize("func", ["sum", "prod"]) def test_min_count_dataset(func): da = construct_dataarray(2, dtype=float, contains_nan=True, dask=False) ds = Dataset({"var1": da}, coords={"scalar": 0}) actual = getattr(ds, func)(dim="x", skipna=True, min_count=3)["var1"] expected = getattr(ds["var1"], func)(dim="x", skipna=True, min_count=3) assert_allclose(actual, expected) @pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_]) @pytest.mark.parametrize("dask", [False, True]) @pytest.mark.parametrize("skipna", [False, True]) @pytest.mark.parametrize("func", ["sum", "prod"]) def test_multiple_dims(dtype, dask, skipna, func): if dask and not has_dask: pytest.skip("requires dask") da = construct_dataarray(3, dtype, contains_nan=True, dask=dask) actual = getattr(da, func)(("x", "y"), skipna=skipna) expected = getattr(getattr(da, func)("x", skipna=skipna), func)("y", skipna=skipna) assert_allclose(actual, expected) @pytest.mark.parametrize("dask", [True, False]) def test_datetime_to_numeric_datetime64(dask, time_unit: PDDatetimeUnitOptions): if dask and not has_dask: pytest.skip("requires dask") times = pd.date_range("2000", periods=5, freq="7D").as_unit(time_unit).values if dask: import dask.array times = dask.array.from_array(times, chunks=-1) with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric(times, datetime_unit="h") expected = 24 * np.arange(0, 35, 7) np.testing.assert_array_equal(result, expected) offset = times[1] with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, offset=offset, datetime_unit="h" ) expected = 24 * np.arange(-7, 28, 7) np.testing.assert_array_equal(result, expected) dtype = np.float32 with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, datetime_unit="h", dtype=dtype ) expected2 = 24 * np.arange(0, 35, 7).astype(dtype) np.testing.assert_array_equal(result, expected2) @requires_cftime @pytest.mark.parametrize("dask", [True, False]) def test_datetime_to_numeric_cftime(dask): if dask and not has_dask: pytest.skip("requires dask") times = date_range( "2000", periods=5, freq="7D", calendar="standard", use_cftime=True ).values if dask: import dask.array times = dask.array.from_array(times, chunks=-1) with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric(times, datetime_unit="h", dtype=int) expected = 24 * np.arange(0, 35, 7) np.testing.assert_array_equal(result, expected) offset = times[1] with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, offset=offset, datetime_unit="h", dtype=int ) expected = 24 * np.arange(-7, 28, 7) np.testing.assert_array_equal(result, expected) dtype = np.float32 with raise_if_dask_computes(): result = duck_array_ops.datetime_to_numeric( times, datetime_unit="h", dtype=dtype ) expected = 24 * np.arange(0, 35, 7).astype(dtype) np.testing.assert_array_equal(result, expected) with raise_if_dask_computes(): if dask: time = dask.array.asarray(times[1]) else: time = np.asarray(times[1]) result = duck_array_ops.datetime_to_numeric( time, offset=times[0], datetime_unit="h", dtype=int ) expected = np.array(24 * 7).astype(int) np.testing.assert_array_equal(result, expected) @requires_cftime def test_datetime_to_numeric_potential_overflow(time_unit: PDDatetimeUnitOptions): import cftime if time_unit == "ns": 
pytest.skip("out-of-bounds datetime64 overflow") dtype = f"M8[{time_unit}]" times = pd.date_range("2000", periods=5, freq="7D").values.astype(dtype) cftimes = date_range( "2000", periods=5, freq="7D", calendar="proleptic_gregorian", use_cftime=True ).values offset = np.datetime64("0001-01-01", time_unit) cfoffset = cftime.DatetimeProlepticGregorian(1, 1, 1) result = duck_array_ops.datetime_to_numeric( times, offset=offset, datetime_unit="D", dtype=int ) cfresult = duck_array_ops.datetime_to_numeric( cftimes, offset=cfoffset, datetime_unit="D", dtype=int ) expected = 730119 + np.arange(0, 35, 7) np.testing.assert_array_equal(result, expected) np.testing.assert_array_equal(cfresult, expected) def test_py_timedelta_to_float(): assert py_timedelta_to_float(dt.timedelta(days=1), "ns") == 86400 * 1e9 assert py_timedelta_to_float(dt.timedelta(days=1e6), "ps") == 86400 * 1e18 assert py_timedelta_to_float(dt.timedelta(days=1e6), "ns") == 86400 * 1e15 assert py_timedelta_to_float(dt.timedelta(days=1e6), "us") == 86400 * 1e12 assert py_timedelta_to_float(dt.timedelta(days=1e6), "ms") == 86400 * 1e9 assert py_timedelta_to_float(dt.timedelta(days=1e6), "s") == 86400 * 1e6 assert py_timedelta_to_float(dt.timedelta(days=1e6), "D") == 1e6 @pytest.mark.parametrize("np_dt_unit", ["D", "h", "m", "s", "ms", "us", "ns"]) def test_np_timedelta64_to_float( np_dt_unit: NPDatetimeUnitOptions, time_unit: PDDatetimeUnitOptions ): # tests any combination of source np.timedelta64 (NPDatetimeUnitOptions) with # np_timedelta_to_float with dedicated target unit (PDDatetimeUnitOptions) td = np.timedelta64(1, np_dt_unit) expected = _NS_PER_TIME_DELTA[np_dt_unit] / _NS_PER_TIME_DELTA[time_unit] out = np_timedelta64_to_float(td, datetime_unit=time_unit) np.testing.assert_allclose(out, expected) assert isinstance(out, float) out = np_timedelta64_to_float(np.atleast_1d(td), datetime_unit=time_unit) np.testing.assert_allclose(out, expected) @pytest.mark.parametrize("np_dt_unit", ["D", "h", "m", "s", "ms", "us", "ns"]) def test_pd_timedelta_to_float( np_dt_unit: NPDatetimeUnitOptions, time_unit: PDDatetimeUnitOptions ): # tests any combination of source pd.Timedelta (NPDatetimeUnitOptions) with # np_timedelta_to_float with dedicated target unit (PDDatetimeUnitOptions) td = pd.Timedelta(1, np_dt_unit) expected = _NS_PER_TIME_DELTA[np_dt_unit] / _NS_PER_TIME_DELTA[time_unit] out = pd_timedelta_to_float(td, datetime_unit=time_unit) np.testing.assert_allclose(out, expected) assert isinstance(out, float) @pytest.mark.parametrize( "td", [dt.timedelta(days=1), np.timedelta64(1, "D"), pd.Timedelta(1, "D"), "1 day"] ) def test_timedelta_to_numeric(td, time_unit: PDDatetimeUnitOptions): # Scalar input out = timedelta_to_numeric(td, time_unit) expected = _NS_PER_TIME_DELTA["D"] / _NS_PER_TIME_DELTA[time_unit] np.testing.assert_allclose(out, expected) assert isinstance(out, float) @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("skipna", [True, False]) def test_least_squares(use_dask, skipna): if use_dask and (not has_dask or not has_scipy): pytest.skip("requires dask and scipy") lhs = np.array([[1, 2], [1, 2], [3, 2]]) rhs = DataArray(np.array([3, 5, 7]), dims=("y",)) if use_dask: rhs = rhs.chunk({"y": 1}) coeffs, residuals = least_squares(lhs, rhs.data, skipna=skipna) np.testing.assert_allclose(coeffs, [1.5, 1.25]) np.testing.assert_allclose(residuals, [2.0]) @requires_dask @requires_bottleneck @pytest.mark.parametrize("method", ["sequential", "blelloch"]) @pytest.mark.parametrize( "arr", [ [np.nan, 1, 2, 3, 
np.nan, np.nan, np.nan, np.nan, 4, 5, np.nan, 6], [ np.nan, np.nan, np.nan, 2, np.nan, np.nan, np.nan, 9, np.nan, np.nan, np.nan, np.nan, ], ], ) def test_push_dask(method, arr): import bottleneck import dask.array as da arr = np.array(arr) chunks = list(range(1, 11)) + [(1, 2, 3, 2, 2, 1, 1)] for n in [None, 1, 2, 3, 4, 5, 11]: expected = bottleneck.push(arr, axis=0, n=n) for c in chunks: with raise_if_dask_computes(): actual = push(da.from_array(arr, chunks=c), axis=0, n=n, method=method) np.testing.assert_equal(actual, expected) def test_extension_array_equality(categorical1, int1): int_duck_array = PandasExtensionArray(int1) categorical_duck_array = PandasExtensionArray(categorical1) assert (int_duck_array != categorical_duck_array).all() assert (categorical_duck_array == categorical1).all() assert (int_duck_array[0:2] == int1[0:2]).all() def test_extension_array_singleton_equality(categorical1): categorical_duck_array = PandasExtensionArray(categorical1) assert (categorical_duck_array != "cat3").all() def test_extension_array_repr(int1): int_duck_array = PandasExtensionArray(int1) assert repr(int1) in repr(int_duck_array) def test_extension_array_attr(): array = pd.Categorical(["cat2", "cat1", "cat2", "cat3", "cat1"]) wrapped = PandasExtensionArray(array) assert_array_equal(array.categories, wrapped.categories) assert array.nbytes == wrapped.nbytes roundtripped = pickle.loads(pickle.dumps(wrapped)) assert isinstance(roundtripped, PandasExtensionArray) assert (roundtripped == wrapped).all() interval_array = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], closed="right") wrapped = PandasExtensionArray(interval_array) assert_array_equal(wrapped.left, interval_array.left, strict=True) assert wrapped.closed == interval_array.closed xarray-2025.09.0/xarray/tests/test_duck_array_wrapping.py000066400000000000000000000430261505620616400234650ustar00rootroot00000000000000import numpy as np import pandas as pd import pytest import xarray as xr # Don't run cupy in CI because it requires a GPU NAMESPACE_ARRAYS = { "cupy": { "attrs": { "array": "ndarray", "constructor": "asarray", }, "xfails": {"quantile": "no nanquantile"}, }, "dask.array": { "attrs": { "array": "Array", "constructor": "from_array", }, "xfails": { "argsort": "no argsort", "conjugate": "conj but no conjugate", "searchsorted": "dask.array.searchsorted but no Array.searchsorted", }, }, "jax.numpy": { "attrs": { "array": "ndarray", "constructor": "asarray", }, "xfails": { "rolling_construct": "no sliding_window_view", "rolling_reduce": "no sliding_window_view", "cumulative_construct": "no sliding_window_view", "cumulative_reduce": "no sliding_window_view", }, }, "pint": { "attrs": { "array": "Quantity", "constructor": "Quantity", }, "xfails": { "all": "returns a bool", "any": "returns a bool", "argmax": "returns an int", "argmin": "returns an int", "argsort": "returns an int", "count": "returns an int", "dot": "no tensordot", "full_like": "should work, see: https://github.com/hgrecco/pint/pull/1669", "idxmax": "returns the coordinate", "idxmin": "returns the coordinate", "isin": "returns a bool", "isnull": "returns a bool", "notnull": "returns a bool", "rolling_reduce": "no dispatch for numbagg/bottleneck", "cumulative_reduce": "no dispatch for numbagg/bottleneck", "searchsorted": "returns an int", "weighted": "no tensordot", }, }, "sparse": { "attrs": { "array": "COO", "constructor": "COO", }, "xfails": { "cov": "dense output", "corr": "no nanstd", "cross": "no cross", "count": "dense output", "dot": "fails on some 
platforms/versions", "isin": "no isin", "rolling_construct": "no sliding_window_view", "rolling_reduce": "no sliding_window_view", "cumulative_construct": "no sliding_window_view", "cumulative_reduce": "no sliding_window_view", "coarsen_construct": "pad constant_values must be fill_value", "coarsen_reduce": "pad constant_values must be fill_value", "weighted": "fill_value error", "coarsen": "pad constant_values must be fill_value", "quantile": "no non skipping version", "differentiate": "no gradient", "argmax": "no nan skipping version", "argmin": "no nan skipping version", "idxmax": "no nan skipping version", "idxmin": "no nan skipping version", "median": "no nan skipping version", "std": "no nan skipping version", "var": "no nan skipping version", "cumsum": "no cumsum", "cumprod": "no cumprod", "argsort": "no argsort", "conjugate": "no conjugate", "searchsorted": "no searchsorted", "shift": "pad constant_values must be fill_value", "pad": "pad constant_values must be fill_value", }, }, } try: import jax # type: ignore[import-not-found,unused-ignore] # enable double-precision jax.config.update("jax_enable_x64", True) except ImportError: pass class _BaseTest: def setup_for_test(self, request, namespace): self.namespace = namespace self.xp = pytest.importorskip(namespace) self.Array = getattr(self.xp, NAMESPACE_ARRAYS[namespace]["attrs"]["array"]) self.constructor = getattr( self.xp, NAMESPACE_ARRAYS[namespace]["attrs"]["constructor"] ) xarray_method = request.node.name.split("test_")[1].split("[")[0] if xarray_method in NAMESPACE_ARRAYS[namespace]["xfails"]: reason = NAMESPACE_ARRAYS[namespace]["xfails"][xarray_method] pytest.xfail(f"xfail for {self.namespace}: {reason}") def get_test_dataarray(self): data = np.asarray([[1, 2, 3, np.nan, 5]]) x = np.arange(5) data = self.constructor(data) return xr.DataArray( data, dims=["y", "x"], coords={"y": [1], "x": x}, name="foo", ) @pytest.mark.parametrize("namespace", NAMESPACE_ARRAYS) class TestTopLevelMethods(_BaseTest): @pytest.fixture(autouse=True) def setUp(self, request, namespace): self.setup_for_test(request, namespace) self.x1 = self.get_test_dataarray() self.x2 = self.get_test_dataarray().assign_coords(x=np.arange(2, 7)) def test_apply_ufunc(self): func = lambda x: x + 1 result = xr.apply_ufunc(func, self.x1, dask="parallelized") assert isinstance(result.data, self.Array) def test_align(self): result = xr.align(self.x1, self.x2) assert isinstance(result[0].data, self.Array) assert isinstance(result[1].data, self.Array) def test_broadcast(self): result = xr.broadcast(self.x1, self.x2) assert isinstance(result[0].data, self.Array) assert isinstance(result[1].data, self.Array) def test_concat(self): result = xr.concat([self.x1, self.x2], dim="x") assert isinstance(result.data, self.Array) def test_merge(self): result = xr.merge([self.x1, self.x2], compat="override", join="outer") assert isinstance(result.foo.data, self.Array) def test_where(self): x1, x2 = xr.align(self.x1, self.x2, join="inner") result = xr.where(x1 > 2, x1, x2) assert isinstance(result.data, self.Array) def test_full_like(self): result = xr.full_like(self.x1, 0) assert isinstance(result.data, self.Array) def test_cov(self): result = xr.cov(self.x1, self.x2) assert isinstance(result.data, self.Array) def test_corr(self): result = xr.corr(self.x1, self.x2) assert isinstance(result.data, self.Array) def test_cross(self): x1, x2 = xr.align(self.x1.squeeze(), self.x2.squeeze(), join="inner") result = xr.cross(x1, x2, dim="x") assert isinstance(result.data, self.Array) def 
test_dot(self): result = xr.dot(self.x1, self.x2) assert isinstance(result.data, self.Array) def test_map_blocks(self): result = xr.map_blocks(lambda x: x + 1, self.x1) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("namespace", NAMESPACE_ARRAYS) class TestDataArrayMethods(_BaseTest): @pytest.fixture(autouse=True) def setUp(self, request, namespace): self.setup_for_test(request, namespace) self.x = self.get_test_dataarray() def test_loc(self): result = self.x.loc[{"x": slice(1, 3)}] assert isinstance(result.data, self.Array) def test_isel(self): result = self.x.isel(x=slice(1, 3)) assert isinstance(result.data, self.Array) def test_sel(self): result = self.x.sel(x=slice(1, 3)) assert isinstance(result.data, self.Array) def test_squeeze(self): result = self.x.squeeze("y") assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="interp uses numpy and scipy") def test_interp(self): # TODO: some cases could be made to work result = self.x.interp(x=2.5) assert isinstance(result.data, self.Array) def test_isnull(self): result = self.x.isnull() assert isinstance(result.data, self.Array) def test_notnull(self): result = self.x.notnull() assert isinstance(result.data, self.Array) def test_count(self): result = self.x.count() assert isinstance(result.data, self.Array) def test_dropna(self): result = self.x.dropna(dim="x") assert isinstance(result.data, self.Array) def test_fillna(self): result = self.x.fillna(0) assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="ffill uses bottleneck or numbagg") def test_ffill(self): result = self.x.ffill() assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="bfill uses bottleneck or numbagg") def test_bfill(self): result = self.x.bfill() assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="interpolate_na uses numpy and scipy") def test_interpolate_na(self): result = self.x.interpolate_na() assert isinstance(result.data, self.Array) def test_where(self): result = self.x.where(self.x > 2) assert isinstance(result.data, self.Array) def test_isin(self): test_elements = self.constructor(np.asarray([1])) result = self.x.isin(test_elements) assert isinstance(result.data, self.Array) def test_groupby(self): result = self.x.groupby("x").mean() assert isinstance(result.data, self.Array) def test_groupby_bins(self): result = self.x.groupby_bins("x", bins=[0, 2, 4, 6]).mean() assert isinstance(result.data, self.Array) def test_rolling_iter(self): result = self.x.rolling(x=3) elem = next(iter(result))[1] assert isinstance(elem.data, self.Array) def test_rolling_construct(self): result = self.x.rolling(x=3).construct(x="window") assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_rolling_reduce(self, skipna): result = self.x.rolling(x=3).mean(skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="rolling_exp uses numbagg") def test_rolling_exp_reduce(self): result = self.x.rolling_exp(x=3).mean() assert isinstance(result.data, self.Array) def test_cumulative_iter(self): result = self.x.cumulative("x") elem = next(iter(result))[1] assert isinstance(elem.data, self.Array) def test_cumulative_construct(self): result = self.x.cumulative("x").construct(x="window") assert isinstance(result.data, self.Array) def test_cumulative_reduce(self): result = self.x.cumulative("x").sum() assert isinstance(result.data, self.Array) def test_weighted(self): result = self.x.weighted(self.x.fillna(0)).mean() assert 
isinstance(result.data, self.Array) def test_coarsen_construct(self): result = self.x.coarsen(x=2, boundary="pad").construct(x=["a", "b"]) assert isinstance(result.data, self.Array) def test_coarsen_reduce(self): result = self.x.coarsen(x=2, boundary="pad").mean() assert isinstance(result.data, self.Array) def test_resample(self): time_coord = pd.date_range("2000-01-01", periods=5) result = self.x.assign_coords(x=time_coord).resample(x="D").mean() assert isinstance(result.data, self.Array) def test_diff(self): result = self.x.diff("x") assert isinstance(result.data, self.Array) def test_dot(self): result = self.x.dot(self.x) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_quantile(self, skipna): result = self.x.quantile(0.5, skipna=skipna) assert isinstance(result.data, self.Array) def test_differentiate(self): # edge_order is not implemented in jax, and only supports passing None edge_order = None if self.namespace == "jax.numpy" else 1 result = self.x.differentiate("x", edge_order=edge_order) assert isinstance(result.data, self.Array) def test_integrate(self): result = self.x.integrate("x") assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="polyfit uses numpy linalg") def test_polyfit(self): # TODO: this could work, there are just a lot of different linalg calls result = self.x.polyfit("x", 1) assert isinstance(result.polyfit_coefficients.data, self.Array) def test_map_blocks(self): result = self.x.map_blocks(lambda x: x + 1) assert isinstance(result.data, self.Array) def test_all(self): result = self.x.all(dim="x") assert isinstance(result.data, self.Array) def test_any(self): result = self.x.any(dim="x") assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_argmax(self, skipna): result = self.x.argmax(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_argmin(self, skipna): result = self.x.argmin(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_idxmax(self, skipna): result = self.x.idxmax(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_idxmin(self, skipna): result = self.x.idxmin(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_max(self, skipna): result = self.x.max(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_min(self, skipna): result = self.x.min(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_mean(self, skipna): result = self.x.mean(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_median(self, skipna): result = self.x.median(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_prod(self, skipna): result = self.x.prod(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_sum(self, skipna): result = self.x.sum(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_std(self, skipna): result = self.x.std(dim="x", 
skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_var(self, skipna): result = self.x.var(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_cumsum(self, skipna): result = self.x.cumsum(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) @pytest.mark.parametrize("skipna", [True, False]) def test_cumprod(self, skipna): result = self.x.cumprod(dim="x", skipna=skipna) assert isinstance(result.data, self.Array) def test_argsort(self): result = self.x.argsort() assert isinstance(result.data, self.Array) def test_astype(self): result = self.x.astype(int) assert isinstance(result.data, self.Array) def test_clip(self): result = self.x.clip(min=2.0, max=4.0) assert isinstance(result.data, self.Array) def test_conj(self): result = self.x.conj() assert isinstance(result.data, self.Array) def test_conjugate(self): result = self.x.conjugate() assert isinstance(result.data, self.Array) def test_imag(self): result = self.x.imag assert isinstance(result.data, self.Array) def test_searchsorted(self): v = self.constructor(np.asarray([3])) result = self.x.squeeze().searchsorted(v) assert isinstance(result, self.Array) def test_round(self): result = self.x.round() assert isinstance(result.data, self.Array) def test_real(self): result = self.x.real assert isinstance(result.data, self.Array) def test_T(self): result = self.x.T assert isinstance(result.data, self.Array) @pytest.mark.xfail(reason="rank uses bottleneck") def test_rank(self): # TODO: scipy has rankdata, as does jax, so this can work result = self.x.rank() assert isinstance(result.data, self.Array) def test_transpose(self): result = self.x.transpose() assert isinstance(result.data, self.Array) def test_stack(self): result = self.x.stack(z=("x", "y")) assert isinstance(result.data, self.Array) def test_unstack(self): result = self.x.stack(z=("x", "y")).unstack("z") assert isinstance(result.data, self.Array) def test_shift(self): result = self.x.shift(x=1) assert isinstance(result.data, self.Array) def test_roll(self): result = self.x.roll(x=1) assert isinstance(result.data, self.Array) def test_pad(self): result = self.x.pad(x=1) assert isinstance(result.data, self.Array) def test_sortby(self): result = self.x.sortby("x") assert isinstance(result.data, self.Array) def test_broadcast_like(self): result = self.x.broadcast_like(self.x) assert isinstance(result.data, self.Array) xarray-2025.09.0/xarray/tests/test_error_messages.py000066400000000000000000000007771505620616400224600ustar00rootroot00000000000000""" This new file is intended to test the quality & friendliness of error messages that are raised by xarray. It's currently separate from the standard tests, which are more focused on the functions working (though we could consider integrating them.). """ import pytest def test_no_var_in_dataset(ds): with pytest.raises( KeyError, match=( r"No variable named 'foo'. 
Variables on the dataset include \['z1', 'z2', 'x', 'time', 'c', 'y'\]" ), ): ds["foo"] xarray-2025.09.0/xarray/tests/test_extensions.py000066400000000000000000000057061505620616400216340ustar00rootroot00000000000000from __future__ import annotations import pickle import pytest import xarray as xr from xarray.core.extensions import register_datatree_accessor from xarray.tests import assert_identical @register_datatree_accessor("example_accessor") @xr.register_dataset_accessor("example_accessor") @xr.register_dataarray_accessor("example_accessor") class ExampleAccessor: """For the pickling tests below.""" def __init__(self, xarray_obj): self.obj = xarray_obj class TestAccessor: def test_register(self) -> None: @register_datatree_accessor("demo") @xr.register_dataset_accessor("demo") @xr.register_dataarray_accessor("demo") class DemoAccessor: """Demo accessor.""" def __init__(self, xarray_obj): self._obj = xarray_obj @property def foo(self): return "bar" dt: xr.DataTree = xr.DataTree() assert dt.demo.foo == "bar" ds = xr.Dataset() assert ds.demo.foo == "bar" da = xr.DataArray(0) assert da.demo.foo == "bar" # accessor is cached assert ds.demo is ds.demo # check descriptor assert ds.demo.__doc__ == "Demo accessor." # TODO: typing doesn't seem to work with accessors assert xr.Dataset.demo.__doc__ == "Demo accessor." # type: ignore[attr-defined] assert isinstance(ds.demo, DemoAccessor) assert xr.Dataset.demo is DemoAccessor # type: ignore[attr-defined] # ensure we can remove it del xr.Dataset.demo # type: ignore[attr-defined] assert not hasattr(xr.Dataset, "demo") with pytest.warns(Warning, match="overriding a preexisting attribute"): @xr.register_dataarray_accessor("demo") class Foo: pass # it didn't get registered again assert not hasattr(xr.Dataset, "demo") def test_pickle_dataset(self) -> None: ds = xr.Dataset() ds_restored = pickle.loads(pickle.dumps(ds)) assert_identical(ds, ds_restored) # state save on the accessor is restored assert ds.example_accessor is ds.example_accessor ds.example_accessor.value = "foo" ds_restored = pickle.loads(pickle.dumps(ds)) assert_identical(ds, ds_restored) assert ds_restored.example_accessor.value == "foo" def test_pickle_dataarray(self) -> None: array = xr.Dataset() assert array.example_accessor is array.example_accessor array_restored = pickle.loads(pickle.dumps(array)) assert_identical(array, array_restored) def test_broken_accessor(self) -> None: # regression test for GH933 @xr.register_dataset_accessor("stupid_accessor") class BrokenAccessor: def __init__(self, xarray_obj): raise AttributeError("broken") with pytest.raises(RuntimeError, match=r"error initializing"): _ = xr.Dataset().stupid_accessor xarray-2025.09.0/xarray/tests/test_formatting.py000066400000000000000000001174601505620616400216100ustar00rootroot00000000000000from __future__ import annotations import sys from textwrap import dedent import numpy as np import pandas as pd import pytest import xarray as xr from xarray.core import formatting from xarray.core.indexes import Index from xarray.tests import requires_cftime, requires_dask, requires_netCDF4 class CustomIndex(Index): names: tuple[str, ...] 
def __init__(self, names: tuple[str, ...]): self.names = names def __repr__(self): return f"CustomIndex(coords={self.names})" class TestFormatting: def test_get_indexer_at_least_n_items(self) -> None: cases = [ ((20,), (slice(10),), (slice(-10, None),)), ((3, 20), (0, slice(10)), (-1, slice(-10, None))), ((2, 10), (0, slice(10)), (-1, slice(-10, None))), ((2, 5), (slice(2), slice(None)), (slice(-2, None), slice(None))), ((1, 2, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))), ((2, 3, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))), ( (1, 10, 1), (0, slice(10), slice(None)), (-1, slice(-10, None), slice(None)), ), ( (2, 5, 1), (slice(2), slice(None), slice(None)), (slice(-2, None), slice(None), slice(None)), ), ((2, 5, 3), (0, slice(4), slice(None)), (-1, slice(-4, None), slice(None))), ( (2, 3, 3), (slice(2), slice(None), slice(None)), (slice(-2, None), slice(None), slice(None)), ), ] for shape, start_expected, end_expected in cases: actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=False) assert start_expected == actual actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True) assert end_expected == actual def test_first_n_items(self) -> None: array = np.arange(100).reshape(10, 5, 2) for n in [3, 10, 13, 100, 200]: actual = formatting.first_n_items(array, n) expected = array.flat[:n] assert (expected == actual).all() with pytest.raises(ValueError, match=r"at least one item"): formatting.first_n_items(array, 0) def test_last_n_items(self) -> None: array = np.arange(100).reshape(10, 5, 2) for n in [3, 10, 13, 100, 200]: actual = formatting.last_n_items(array, n) expected = array.flat[-n:] assert (expected == actual).all() with pytest.raises(ValueError, match=r"at least one item"): formatting.first_n_items(array, 0) def test_last_item(self) -> None: array = np.arange(100) reshape = ((10, 10), (1, 100), (2, 2, 5, 5)) expected = np.array([99]) for r in reshape: result = formatting.last_item(array.reshape(r)) assert result == expected def test_format_item(self) -> None: cases = [ (pd.Timestamp("2000-01-01T12"), "2000-01-01T12:00:00"), (pd.Timestamp("2000-01-01"), "2000-01-01"), (pd.Timestamp("NaT"), "NaT"), (pd.Timedelta("10 days 1 hour"), "10 days 01:00:00"), (pd.Timedelta("-3 days"), "-3 days +00:00:00"), (pd.Timedelta("3 hours"), "0 days 03:00:00"), (pd.Timedelta("NaT"), "NaT"), ("foo", "'foo'"), (b"foo", "b'foo'"), (1, "1"), (1.0, "1.0"), (np.float16(1.1234), "1.123"), (np.float32(1.0111111), "1.011"), (np.float64(22.222222), "22.22"), (np.zeros((1, 1)), "[[0.]]"), (np.zeros(2), "[0. 0.]"), (np.zeros((2, 2)), "[[0. 0.]\n [0. 
0.]]"), ] for item, expected in cases: actual = formatting.format_item(item) assert expected == actual def test_format_items(self) -> None: cases = [ (np.arange(4) * np.timedelta64(1, "D"), "0 days 1 days 2 days 3 days"), ( np.arange(4) * np.timedelta64(3, "h"), "00:00:00 03:00:00 06:00:00 09:00:00", ), ( np.arange(4) * np.timedelta64(500, "ms"), "00:00:00 00:00:00.500000 00:00:01 00:00:01.500000", ), (pd.to_timedelta(["NaT", "0s", "1s", "NaT"]), "NaT 00:00:00 00:00:01 NaT"), # type: ignore[arg-type, unused-ignore] ( pd.to_timedelta(["1 day 1 hour", "1 day", "0 hours"]), # type: ignore[arg-type, unused-ignore] "1 days 01:00:00 1 days 00:00:00 0 days 00:00:00", ), ([1, 2, 3], "1 2 3"), ] for item, expected in cases: actual = " ".join(formatting.format_items(item)) assert expected == actual def test_format_array_flat(self) -> None: actual = formatting.format_array_flat(np.arange(100), 2) expected = "..." assert expected == actual actual = formatting.format_array_flat(np.arange(100), 9) expected = "0 ... 99" assert expected == actual actual = formatting.format_array_flat(np.arange(100), 10) expected = "0 1 ... 99" assert expected == actual actual = formatting.format_array_flat(np.arange(100), 13) expected = "0 1 ... 98 99" assert expected == actual actual = formatting.format_array_flat(np.arange(100), 15) expected = "0 1 2 ... 98 99" assert expected == actual # NB: Probably not ideal; an alternative would be cutting after the # first ellipsis actual = formatting.format_array_flat(np.arange(100.0), 11) expected = "0.0 ... ..." assert expected == actual actual = formatting.format_array_flat(np.arange(100.0), 12) expected = "0.0 ... 99.0" assert expected == actual actual = formatting.format_array_flat(np.arange(3), 5) expected = "0 1 2" assert expected == actual actual = formatting.format_array_flat(np.arange(4.0), 11) expected = "0.0 ... 3.0" assert expected == actual actual = formatting.format_array_flat(np.arange(0), 0) expected = "" assert expected == actual actual = formatting.format_array_flat(np.arange(1), 1) expected = "0" assert expected == actual actual = formatting.format_array_flat(np.arange(2), 3) expected = "0 1" assert expected == actual actual = formatting.format_array_flat(np.arange(4), 7) expected = "0 1 2 3" assert expected == actual actual = formatting.format_array_flat(np.arange(5), 7) expected = "0 ... 4" assert expected == actual long_str = [" ".join(["hello world" for _ in range(100)])] actual = formatting.format_array_flat(np.asarray([long_str]), 21) expected = "'hello world hello..." assert expected == actual def test_pretty_print(self) -> None: assert formatting.pretty_print("abcdefghij", 8) == "abcde..." 
assert formatting.pretty_print("ß", 1) == "ß" def test_maybe_truncate(self) -> None: assert formatting.maybe_truncate("ß", 10) == "ß" def test_format_timestamp_invalid_pandas_format(self) -> None: expected = "2021-12-06 17:00:00 00" with pytest.raises(ValueError): formatting.format_timestamp(expected) def test_format_timestamp_out_of_bounds(self) -> None: from datetime import datetime date = datetime(1300, 12, 1) expected = "1300-12-01" result = formatting.format_timestamp(date) assert result == expected date = datetime(2300, 12, 1) expected = "2300-12-01" result = formatting.format_timestamp(date) assert result == expected def test_attribute_repr(self) -> None: short = formatting.summarize_attr("key", "Short string") long = formatting.summarize_attr("key", 100 * "Very long string ") newlines = formatting.summarize_attr("key", "\n\n\n") tabs = formatting.summarize_attr("key", "\t\t\t") assert short == " key: Short string" assert len(long) <= 80 assert long.endswith("...") assert "\n" not in newlines assert "\t" not in tabs def test_index_repr(self) -> None: coord_names = ("x", "y") index = CustomIndex(coord_names) names = ("x",) normal = formatting.summarize_index(names, index, col_width=20) assert names[0] in normal assert len(normal.splitlines()) == len(names) assert "CustomIndex" in normal class IndexWithInlineRepr(CustomIndex): def _repr_inline_(self, max_width: int): return f"CustomIndex[{', '.join(self.names)}]" index = IndexWithInlineRepr(coord_names) inline = formatting.summarize_index(names, index, col_width=20) assert names[0] in inline assert index._repr_inline_(max_width=40) in inline @pytest.mark.parametrize( "names", ( ("x",), ("x", "y"), ("x", "y", "z"), ("x", "y", "z", "a"), ), ) def test_index_repr_grouping(self, names) -> None: index = CustomIndex(names) normal = formatting.summarize_index(names, index, col_width=20) assert all(name in normal for name in names) assert len(normal.splitlines()) == len(names) assert "CustomIndex" in normal hint_chars = [line[2] for line in normal.splitlines()] if len(names) <= 1: assert hint_chars == [" "] else: assert hint_chars[0] == "β”Œ" and hint_chars[-1] == "β””" assert len(names) == 2 or hint_chars[1:-1] == ["β”‚"] * (len(names) - 2) def test_diff_array_repr(self) -> None: da_a = xr.DataArray( np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"), dims=("x", "y"), coords={ "x": np.array(["a", "b"], dtype="U1"), "y": np.array([1, 2, 3], dtype="int64"), }, attrs={"units": "m", "description": "desc"}, ) da_b = xr.DataArray( np.array([1, 2], dtype="int64"), dims="x", coords={ "x": np.array(["a", "c"], dtype="U1"), "label": ("x", np.array([1, 2], dtype="int64")), }, attrs={"units": "kg"}, ) byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( f"""\ Left and right DataArray objects are not identical Differing dimensions: (x: 2, y: 3) != (x: 2) Differing values: L array([[1, 2, 3], [4, 5, 6]], dtype=int64) R array([1, 2], dtype=int64) Differing coordinates: L * x (x) {byteorder}U1 8B 'a' 'b' R * x (x) {byteorder}U1 8B 'a' 'c' Coordinates only on the left object: * y (y) int64 24B 1 2 3 Coordinates only on the right object: label (x) int64 16B 1 2 Differing attributes: L units: m R units: kg Attributes only on the left object: description: desc""" ) actual = formatting.diff_array_repr(da_a, da_b, "identical") try: assert actual == expected except AssertionError: # depending on platform, dtype may not be shown in numpy array repr assert actual == expected.replace(", dtype=int64", "") da_a = xr.DataArray( np.array([[1, 2, 3], [4, 
5, 6]], dtype="int8"), dims=("x", "y"), coords=xr.Coordinates( { "x": np.array([True, False], dtype="bool"), "y": np.array([1, 2, 3], dtype="int16"), }, indexes={"y": CustomIndex(("y",))}, ), ) da_b = xr.DataArray( np.array([1, 2], dtype="int8"), dims="x", coords=xr.Coordinates( { "x": np.array([True, False], dtype="bool"), "label": ("x", np.array([1, 2], dtype="int16")), }, indexes={"label": CustomIndex(("label",))}, ), ) expected = dedent( """\ Left and right DataArray objects are not equal Differing dimensions: (x: 2, y: 3) != (x: 2) Differing values: L array([[1, 2, 3], [4, 5, 6]], dtype=int8) R array([1, 2], dtype=int8) Coordinates only on the left object: * y (y) int16 6B 1 2 3 Coordinates only on the right object: * label (x) int16 4B 1 2 """.rstrip() ) actual = formatting.diff_array_repr(da_a, da_b, "equals") assert actual == expected va = xr.Variable( "x", np.array([1, 2, 3], dtype="int64"), {"title": "test Variable"} ) vb = xr.Variable(("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")) expected = dedent( """\ Left and right Variable objects are not equal Differing dimensions: (x: 3) != (x: 2, y: 3) Differing values: L array([1, 2, 3], dtype=int64) R array([[1, 2, 3], [4, 5, 6]], dtype=int64)""" ) actual = formatting.diff_array_repr(va, vb, "equals") try: assert actual == expected except AssertionError: assert actual == expected.replace(", dtype=int64", "") @pytest.mark.filterwarnings("error") def test_diff_attrs_repr_with_array(self) -> None: attrs_a = {"attr": np.array([0, 1])} attrs_b = {"attr": 1} expected = dedent( """\ Differing attributes: L attr: [0 1] R attr: 1 """ ).strip() actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals") assert expected == actual attrs_c = {"attr": np.array([-3, 5])} expected = dedent( """\ Differing attributes: L attr: [0 1] R attr: [-3 5] """ ).strip() actual = formatting.diff_attrs_repr(attrs_a, attrs_c, "equals") assert expected == actual # should not raise a warning attrs_c = {"attr": np.array([0, 1, 2])} expected = dedent( """\ Differing attributes: L attr: [0 1] R attr: [0 1 2] """ ).strip() actual = formatting.diff_attrs_repr(attrs_a, attrs_c, "equals") assert expected == actual def test__diff_mapping_repr_array_attrs_on_variables(self) -> None: a = { "a": xr.DataArray( dims="x", data=np.array([1], dtype="int16"), attrs={"b": np.array([1, 2], dtype="int8")}, ) } b = { "a": xr.DataArray( dims="x", data=np.array([1], dtype="int16"), attrs={"b": np.array([2, 3], dtype="int8")}, ) } actual = formatting.diff_data_vars_repr(a, b, compat="identical", col_width=8) expected = dedent( """\ Differing data variables: L a (x) int16 2B 1 Differing variable attributes: b: [1 2] R a (x) int16 2B 1 Differing variable attributes: b: [2 3] """.rstrip() ) assert actual == expected def test_diff_dataset_repr(self) -> None: ds_a = xr.Dataset( data_vars={ "var1": (("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")), "var2": ("x", np.array([3, 4], dtype="int64")), }, coords={ "x": ( "x", np.array(["a", "b"], dtype="U1"), {"foo": "bar", "same": "same"}, ), "y": np.array([1, 2, 3], dtype="int64"), }, attrs={"title": "mytitle", "description": "desc"}, ) ds_b = xr.Dataset( data_vars={"var1": ("x", np.array([1, 2], dtype="int64"))}, coords={ "x": ( "x", np.array(["a", "c"], dtype="U1"), {"source": 0, "foo": "baz", "same": "same"}, ), "label": ("x", np.array([1, 2], dtype="int64")), }, attrs={"title": "newtitle"}, ) byteorder = "<" if sys.byteorder == "little" else ">" expected = dedent( f"""\ Left and right Dataset objects are not identical 
Differing dimensions: (x: 2, y: 3) != (x: 2) Differing coordinates: L * x (x) {byteorder}U1 8B 'a' 'b' Differing variable attributes: foo: bar R * x (x) {byteorder}U1 8B 'a' 'c' Differing variable attributes: source: 0 foo: baz Coordinates only on the left object: * y (y) int64 24B 1 2 3 Coordinates only on the right object: label (x) int64 16B 1 2 Differing data variables: L var1 (x, y) int64 48B 1 2 3 4 5 6 R var1 (x) int64 16B 1 2 Data variables only on the left object: var2 (x) int64 16B 3 4 Differing attributes: L title: mytitle R title: newtitle Attributes only on the left object: description: desc""" ) actual = formatting.diff_dataset_repr(ds_a, ds_b, "identical") assert actual == expected def test_array_repr(self) -> None: ds = xr.Dataset( coords={ "foo": np.array([1, 2, 3], dtype=np.uint64), "bar": np.array([1, 2, 3], dtype=np.uint64), } ) ds[(1, 2)] = xr.DataArray(np.array([0], dtype=np.uint64), dims="test") ds_12 = ds[(1, 2)] # Test repr function behaves correctly: actual = formatting.array_repr(ds_12) expected = dedent( """\ Size: 8B array([0], dtype=uint64) Dimensions without coordinates: test""" ) assert actual == expected # Test repr, str prints returns correctly as well: assert repr(ds_12) == expected assert str(ds_12) == expected # f-strings (aka format(...)) by default should use the repr: actual = f"{ds_12}" assert actual == expected with xr.set_options(display_expand_data=False): actual = formatting.array_repr(ds[(1, 2)]) expected = dedent( """\ Size: 8B 0 Dimensions without coordinates: test""" ) assert actual == expected def test_array_repr_variable(self) -> None: var = xr.Variable("x", [0, 1]) formatting.array_repr(var) with xr.set_options(display_expand_data=False): formatting.array_repr(var) def test_array_repr_recursive(self) -> None: # GH:issue:7111 # direct recursion var = xr.Variable("x", [0, 1]) var.attrs["x"] = var formatting.array_repr(var) da = xr.DataArray([0, 1], dims=["x"]) da.attrs["x"] = da formatting.array_repr(da) # indirect recursion var.attrs["x"] = da da.attrs["x"] = var formatting.array_repr(var) formatting.array_repr(da) @requires_dask def test_array_scalar_format(self) -> None: # Test numpy scalars: var = xr.DataArray(np.array(0)) assert format(var, "") == repr(var) assert format(var, "d") == "0" assert format(var, ".2f") == "0.00" # Test dask scalars, not supported however: import dask.array as da var = xr.DataArray(da.array(0)) assert format(var, "") == repr(var) with pytest.raises(TypeError) as excinfo: format(var, ".2f") assert "unsupported format string passed to" in str(excinfo.value) # Test numpy arrays raises: var = xr.DataArray([0.1, 0.2]) with pytest.raises(NotImplementedError) as excinfo: # type: ignore[assignment] format(var, ".2f") assert "Using format_spec is only supported" in str(excinfo.value) def test_datatree_print_empty_node(self): dt: xr.DataTree = xr.DataTree(name="root") printout = str(dt) assert printout == "\nGroup: /" def test_datatree_print_empty_node_with_attrs(self): dat = xr.Dataset(attrs={"note": "has attrs"}) dt: xr.DataTree = xr.DataTree(name="root", dataset=dat) printout = str(dt) assert printout == dedent( """\ Group: / Attributes: note: has attrs""" ) def test_datatree_print_node_with_data(self): dat = xr.Dataset({"a": [0, 2]}) dt: xr.DataTree = xr.DataTree(name="root", dataset=dat) printout = str(dt) expected = [ "", "Group: /", "Dimensions", "Coordinates", "a", ] for expected_line, printed_line in zip( expected, printout.splitlines(), strict=True ): assert expected_line in printed_line def 
test_datatree_printout_nested_node(self): dat = xr.Dataset({"a": [0, 2]}) root = xr.DataTree.from_dict( { "/results": dat, } ) printout = str(root) assert printout.splitlines()[3].startswith(" ") def test_datatree_repr_of_node_with_data(self): dat = xr.Dataset({"a": [0, 2]}) dt: xr.DataTree = xr.DataTree(name="root", dataset=dat) assert "Coordinates" in repr(dt) def test_diff_datatree_repr_different_groups(self): dt_1: xr.DataTree = xr.DataTree.from_dict({"a": None}) dt_2: xr.DataTree = xr.DataTree.from_dict({"b": None}) expected = dedent( """\ Left and right DataTree objects are not identical Children at root node do not match: ['a'] vs ['b']""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "identical") assert actual == expected def test_diff_datatree_repr_different_subgroups(self): dt_1: xr.DataTree = xr.DataTree.from_dict({"a": None, "a/b": None, "a/c": None}) dt_2: xr.DataTree = xr.DataTree.from_dict({"a": None, "a/b": None}) expected = dedent( """\ Left and right DataTree objects are not isomorphic Children at node 'a' do not match: ['b', 'c'] vs ['b']""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "isomorphic") assert actual == expected def test_diff_datatree_repr_node_data(self): # casting to int64 explicitly ensures that int64s are created on all architectures ds1 = xr.Dataset({"u": np.int64(0), "v": np.int64(1)}) ds3 = xr.Dataset({"w": np.int64(5)}) dt_1: xr.DataTree = xr.DataTree.from_dict({"a": ds1, "a/b": ds3}) ds2 = xr.Dataset({"u": np.int64(0)}) ds4 = xr.Dataset({"w": np.int64(6)}) dt_2: xr.DataTree = xr.DataTree.from_dict({"a": ds2, "a/b": ds4}, name="foo") expected = dedent( """\ Left and right DataTree objects are not identical Differing names: None != 'foo' Data at node 'a' does not match: Data variables only on the left object: v int64 8B 1 Data at node 'a/b' does not match: Differing data variables: L w int64 8B 5 R w int64 8B 6""" ) actual = formatting.diff_datatree_repr(dt_1, dt_2, "identical") assert actual == expected def test_diff_datatree_repr_equals(self) -> None: ds1 = xr.Dataset(data_vars={"data": ("y", [5, 2])}) ds2 = xr.Dataset(data_vars={"data": (("x", "y"), [[5, 2]])}) dt1 = xr.DataTree.from_dict({"node": ds1}) dt2 = xr.DataTree.from_dict({"node": ds2}) expected = dedent( """\ Left and right DataTree objects are not equal Data at node 'node' does not match: Differing dimensions: (y: 2) != (x: 1, y: 2) Differing data variables: L data (y) int64 16B 5 2 R data (x, y) int64 16B 5 2""" ) actual = formatting.diff_datatree_repr(dt1, dt2, "equals") assert actual == expected def test_inline_variable_array_repr_custom_repr() -> None: class CustomArray: def __init__(self, value, attr): self.value = value self.attr = attr def _repr_inline_(self, width): formatted = f"({self.attr}) {self.value}" if len(formatted) > width: formatted = f"({self.attr}) ..." 
return formatted def __array_namespace__(self, *args, **kwargs): return NotImplemented @property def shape(self) -> tuple[int, ...]: return self.value.shape @property def dtype(self): return self.value.dtype @property def ndim(self): return self.value.ndim value = CustomArray(np.array([20, 40]), "m") variable = xr.Variable("x", value) max_width = 10 actual = formatting.inline_variable_array_repr(variable, max_width=10) assert actual == value._repr_inline_(max_width) def test_set_numpy_options() -> None: original_options = np.get_printoptions() with formatting.set_numpy_options(threshold=10): assert len(repr(np.arange(500))) < 200 # original options are restored assert np.get_printoptions() == original_options def test_short_array_repr() -> None: cases = [ np.random.randn(500), np.random.randn(20, 20), np.random.randn(5, 10, 15), np.random.randn(5, 10, 15, 3), np.random.randn(100, 5, 1), ] # number of lines: # for default numpy repr: 167, 140, 254, 248, 599 # for short_array_repr: 1, 7, 24, 19, 25 for array in cases: num_lines = formatting.short_array_repr(array).count("\n") + 1 assert num_lines < 30 # threshold option (default: 200) array2 = np.arange(100) assert "..." not in formatting.short_array_repr(array2) with xr.set_options(display_values_threshold=10): assert "..." in formatting.short_array_repr(array2) def test_large_array_repr_length() -> None: da = xr.DataArray(np.random.randn(100, 5, 1)) result = repr(da).splitlines() assert len(result) < 50 @requires_netCDF4 def test_repr_file_collapsed(tmp_path) -> None: arr_to_store = xr.DataArray(np.arange(300, dtype=np.int64), dims="test") arr_to_store.to_netcdf(tmp_path / "test.nc", engine="netcdf4") with ( xr.open_dataarray(tmp_path / "test.nc") as arr, xr.set_options(display_expand_data=False), ): actual = repr(arr) expected = dedent( """\ Size: 2kB [300 values with dtype=int64] Dimensions without coordinates: test""" ) assert actual == expected arr_loaded = arr.compute() actual = arr_loaded.__repr__() expected = dedent( """\ Size: 2kB 0 1 2 3 4 5 6 7 8 9 10 11 12 ... 
288 289 290 291 292 293 294 295 296 297 298 299 Dimensions without coordinates: test""" ) assert actual == expected @pytest.mark.parametrize( "display_max_rows, n_vars, n_attr", [(50, 40, 30), (35, 40, 30), (11, 40, 30), (1, 40, 30)], ) def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None: long_name = "long_name" a = np.char.add(long_name, np.arange(0, n_vars).astype(str)) b = np.char.add("attr_", np.arange(0, n_attr).astype(str)) c = np.char.add("coord", np.arange(0, n_vars).astype(str)) attrs = dict.fromkeys(b, 2) coords = {_c: np.array([0, 1], dtype=np.uint64) for _c in c} data_vars = dict() for v, _c in zip(a, coords.items(), strict=True): data_vars[v] = xr.DataArray( name=v, data=np.array([3, 4], dtype=np.uint64), dims=[_c[0]], coords=dict([_c]), ) ds = xr.Dataset(data_vars) ds.attrs = attrs with xr.set_options(display_max_rows=display_max_rows): # Parse the data_vars print and show only data_vars rows: summary = formatting.dataset_repr(ds).split("\n") summary = [v for v in summary if long_name in v] # The length should be less than or equal to display_max_rows: len_summary = len(summary) data_vars_print_size = min(display_max_rows, len_summary) assert len_summary == data_vars_print_size summary = formatting.data_vars_repr(ds.data_vars).split("\n") summary = [v for v in summary if long_name in v] # The length should be equal to the number of data variables len_summary = len(summary) assert len_summary == n_vars summary = formatting.coords_repr(ds.coords).split("\n") summary = [v for v in summary if "coord" in v] # The length should be equal to the number of data variables len_summary = len(summary) assert len_summary == n_vars with xr.set_options( display_max_rows=display_max_rows, display_expand_coords=False, display_expand_data_vars=False, display_expand_attrs=False, ): actual = formatting.dataset_repr(ds) col_width = formatting._calculate_col_width(ds.variables) dims_start = formatting.pretty_print("Dimensions:", col_width) dims_values = formatting.dim_summary_limited( ds.sizes, col_width=col_width + 1, max_rows=display_max_rows ) expected_size = "1kB" expected = f"""\ Size: {expected_size} {dims_start}({dims_values}) Coordinates: ({n_vars}) Data variables: ({n_vars}) Attributes: ({n_attr})""" expected = dedent(expected) assert actual == expected def test__mapping_repr_recursive() -> None: # GH:issue:7111 # direct recursion ds = xr.Dataset({"a": ("x", [1, 2, 3])}) ds.attrs["ds"] = ds formatting.dataset_repr(ds) # indirect recursion ds2 = xr.Dataset({"b": ("y", [1, 2, 3])}) ds.attrs["ds"] = ds2 ds2.attrs["ds"] = ds formatting.dataset_repr(ds2) def test__element_formatter(n_elements: int = 100) -> None: expected = """\ Dimensions without coordinates: dim_0: 3, dim_1: 3, dim_2: 3, dim_3: 3, dim_4: 3, dim_5: 3, dim_6: 3, dim_7: 3, dim_8: 3, dim_9: 3, dim_10: 3, dim_11: 3, dim_12: 3, dim_13: 3, dim_14: 3, dim_15: 3, dim_16: 3, dim_17: 3, dim_18: 3, dim_19: 3, dim_20: 3, dim_21: 3, dim_22: 3, dim_23: 3, ... 
dim_76: 3, dim_77: 3, dim_78: 3, dim_79: 3, dim_80: 3, dim_81: 3, dim_82: 3, dim_83: 3, dim_84: 3, dim_85: 3, dim_86: 3, dim_87: 3, dim_88: 3, dim_89: 3, dim_90: 3, dim_91: 3, dim_92: 3, dim_93: 3, dim_94: 3, dim_95: 3, dim_96: 3, dim_97: 3, dim_98: 3, dim_99: 3""" expected = dedent(expected) intro = "Dimensions without coordinates: " elements = [ f"{k}: {v}" for k, v in {f"dim_{k}": 3 for k in np.arange(n_elements)}.items() ] values = xr.core.formatting._element_formatter( elements, col_width=len(intro), max_rows=12 ) actual = intro + values assert expected == actual def test_lazy_array_wont_compute() -> None: from xarray.core.indexing import LazilyIndexedArray class LazilyIndexedArrayNotComputable(LazilyIndexedArray): def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray: raise NotImplementedError("Computing this array is not possible.") arr = LazilyIndexedArrayNotComputable(np.array([1, 2])) var = xr.DataArray(arr) # These will crash if var.data are converted to numpy arrays: var.__repr__() var._repr_html_() @pytest.mark.parametrize("as_dataset", (False, True)) def test_format_xindexes_none(as_dataset: bool) -> None: # ensure repr for empty xindexes can be displayed #8367 expected = """\ Indexes: *empty*""" expected = dedent(expected) obj: xr.DataArray | xr.Dataset = xr.DataArray() obj = obj._to_temp_dataset() if as_dataset else obj actual = repr(obj.xindexes) assert actual == expected @pytest.mark.parametrize("as_dataset", (False, True)) def test_format_xindexes(as_dataset: bool) -> None: expected = """\ Indexes: x PandasIndex""" expected = dedent(expected) obj: xr.DataArray | xr.Dataset = xr.DataArray([1], coords={"x": [1]}) obj = obj._to_temp_dataset() if as_dataset else obj actual = repr(obj.xindexes) assert actual == expected @requires_cftime def test_empty_cftimeindex_repr() -> None: index = xr.coding.cftimeindex.CFTimeIndex([]) expected = """\ Indexes: time CFTimeIndex([], dtype='object', length=0, calendar=None, freq=None)""" expected = dedent(expected) da = xr.DataArray([], coords={"time": index}) actual = repr(da.indexes) assert actual == expected def test_display_nbytes() -> None: xds = xr.Dataset( { "foo": np.arange(1200, dtype=np.int16), "bar": np.arange(111, dtype=np.int16), } ) # Note: int16 is used to ensure that dtype is shown in the # numpy array representation for all OSes included Windows actual = repr(xds) expected = """ Size: 3kB Dimensions: (foo: 1200, bar: 111) Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 * bar (bar) int16 222B 0 1 2 3 4 5 6 7 ... 104 105 106 107 108 109 110 Data variables: *empty* """.strip() assert actual == expected actual = repr(xds["foo"]) array_repr = repr(xds.foo.data).replace("\n ", "") expected = f""" Size: 2kB {array_repr} Coordinates: * foo (foo) int16 2kB 0 1 2 3 4 5 6 ... 1194 1195 1196 1197 1198 1199 """.strip() assert actual == expected def test_array_repr_dtypes(): # These dtypes are expected to be represented similarly # on Ubuntu, macOS and Windows environments of the CI. 
# Unsigned integer could be used as easy replacements # for tests where the data-type does not matter, # but the repr does, including the size # (size of a int == size of an uint) # Signed integer dtypes ds = xr.DataArray(np.array([0], dtype="int8"), dims="x") actual = repr(ds) expected = """ Size: 1B array([0], dtype=int8) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="int16"), dims="x") actual = repr(ds) expected = """ Size: 2B array([0], dtype=int16) Dimensions without coordinates: x """.strip() assert actual == expected # Unsigned integer dtypes ds = xr.DataArray(np.array([0], dtype="uint8"), dims="x") actual = repr(ds) expected = """ Size: 1B array([0], dtype=uint8) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="uint16"), dims="x") actual = repr(ds) expected = """ Size: 2B array([0], dtype=uint16) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="uint32"), dims="x") actual = repr(ds) expected = """ Size: 4B array([0], dtype=uint32) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="uint64"), dims="x") actual = repr(ds) expected = """ Size: 8B array([0], dtype=uint64) Dimensions without coordinates: x """.strip() assert actual == expected # Float dtypes ds = xr.DataArray(np.array([0.0]), dims="x") actual = repr(ds) expected = """ Size: 8B array([0.]) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="float16"), dims="x") actual = repr(ds) expected = """ Size: 2B array([0.], dtype=float16) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="float32"), dims="x") actual = repr(ds) expected = """ Size: 4B array([0.], dtype=float32) Dimensions without coordinates: x """.strip() assert actual == expected ds = xr.DataArray(np.array([0], dtype="float64"), dims="x") actual = repr(ds) expected = """ Size: 8B array([0.]) Dimensions without coordinates: x """.strip() assert actual == expected # Signed integer dtypes array = np.array([0]) ds = xr.DataArray(array, dims="x") actual = repr(ds) expected = f""" Size: {array.dtype.itemsize}B {array!r} Dimensions without coordinates: x """.strip() assert actual == expected array = np.array([0], dtype="int32") ds = xr.DataArray(array, dims="x") actual = repr(ds) expected = f""" Size: 4B {array!r} Dimensions without coordinates: x """.strip() assert actual == expected array = np.array([0], dtype="int64") ds = xr.DataArray(array, dims="x") actual = repr(ds) expected = f""" Size: 8B {array!r} Dimensions without coordinates: x """.strip() assert actual == expected def test_repr_pandas_range_index() -> None: # lazy data repr but values shown in inline repr xidx = xr.indexes.PandasIndex(pd.RangeIndex(10), "x") ds = xr.Dataset(coords=xr.Coordinates.from_xindex(xidx)) actual = repr(ds.x) expected = """ Size: 80B [10 values with dtype=int64] Coordinates: * x (x) int64 80B 0 1 2 3 4 5 6 7 8 9 """.strip() assert actual == expected def test_repr_pandas_multi_index() -> None: # lazy data repr but values shown in inline repr midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["foo", "bar"]) coords = xr.Coordinates.from_pandas_multiindex(midx, "x") ds = xr.Dataset(coords=coords) actual = repr(ds.x) expected = """ Size: 32B [4 values with dtype=object] Coordinates: * x (x) object 32B MultiIndex * foo 
(x) object 32B 'a' 'a' 'b' 'b' * bar (x) int64 32B 1 2 1 2 """.strip() assert actual == expected actual = repr(ds.foo) expected = """ Size: 32B [4 values with dtype=object] Coordinates: * x (x) object 32B MultiIndex * foo (x) object 32B 'a' 'a' 'b' 'b' * bar (x) int64 32B 1 2 1 2 """.strip() assert actual == expected xarray-2025.09.0/xarray/tests/test_formatting_html.py000066400000000000000000000350241505620616400226270ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pandas as pd import pytest import xarray as xr from xarray.core import formatting_html as fh from xarray.core.coordinates import Coordinates @pytest.fixture def dataarray() -> xr.DataArray: return xr.DataArray(np.random.default_rng(0).random((4, 6))) @pytest.fixture def dask_dataarray(dataarray: xr.DataArray) -> xr.DataArray: pytest.importorskip("dask") return dataarray.chunk() @pytest.fixture def multiindex() -> xr.Dataset: midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) midx_coords = Coordinates.from_pandas_multiindex(midx, "x") return xr.Dataset({}, midx_coords) @pytest.fixture def dataset() -> xr.Dataset: times = pd.date_range("2000-01-01", "2001-12-31", name="time") annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28)) base = 10 + 15 * annual_cycle.reshape(-1, 1) tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3) tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3) return xr.Dataset( { "tmin": (("time", "location"), tmin_values), "tmax": (("time", "location"), tmax_values), }, {"time": times, "location": ["", "IN", "IL"]}, attrs={"description": "Test data."}, ) def test_short_data_repr_html(dataarray: xr.DataArray) -> None: data_repr = fh.short_data_repr_html(dataarray) assert data_repr.startswith("
<pre>array")
    
    
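# A Dataset variable keyed by a non-string name (here the integer 2) must not break the HTML repr.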
    def test_short_data_repr_html_non_str_keys(dataset: xr.Dataset) -> None:
        ds = dataset.assign({2: lambda x: x["tmin"]})
        fh.dataset_repr(ds)
    
    
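# Dask-backed arrays delegate the data section to dask's own _repr_html_.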
    def test_short_data_repr_html_dask(dask_dataarray: xr.DataArray) -> None:
        assert hasattr(dask_dataarray.data, "_repr_html_")
        data_repr = fh.short_data_repr_html(dask_dataarray)
        assert data_repr == dask_dataarray.data._repr_html_()
    
    
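# With no dimensions at all, format_dims should return an empty string.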
    def test_format_dims_no_dims() -> None:
        dims: dict = {}
        dims_with_index: list = []
        formatted = fh.format_dims(dims, dims_with_index)
        assert formatted == ""
    
    
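# Dimension names containing HTML metacharacters must be escaped in the rendered output.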
    def test_format_dims_unsafe_dim_name() -> None:
    dims = {"<x>": 3, "y": 2}
        dims_with_index: list = []
        formatted = fh.format_dims(dims, dims_with_index)
    assert "&lt;x&gt;" in formatted
    
    
    def test_format_dims_non_index() -> None:
        dims, dims_with_index = {"x": 3, "y": 2}, ["time"]
        formatted = fh.format_dims(dims, dims_with_index)
        assert "class='xr-has-index'" not in formatted
    
    
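# Only dimensions that are backed by an index get the 'xr-has-index' CSS class.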
    def test_format_dims_index() -> None:
        dims, dims_with_index = {"x": 3, "y": 2}, ["x"]
        formatted = fh.format_dims(dims, dims_with_index)
        assert "class='xr-has-index'" in formatted
    
    
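# Attribute names and values are HTML-escaped when summarized (e.g. "<x>" becomes "&lt;x&gt;").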
    def test_summarize_attrs_with_unsafe_attr_name_and_value() -> None:
    attrs = {"<x>": 3, "y": "<pd.DataFrame>"}
        formatted = fh.summarize_attrs(attrs)
    assert "<dt><span>&lt;x&gt; :</span></dt>" in formatted
    assert "<dt><span>y :</span></dt>" in formatted
    assert "<dd>3</dd>" in formatted
    assert "<dd>&lt;pd.DataFrame&gt;</dd>
    " in formatted def test_repr_of_dataarray(dataarray: xr.DataArray) -> None: formatted = fh.array_repr(dataarray) assert "dim_0" in formatted # has an expanded data section assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 1 # coords, indexes and attrs don't have an items so they'll be be disabled and collapsed assert ( formatted.count("class='xr-section-summary-in' type='checkbox' disabled >") == 3 ) with xr.set_options(display_expand_data=False): formatted = fh.array_repr(dataarray) assert "dim_0" in formatted # has a collapsed data section assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 0 # coords, indexes and attrs don't have an items so they'll be be disabled and collapsed assert ( formatted.count("class='xr-section-summary-in' type='checkbox' disabled >") == 3 ) def test_repr_of_multiindex(multiindex: xr.Dataset) -> None: formatted = fh.dataset_repr(multiindex) assert "(x)" in formatted def test_repr_of_dataset(dataset: xr.Dataset) -> None: formatted = fh.dataset_repr(dataset) # coords, attrs, and data_vars are expanded assert ( formatted.count("class='xr-section-summary-in' type='checkbox' checked>") == 3 ) # indexes is collapsed assert formatted.count("class='xr-section-summary-in' type='checkbox' >") == 1 assert "<U4" in formatted or ">U4" in formatted assert "<IA>" in formatted with xr.set_options( display_expand_coords=False, display_expand_data_vars=False, display_expand_attrs=False, display_expand_indexes=True, ): formatted = fh.dataset_repr(dataset) # coords, attrs, and data_vars are collapsed, indexes is expanded assert ( formatted.count("class='xr-section-summary-in' type='checkbox' checked>") == 1 ) assert "<U4" in formatted or ">U4" in formatted assert "<IA>" in formatted def test_repr_text_fallback(dataset: xr.Dataset) -> None: formatted = fh.dataset_repr(dataset) # Just test that the "pre" block used for fallback to plain text is present. assert "
    " in formatted
    
    
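# Variable objects also expose _repr_html_ when display_style is set to "html".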
    def test_variable_repr_html() -> None:
        v = xr.Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
        assert hasattr(v, "_repr_html_")
        with xr.set_options(display_style="html"):
            html = v._repr_html_().strip()
        # We don't do a complete string identity since
        # html output is probably subject to change, is long and... reasons.
        # Just test that something reasonable was produced.
    assert html.startswith("<div") and html.endswith("</div>")
        assert "xarray.Variable" in html
    
    
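# Integer (non-string) attribute keys and variable names must render in the Dataset HTML repr.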
    def test_repr_of_nonstr_dataset(dataset: xr.Dataset) -> None:
        ds = dataset.copy()
        ds.attrs[1] = "Test value"
        ds[2] = ds["tmin"]
        formatted = fh.dataset_repr(ds)
    assert "<dt><span>1 :</span></dt><dd>Test value</dd>" in formatted
    assert "
2" in formatted


def test_repr_of_nonstr_dataarray(dataarray: xr.DataArray) -> None:
    da = dataarray.rename(dim_0=15)
    da.attrs[1] = "value"
    formatted = fh.array_repr(da)
    assert "<dt><span>1 :</span></dt><dd>value</dd>" in formatted
    assert "
<li><span>15</span>: 4</li>
" in formatted


def test_nonstr_variable_repr_html() -> None:
    v = xr.Variable(["time", 10], [[1, 2, 3], [4, 5, 6]], {22: "bar"})
    assert hasattr(v, "_repr_html_")
    with xr.set_options(display_style="html"):
        html = v._repr_html_().strip()
    assert "<dt><span>22 :</span></dt><dd>bar</dd>" in html
    assert "
<li><span>10</span>: 3</li>
  • " in html @pytest.fixture(scope="module", params=["some html", "some other html"]) def repr(request): return request.param class Test_summarize_datatree_children: """ Unit tests for summarize_datatree_children. """ func = staticmethod(fh.summarize_datatree_children) @pytest.fixture(scope="class") def childfree_tree_factory(self): """ Fixture for a child-free DataTree factory. """ from random import randint def _childfree_tree_factory(): return xr.DataTree( dataset=xr.Dataset({"z": ("y", [randint(1, 100) for _ in range(3)])}) ) return _childfree_tree_factory @pytest.fixture(scope="class") def childfree_tree(self, childfree_tree_factory): """ Fixture for a child-free DataTree. """ return childfree_tree_factory() @pytest.fixture def mock_datatree_node_repr(self, monkeypatch): """ Apply mocking for datatree_node_repr. """ def mock(group_title, dt): """ Mock with a simple result """ return group_title + " " + str(id(dt)) monkeypatch.setattr(fh, "datatree_node_repr", mock) @pytest.fixture def mock_wrap_datatree_repr(self, monkeypatch): """ Apply mocking for _wrap_datatree_repr. """ def mock(r, *, end, **kwargs): """ Mock by appending "end" or "not end". """ return r + " " + ("end" if end else "not end") + "//" monkeypatch.setattr(fh, "_wrap_datatree_repr", mock) def test_empty_mapping(self): """ Test with an empty mapping of children. """ children: dict[str, xr.DataTree] = {} assert self.func(children) == ( "
    " "
    " ) def test_one_child( self, childfree_tree, mock_wrap_datatree_repr, mock_datatree_node_repr ): """ Test with one child. Uses a mock of _wrap_datatree_repr and _datatree_node_repr to essentially mock the inline lambda function "lines_callback". """ # Create mapping of children children = {"a": childfree_tree} # Expect first line to be produced from the first child, and # wrapped as the last child first_line = f"a {id(children['a'])} end//" assert self.func(children) == ( "
    " f"{first_line}" "
    " ) def test_two_children( self, childfree_tree_factory, mock_wrap_datatree_repr, mock_datatree_node_repr ): """ Test with two level deep children. Uses a mock of _wrap_datatree_repr and datatree_node_repr to essentially mock the inline lambda function "lines_callback". """ # Create mapping of children children = {"a": childfree_tree_factory(), "b": childfree_tree_factory()} # Expect first line to be produced from the first child, and # wrapped as _not_ the last child first_line = f"a {id(children['a'])} not end//" # Expect second line to be produced from the second child, and # wrapped as the last child second_line = f"b {id(children['b'])} end//" assert self.func(children) == ( "
    " f"{first_line}" f"{second_line}" "
    " ) class TestDataTreeTruncatesNodes: def test_many_nodes(self) -> None: # construct a datatree with 500 nodes number_of_files = 20 number_of_groups = 25 tree_dict = {} for f in range(number_of_files): for g in range(number_of_groups): tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g}) tree = xr.DataTree.from_dict(tree_dict) with xr.set_options(display_style="html"): result = tree._repr_html_() assert "6/20" in result for i in range(number_of_files): if i < 3 or i >= (number_of_files - 3): assert f"file_{i}
    " in result else: assert f"file_{i}
    " not in result assert "6/25" in result for i in range(number_of_groups): if i < 3 or i >= (number_of_groups - 3): assert f"group_{i}" in result else: assert f"group_{i}" not in result with xr.set_options(display_style="html", display_max_children=3): result = tree._repr_html_() assert "3/20" in result for i in range(number_of_files): if i < 2 or i >= (number_of_files - 1): assert f"file_{i}" in result else: assert f"file_{i}" not in result assert "3/25" in result for i in range(number_of_groups): if i < 2 or i >= (number_of_groups - 1): assert f"group_{i}" in result else: assert f"group_{i}" not in result class TestDataTreeInheritance: def test_inherited_section_present(self) -> None: dt = xr.DataTree.from_dict( { "/": None, "a": None, } ) with xr.set_options(display_style="html"): html = dt._repr_html_().strip() # checks that the section appears somewhere assert "Inherited coordinates" in html # TODO how can we assert that the Inherited coordinates section does not appear in the child group? # with xr.set_options(display_style="html"): # child_html = dt["a"]._repr_html_().strip() # assert "Inherited coordinates" not in child_html class Test__wrap_datatree_repr: """ Unit tests for _wrap_datatree_repr. """ func = staticmethod(fh._wrap_datatree_repr) def test_end(self, repr): """ Test with end=True. """ r = self.func(repr, end=True) assert r == ( "
    " "
    " "
    " "
    " "
    " "
    " f"{repr}" "
    " "
    " ) def test_not_end(self, repr): """ Test with end=False. """ r = self.func(repr, end=False) assert r == ( "
    " "
    " "
    " "
    " "
    " "
    " f"{repr}" "
    " "
    " ) xarray-2025.09.0/xarray/tests/test_groupby.py000066400000000000000000003747261505620616400211370ustar00rootroot00000000000000from __future__ import annotations import datetime import operator import warnings from itertools import pairwise from typing import Literal, cast from unittest import mock import numpy as np import pandas as pd import pytest from packaging.version import Version import xarray as xr from xarray import DataArray, Dataset, Variable, date_range from xarray.core.groupby import _consolidate_slices from xarray.core.types import InterpOptions, ResampleCompatible from xarray.groupers import ( BinGrouper, EncodedGroups, Grouper, SeasonGrouper, SeasonResampler, TimeResampler, UniqueGrouper, season_to_month_tuple, ) from xarray.namedarray.pycompat import is_chunked_array from xarray.structure.alignment import broadcast from xarray.tests import ( _ALL_CALENDARS, InaccessibleArray, assert_allclose, assert_equal, assert_identical, create_test_data, has_cftime, has_dask, has_dask_ge_2024_08_1, has_flox, has_pandas_ge_2_2, raise_if_dask_computes, requires_cftime, requires_dask, requires_dask_ge_2024_08_1, requires_flox, requires_flox_0_9_12, requires_pandas_ge_2_2, requires_scipy, ) @pytest.fixture def dataset() -> xr.Dataset: ds = xr.Dataset( { "foo": (("x", "y", "z"), np.random.randn(3, 4, 2)), "baz": ("x", ["e", "f", "g"]), "cat": ("y", pd.Categorical(["cat1", "cat2", "cat2", "cat1"])), }, {"x": ("x", ["a", "b", "c"], {"name": "x"}), "y": [1, 2, 3, 4], "z": [1, 2]}, ) ds["boo"] = (("z", "y"), [["f", "g", "h", "j"]] * 2) return ds @pytest.fixture def array(dataset) -> xr.DataArray: return dataset["foo"] def test_consolidate_slices() -> None: assert _consolidate_slices([slice(3), slice(3, 5)]) == [slice(5)] assert _consolidate_slices([slice(2, 3), slice(3, 6)]) == [slice(2, 6)] assert _consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)]) == [slice(2, 6, 1)] slices = [slice(2, 3), slice(5, 6)] assert _consolidate_slices(slices) == slices # ignore type because we're checking for an error anyway with pytest.raises(ValueError): _consolidate_slices([slice(3), 4]) # type: ignore[list-item] @pytest.mark.filterwarnings("ignore:return type") def test_groupby_dims_property(dataset) -> None: with pytest.warns(FutureWarning, match="The return type of"): assert dataset.groupby("x").dims == dataset.isel(x=[1]).dims with pytest.warns(FutureWarning, match="The return type of"): assert dataset.groupby("y").dims == dataset.isel(y=[1]).dims assert tuple(dataset.groupby("x").dims) == tuple(dataset.isel(x=slice(1, 2)).dims) assert tuple(dataset.groupby("y").dims) == tuple(dataset.isel(y=slice(1, 2)).dims) dataset = dataset.drop_vars(["cat"]) stacked = dataset.stack({"xy": ("x", "y")}) assert tuple(stacked.groupby("xy").dims) == tuple(stacked.isel(xy=[0]).dims) def test_groupby_sizes_property(dataset) -> None: assert dataset.groupby("x").sizes == dataset.isel(x=[1]).sizes assert dataset.groupby("y").sizes == dataset.isel(y=[1]).sizes dataset = dataset.drop_vars("cat") stacked = dataset.stack({"xy": ("x", "y")}) assert stacked.groupby("xy").sizes == stacked.isel(xy=[0]).sizes def test_multi_index_groupby_map(dataset) -> None: # regression test for GH873 ds = dataset.isel(z=1, drop=True)[["foo"]] expected = 2 * ds actual = ( ds.stack(space=["x", "y"]) .groupby("space") .map(lambda x: 2 * x) .unstack("space") ) assert_equal(expected, actual) @pytest.mark.parametrize("grouper", [dict(group="x"), dict(x=UniqueGrouper())]) def test_reduce_numeric_only(dataset, grouper: dict) -> None: gb = 
dataset.groupby(**grouper) with xr.set_options(use_flox=False): expected = gb.sum() with xr.set_options(use_flox=True): actual = gb.sum() assert_identical(expected, actual) def test_multi_index_groupby_sum() -> None: # regression test for GH873 ds = xr.Dataset( {"foo": (("x", "y", "z"), np.ones((3, 4, 2)))}, {"x": ["a", "b", "c"], "y": [1, 2, 3, 4]}, ) expected = ds.sum("z") actual = ds.stack(space=["x", "y"]).groupby("space").sum("z").unstack("space") assert_equal(expected, actual) with pytest.raises(NotImplementedError): actual = ( ds.stack(space=["x", "y"]) .groupby(space=UniqueGrouper(), z=UniqueGrouper()) .sum("z") .unstack("space") ) assert_equal(expected, ds) if not has_pandas_ge_2_2: # the next line triggers a mysterious multiindex error on pandas 2.0 return actual = ds.stack(space=["x", "y"]).groupby("space").sum(...).unstack("space") assert_equal(expected, actual) @requires_pandas_ge_2_2 def test_multi_index_propagation() -> None: # regression test for GH9648 times = pd.date_range("2023-01-01", periods=4) locations = ["A", "B"] data = [[0.5, 0.7], [0.6, 0.5], [0.4, 0.6], [0.4, 0.9]] da = xr.DataArray( data, dims=["time", "location"], coords={"time": times, "location": locations} ) da = da.stack(multiindex=["time", "location"]) grouped = da.groupby("multiindex") with xr.set_options(use_flox=True): actual = grouped.sum() with xr.set_options(use_flox=False): expected = grouped.first() assert_identical(actual, expected) def test_groupby_da_datetime() -> None: # test groupby with a DataArray of dtype datetime for GH1132 # create test data times = pd.date_range("2000-01-01", periods=4) foo = xr.DataArray([1, 2, 3, 4], coords=dict(time=times), dims="time") # create test index reference_dates = [times[0], times[2]] labels = reference_dates[0:1] * 2 + reference_dates[1:2] * 2 ind = xr.DataArray( labels, coords=dict(time=times), dims="time", name="reference_date" ) g = foo.groupby(ind) actual = g.sum(dim="time") expected = xr.DataArray( [3, 7], coords=dict(reference_date=reference_dates), dims="reference_date" ) assert_equal(expected, actual) def test_groupby_duplicate_coordinate_labels() -> None: # fix for https://stackoverflow.com/questions/38065129 array = xr.DataArray([1, 2, 3], [("x", [1, 1, 2])]) expected = xr.DataArray([3, 3], [("x", [1, 2])]) actual = array.groupby("x").sum() assert_equal(expected, actual) def test_groupby_input_mutation() -> None: # regression test for GH2153 array = xr.DataArray([1, 2, 3], [("x", [2, 2, 1])]) array_copy = array.copy() expected = xr.DataArray([3, 3], [("x", [1, 2])]) actual = array.groupby("x").sum() assert_identical(expected, actual) assert_identical(array, array_copy) # should not modify inputs @pytest.mark.parametrize("use_flox", [True, False]) def test_groupby_indexvariable(use_flox: bool) -> None: # regression test for GH7919 array = xr.DataArray([1, 2, 3], [("x", [2, 2, 1])]) iv = xr.IndexVariable(dims="x", data=pd.Index(array.x.values)) with xr.set_options(use_flox=use_flox): actual = array.groupby(iv).sum() actual = array.groupby(iv).sum() expected = xr.DataArray([3, 3], [("x", [1, 2])]) assert_identical(expected, actual) @pytest.mark.parametrize( "obj", [ xr.DataArray([1, 2, 3, 4, 5, 6], [("x", [1, 1, 1, 2, 2, 2])]), xr.Dataset({"foo": ("x", [1, 2, 3, 4, 5, 6])}, {"x": [1, 1, 1, 2, 2, 2]}), ], ) def test_groupby_map_shrink_groups(obj) -> None: expected = obj.isel(x=[0, 1, 3, 4]) actual = obj.groupby("x").map(lambda f: f.isel(x=[0, 1])) assert_identical(expected, actual) @pytest.mark.parametrize( "obj", [ xr.DataArray([1, 2, 3], [("x", [1, 
2, 2])]), xr.Dataset({"foo": ("x", [1, 2, 3])}, {"x": [1, 2, 2]}), ], ) def test_groupby_map_change_group_size(obj) -> None: def func(group): if group.sizes["x"] == 1: result = group.isel(x=[0, 0]) else: result = group.isel(x=[0]) return result expected = obj.isel(x=[0, 0, 1]) actual = obj.groupby("x").map(func) assert_identical(expected, actual) def test_da_groupby_map_func_args() -> None: def func(arg1, arg2, arg3=0): return arg1 + arg2 + arg3 array = xr.DataArray([1, 1, 1], [("x", [1, 2, 3])]) expected = xr.DataArray([3, 3, 3], [("x", [1, 2, 3])]) actual = array.groupby("x").map(func, args=(1,), arg3=1) assert_identical(expected, actual) def test_ds_groupby_map_func_args() -> None: def func(arg1, arg2, arg3=0): return arg1 + arg2 + arg3 dataset = xr.Dataset({"foo": ("x", [1, 1, 1])}, {"x": [1, 2, 3]}) expected = xr.Dataset({"foo": ("x", [3, 3, 3])}, {"x": [1, 2, 3]}) actual = dataset.groupby("x").map(func, args=(1,), arg3=1) assert_identical(expected, actual) def test_da_groupby_empty() -> None: empty_array = xr.DataArray([], dims="dim") with pytest.raises(ValueError): empty_array.groupby("dim") @requires_dask def test_dask_da_groupby_quantile() -> None: # Scalar quantile expected = xr.DataArray( data=[2, 5], coords={"x": [1, 2], "quantile": 0.5}, dims="x" ) array = xr.DataArray( data=[1, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x" ) # will work blockwise with flox actual = array.chunk(x=3).groupby("x").quantile(0.5) assert_identical(expected, actual) # will work blockwise with flox actual = array.chunk(x=-1).groupby("x").quantile(0.5) assert_identical(expected, actual) @requires_dask def test_dask_da_groupby_median() -> None: expected = xr.DataArray(data=[2, 5], coords={"x": [1, 2]}, dims="x") array = xr.DataArray( data=[1, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x" ) with xr.set_options(use_flox=False): actual = array.chunk(x=1).groupby("x").median() assert_identical(expected, actual) with xr.set_options(use_flox=True): actual = array.chunk(x=1).groupby("x").median() assert_identical(expected, actual) # will work blockwise with flox actual = array.chunk(x=3).groupby("x").median() assert_identical(expected, actual) # will work blockwise with flox actual = array.chunk(x=-1).groupby("x").median() assert_identical(expected, actual) @pytest.mark.parametrize("use_flox", [pytest.param(True, marks=requires_flox), False]) def test_da_groupby_quantile(use_flox: bool) -> None: array = xr.DataArray( data=[1, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x" ) # Scalar quantile expected = xr.DataArray( data=[2, 5], coords={"x": [1, 2], "quantile": 0.5}, dims="x" ) with xr.set_options(use_flox=use_flox): actual = array.groupby("x").quantile(0.5) assert_identical(expected, actual) # Vector quantile expected = xr.DataArray( data=[[1, 3], [4, 6]], coords={"x": [1, 2], "quantile": [0, 1]}, dims=("x", "quantile"), ) with xr.set_options(use_flox=use_flox): actual = array.groupby("x").quantile([0, 1]) assert_identical(expected, actual) array = xr.DataArray( data=[np.nan, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x" ) for skipna in (True, False, None): e = [np.nan, 5] if skipna is False else [2.5, 5] expected = xr.DataArray(data=e, coords={"x": [1, 2], "quantile": 0.5}, dims="x") with xr.set_options(use_flox=use_flox): actual = array.groupby("x").quantile(0.5, skipna=skipna) assert_identical(expected, actual) # Multiple dimensions array = xr.DataArray( data=[[1, 11, 26], [2, 12, 22], [3, 13, 23], [4, 16, 24], [5, 15, 25]], coords={"x": [1, 1, 1, 2, 2], 
"y": [0, 0, 1]}, dims=("x", "y"), ) actual_x = array.groupby("x").quantile(0, dim=...) expected_x = xr.DataArray( data=[1, 4], coords={"x": [1, 2], "quantile": 0}, dims="x" ) assert_identical(expected_x, actual_x) actual_y = array.groupby("y").quantile(0, dim=...) expected_y = xr.DataArray( data=[1, 22], coords={"y": [0, 1], "quantile": 0}, dims="y" ) assert_identical(expected_y, actual_y) actual_xx = array.groupby("x").quantile(0) expected_xx = xr.DataArray( data=[[1, 11, 22], [4, 15, 24]], coords={"x": [1, 2], "y": [0, 0, 1], "quantile": 0}, dims=("x", "y"), ) assert_identical(expected_xx, actual_xx) actual_yy = array.groupby("y").quantile(0) expected_yy = xr.DataArray( data=[[1, 26], [2, 22], [3, 23], [4, 24], [5, 25]], coords={"x": [1, 1, 1, 2, 2], "y": [0, 1], "quantile": 0}, dims=("x", "y"), ) assert_identical(expected_yy, actual_yy) times = pd.date_range("2000-01-01", periods=365) x = [0, 1] foo = xr.DataArray( np.reshape(np.arange(365 * 2), (365, 2)), coords={"time": times, "x": x}, dims=("time", "x"), ) g = foo.groupby(foo.time.dt.month) actual = g.quantile(0, dim=...) expected = xr.DataArray( data=[ 0.0, 62.0, 120.0, 182.0, 242.0, 304.0, 364.0, 426.0, 488.0, 548.0, 610.0, 670.0, ], coords={"month": np.arange(1, 13), "quantile": 0}, dims="month", ) assert_identical(expected, actual) actual = g.quantile(0, dim="time")[:2] expected = xr.DataArray( data=[[0.0, 1], [62.0, 63]], coords={"month": [1, 2], "x": [0, 1], "quantile": 0}, dims=("month", "x"), ) assert_identical(expected, actual) # method keyword array = xr.DataArray(data=[1, 2, 3, 4], coords={"x": [1, 1, 2, 2]}, dims="x") expected = xr.DataArray( data=[1, 3], coords={"x": [1, 2], "quantile": 0.5}, dims="x" ) actual = array.groupby("x").quantile(0.5, method="lower") assert_identical(expected, actual) def test_ds_groupby_quantile() -> None: ds = xr.Dataset( data_vars={"a": ("x", [1, 2, 3, 4, 5, 6])}, coords={"x": [1, 1, 1, 2, 2, 2]} ) # Scalar quantile expected = xr.Dataset( data_vars={"a": ("x", [2, 5])}, coords={"quantile": 0.5, "x": [1, 2]} ) actual = ds.groupby("x").quantile(0.5) assert_identical(expected, actual) # Vector quantile expected = xr.Dataset( data_vars={"a": (("x", "quantile"), [[1, 3], [4, 6]])}, coords={"x": [1, 2], "quantile": [0, 1]}, ) actual = ds.groupby("x").quantile([0, 1]) assert_identical(expected, actual) ds = xr.Dataset( data_vars={"a": ("x", [np.nan, 2, 3, 4, 5, 6])}, coords={"x": [1, 1, 1, 2, 2, 2]}, ) for skipna in (True, False, None): e = [np.nan, 5] if skipna is False else [2.5, 5] expected = xr.Dataset( data_vars={"a": ("x", e)}, coords={"quantile": 0.5, "x": [1, 2]} ) actual = ds.groupby("x").quantile(0.5, skipna=skipna) assert_identical(expected, actual) # Multiple dimensions ds = xr.Dataset( data_vars={ "a": ( ("x", "y"), [[1, 11, 26], [2, 12, 22], [3, 13, 23], [4, 16, 24], [5, 15, 25]], ) }, coords={"x": [1, 1, 1, 2, 2], "y": [0, 0, 1]}, ) actual_x = ds.groupby("x").quantile(0, dim=...) expected_x = xr.Dataset({"a": ("x", [1, 4])}, coords={"x": [1, 2], "quantile": 0}) assert_identical(expected_x, actual_x) actual_y = ds.groupby("y").quantile(0, dim=...) 
expected_y = xr.Dataset({"a": ("y", [1, 22])}, coords={"y": [0, 1], "quantile": 0}) assert_identical(expected_y, actual_y) actual_xx = ds.groupby("x").quantile(0) expected_xx = xr.Dataset( {"a": (("x", "y"), [[1, 11, 22], [4, 15, 24]])}, coords={"x": [1, 2], "y": [0, 0, 1], "quantile": 0}, ) assert_identical(expected_xx, actual_xx) actual_yy = ds.groupby("y").quantile(0) expected_yy = xr.Dataset( {"a": (("x", "y"), [[1, 26], [2, 22], [3, 23], [4, 24], [5, 25]])}, coords={"x": [1, 1, 1, 2, 2], "y": [0, 1], "quantile": 0}, ).transpose() assert_identical(expected_yy, actual_yy) times = pd.date_range("2000-01-01", periods=365) x = [0, 1] foo = xr.Dataset( {"a": (("time", "x"), np.reshape(np.arange(365 * 2), (365, 2)))}, coords=dict(time=times, x=x), ) g = foo.groupby(foo.time.dt.month) actual = g.quantile(0, dim=...) expected = xr.Dataset( { "a": ( "month", [ 0.0, 62.0, 120.0, 182.0, 242.0, 304.0, 364.0, 426.0, 488.0, 548.0, 610.0, 670.0, ], ) }, coords={"month": np.arange(1, 13), "quantile": 0}, ) assert_identical(expected, actual) actual = g.quantile(0, dim="time").isel(month=slice(None, 2)) expected = xr.Dataset( data_vars={"a": (("month", "x"), [[0.0, 1], [62.0, 63]])}, coords={"month": [1, 2], "x": [0, 1], "quantile": 0}, ) assert_identical(expected, actual) ds = xr.Dataset(data_vars={"a": ("x", [1, 2, 3, 4])}, coords={"x": [1, 1, 2, 2]}) # method keyword expected = xr.Dataset( data_vars={"a": ("x", [1, 3])}, coords={"quantile": 0.5, "x": [1, 2]} ) actual = ds.groupby("x").quantile(0.5, method="lower") assert_identical(expected, actual) @pytest.mark.filterwarnings( "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning" ) @pytest.mark.parametrize("as_dataset", [False, True]) def test_groupby_quantile_interpolation_deprecated(as_dataset: bool) -> None: array = xr.DataArray(data=[1, 2, 3, 4], coords={"x": [1, 1, 2, 2]}, dims="x") arr: xr.DataArray | xr.Dataset arr = array.to_dataset(name="name") if as_dataset else array with pytest.warns( FutureWarning, match="`interpolation` argument to quantile was renamed to `method`", ): actual = arr.quantile(0.5, interpolation="lower") expected = arr.quantile(0.5, method="lower") assert_identical(actual, expected) with warnings.catch_warnings(record=True): with pytest.raises(TypeError, match="interpolation and method keywords"): arr.quantile(0.5, method="lower", interpolation="lower") def test_da_groupby_assign_coords() -> None: actual = xr.DataArray( [[3, 4, 5], [6, 7, 8]], dims=["y", "x"], coords={"y": range(2), "x": range(3)} ) actual1 = actual.groupby("x").assign_coords({"y": [-1, -2]}) actual2 = actual.groupby("x").assign_coords(y=[-1, -2]) expected = xr.DataArray( [[3, 4, 5], [6, 7, 8]], dims=["y", "x"], coords={"y": [-1, -2], "x": range(3)} ) assert_identical(expected, actual1) assert_identical(expected, actual2) repr_da = xr.DataArray( np.random.randn(10, 20, 6, 24), dims=["x", "y", "z", "t"], coords={ "z": ["a", "b", "c", "a", "b", "c"], "x": [1, 1, 1, 2, 2, 3, 4, 5, 3, 4], "t": xr.date_range("2001-01-01", freq="ME", periods=24, use_cftime=False), "month": ("t", list(range(1, 13)) * 2), }, ) @pytest.mark.parametrize("dim", ["x", "y", "z", "month"]) @pytest.mark.parametrize("obj", [repr_da, repr_da.to_dataset(name="a")]) def test_groupby_repr(obj, dim) -> None: actual = repr(obj.groupby(dim)) N = len(np.unique(obj[dim])) expected = f"<{obj.__class__.__name__}GroupBy" expected += f", grouped over 1 grouper(s), {N} groups in total:" expected += f"\n {dim!r}: UniqueGrouper({dim!r}), {N}/{N} groups with labels " if 
dim == "x": expected += "1, 2, 3, 4, 5>" elif dim == "y": expected += "0, 1, 2, 3, 4, 5, ..., 15, 16, 17, 18, 19>" elif dim == "z": expected += "'a', 'b', 'c'>" elif dim == "month": expected += "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12>" assert actual == expected @pytest.mark.parametrize("obj", [repr_da, repr_da.to_dataset(name="a")]) def test_groupby_repr_datetime(obj) -> None: actual = repr(obj.groupby("t.month")) expected = f"<{obj.__class__.__name__}GroupBy" expected += ", grouped over 1 grouper(s), 12 groups in total:\n" expected += " 'month': UniqueGrouper('month'), 12/12 groups with labels " expected += "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12>" assert actual == expected @pytest.mark.filterwarnings("ignore:No index created for dimension id:UserWarning") @pytest.mark.filterwarnings("ignore:invalid value encountered in divide:RuntimeWarning") @pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize( "chunk", [ pytest.param( dict(lat=1), marks=pytest.mark.skipif(not has_dask, reason="no dask") ), pytest.param( dict(lat=2, lon=2), marks=pytest.mark.skipif(not has_dask, reason="no dask") ), False, ], ) def test_groupby_drops_nans(shuffle: bool, chunk: Literal[False] | dict) -> None: if shuffle and chunk and not has_dask_ge_2024_08_1: pytest.skip() # GH2383 # nan in 2D data variable (requires stacking) ds = xr.Dataset( { "variable": (("lat", "lon", "time"), np.arange(60.0).reshape((4, 3, 5))), "id": (("lat", "lon"), np.arange(12.0).reshape((4, 3))), }, coords={"lat": np.arange(4), "lon": np.arange(3), "time": np.arange(5)}, ) ds["id"].values[0, 0] = np.nan ds["id"].values[3, 0] = np.nan ds["id"].values[-1, -1] = np.nan if chunk: ds["variable"] = ds["variable"].chunk(chunk) grouped = ds.groupby(ds.id) if shuffle: grouped = grouped.shuffle_to_chunks().groupby(ds.id) # non reduction operation expected1 = ds.copy() expected1.variable.data[0, 0, :] = np.nan expected1.variable.data[-1, -1, :] = np.nan expected1.variable.data[3, 0, :] = np.nan actual1 = grouped.map(lambda x: x).transpose(*ds.variable.dims) assert_identical(actual1, expected1) # reduction along grouped dimension actual2 = grouped.mean() stacked = ds.stack({"xy": ["lat", "lon"]}) expected2 = ( stacked.variable.where(stacked.id.notnull()) .rename({"xy": "id"}) .to_dataset() .reset_index("id", drop=True) .assign(id=stacked.id.values) .dropna("id") .transpose(*actual2.variable.dims) ) assert_identical(actual2, expected2) # reduction operation along a different dimension actual3 = grouped.mean("time") expected3 = ds.mean("time").where(ds.id.notnull()) assert_identical(actual3, expected3) # NaN in non-dimensional coordinate array = xr.DataArray([1, 2, 3], [("x", [1, 2, 3])]) array["x1"] = ("x", [1, 1, np.nan]) expected4 = xr.DataArray(3, [("x1", [1])]) actual4 = array.groupby("x1").sum() assert_equal(expected4, actual4) # NaT in non-dimensional coordinate array["t"] = ( "x", [ np.datetime64("2001-01-01"), np.datetime64("2001-01-01"), np.datetime64("NaT"), ], ) expected5 = xr.DataArray(3, [("t", [np.datetime64("2001-01-01")])]) actual5 = array.groupby("t").sum() assert_equal(expected5, actual5) # test for repeated coordinate labels array = xr.DataArray([0, 1, 2, 4, 3, 4], [("x", [np.nan, 1, 1, np.nan, 2, np.nan])]) expected6 = xr.DataArray([3, 3], [("x", [1, 2])]) actual6 = array.groupby("x").sum() assert_equal(expected6, actual6) def test_groupby_grouping_errors() -> None: dataset = xr.Dataset({"foo": ("x", [1, 1, 1])}, {"x": [1, 2, 3]}) with pytest.raises( ValueError, match=r"None of the data falls within bins with 
edges" ): dataset.groupby_bins("x", bins=[0.1, 0.2, 0.3]) with pytest.raises( ValueError, match=r"None of the data falls within bins with edges" ): dataset.to_dataarray().groupby_bins("x", bins=[0.1, 0.2, 0.3]) with pytest.raises(ValueError, match=r"All bin edges are NaN."): dataset.groupby_bins("x", bins=[np.nan, np.nan, np.nan]) with pytest.raises(ValueError, match=r"All bin edges are NaN."): dataset.to_dataarray().groupby_bins("x", bins=[np.nan, np.nan, np.nan]) with pytest.raises(ValueError, match=r"Failed to group data."): dataset.groupby(dataset.foo * np.nan) with pytest.raises(ValueError, match=r"Failed to group data."): dataset.to_dataarray().groupby(dataset.foo * np.nan) with pytest.raises(TypeError, match=r"Cannot group by a Grouper object"): dataset.groupby(UniqueGrouper(labels=[1, 2, 3])) # type: ignore[arg-type] with pytest.raises(TypeError, match=r"got multiple values for argument"): UniqueGrouper(dataset.x, labels=[1, 2, 3]) # type: ignore[misc] def test_groupby_reduce_dimension_error(array) -> None: grouped = array.groupby("y") # assert_identical(array, grouped.mean()) with pytest.raises(ValueError, match=r"cannot reduce over dimensions"): grouped.mean("huh") with pytest.raises(ValueError, match=r"cannot reduce over dimensions"): grouped.mean(("x", "y", "asd")) assert_identical(array.mean("x"), grouped.reduce(np.mean, "x")) assert_allclose(array.mean(["x", "z"]), grouped.reduce(np.mean, ["x", "z"])) grouped = array.groupby("y") assert_identical(array, grouped.mean()) assert_identical(array.mean("x"), grouped.reduce(np.mean, "x")) assert_allclose(array.mean(["x", "z"]), grouped.reduce(np.mean, ["x", "z"])) def test_groupby_multiple_string_args(array) -> None: with pytest.raises(TypeError): array.groupby("x", squeeze="y") def test_groupby_bins_timeseries() -> None: ds = xr.Dataset() ds["time"] = xr.DataArray( pd.date_range("2010-08-01", "2010-08-15", freq="15min"), dims="time" ) ds["val"] = xr.DataArray(np.ones(ds["time"].shape), dims="time") time_bins = pd.date_range(start="2010-08-01", end="2010-08-15", freq="24h") actual = ds.groupby_bins("time", time_bins).sum() expected = xr.DataArray( 96 * np.ones((14,)), dims=["time_bins"], coords={"time_bins": pd.cut(time_bins, time_bins).categories}, # type: ignore[arg-type] ).to_dataset(name="val") assert_identical(actual, expected) def test_groupby_none_group_name() -> None: # GH158 # xarray should not fail if a DataArray's name attribute is None data = np.arange(10) + 10 da = xr.DataArray(data) # da.name = None key = xr.DataArray(np.floor_divide(data, 2)) mean = da.groupby(key).mean() assert "group" in mean.dims def test_groupby_getitem(dataset) -> None: assert_identical(dataset.sel(x=["a"]), dataset.groupby("x")["a"]) assert_identical(dataset.sel(z=[1]), dataset.groupby("z")[1]) assert_identical(dataset.foo.sel(x=["a"]), dataset.foo.groupby("x")["a"]) assert_identical(dataset.foo.sel(z=[1]), dataset.foo.groupby("z")[1]) assert_identical(dataset.cat.sel(y=[1]), dataset.cat.groupby("y")[1]) with pytest.raises( NotImplementedError, match="Cannot broadcast 1d-only pandas extension array." 
): dataset.groupby("boo") dataset = dataset.drop_vars(["cat"]) actual = dataset.groupby("boo")["f"].unstack().transpose("x", "y", "z") expected = dataset.sel(y=[1], z=[1, 2]).transpose("x", "y", "z") assert_identical(expected, actual) def test_groupby_dataset() -> None: data = Dataset( {"z": (["x", "y"], np.random.randn(3, 5))}, {"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)}, ) groupby = data.groupby("x") assert len(groupby) == 3 expected_groups = {"a": slice(0, 1), "b": slice(1, 2), "c": slice(2, 3)} assert groupby.groups == expected_groups expected_items = [ ("a", data.isel(x=[0])), ("b", data.isel(x=[1])), ("c", data.isel(x=[2])), ] for actual1, expected1 in zip(groupby, expected_items, strict=True): assert actual1[0] == expected1[0] assert_equal(actual1[1], expected1[1]) def identity(x): return x for k in ["x", "c", "y"]: actual2 = data.groupby(k).map(identity) assert_equal(data, actual2) def test_groupby_dataset_returns_new_type() -> None: data = Dataset({"z": (["x", "y"], np.random.randn(3, 5))}) actual1 = data.groupby("x").map(lambda ds: ds["z"]) expected1 = data["z"] assert_identical(expected1, actual1) actual2 = data["z"].groupby("x").map(lambda x: x.to_dataset()) expected2 = data assert_identical(expected2, actual2) def test_groupby_dataset_iter() -> None: data = create_test_data() for n, (t, sub) in enumerate(list(data.groupby("dim1"))[:3]): assert data["dim1"][n] == t assert_equal(data["var1"][[n]], sub["var1"]) assert_equal(data["var2"][[n]], sub["var2"]) assert_equal(data["var3"][:, [n]], sub["var3"]) def test_groupby_dataset_errors() -> None: data = create_test_data() with pytest.raises(TypeError, match=r"`group` must be"): data.groupby(np.arange(10)) # type: ignore[arg-type,unused-ignore] with pytest.raises(ValueError, match=r"length does not match"): data.groupby(data["dim1"][:3]) with pytest.raises(TypeError, match=r"`group` must be"): data.groupby(data.coords["dim1"].to_index()) # type: ignore[arg-type] @pytest.mark.parametrize("use_flox", [True, False]) @pytest.mark.parametrize( "by_func", [ pytest.param(lambda x: x, id="group-by-string"), pytest.param(lambda x: {x: UniqueGrouper()}, id="group-by-unique-grouper"), ], ) @pytest.mark.parametrize("letters_as_coord", [True, False]) def test_groupby_dataset_reduce_ellipsis( by_func, use_flox: bool, letters_as_coord: bool ) -> None: data = Dataset( { "xy": (["x", "y"], np.random.randn(3, 4)), "xonly": ("x", np.random.randn(3)), "yonly": ("y", np.random.randn(4)), "letters": ("y", ["a", "a", "b", "b"]), } ) if letters_as_coord: data = data.set_coords("letters") expected = data.mean("y") expected["yonly"] = expected["yonly"].variable.set_dims({"x": 3}) gb = data.groupby(by_func("x")) with xr.set_options(use_flox=use_flox): actual = gb.mean(...) assert_allclose(expected, actual) with xr.set_options(use_flox=use_flox): actual = gb.mean("y") assert_allclose(expected, actual) letters = data["letters"] expected = Dataset( { "xy": data["xy"].groupby(letters).mean(...), "xonly": (data["xonly"].mean().variable.set_dims({"letters": 2})), "yonly": data["yonly"].groupby(letters).mean(), } ) gb = data.groupby(by_func("letters")) with xr.set_options(use_flox=use_flox): actual = gb.mean(...) 
assert_allclose(expected, actual) def test_groupby_dataset_math() -> None: def reorder_dims(x): return x.transpose("dim1", "dim2", "dim3", "time") ds = create_test_data() ds["dim1"] = ds["dim1"] grouped = ds.groupby("dim1") expected = reorder_dims(ds + ds.coords["dim1"]) actual = grouped + ds.coords["dim1"] assert_identical(expected, reorder_dims(actual)) actual = ds.coords["dim1"] + grouped assert_identical(expected, reorder_dims(actual)) ds2 = 2 * ds expected = reorder_dims(ds + ds2) actual = grouped + ds2 assert_identical(expected, reorder_dims(actual)) actual = ds2 + grouped assert_identical(expected, reorder_dims(actual)) def test_groupby_math_more() -> None: ds = create_test_data() grouped = ds.groupby("numbers") zeros = DataArray([0, 0, 0, 0], [("numbers", range(4))]) expected = (ds + Variable("dim3", np.zeros(10))).transpose( "dim3", "dim1", "dim2", "time" ) actual = grouped + zeros assert_equal(expected, actual) actual = zeros + grouped assert_equal(expected, actual) with pytest.raises(ValueError, match=r"incompat.* grouped binary"): grouped + ds with pytest.raises(ValueError, match=r"incompat.* grouped binary"): ds + grouped with pytest.raises(TypeError, match=r"only support binary ops"): grouped + 1 # type: ignore[operator] with pytest.raises(TypeError, match=r"only support binary ops"): grouped + grouped # type: ignore[operator] with pytest.raises(TypeError, match=r"in-place operations"): ds += grouped # type: ignore[arg-type] ds = Dataset( { "x": ("time", np.arange(100)), "time": pd.date_range("2000-01-01", periods=100), } ) with pytest.raises(ValueError, match=r"incompat.* grouped binary"): ds + ds.groupby("time.month") def test_groupby_math_bitshift() -> None: # create new dataset of int's only ds = Dataset( { "x": ("index", np.ones(4, dtype=int)), "y": ("index", np.ones(4, dtype=int) * -1), "level": ("index", [1, 1, 2, 2]), "index": [0, 1, 2, 3], } ) shift = DataArray([1, 2, 1], [("level", [1, 2, 8])]) left_expected = Dataset( { "x": ("index", [2, 2, 4, 4]), "y": ("index", [-2, -2, -4, -4]), "level": ("index", [2, 2, 8, 8]), "index": [0, 1, 2, 3], } ) left_manual = [] for lev, group in ds.groupby("level"): shifter = shift.sel(level=lev) left_manual.append(group << shifter) left_actual = xr.concat(left_manual, dim="index").reset_coords(names="level") assert_equal(left_expected, left_actual) left_actual = (ds.groupby("level") << shift).reset_coords(names="level") assert_equal(left_expected, left_actual) right_expected = Dataset( { "x": ("index", [0, 0, 2, 2]), "y": ("index", [-1, -1, -2, -2]), "level": ("index", [0, 0, 4, 4]), "index": [0, 1, 2, 3], } ) right_manual = [] for lev, group in left_expected.groupby("level"): shifter = shift.sel(level=lev) right_manual.append(group >> shifter) right_actual = xr.concat(right_manual, dim="index").reset_coords(names="level") assert_equal(right_expected, right_actual) right_actual = (left_expected.groupby("level") >> shift).reset_coords(names="level") assert_equal(right_expected, right_actual) @pytest.mark.parametrize( "x_bins", ((0, 2, 4, 6), pd.IntervalIndex.from_breaks((0, 2, 4, 6), closed="left")) ) @pytest.mark.parametrize("use_flox", [True, False]) def test_groupby_bins_cut_kwargs(use_flox: bool, x_bins) -> None: da = xr.DataArray(np.arange(12).reshape(6, 2), dims=("x", "y")) with xr.set_options(use_flox=use_flox): actual = da.groupby_bins( "x", bins=x_bins, include_lowest=True, right=False ).mean() expected = xr.DataArray( np.array([[1.0, 2.0], [5.0, 6.0], [9.0, 10.0]]), dims=("x_bins", "y"), coords={ "x_bins": ( "x_bins", 
x_bins if isinstance(x_bins, pd.IntervalIndex) else pd.IntervalIndex.from_breaks(x_bins, closed="left"), ) }, ) assert_identical(expected, actual) with xr.set_options(use_flox=use_flox): actual = da.groupby( x=BinGrouper(bins=x_bins, include_lowest=True, right=False), ).mean() assert_identical(expected, actual) with xr.set_options(use_flox=use_flox): labels = ["one", "two", "three"] actual = da.groupby(x=BinGrouper(bins=x_bins, labels=labels)).sum() assert actual.xindexes["x_bins"].index.equals(pd.Index(labels)) # type: ignore[attr-defined] @pytest.mark.parametrize("indexed_coord", [True, False]) @pytest.mark.parametrize( ["groupby_method", "args"], ( ("groupby_bins", ("x", np.arange(0, 8, 3))), ("groupby", ({"x": BinGrouper(bins=np.arange(0, 8, 3))},)), ), ) def test_groupby_bins_math(groupby_method, args, indexed_coord) -> None: N = 7 da = DataArray(np.random.random((N, N)), dims=("x", "y")) if indexed_coord: da["x"] = np.arange(N) da["y"] = np.arange(N) g = getattr(da, groupby_method)(*args) mean = g.mean() expected = da.isel(x=slice(1, None)) - mean.isel(x_bins=("x", [0, 0, 0, 1, 1, 1])) actual = g - mean assert_identical(expected, actual) def test_groupby_math_nD_group() -> None: N = 40 da = DataArray( np.random.random((N, N)), dims=("x", "y"), coords={ "labels": ( "x", np.repeat(["a", "b", "c", "d", "e", "f", "g", "h"], repeats=N // 8), ), }, ) da["labels2d"] = xr.broadcast(da.labels, da)[0] g = da.groupby("labels2d") mean = g.mean() expected = da - mean.sel(labels2d=da.labels2d) expected["labels"] = expected.labels.broadcast_like(expected.labels2d) actual = g - mean assert_identical(expected, actual) da["num"] = ( "x", np.repeat([1, 2, 3, 4, 5, 6, 7, 8], repeats=N // 8), ) da["num2d"] = xr.broadcast(da.num, da)[0] g = da.groupby_bins("num2d", bins=[0, 4, 6]) mean = g.mean() idxr = np.digitize(da.num2d, bins=(0, 4, 6), right=True)[:30, :] - 1 expanded_mean = mean.drop_vars("num2d_bins").isel(num2d_bins=(("x", "y"), idxr)) expected = da.isel(x=slice(30)) - expanded_mean expected["labels"] = expected.labels.broadcast_like(expected.labels2d) expected["num"] = expected.num.broadcast_like(expected.num2d) # mean.num2d_bins.data is a pandas IntervalArray so needs to be put in `numpy` to allow indexing expected["num2d_bins"] = (("x", "y"), mean.num2d_bins.data.to_numpy()[idxr]) actual = g - mean assert_identical(expected, actual) def test_groupby_dataset_math_virtual() -> None: ds = Dataset({"x": ("t", [1, 2, 3])}, {"t": pd.date_range("20100101", periods=3)}) grouped = ds.groupby("t.day") actual = grouped - grouped.mean(...) expected = Dataset({"x": ("t", [0, 0, 0])}, ds[["t", "t.day"]]) assert_identical(actual, expected) def test_groupby_math_dim_order() -> None: da = DataArray( np.ones((10, 10, 12)), dims=("x", "y", "time"), coords={"time": pd.date_range("2001-01-01", periods=12, freq="6h")}, ) grouped = da.groupby("time.day") result = grouped - grouped.mean() assert result.dims == da.dims def test_groupby_dataset_nan() -> None: # nan should be excluded from groupby ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, {"bar": ("x", [1, 1, 2, np.nan])}) actual = ds.groupby("bar").mean(...) expected = Dataset({"foo": ("bar", [1.5, 3]), "bar": [1, 2]}) assert_identical(actual, expected) def test_groupby_dataset_order() -> None: # groupby should preserve variables order ds = Dataset() for vn in ["a", "b", "c"]: ds[vn] = DataArray(np.arange(10), dims=["t"]) data_vars_ref = list(ds.data_vars.keys()) ds = ds.groupby("t").mean(...) 
data_vars = list(ds.data_vars.keys()) assert data_vars == data_vars_ref # coords are now at the end of the list, so the test below fails # all_vars = list(ds.variables.keys()) # all_vars_ref = list(ds.variables.keys()) # .assertEqual(all_vars, all_vars_ref) def test_groupby_dataset_fillna() -> None: ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]}) expected = Dataset({"a": ("x", range(4))}, {"x": [0, 1, 2, 3]}) for target in [ds, expected]: target.coords["b"] = ("x", [0, 0, 1, 1]) actual = ds.groupby("b").fillna(DataArray([0, 2], dims="b")) assert_identical(expected, actual) actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])})) assert_identical(expected, actual) # attrs with groupby ds.attrs["attr"] = "ds" ds.a.attrs["attr"] = "da" actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])})) assert actual.attrs == ds.attrs assert actual.a.name == "a" assert actual.a.attrs == ds.a.attrs def test_groupby_dataset_where() -> None: # groupby ds = Dataset({"a": ("x", range(5))}, {"c": ("x", [0, 0, 1, 1, 1])}) cond = Dataset({"a": ("c", [True, False])}) expected = ds.copy(deep=True) expected["a"].values = np.array([0, 1] + [np.nan] * 3) actual = ds.groupby("c").where(cond) assert_identical(expected, actual) # attrs with groupby ds.attrs["attr"] = "ds" ds.a.attrs["attr"] = "da" actual = ds.groupby("c").where(cond) assert actual.attrs == ds.attrs assert actual.a.name == "a" assert actual.a.attrs == ds.a.attrs def test_groupby_dataset_assign() -> None: ds = Dataset({"a": ("x", range(3))}, {"b": ("x", ["A"] * 2 + ["B"])}) actual = ds.groupby("b").assign(c=lambda ds: 2 * ds.a) expected = ds.merge({"c": ("x", [0, 2, 4])}) assert_identical(actual, expected) actual = ds.groupby("b").assign(c=lambda ds: ds.a.sum()) expected = ds.merge({"c": ("x", [1, 1, 2])}) assert_identical(actual, expected) actual = ds.groupby("b").assign_coords(c=lambda ds: ds.a.sum()) expected = expected.set_coords("c") assert_identical(actual, expected) def test_groupby_dataset_map_dataarray_func() -> None: # regression GH6379 ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, coords={"x": [0, 0, 1, 1]}) actual = ds.groupby("x").map(lambda grp: grp.foo.mean()) expected = DataArray([1.5, 3.5], coords={"x": [0, 1]}, dims="x", name="foo") assert_identical(actual, expected) def test_groupby_dataarray_map_dataset_func() -> None: # regression GH6379 da = DataArray([1, 2, 3, 4], coords={"x": [0, 0, 1, 1]}, dims="x", name="foo") actual = da.groupby("x").map(lambda grp: grp.mean().to_dataset()) expected = xr.Dataset({"foo": ("x", [1.5, 3.5])}, coords={"x": [0, 1]}) assert_identical(actual, expected) @requires_flox @pytest.mark.parametrize("kwargs", [{"method": "map-reduce"}, {"engine": "numpy"}]) def test_groupby_flox_kwargs(kwargs) -> None: ds = Dataset({"a": ("x", range(5))}, {"c": ("x", [0, 0, 1, 1, 1])}) with xr.set_options(use_flox=False): expected = ds.groupby("c").mean() with xr.set_options(use_flox=True): actual = ds.groupby("c").mean(**kwargs) assert_identical(expected, actual) class TestDataArrayGroupBy: @pytest.fixture(autouse=True) def setup(self) -> None: self.attrs = {"attr1": "value1", "attr2": 2929} self.x = np.random.random((10, 20)) self.v = Variable(["x", "y"], self.x) self.va = Variable(["x", "y"], self.x, self.attrs) self.ds = Dataset({"foo": self.v}) self.dv = self.ds["foo"] self.mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=("level_1", "level_2") ) self.mda = DataArray([0, 1, 2, 3], coords={"x": self.mindex}, dims="x") self.da = self.dv.copy() self.da.coords["abc"] = ("y", 
np.array(["a"] * 9 + ["c"] + ["b"] * 10)) self.da.coords["y"] = 20 + 100 * self.da["y"] def test_stack_groupby_unsorted_coord(self) -> None: data = [[0, 1], [2, 3]] data_flat = [0, 1, 2, 3] dims = ["x", "y"] y_vals = [2, 3] arr = xr.DataArray(data, dims=dims, coords={"y": y_vals}) actual1 = arr.stack(z=dims).groupby("z").first() midx1 = pd.MultiIndex.from_product([[0, 1], [2, 3]], names=dims) expected1 = xr.DataArray(data_flat, dims=["z"], coords={"z": midx1}) assert_equal(actual1, expected1) # GH: 3287. Note that y coord values are not in sorted order. arr = xr.DataArray(data, dims=dims, coords={"y": y_vals[::-1]}) actual2 = arr.stack(z=dims).groupby("z").first() midx2 = pd.MultiIndex.from_product([[0, 1], [3, 2]], names=dims) expected2 = xr.DataArray(data_flat, dims=["z"], coords={"z": midx2}) assert_equal(actual2, expected2) def test_groupby_iter(self) -> None: for (act_x, act_dv), (exp_x, exp_ds) in zip( self.dv.groupby("y"), self.ds.groupby("y"), strict=True ): assert exp_x == act_x assert_identical(exp_ds["foo"], act_dv) for (_, exp_dv), (_, act_dv) in zip( self.dv.groupby("x"), self.dv.groupby("x"), strict=True ): assert_identical(exp_dv, act_dv) def test_groupby_properties(self) -> None: grouped = self.da.groupby("abc") expected_groups = {"a": range(9), "c": [9], "b": range(10, 20)} assert expected_groups.keys() == grouped.groups.keys() for key, expected_group in expected_groups.items(): actual_group = grouped.groups[key] # TODO: array_api doesn't allow slice: assert not isinstance(expected_group, slice) assert not isinstance(actual_group, slice) np.testing.assert_array_equal(expected_group, actual_group) assert 3 == len(grouped) @pytest.mark.parametrize( "by, use_da", [("x", False), ("y", False), ("y", True), ("abc", False)] ) @pytest.mark.parametrize("shortcut", [True, False]) def test_groupby_map_identity(self, by, use_da, shortcut) -> None: expected = self.da if use_da: by = expected.coords[by] def identity(x): return x grouped = expected.groupby(by) actual = grouped.map(identity, shortcut=shortcut) assert_identical(expected, actual) def test_groupby_sum(self) -> None: array = self.da grouped = array.groupby("abc") expected_sum_all = Dataset( { "foo": Variable( ["abc"], np.array( [ self.x[:, :9].sum(), self.x[:, 10:].sum(), self.x[:, 9:10].sum(), ] ).T, ), "abc": Variable(["abc"], np.array(["a", "b", "c"])), } )["foo"] assert_allclose(expected_sum_all, grouped.reduce(np.sum, dim=...)) assert_allclose(expected_sum_all, grouped.sum(...)) expected = DataArray( [ array["y"].values[idx].sum() for idx in [slice(9), slice(10, None), slice(9, 10)] ], [["a", "b", "c"]], ["abc"], ) actual = array["y"].groupby("abc").map(np.sum) assert_allclose(expected, actual) actual = array["y"].groupby("abc").sum(...) 
assert_allclose(expected, actual) expected_sum_axis1 = Dataset( { "foo": ( ["x", "abc"], np.array( [ self.x[:, :9].sum(1), self.x[:, 10:].sum(1), self.x[:, 9:10].sum(1), ] ).T, ), "abc": Variable(["abc"], np.array(["a", "b", "c"])), } )["foo"] assert_allclose(expected_sum_axis1, grouped.reduce(np.sum, "y")) assert_allclose(expected_sum_axis1, grouped.sum("y")) @pytest.mark.parametrize("use_flox", [True, False]) @pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize( "chunk", [ pytest.param( True, marks=pytest.mark.skipif(not has_dask, reason="no dask") ), False, ], ) @pytest.mark.parametrize("method", ["sum", "mean", "median"]) def test_groupby_reductions( self, use_flox: bool, method: str, shuffle: bool, chunk: bool ) -> None: if shuffle and chunk and not has_dask_ge_2024_08_1: pytest.skip() array = self.da if chunk: array.data = array.chunk({"y": 5}).data reduction = getattr(np, method) expected = Dataset( { "foo": Variable( ["x", "abc"], np.array( [ reduction(self.x[:, :9], axis=-1), reduction(self.x[:, 10:], axis=-1), reduction(self.x[:, 9:10], axis=-1), ] ).T, ), "abc": Variable(["abc"], np.array(["a", "b", "c"])), } )["foo"] with raise_if_dask_computes(): grouped = array.groupby("abc") if shuffle: grouped = grouped.shuffle_to_chunks().groupby("abc") with xr.set_options(use_flox=use_flox): actual = getattr(grouped, method)(dim="y") assert_allclose(expected, actual) def test_groupby_count(self) -> None: array = DataArray( [0, 0, np.nan, np.nan, 0, 0], coords={"cat": ("x", ["a", "b", "b", "c", "c", "c"])}, dims="x", ) actual = array.groupby("cat").count() expected = DataArray([1, 1, 2], coords=[("cat", ["a", "b", "c"])]) assert_identical(actual, expected) @pytest.mark.parametrize("shortcut", [True, False]) @pytest.mark.parametrize("keep_attrs", [None, True, False]) def test_groupby_reduce_keep_attrs( self, shortcut: bool, keep_attrs: bool | None ) -> None: array = self.da array.attrs["foo"] = "bar" actual = array.groupby("abc").reduce( np.mean, keep_attrs=keep_attrs, shortcut=shortcut ) with xr.set_options(use_flox=False): expected = array.groupby("abc").mean(keep_attrs=keep_attrs) assert_identical(expected, actual) @pytest.mark.parametrize("keep_attrs", [None, True, False]) def test_groupby_keep_attrs(self, keep_attrs: bool | None) -> None: array = self.da array.attrs["foo"] = "bar" with xr.set_options(use_flox=False): expected = array.groupby("abc").mean(keep_attrs=keep_attrs) with xr.set_options(use_flox=True): actual = array.groupby("abc").mean(keep_attrs=keep_attrs) # values are tested elsewhere, here we just check data # TODO: add check_attrs kwarg to assert_allclose actual.data = expected.data assert_identical(expected, actual) def test_groupby_map_center(self) -> None: def center(x): return x - np.mean(x) array = self.da grouped = array.groupby("abc") expected_ds = array.to_dataset() exp_data = np.hstack( [center(self.x[:, :9]), center(self.x[:, 9:10]), center(self.x[:, 10:])] ) expected_ds["foo"] = (["x", "y"], exp_data) expected_centered = expected_ds["foo"] assert_allclose(expected_centered, grouped.map(center)) def test_groupby_map_ndarray(self) -> None: # regression test for #326 array = self.da grouped = array.groupby("abc") actual = grouped.map(np.asarray) # type: ignore[arg-type] # TODO: Not sure using np.asarray like this makes sense with array api assert_equal(array, actual) def test_groupby_map_changes_metadata(self) -> None: def change_metadata(x): x.coords["x"] = x.coords["x"] * 2 x.attrs["fruit"] = "lemon" return x array = self.da grouped = 
array.groupby("abc") actual = grouped.map(change_metadata) expected = array.copy() expected = change_metadata(expected) assert_equal(expected, actual) def test_groupby_math_squeeze(self) -> None: array = self.da grouped = array.groupby("x") expected = array + array.coords["x"] actual = grouped + array.coords["x"] assert_identical(expected, actual) actual = array.coords["x"] + grouped assert_identical(expected, actual) ds = array.coords["x"].to_dataset(name="X") expected = array + ds actual = grouped + ds assert_identical(expected, actual) actual = ds + grouped assert_identical(expected, actual) def test_groupby_math(self) -> None: array = self.da grouped = array.groupby("abc") expected_agg = (grouped.mean(...) - np.arange(3)).rename(None) actual = grouped - DataArray(range(3), [("abc", ["a", "b", "c"])]) actual_agg = actual.groupby("abc").mean(...) assert_allclose(expected_agg, actual_agg) with pytest.raises(TypeError, match=r"only support binary ops"): grouped + 1 # type: ignore[type-var] with pytest.raises(TypeError, match=r"only support binary ops"): grouped + grouped # type: ignore[type-var] with pytest.raises(TypeError, match=r"in-place operations"): array += grouped # type: ignore[arg-type] def test_groupby_math_not_aligned(self) -> None: array = DataArray( range(4), {"b": ("x", [0, 0, 1, 1]), "x": [0, 1, 2, 3]}, dims="x" ) other = DataArray([10], coords={"b": [0]}, dims="b") actual = array.groupby("b") + other expected = DataArray([10, 11, np.nan, np.nan], array.coords) assert_identical(expected, actual) # regression test for #7797 other = array.groupby("b").sum() actual = array.sel(x=[0, 1]).groupby("b") - other expected = DataArray([-1, 0], {"b": ("x", [0, 0]), "x": [0, 1]}, dims="x") assert_identical(expected, actual) other = DataArray([10], coords={"c": 123, "b": [0]}, dims="b") actual = array.groupby("b") + other expected = DataArray([10, 11, np.nan, np.nan], array.coords) expected.coords["c"] = (["x"], [123] * 2 + [np.nan] * 2) assert_identical(expected, actual) other_ds = Dataset({"a": ("b", [10])}, {"b": [0]}) actual_ds = array.groupby("b") + other_ds expected_ds = Dataset({"a": ("x", [10, 11, np.nan, np.nan])}, array.coords) assert_identical(expected_ds, actual_ds) def test_groupby_restore_dim_order(self) -> None: array = DataArray( np.random.randn(5, 3), coords={"a": ("x", range(5)), "b": ("y", range(3))}, dims=["x", "y"], ) for by, expected_dims in [ ("x", ("x", "y")), ("y", ("x", "y")), ("a", ("a", "y")), ("b", ("x", "b")), ]: result = array.groupby(by).map(lambda x: x.squeeze()) assert result.dims == expected_dims def test_groupby_restore_coord_dims(self) -> None: array = DataArray( np.random.randn(5, 3), coords={ "a": ("x", range(5)), "b": ("y", range(3)), "c": (("x", "y"), np.random.randn(5, 3)), }, dims=["x", "y"], ) for by, expected_dims in [ ("x", ("x", "y")), ("y", ("x", "y")), ("a", ("a", "y")), ("b", ("x", "b")), ]: result = array.groupby(by, restore_coord_dims=True).map( lambda x: x.squeeze() )["c"] assert result.dims == expected_dims def test_groupby_first_and_last(self) -> None: array = DataArray([1, 2, 3, 4, 5], dims="x") by = DataArray(["a"] * 2 + ["b"] * 3, dims="x", name="ab") expected = DataArray([1, 3], [("ab", ["a", "b"])]) actual = array.groupby(by).first() assert_identical(expected, actual) expected = DataArray([2, 5], [("ab", ["a", "b"])]) actual = array.groupby(by).last() assert_identical(expected, actual) array = DataArray(np.random.randn(5, 3), dims=["x", "y"]) expected = DataArray(array[[0, 2]], {"ab": ["a", "b"]}, ["ab", "y"]) actual = 
array.groupby(by).first() assert_identical(expected, actual) actual = array.groupby("x").first() expected = array # should be a no-op assert_identical(expected, actual) # TODO: groupby_bins too def make_groupby_multidim_example_array(self) -> DataArray: return DataArray( [[[0, 1], [2, 3]], [[5, 10], [15, 20]]], coords={ "lon": (["ny", "nx"], [[30, 40], [40, 50]]), "lat": (["ny", "nx"], [[10, 10], [20, 20]]), }, dims=["time", "ny", "nx"], ) def test_groupby_multidim(self) -> None: array = self.make_groupby_multidim_example_array() for dim, expected_sum in [ ("lon", DataArray([5, 28, 23], coords=[("lon", [30.0, 40.0, 50.0])])), ("lat", DataArray([16, 40], coords=[("lat", [10.0, 20.0])])), ]: actual_sum = array.groupby(dim).sum(...) assert_identical(expected_sum, actual_sum) if has_flox: # GH9803 # reduce over one dim of a nD grouper array.coords["labels"] = (("ny", "nx"), np.array([["a", "b"], ["b", "a"]])) actual = array.groupby("labels").sum("nx") expected_np = np.array([[[0, 1], [3, 2]], [[5, 10], [20, 15]]]) expected = xr.DataArray( expected_np, dims=("time", "ny", "labels"), coords={"labels": ["a", "b"]}, ) assert_identical(expected, actual) def test_groupby_multidim_map(self) -> None: array = self.make_groupby_multidim_example_array() actual = array.groupby("lon").map(lambda x: x - x.mean()) expected = DataArray( [[[-2.5, -6.0], [-5.0, -8.5]], [[2.5, 3.0], [8.0, 8.5]]], coords=array.coords, dims=array.dims, ) assert_identical(expected, actual) @pytest.mark.parametrize("use_flox", [True, False]) @pytest.mark.parametrize("coords", [np.arange(4), np.arange(4)[::-1], [2, 0, 3, 1]]) @pytest.mark.parametrize( "cut_kwargs", ( {"labels": None, "include_lowest": True}, {"labels": None, "include_lowest": False}, {"labels": ["a", "b"]}, {"labels": [1.2, 3.5]}, {"labels": ["b", "a"]}, ), ) def test_groupby_bins( self, coords: np.typing.ArrayLike, use_flox: bool, cut_kwargs: dict, ) -> None: array = DataArray( np.arange(4), dims="dim_0", coords={"dim_0": coords}, name="a" ) # the first value should not be part of any group ("right" binning) array[0] = 99 # bins follow conventions for pandas.cut # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html bins = [0, 1.5, 5] df = array.to_dataframe() df["dim_0_bins"] = pd.cut(array["dim_0"], bins, **cut_kwargs) # type: ignore[call-overload] expected_df = df.groupby("dim_0_bins", observed=True).sum() expected = expected_df.to_xarray().assign_coords( dim_0_bins=cast(pd.CategoricalIndex, expected_df.index).categories )["a"] with xr.set_options(use_flox=use_flox): gb = array.groupby_bins("dim_0", bins=bins, **cut_kwargs) shuffled = gb.shuffle_to_chunks().groupby_bins( "dim_0", bins=bins, **cut_kwargs ) actual = gb.sum() assert_identical(expected, actual) assert_identical(expected, shuffled.sum()) actual = gb.map(lambda x: x.sum()) assert_identical(expected, actual) assert_identical(expected, shuffled.map(lambda x: x.sum())) # make sure original array dims are unchanged assert len(array.dim_0) == 4 def test_groupby_bins_ellipsis(self) -> None: da = xr.DataArray(np.ones((2, 3, 4))) bins = [-1, 0, 1, 2] with xr.set_options(use_flox=False): actual = da.groupby_bins("dim_0", bins).mean(...) with xr.set_options(use_flox=True): expected = da.groupby_bins("dim_0", bins).mean(...) 
assert_allclose(actual, expected) @pytest.mark.parametrize("use_flox", [True, False]) def test_groupby_bins_gives_correct_subset(self, use_flox: bool) -> None: # GH7766 rng = np.random.default_rng(42) coords = rng.normal(5, 5, 1000) bins = np.logspace(-4, 1, 10) labels = [ "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", ] # xArray # Make a mock dataarray darr = xr.DataArray(coords, coords=[coords], dims=["coords"]) expected = xr.DataArray( [np.nan, np.nan, 1, 1, 1, 8, 31, 104, 542], dims="coords_bins", coords={"coords_bins": labels}, ) gb = darr.groupby_bins("coords", bins, labels=labels) with xr.set_options(use_flox=use_flox): actual = gb.count() assert_identical(actual, expected) def test_groupby_bins_empty(self) -> None: array = DataArray(np.arange(4), [("x", range(4))]) # one of these bins will be empty bins = [0, 4, 5] bin_coords = pd.cut(array["x"], bins).categories # type: ignore[call-overload] actual = array.groupby_bins("x", bins).sum() expected = DataArray([6, np.nan], dims="x_bins", coords={"x_bins": bin_coords}) assert_identical(expected, actual) # make sure original array is unchanged # (was a problem in earlier versions) assert len(array.x) == 4 def test_groupby_bins_multidim(self) -> None: array = self.make_groupby_multidim_example_array() bins = [0, 15, 20] bin_coords = pd.cut(array["lat"].values.flat, bins).categories # type: ignore[call-overload] expected = DataArray([16, 40], dims="lat_bins", coords={"lat_bins": bin_coords}) actual = array.groupby_bins("lat", bins).map(lambda x: x.sum()) assert_identical(expected, actual) # modify the array coordinates to be non-monotonic after unstacking array["lat"].data = np.array([[10.0, 20.0], [20.0, 10.0]]) expected = DataArray([28, 28], dims="lat_bins", coords={"lat_bins": bin_coords}) actual = array.groupby_bins("lat", bins).map(lambda x: x.sum()) assert_identical(expected, actual) bins = [-2, -1, 0, 1, 2] field = DataArray(np.ones((5, 3)), dims=("x", "y")) by = DataArray( np.array([[-1.5, -1.5, 0.5, 1.5, 1.5] * 3]).reshape(5, 3), dims=("x", "y") ) actual = field.groupby_bins(by, bins=bins).count() bincoord = np.array( [ pd.Interval(left, right, closed="right") for left, right in pairwise(bins) ], dtype=object, ) expected = DataArray( np.array([6, np.nan, 3, 6]), dims="group_bins", coords={"group_bins": bincoord}, ) assert_identical(actual, expected) def test_groupby_bins_sort(self) -> None: data = xr.DataArray( np.arange(100), dims="x", coords={"x": np.linspace(-100, 100, num=100)} ) binned_mean = data.groupby_bins("x", bins=11).mean() assert binned_mean.to_index().is_monotonic_increasing with xr.set_options(use_flox=True): actual = data.groupby_bins("x", bins=11).count() with xr.set_options(use_flox=False): expected = data.groupby_bins("x", bins=11).count() assert_identical(actual, expected) def test_groupby_assign_coords(self) -> None: array = DataArray([1, 2, 3, 4], {"c": ("x", [0, 0, 1, 1])}, dims="x") actual = array.groupby("c").assign_coords(d=lambda a: a.mean()) expected = array.copy() expected.coords["d"] = ("x", [1.5, 1.5, 3.5, 3.5]) assert_identical(actual, expected) def test_groupby_fillna(self) -> None: a = DataArray([np.nan, 1, np.nan, 3], coords={"x": range(4)}, dims="x") fill_value = DataArray([0, 1], dims="y") actual = a.fillna(fill_value) expected = DataArray( [[0, 1], [1, 1], [0, 1], [3, 3]], coords={"x": range(4)}, dims=("x", "y") ) assert_identical(expected, actual) b = DataArray(range(4), coords={"x": range(4)}, dims="x") expected = b.copy() for target in [a, expected]: 
target.coords["b"] = ("x", [0, 0, 1, 1]) actual = a.groupby("b").fillna(DataArray([0, 2], dims="b")) assert_identical(expected, actual) @pytest.mark.parametrize("use_flox", [True, False]) def test_groupby_fastpath_for_monotonic(self, use_flox: bool) -> None: # Fixes https://github.com/pydata/xarray/issues/6220 # Fixes https://github.com/pydata/xarray/issues/9279 index = [1, 2, 3, 4, 7, 9, 10] array = DataArray(np.arange(len(index)), [("idx", index)]) array_rev = array.copy().assign_coords({"idx": index[::-1]}) fwd = array.groupby("idx", squeeze=False) rev = array_rev.groupby("idx", squeeze=False) for gb in [fwd, rev]: assert all(isinstance(elem, slice) for elem in gb.encoded.group_indices) with xr.set_options(use_flox=use_flox): assert_identical(fwd.sum(), array) assert_identical(rev.sum(), array_rev) class TestDataArrayResample: @pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize( "resample_freq", [ "24h", "123456s", "1234567890us", pd.Timedelta(hours=2), pd.offsets.MonthBegin(), pd.offsets.Second(123456), datetime.timedelta(days=1, hours=6), ], ) def test_resample( self, use_cftime: bool, shuffle: bool, resample_freq: ResampleCompatible ) -> None: if use_cftime and not has_cftime: pytest.skip() times = xr.date_range( "2000-01-01", freq="6h", periods=10, use_cftime=use_cftime ) def resample_as_pandas(array, *args, **kwargs): array_ = array.copy(deep=True) if use_cftime: array_["time"] = times.to_datetimeindex(time_unit="ns") result = DataArray.from_series( array_.to_series().resample(*args, **kwargs).mean() ) if use_cftime: result = result.convert_calendar( calendar="standard", use_cftime=use_cftime ) return result array = DataArray(np.arange(10), [("time", times)]) rs = array.resample(time=resample_freq) shuffled = rs.shuffle_to_chunks().resample(time=resample_freq) actual = rs.mean() expected = resample_as_pandas(array, resample_freq) assert_identical(expected, actual) assert_identical(expected, shuffled.mean()) assert_identical(expected, rs.reduce(np.mean)) assert_identical(expected, shuffled.reduce(np.mean)) rs = array.resample(time="24h", closed="right") actual = rs.mean() shuffled = rs.shuffle_to_chunks().resample(time="24h", closed="right") expected = resample_as_pandas(array, "24h", closed="right") assert_identical(expected, actual) assert_identical(expected, shuffled.mean()) with pytest.raises(ValueError, match=r"Index must be monotonic"): array[[2, 0, 1]].resample(time=resample_freq) reverse = array.isel(time=slice(-1, None, -1)) with pytest.raises(ValueError): reverse.resample(time=resample_freq).mean() def test_resample_doctest(self, use_cftime: bool) -> None: # run the doctest example here so we are not surprised da = xr.DataArray( np.array([1, 2, 3, 1, 2, np.nan]), dims="time", coords=dict( time=( "time", xr.date_range( "2001-01-01", freq="ME", periods=6, use_cftime=use_cftime ), ), labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ), ) actual = da.resample(time="3ME").count() expected = DataArray( [1, 3, 1], dims="time", coords={ "time": xr.date_range( "2001-01-01", freq="3ME", periods=3, use_cftime=use_cftime ) }, ) assert_identical(actual, expected) def test_da_resample_func_args(self) -> None: def func(arg1, arg2, arg3=0.0): return arg1.mean("time") + arg2 + arg3 times = pd.date_range("2000", periods=3, freq="D") da = xr.DataArray([1.0, 1.0, 1.0], coords=[times], dims=["time"]) expected = xr.DataArray([3.0, 3.0, 3.0], coords=[times], dims=["time"]) actual = da.resample(time="D").map(func, args=(1.0,), arg3=1.0) assert_identical(actual, 
expected) def test_resample_first_last(self, use_cftime) -> None: times = xr.date_range( "2000-01-01", freq="6h", periods=10, use_cftime=use_cftime ) array = DataArray(np.arange(10), [("time", times)]) # resample to same frequency actual = array.resample(time="6h").first() assert_identical(array, actual) actual = array.resample(time="1D").first() expected = DataArray([0, 4, 8], [("time", times[::4])]) assert_identical(expected, actual) # verify that labels don't use the first value actual = array.resample(time="24h").first() expected = array.isel(time=[0, 4, 8]) assert_identical(expected, actual) # missing values array = array.astype(float) array[:2] = np.nan actual = array.resample(time="1D").first() expected = DataArray([2, 4, 8], [("time", times[::4])]) assert_identical(expected, actual) actual = array.resample(time="1D").first(skipna=False) expected = DataArray([np.nan, 4, 8], [("time", times[::4])]) assert_identical(expected, actual) # regression test for https://stackoverflow.com/questions/33158558/ array = Dataset({"time": times})["time"] actual = array.resample(time="1D").last() expected = array.isel(time=[3, 7, 9]).assign_coords(time=times[::4]) assert_identical(expected, actual) # missing periods, GH10169 actual = array.isel(time=[0, 1, 2, 3, 8, 9]).resample(time="1D").last() expected = DataArray( np.array([times[3], np.datetime64("NaT"), times[9]]), dims="time", coords={"time": times[::4]}, name="time", ) assert_identical(expected, actual) def test_resample_bad_resample_dim(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) array = DataArray(np.arange(10), [("__resample_dim__", times)]) with pytest.raises(ValueError, match=r"Proxy resampling dimension"): array.resample(__resample_dim__="1D").first() @requires_scipy def test_resample_drop_nondim_coords(self) -> None: xs = np.arange(6) ys = np.arange(3) times = pd.date_range("2000-01-01", freq="6h", periods=5) data = np.tile(np.arange(5), (6, 3, 1)) xx, yy = np.meshgrid(xs * 5, ys * 2.5) tt = np.arange(len(times), dtype=int) array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time")) xcoord = DataArray(xx.T, {"x": xs, "y": ys}, ("x", "y")) ycoord = DataArray(yy.T, {"x": xs, "y": ys}, ("x", "y")) tcoord = DataArray(tt, {"time": times}, ("time",)) ds = Dataset({"data": array, "xc": xcoord, "yc": ycoord, "tc": tcoord}) ds = ds.set_coords(["xc", "yc", "tc"]) # Select the data now, with the auxiliary coordinates in place array = ds["data"] # Re-sample actual = array.resample(time="12h", restore_coord_dims=True).mean("time") assert "tc" not in actual.coords # Up-sample - filling actual = array.resample(time="1h", restore_coord_dims=True).ffill() assert "tc" not in actual.coords # Up-sample - interpolation actual = array.resample(time="1h", restore_coord_dims=True).interpolate( "linear" ) assert "tc" not in actual.coords def test_resample_keep_attrs(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) array = DataArray(np.ones(10), [("time", times)]) array.attrs["meta"] = "data" result = array.resample(time="1D").mean(keep_attrs=True) expected = DataArray([1, 1, 1], [("time", times[::4])], attrs=array.attrs) assert_identical(result, expected) def test_resample_skipna(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) array = DataArray(np.ones(10), [("time", times)]) array[1] = np.nan result = array.resample(time="1D").mean(skipna=False) expected = DataArray([np.nan, 1, 1], [("time", times[::4])]) assert_identical(result, expected) def 
test_upsample(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=5) array = DataArray(np.arange(5), [("time", times)]) # Forward-fill actual = array.resample(time="3h").ffill() expected = DataArray(array.to_series().resample("3h").ffill()) assert_identical(expected, actual) # Backward-fill actual = array.resample(time="3h").bfill() expected = DataArray(array.to_series().resample("3h").bfill()) assert_identical(expected, actual) # As frequency actual = array.resample(time="3h").asfreq() expected = DataArray(array.to_series().resample("3h").asfreq()) assert_identical(expected, actual) # Pad actual = array.resample(time="3h").pad() expected = DataArray(array.to_series().resample("3h").ffill()) assert_identical(expected, actual) # Nearest rs = array.resample(time="3h") actual = rs.nearest() new_times = rs.groupers[0].full_index expected = DataArray(array.reindex(time=new_times, method="nearest")) assert_identical(expected, actual) def test_upsample_nd(self) -> None: # Same as before, but now we try on multi-dimensional DataArrays. xs = np.arange(6) ys = np.arange(3) times = pd.date_range("2000-01-01", freq="6h", periods=5) data = np.tile(np.arange(5), (6, 3, 1)) array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time")) # Forward-fill actual = array.resample(time="3h").ffill() expected_data = np.repeat(data, 2, axis=-1) expected_times = times.to_series().resample("3h").asfreq().index expected_data = expected_data[..., : len(expected_times)] expected = DataArray( expected_data, {"time": expected_times, "x": xs, "y": ys}, ("x", "y", "time"), ) assert_identical(expected, actual) # Backward-fill actual = array.resample(time="3h").ffill() expected_data = np.repeat(np.flipud(data.T).T, 2, axis=-1) expected_data = np.flipud(expected_data.T).T expected_times = times.to_series().resample("3h").asfreq().index expected_data = expected_data[..., : len(expected_times)] expected = DataArray( expected_data, {"time": expected_times, "x": xs, "y": ys}, ("x", "y", "time"), ) assert_identical(expected, actual) # As frequency actual = array.resample(time="3h").asfreq() expected_data = np.repeat(data, 2, axis=-1).astype(float)[..., :-1] expected_data[..., 1::2] = np.nan expected_times = times.to_series().resample("3h").asfreq().index expected = DataArray( expected_data, {"time": expected_times, "x": xs, "y": ys}, ("x", "y", "time"), ) assert_identical(expected, actual) # Pad actual = array.resample(time="3h").pad() expected_data = np.repeat(data, 2, axis=-1) expected_data[..., 1::2] = expected_data[..., ::2] expected_data = expected_data[..., :-1] expected_times = times.to_series().resample("3h").asfreq().index expected = DataArray( expected_data, {"time": expected_times, "x": xs, "y": ys}, ("x", "y", "time"), ) assert_identical(expected, actual) def test_upsample_tolerance(self) -> None: # Test tolerance keyword for upsample methods bfill, pad, nearest times = pd.date_range("2000-01-01", freq="1D", periods=2) times_upsampled = pd.date_range("2000-01-01", freq="6h", periods=5) array = DataArray(np.arange(2), [("time", times)]) # Forward fill actual = array.resample(time="6h").ffill(tolerance="12h") expected = DataArray([0.0, 0.0, 0.0, np.nan, 1.0], [("time", times_upsampled)]) assert_identical(expected, actual) # Backward fill actual = array.resample(time="6h").bfill(tolerance="12h") expected = DataArray([0.0, np.nan, 1.0, 1.0, 1.0], [("time", times_upsampled)]) assert_identical(expected, actual) # Nearest actual = array.resample(time="6h").nearest(tolerance="6h") expected = 
DataArray([0, 0, np.nan, 1, 1], [("time", times_upsampled)]) assert_identical(expected, actual) @requires_scipy def test_upsample_interpolate(self) -> None: from scipy.interpolate import interp1d xs = np.arange(6) ys = np.arange(3) times = pd.date_range("2000-01-01", freq="6h", periods=5) z = np.arange(5) ** 2 data = np.tile(z, (6, 3, 1)) array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time")) expected_times = times.to_series().resample("1h").asfreq().index # Split the times into equal sub-intervals to simulate the 6 hour # to 1 hour up-sampling new_times_idx = np.linspace(0, len(times) - 1, len(times) * 5) kinds: list[InterpOptions] = [ "linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", ] for kind in kinds: kwargs = {} if kind == "polynomial": kwargs["order"] = 1 actual = array.resample(time="1h").interpolate(kind, **kwargs) # using interp1d, polynomial order is to set directly in kind using int f = interp1d( np.arange(len(times)), data, kind=kwargs["order"] if kind == "polynomial" else kind, # type: ignore[arg-type,unused-ignore] axis=-1, bounds_error=True, assume_sorted=True, ) expected_data = f(new_times_idx) expected = DataArray( expected_data, {"time": expected_times, "x": xs, "y": ys}, ("x", "y", "time"), ) # Use AllClose because there are some small differences in how # we upsample timeseries versus the integer indexing as I've # done here due to floating point arithmetic assert_allclose(expected, actual, rtol=1e-16) @requires_scipy def test_upsample_interpolate_bug_2197(self) -> None: dates = pd.date_range("2007-02-01", "2007-03-01", freq="D", unit="s") da = xr.DataArray(np.arange(len(dates)), [("time", dates)]) result = da.resample(time="ME").interpolate("linear") expected_times = np.array( [np.datetime64("2007-02-28"), np.datetime64("2007-03-31")] ) expected = xr.DataArray([27.0, np.nan], [("time", expected_times)]) assert_equal(result, expected) @requires_scipy def test_upsample_interpolate_regression_1605(self) -> None: dates = pd.date_range("2016-01-01", "2016-03-31", freq="1D") expected = xr.DataArray( np.random.random((len(dates), 2, 3)), dims=("time", "x", "y"), coords={"time": dates}, ) actual = expected.resample(time="1D").interpolate("linear") assert_allclose(actual, expected, rtol=1e-16) @requires_dask @requires_scipy @pytest.mark.parametrize("chunked_time", [True, False]) def test_upsample_interpolate_dask(self, chunked_time: bool) -> None: from scipy.interpolate import interp1d xs = np.arange(6) ys = np.arange(3) times = pd.date_range("2000-01-01", freq="6h", periods=5) z = np.arange(5) ** 2 data = np.tile(z, (6, 3, 1)) array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time")) chunks = {"x": 2, "y": 1} if chunked_time: chunks["time"] = 3 expected_times = times.to_series().resample("1h").asfreq().index # Split the times into equal sub-intervals to simulate the 6 hour # to 1 hour up-sampling new_times_idx = np.linspace(0, len(times) - 1, len(times) * 5) kinds: list[InterpOptions] = [ "linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", ] for kind in kinds: kwargs = {} if kind == "polynomial": kwargs["order"] = 1 actual = array.chunk(chunks).resample(time="1h").interpolate(kind, **kwargs) actual = actual.compute() # using interp1d, polynomial order is to set directly in kind using int f = interp1d( np.arange(len(times)), data, kind=kwargs["order"] if kind == "polynomial" else kind, # type: ignore[arg-type,unused-ignore] axis=-1, bounds_error=True, assume_sorted=True, ) 
expected_data = f(new_times_idx) expected = DataArray( expected_data, {"time": expected_times, "x": xs, "y": ys}, ("x", "y", "time"), ) # Use AllClose because there are some small differences in how # we upsample timeseries versus the integer indexing as I've # done here due to floating point arithmetic assert_allclose(expected, actual, rtol=1e-16) def test_resample_offset(self) -> None: times = pd.date_range("2000-01-01T02:03:01", freq="6h", periods=10) array = DataArray(np.arange(10), [("time", times)]) offset = pd.Timedelta("11h") actual = array.resample(time="24h", offset=offset).mean() expected = DataArray(array.to_series().resample("24h", offset=offset).mean()) assert_identical(expected, actual) def test_resample_origin(self) -> None: times = pd.date_range("2000-01-01T02:03:01", freq="6h", periods=10) array = DataArray(np.arange(10), [("time", times)]) origin: Literal["start"] = "start" actual = array.resample(time="24h", origin=origin).mean() expected = DataArray(array.to_series().resample("24h", origin=origin).mean()) assert_identical(expected, actual) class TestDatasetResample: @pytest.mark.parametrize( "resample_freq", [ "24h", "123456s", "1234567890us", pd.Timedelta(hours=2), pd.offsets.MonthBegin(), pd.offsets.Second(123456), datetime.timedelta(days=1, hours=6), ], ) def test_resample( self, use_cftime: bool, resample_freq: ResampleCompatible ) -> None: if use_cftime and not has_cftime: pytest.skip() times = xr.date_range( "2000-01-01", freq="6h", periods=10, use_cftime=use_cftime ) def resample_as_pandas(ds, *args, **kwargs): ds_ = ds.copy(deep=True) if use_cftime: ds_["time"] = times.to_datetimeindex(time_unit="ns") result = Dataset.from_dataframe( ds_.to_dataframe().resample(*args, **kwargs).mean() ) if use_cftime: result = result.convert_calendar( calendar="standard", use_cftime=use_cftime ) return result ds = Dataset( { "foo": ("time", np.random.randint(1, 1000, 10)), "bar": ("time", np.random.randint(1, 1000, 10)), "time": times, } ) actual = ds.resample(time=resample_freq).mean() expected = resample_as_pandas(ds, resample_freq) assert_identical(expected, actual) actual = ds.resample(time=resample_freq).reduce(np.mean) assert_identical(expected, actual) actual = ds.resample(time=resample_freq, closed="right").mean() expected = resample_as_pandas(ds, resample_freq, closed="right") assert_identical(expected, actual) with pytest.raises(ValueError, match=r"Index must be monotonic"): ds.isel(time=[2, 0, 1]).resample(time=resample_freq) reverse = ds.isel(time=slice(-1, None, -1)) with pytest.raises(ValueError): reverse.resample(time=resample_freq).mean() def test_resample_and_first(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) ds = Dataset( { "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)), "bar": ("time", np.random.randn(10), {"meta": "data"}), "time": times, } ) actual = ds.resample(time="1D").first(keep_attrs=True) expected = ds.isel(time=[0, 4, 8]) assert_identical(expected, actual) # upsampling expected_time = pd.date_range("2000-01-01", freq="3h", periods=19) expected = ds.reindex(time=expected_time) rs = ds.resample(time="3h") for how in ["mean", "sum", "first", "last"]: method = getattr(rs, how) result = method() assert_equal(expected, result) for method in [np.mean]: result = rs.reduce(method) assert_equal(expected, result) def test_resample_min_count(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) ds = Dataset( { "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)), "bar": ("time", np.random.randn(10), 
{"meta": "data"}), "time": times, } ) # inject nan ds["foo"] = xr.where(ds["foo"] > 2.0, np.nan, ds["foo"]) actual = ds.resample(time="1D").sum(min_count=1) expected = xr.concat( [ ds.isel(time=slice(i * 4, (i + 1) * 4)).sum("time", min_count=1) for i in range(3) ], dim=actual["time"], data_vars="all", ) assert_allclose(expected, actual) def test_resample_by_mean_with_keep_attrs(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) ds = Dataset( { "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)), "bar": ("time", np.random.randn(10), {"meta": "data"}), "time": times, } ) ds.attrs["dsmeta"] = "dsdata" resampled_ds = ds.resample(time="1D").mean(keep_attrs=True) actual = resampled_ds["bar"].attrs expected = ds["bar"].attrs assert expected == actual actual = resampled_ds.attrs expected = ds.attrs assert expected == actual def test_resample_by_mean_discarding_attrs(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) ds = Dataset( { "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)), "bar": ("time", np.random.randn(10), {"meta": "data"}), "time": times, } ) ds.attrs["dsmeta"] = "dsdata" resampled_ds = ds.resample(time="1D").mean(keep_attrs=False) assert resampled_ds["bar"].attrs == {} assert resampled_ds.attrs == {} def test_resample_by_last_discarding_attrs(self) -> None: times = pd.date_range("2000-01-01", freq="6h", periods=10) ds = Dataset( { "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)), "bar": ("time", np.random.randn(10), {"meta": "data"}), "time": times, } ) ds.attrs["dsmeta"] = "dsdata" resampled_ds = ds.resample(time="1D").last(keep_attrs=False) assert resampled_ds["bar"].attrs == {} assert resampled_ds.attrs == {} @requires_scipy def test_resample_drop_nondim_coords(self) -> None: xs = np.arange(6) ys = np.arange(3) times = pd.date_range("2000-01-01", freq="6h", periods=5) data = np.tile(np.arange(5), (6, 3, 1)) xx, yy = np.meshgrid(xs * 5, ys * 2.5) tt = np.arange(len(times), dtype=int) array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time")) xcoord = DataArray(xx.T, {"x": xs, "y": ys}, ("x", "y")) ycoord = DataArray(yy.T, {"x": xs, "y": ys}, ("x", "y")) tcoord = DataArray(tt, {"time": times}, ("time",)) ds = Dataset({"data": array, "xc": xcoord, "yc": ycoord, "tc": tcoord}) ds = ds.set_coords(["xc", "yc", "tc"]) # Re-sample actual = ds.resample(time="12h").mean("time") assert "tc" not in actual.coords # Up-sample - filling actual = ds.resample(time="1h").ffill() assert "tc" not in actual.coords # Up-sample - interpolation actual = ds.resample(time="1h").interpolate("linear") assert "tc" not in actual.coords def test_resample_ds_da_are_the_same(self) -> None: time = pd.date_range("2000-01-01", freq="6h", periods=365 * 4) ds = xr.Dataset( { "foo": (("time", "x"), np.random.randn(365 * 4, 5)), "time": time, "x": np.arange(5), } ) assert_allclose( ds.resample(time="ME").mean()["foo"], ds.foo.resample(time="ME").mean() ) def test_ds_resample_apply_func_args(self) -> None: def func(arg1, arg2, arg3=0.0): return arg1.mean("time") + arg2 + arg3 times = pd.date_range("2000", freq="D", periods=3) ds = xr.Dataset({"foo": ("time", [1.0, 1.0, 1.0]), "time": times}) expected = xr.Dataset({"foo": ("time", [3.0, 3.0, 3.0]), "time": times}) actual = ds.resample(time="D").map(func, args=(1.0,), arg3=1.0) assert_identical(expected, actual) def test_groupby_cumsum() -> None: ds = xr.Dataset( {"foo": (("x",), [7, 3, 1, 1, 1, 1, 1])}, coords={"x": [0, 1, 2, 3, 4, 5, 6], "group_id": ("x", [0, 0, 1, 1, 2, 2, 2])}, ) actual = 
ds.groupby("group_id").cumsum(dim="x") expected = xr.Dataset( { "foo": (("x",), [7, 10, 1, 2, 1, 2, 3]), }, coords={ "x": [0, 1, 2, 3, 4, 5, 6], "group_id": ds.group_id, }, ) # TODO: Remove drop_vars when GH6528 is fixed # when Dataset.cumsum propagates indexes, and the group variable? assert_identical(expected.drop_vars(["x", "group_id"]), actual) actual = ds.foo.groupby("group_id").cumsum(dim="x") expected.coords["group_id"] = ds.group_id expected.coords["x"] = np.arange(7) assert_identical(expected.foo, actual) def test_groupby_cumprod() -> None: ds = xr.Dataset( {"foo": (("x",), [7, 3, 0, 1, 1, 2, 1])}, coords={"x": [0, 1, 2, 3, 4, 5, 6], "group_id": ("x", [0, 0, 1, 1, 2, 2, 2])}, ) actual = ds.groupby("group_id").cumprod(dim="x") expected = xr.Dataset( { "foo": (("x",), [7, 21, 0, 0, 1, 2, 2]), }, coords={ "x": [0, 1, 2, 3, 4, 5, 6], "group_id": ds.group_id, }, ) # TODO: Remove drop_vars when GH6528 is fixed # when Dataset.cumsum propagates indexes, and the group variable? assert_identical(expected.drop_vars(["x", "group_id"]), actual) actual = ds.foo.groupby("group_id").cumprod(dim="x") expected.coords["group_id"] = ds.group_id expected.coords["x"] = np.arange(7) assert_identical(expected.foo, actual) @pytest.mark.parametrize( "method, expected_array", [ ("cumsum", [1.0, 2.0, 5.0, 6.0, 2.0, 2.0]), ("cumprod", [1.0, 2.0, 6.0, 6.0, 2.0, 2.0]), ], ) def test_resample_cumsum(method: str, expected_array: list[float]) -> None: ds = xr.Dataset( {"foo": ("time", [1, 2, 3, 1, 2, np.nan])}, coords={ "time": xr.date_range("01-01-2001", freq="ME", periods=6, use_cftime=False), }, ) actual = getattr(ds.resample(time="3ME"), method)(dim="time") expected = xr.Dataset( {"foo": (("time",), expected_array)}, coords={ "time": xr.date_range("01-01-2001", freq="ME", periods=6, use_cftime=False), }, ) # TODO: Remove drop_vars when GH6528 is fixed # when Dataset.cumsum propagates indexes, and the group variable? 
assert_identical(expected.drop_vars(["time"]), actual) actual = getattr(ds.foo.resample(time="3ME"), method)(dim="time") expected.coords["time"] = ds.time assert_identical(expected.drop_vars(["time"]).foo, actual) def test_groupby_binary_op_regression() -> None: # regression test for #7797 # monthly timeseries that should return "zero anomalies" everywhere time = xr.date_range("2023-01-01", "2023-12-31", freq="MS") data = np.linspace(-1, 1, 12) x = xr.DataArray(data, coords={"time": time}) clim = xr.DataArray(data, coords={"month": np.arange(1, 13, 1)}) # seems to give the correct result if we use the full x, but not with a slice x_slice = x.sel(time=["2023-04-01"]) # two typical ways of computing anomalies anom_gb = x_slice.groupby("time.month") - clim assert_identical(xr.zeros_like(anom_gb), anom_gb) def test_groupby_multiindex_level() -> None: # GH6836 midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two")) mda = xr.DataArray(np.random.rand(6, 3), [("x", midx), ("y", range(3))]) groups = mda.groupby("one").groups assert groups == {"a": [0, 1], "b": [2, 3], "c": [4, 5]} @requires_flox @pytest.mark.parametrize("func", ["sum", "prod"]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("min_count", [None, 1]) def test_min_count_vs_flox(func: str, min_count: int | None, skipna: bool) -> None: da = DataArray( data=np.array([np.nan, 1, 1, np.nan, 1, 1]), dims="x", coords={"labels": ("x", np.array([1, 2, 3, 1, 2, 3]))}, ) gb = da.groupby("labels") method = operator.methodcaller(func, min_count=min_count, skipna=skipna) with xr.set_options(use_flox=True): actual = method(gb) with xr.set_options(use_flox=False): expected = method(gb) assert_identical(actual, expected) @pytest.mark.parametrize("use_flox", [True, False]) def test_min_count_error(use_flox: bool) -> None: if use_flox and not has_flox: pytest.skip() da = DataArray( data=np.array([np.nan, 1, 1, np.nan, 1, 1]), dims="x", coords={"labels": ("x", np.array([1, 2, 3, 1, 2, 3]))}, ) with xr.set_options(use_flox=use_flox): with pytest.raises(TypeError): da.groupby("labels").mean(min_count=1) @requires_dask def test_groupby_math_auto_chunk() -> None: da = xr.DataArray( [[1, 2, 3], [1, 2, 3], [1, 2, 3]], dims=("y", "x"), coords={"label": ("x", [2, 2, 1])}, ) sub = xr.DataArray( InaccessibleArray(np.array([1, 2])), dims="label", coords={"label": [1, 2]} ) chunked = da.chunk(x=1, y=2) chunked.label.load() actual = chunked.groupby("label") - sub assert actual.chunksizes == {"x": (1, 1, 1), "y": (2, 1)} @pytest.mark.parametrize("use_flox", [True, False]) def test_groupby_dim_no_dim_equal(use_flox: bool) -> None: # https://github.com/pydata/xarray/issues/8263 da = DataArray( data=[1, 2, 3, 4], dims="lat", coords={"lat": np.linspace(0, 1.01, 4)} ) with xr.set_options(use_flox=use_flox): actual1 = da.drop_vars("lat").groupby("lat").sum() actual2 = da.groupby("lat").sum() assert_identical(actual1, actual2.drop_vars("lat")) @requires_flox def test_default_flox_method() -> None: import flox.xarray da = xr.DataArray([1, 2, 3], dims="x", coords={"label": ("x", [2, 2, 1])}) result = xr.DataArray([3, 3], dims="label", coords={"label": [1, 2]}) with mock.patch("flox.xarray.xarray_reduce", return_value=result) as mocked_reduce: da.groupby("label").sum() kwargs = mocked_reduce.call_args.kwargs if Version(flox.__version__) < Version("0.9.0"): assert kwargs["method"] == "cohorts" else: assert "method" not in kwargs @requires_cftime @pytest.mark.filterwarnings("ignore") def test_cftime_resample_gh_9108() -> None: 
import cftime ds = Dataset( {"pr": ("time", np.random.random((10,)))}, coords={"time": xr.date_range("0001-01-01", periods=10, freq="D")}, ) actual = ds.resample(time="ME").mean() expected = ds.mean("time").expand_dims( time=[cftime.DatetimeGregorian(1, 1, 31, 0, 0, 0, 0, has_year_zero=False)] ) assert actual.time.data[0].has_year_zero == ds.time.data[0].has_year_zero assert_equal(actual, expected) def test_custom_grouper() -> None: class YearGrouper(Grouper): """ An example re-implementation of ``.groupby("time.year")``. """ def factorize(self, group) -> EncodedGroups: assert np.issubdtype(group.dtype, np.datetime64) year = group.dt.year.data codes_, uniques = pd.factorize(year) codes = group.copy(data=codes_).rename("year") return EncodedGroups(codes=codes, full_index=pd.Index(uniques)) def reset(self): return type(self)() da = xr.DataArray( dims="time", data=np.arange(20), coords={"time": ("time", pd.date_range("2000-01-01", freq="3MS", periods=20))}, name="foo", ) ds = da.to_dataset() expected = ds.groupby("time.year").mean() actual = ds.groupby(time=YearGrouper()).mean() assert_identical(expected, actual) actual = ds.groupby({"time": YearGrouper()}).mean() assert_identical(expected, actual) expected = ds.foo.groupby("time.year").mean() actual = ds.foo.groupby(time=YearGrouper()).mean() assert_identical(expected, actual) actual = ds.foo.groupby({"time": YearGrouper()}).mean() assert_identical(expected, actual) for obj in [ds, ds.foo]: with pytest.raises(ValueError): obj.groupby("time.year", time=YearGrouper()) with pytest.raises(ValueError): obj.groupby() @pytest.mark.parametrize("use_flox", [True, False]) def test_weather_data_resample(use_flox): # from the docs times = pd.date_range("2000-01-01", "2001-12-31", name="time") annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28)) base = 10 + 15 * annual_cycle.reshape(-1, 1) tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3) tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3) ds = xr.Dataset( { "tmin": (("time", "location"), tmin_values), "tmax": (("time", "location"), tmax_values), }, { "time": ("time", times, {"time_key": "time_values"}), "location": ("location", ["IA", "IN", "IL"], {"loc_key": "loc_value"}), }, ) with xr.set_options(use_flox=use_flox): actual = ds.resample(time="1MS").mean() assert "location" in actual._indexes gb = ds.groupby(time=TimeResampler(freq="1MS"), location=UniqueGrouper()) with xr.set_options(use_flox=use_flox): actual = gb.mean() expected = ds.resample(time="1MS").mean().sortby("location") assert_allclose(actual, expected) assert actual.time.attrs == ds.time.attrs assert actual.location.attrs == ds.location.attrs assert expected.time.attrs == ds.time.attrs assert expected.location.attrs == ds.location.attrs @pytest.mark.parametrize("as_dataset", [True, False]) def test_multiple_groupers_string(as_dataset) -> None: obj = DataArray( np.array([1, 2, 3, 0, 2, np.nan]), dims="d", coords=dict( labels1=("d", np.array(["a", "b", "c", "c", "b", "a"])), labels2=("d", np.array(["x", "y", "z", "z", "y", "x"])), ), name="foo", ) if as_dataset: obj = obj.to_dataset() # type: ignore[assignment] expected = obj.groupby(labels1=UniqueGrouper(), labels2=UniqueGrouper()).mean() actual = obj.groupby(("labels1", "labels2")).mean() assert_identical(expected, actual) # Passes `"labels2"` to squeeze; will raise an error around kwargs rather than the # warning & type error in the future with pytest.warns(FutureWarning): with pytest.raises(TypeError): obj.groupby("labels1", "labels2") 
# type: ignore[arg-type, misc] with pytest.raises(ValueError): obj.groupby("labels1", foo="bar") # type: ignore[arg-type] with pytest.raises(ValueError): obj.groupby("labels1", foo=UniqueGrouper()) @pytest.mark.parametrize("shuffle", [True, False]) @pytest.mark.parametrize("use_flox", [True, False]) def test_multiple_groupers(use_flox: bool, shuffle: bool) -> None: da = DataArray( np.array([1, 2, 3, 0, 2, np.nan]), dims="d", coords=dict( labels1=("d", np.array(["a", "b", "c", "c", "b", "a"])), labels2=("d", np.array(["x", "y", "z", "z", "y", "x"])), ), name="foo", ) groupers: dict[str, Grouper] groupers = dict(labels1=UniqueGrouper(), labels2=UniqueGrouper()) gb = da.groupby(groupers) if shuffle: gb = gb.shuffle_to_chunks().groupby(groupers) repr(gb) expected = DataArray( np.array([[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 1.5]]), dims=("labels1", "labels2"), coords={ "labels1": np.array(["a", "b", "c"], dtype=object), "labels2": np.array(["x", "y", "z"], dtype=object), }, name="foo", ) with xr.set_options(use_flox=use_flox): actual = gb.mean() assert_identical(actual, expected) # ------- coords = {"a": ("x", [0, 0, 1, 1]), "b": ("y", [0, 0, 1, 1])} square = DataArray(np.arange(16).reshape(4, 4), coords=coords, dims=["x", "y"]) groupers = dict(a=UniqueGrouper(), b=UniqueGrouper()) gb = square.groupby(groupers) if shuffle: gb = gb.shuffle_to_chunks().groupby(groupers) repr(gb) with xr.set_options(use_flox=use_flox): actual = gb.mean() expected = DataArray( np.array([[2.5, 4.5], [10.5, 12.5]]), dims=("a", "b"), coords={"a": [0, 1], "b": [0, 1]}, ) assert_identical(actual, expected) expected = square.astype(np.float64) expected["a"], expected["b"] = broadcast(square.a, square.b) with xr.set_options(use_flox=use_flox): assert_identical( square.groupby(x=UniqueGrouper(), y=UniqueGrouper()).mean(), expected ) b = xr.DataArray( np.random.default_rng(0).random((2, 3, 4)), coords={"xy": (("x", "y"), [["a", "b", "c"], ["b", "c", "c"]], {"foo": "bar"})}, dims=["x", "y", "z"], ) groupers = dict(x=UniqueGrouper(), y=UniqueGrouper()) gb = b.groupby(groupers) if shuffle: gb = gb.shuffle_to_chunks().groupby(groupers) repr(gb) with xr.set_options(use_flox=use_flox): assert_identical(gb.mean("z"), b.mean("z")) groupers = dict(x=UniqueGrouper(), xy=UniqueGrouper()) gb = b.groupby(groupers) if shuffle: gb = gb.shuffle_to_chunks().groupby(groupers) repr(gb) with xr.set_options(use_flox=use_flox): actual = gb.mean() expected = b.drop_vars("xy").rename({"y": "xy"}).copy(deep=True) newval = b.isel(x=1, y=slice(1, None)).mean("y").data expected.loc[dict(x=1, xy=1)] = expected.sel(x=1, xy=0).data expected.loc[dict(x=1, xy=0)] = np.nan expected.loc[dict(x=1, xy=2)] = newval expected["xy"] = ("xy", ["a", "b", "c"], {"foo": "bar"}) # TODO: is order of dims correct? 
assert_identical(actual, expected.transpose("z", "x", "xy")) if has_dask: b["xy"] = b["xy"].chunk() expected = xr.DataArray( [[[1, 1, 1], [np.nan, 1, 2]]] * 4, dims=("z", "x", "xy"), coords={"xy": ("xy", ["a", "b", "c"], {"foo": "bar"})}, ) with raise_if_dask_computes(max_computes=0): gb = b.groupby(x=UniqueGrouper(), xy=UniqueGrouper(labels=["a", "b", "c"])) assert is_chunked_array(gb.encoded.codes.data) assert not gb.encoded.group_indices if has_flox: with raise_if_dask_computes(max_computes=1): assert_identical(gb.count(), expected) else: with pytest.raises(ValueError, match="when lazily grouping"): gb.count() @pytest.mark.parametrize("use_flox", [True, False]) @pytest.mark.parametrize("shuffle", [True, False]) def test_multiple_groupers_mixed(use_flox: bool, shuffle: bool) -> None: # This groupby has missing groups ds = xr.Dataset( {"foo": (("x", "y"), np.arange(12).reshape((4, 3)))}, coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))}, ) groupers: dict[str, Grouper] = dict( x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper() ) gb = ds.groupby(groupers) if shuffle: gb = gb.shuffle_to_chunks().groupby(groupers) expected_data = np.array( [ [[0.0, np.nan], [np.nan, 3.0]], [[1.0, np.nan], [np.nan, 4.0]], [[2.0, np.nan], [np.nan, 5.0]], ] ) expected = xr.Dataset( {"foo": (("y", "x_bins", "letters"), expected_data)}, coords={ "x_bins": ( "x_bins", np.array( [ pd.Interval(5, 15, closed="right"), pd.Interval(15, 25, closed="right"), ], dtype=object, ), ), "letters": ("letters", np.array(["a", "b"], dtype=object)), }, ) with xr.set_options(use_flox=use_flox): actual = gb.sum() assert_identical(actual, expected) # assert_identical( # b.groupby(['x', 'y']).apply(lambda x: x - x.mean()), # b - b.mean("z"), # ) # gb = square.groupby(x=UniqueGrouper(), y=UniqueGrouper()) # gb - gb.mean() # ------ @requires_flox_0_9_12 @pytest.mark.parametrize( "reduction", ["max", "min", "nanmax", "nanmin", "sum", "nansum", "prod", "nanprod"] ) def test_groupby_preserve_dtype(reduction): # all groups are present, we should follow numpy exactly ds = xr.Dataset( { "test": ( ["x", "y"], np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="int16"), ) }, coords={"idx": ("x", [1, 2, 1])}, ) kwargs = {} if "nan" in reduction: kwargs["skipna"] = True # TODO: fix dtype with numbagg/bottleneck and use_flox=False with xr.set_options(use_numbagg=False, use_bottleneck=False): actual = getattr(ds.groupby("idx"), reduction.removeprefix("nan"))( **kwargs ).test.dtype expected = getattr(np, reduction)(ds.test.data, axis=0).dtype assert actual == expected @requires_dask @requires_flox_0_9_12 @pytest.mark.parametrize("reduction", ["any", "all", "count"]) def test_gappy_resample_reductions(reduction): # GH8090 dates = (("1988-12-01", "1990-11-30"), ("2000-12-01", "2001-11-30")) times = [xr.date_range(*d, freq="D") for d in dates] da = xr.concat( [ xr.DataArray(np.random.rand(len(t)), coords={"time": t}, dims="time") for t in times ], dim="time", ).chunk(time=100) rs = (da > 0.5).resample(time="YS-DEC") method = getattr(rs, reduction) with xr.set_options(use_flox=True): actual = method(dim="time") with xr.set_options(use_flox=False): expected = method(dim="time") assert_identical(expected, actual) def test_groupby_transpose() -> None: # GH5361 data = xr.DataArray( np.random.randn(4, 2), dims=["x", "z"], coords={"x": ["a", "b", "a", "c"], "y": ("x", [0, 1, 0, 2])}, ) first = data.T.groupby("x").sum() second = data.groupby("x").sum() assert_identical(first, second.transpose(*first.dims)) @requires_dask 
@pytest.mark.parametrize( "grouper, expect_index", [ [UniqueGrouper(labels=np.arange(1, 5)), pd.Index(np.arange(1, 5))], [UniqueGrouper(labels=np.arange(1, 5)[::-1]), pd.Index(np.arange(1, 5)[::-1])], [ BinGrouper(bins=np.arange(1, 5)), pd.IntervalIndex.from_breaks(np.arange(1, 5)), ], ], ) def test_lazy_grouping(grouper, expect_index): import dask.array data = DataArray( dims=("x", "y"), data=dask.array.arange(20, chunks=3).reshape((4, 5)), name="zoo", ) with raise_if_dask_computes(): encoded = grouper.factorize(data) assert encoded.codes.ndim == data.ndim pd.testing.assert_index_equal(encoded.full_index, expect_index) np.testing.assert_array_equal(encoded.unique_coord.values, np.array(expect_index)) eager = ( xr.Dataset({"foo": data}, coords={"zoo": data.compute()}) .groupby(zoo=grouper) .count() ) expected = Dataset( {"foo": (encoded.codes.name, np.ones(encoded.full_index.size))}, coords={encoded.codes.name: expect_index}, ) assert_identical(eager, expected) if has_flox: lazy = ( xr.Dataset({"foo": data}, coords={"zoo": data}).groupby(zoo=grouper).count() ) assert_identical(eager, lazy) @requires_dask def test_lazy_grouping_errors() -> None: import dask.array data = DataArray( dims=("x",), data=dask.array.arange(20, chunks=3), name="foo", coords={"y": ("x", dask.array.arange(20, chunks=3))}, ) gb = data.groupby(y=UniqueGrouper(labels=np.arange(5, 10))) message = "not supported when lazily grouping by" with pytest.raises(ValueError, match=message): gb.map(lambda x: x) with pytest.raises(ValueError, match=message): gb.reduce(np.mean) with pytest.raises(ValueError, match=message): for _, _ in gb: pass @requires_dask def test_lazy_int_bins_error() -> None: import dask.array with pytest.raises(ValueError, match="Bin edges must be provided"): with raise_if_dask_computes(): _ = BinGrouper(bins=4).factorize(DataArray(dask.array.arange(3))) def test_time_grouping_seasons_specified() -> None: time = xr.date_range("2001-01-01", "2002-01-01", freq="D") ds = xr.Dataset({"foo": np.arange(time.size)}, coords={"time": ("time", time)}) labels = ["DJF", "MAM", "JJA", "SON"] actual = ds.groupby({"time.season": UniqueGrouper(labels=labels)}).sum() expected = ds.groupby("time.season").sum() assert_identical(actual, expected.reindex(season=labels)) def test_multiple_grouper_unsorted_order() -> None: time = xr.date_range("2001-01-01", "2003-01-01", freq="MS") ds = xr.Dataset({"foo": np.arange(time.size)}, coords={"time": ("time", time)}) labels = ["DJF", "MAM", "JJA", "SON"] actual = ds.groupby( { "time.season": UniqueGrouper(labels=labels), "time.year": UniqueGrouper(labels=[2002, 2001]), } ).sum() expected = ( ds.groupby({"time.season": UniqueGrouper(), "time.year": UniqueGrouper()}) .sum() .reindex(season=labels, year=[2002, 2001]) ) assert_identical(actual, expected.reindex(season=labels)) b = xr.DataArray( np.random.default_rng(0).random((2, 3, 4)), coords={"x": [0, 1], "y": [0, 1, 2]}, dims=["x", "y", "z"], ) actual2 = b.groupby( x=UniqueGrouper(labels=[1, 0]), y=UniqueGrouper(labels=[2, 0, 1]) ).sum() expected2 = b.reindex(x=[1, 0], y=[2, 0, 1]).transpose("z", ...) 
assert_identical(actual2, expected2) def test_groupby_multiple_bin_grouper_missing_groups() -> None: from numpy import nan ds = xr.Dataset( {"foo": (("z"), np.arange(12))}, coords={"x": ("z", np.arange(12)), "y": ("z", np.arange(12))}, ) actual = ds.groupby( x=BinGrouper(np.arange(0, 13, 4)), y=BinGrouper(bins=np.arange(0, 16, 2)) ).count() expected = Dataset( { "foo": ( ("x_bins", "y_bins"), np.array( [ [2.0, 2.0, nan, nan, nan, nan, nan], [nan, nan, 2.0, 2.0, nan, nan, nan], [nan, nan, nan, nan, 2.0, 1.0, nan], ] ), ) }, coords={ "x_bins": ("x_bins", pd.IntervalIndex.from_breaks(np.arange(0, 13, 4))), "y_bins": ("y_bins", pd.IntervalIndex.from_breaks(np.arange(0, 16, 2))), }, ) assert_identical(actual, expected) @requires_dask_ge_2024_08_1 def test_shuffle_simple() -> None: import dask da = xr.DataArray( dims="x", data=dask.array.from_array([1, 2, 3, 4, 5, 6], chunks=2), coords={"label": ("x", ["a", "b", "c", "a", "b", "c"])}, ) actual = da.groupby(label=UniqueGrouper()).shuffle_to_chunks() expected = da.isel(x=[0, 3, 1, 4, 2, 5]) assert_identical(actual, expected) with pytest.raises(ValueError): da.chunk(x=2, eagerly_load_group=False).groupby("label").shuffle_to_chunks() @requires_dask_ge_2024_08_1 @pytest.mark.parametrize( "chunks, expected_chunks", [ ((1,), (1, 3, 3, 3)), ((10,), (10,)), ], ) def test_shuffle_by(chunks, expected_chunks): import dask.array da = xr.DataArray( dims="x", data=dask.array.arange(10, chunks=chunks), coords={"x": [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]}, name="a", ) ds = da.to_dataset() for obj in [ds, da]: actual = obj.groupby(x=UniqueGrouper()).shuffle_to_chunks() assert_identical(actual, obj.sortby("x")) assert actual.chunksizes["x"] == expected_chunks @requires_dask def test_groupby_dask_eager_load_warnings() -> None: ds = xr.Dataset( {"foo": (("z"), np.arange(12))}, coords={"x": ("z", np.arange(12)), "y": ("z", np.arange(12))}, ).chunk(z=6) with pytest.raises(ValueError, match="Please pass"): with pytest.warns(DeprecationWarning): ds.groupby("x", eagerly_compute_group=False) with pytest.raises(ValueError, match="Eagerly computing"): ds.groupby("x", eagerly_compute_group=True) # type: ignore[arg-type] # This is technically fine but anyone iterating over the groupby object # will see an error, so let's warn and have them opt-in. 
ds.groupby(x=UniqueGrouper(labels=[1, 2, 3])) with pytest.warns(DeprecationWarning): ds.groupby(x=UniqueGrouper(labels=[1, 2, 3]), eagerly_compute_group=False) with pytest.raises(ValueError, match="Please pass"): with pytest.warns(DeprecationWarning): ds.groupby_bins("x", bins=3, eagerly_compute_group=False) with pytest.raises(ValueError, match="Eagerly computing"): ds.groupby_bins("x", bins=3, eagerly_compute_group=True) # type: ignore[arg-type] ds.groupby_bins("x", bins=[1, 2, 3]) with pytest.warns(DeprecationWarning): ds.groupby_bins("x", bins=[1, 2, 3], eagerly_compute_group=False) class TestSeasonGrouperAndResampler: def test_season_to_month_tuple(self): assert season_to_month_tuple(["JF", "MAM", "JJAS", "OND"]) == ( (1, 2), (3, 4, 5), (6, 7, 8, 9), (10, 11, 12), ) assert season_to_month_tuple(["DJFM", "AM", "JJAS", "ON"]) == ( (12, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11), ) def test_season_grouper_raises_error_if_months_are_not_valid_or_not_continuous( self, ): calendar = "standard" time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar) da = DataArray(np.ones(time.size), dims="time", coords={"time": time}) with pytest.raises(KeyError, match="IN"): da.groupby(time=SeasonGrouper(["INVALID_SEASON"])) with pytest.raises(KeyError, match="MD"): da.groupby(time=SeasonGrouper(["MDF"])) @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_season_grouper_with_months_spanning_calendar_year_using_same_year( self, calendar ): time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar) # fmt: off data = np.array( [ 1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7, 1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75, ] ) # fmt: on da = DataArray(data, dims="time", coords={"time": time}) da["year"] = da.time.dt.year actual = da.groupby( year=UniqueGrouper(), time=SeasonGrouper(["NDJFM", "AMJ"]) ).mean() # Expected if the same year "ND" is used for seasonal grouping expected = xr.DataArray( data=np.array([[1.38, 1.616667], [1.51, 1.5]]), dims=["year", "season"], coords={"year": [2001, 2002], "season": ["NDJFM", "AMJ"]}, ) assert_allclose(expected, actual) @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_season_grouper_with_partial_years(self, calendar): time = date_range("2001-01-01", "2002-06-30", freq="MS", calendar=calendar) # fmt: off data = np.array( [ 1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7, 1.95, 1.05, 1.3, 1.55, 1.8, 1.15, ] ) # fmt: on da = DataArray(data, dims="time", coords={"time": time}) da["year"] = da.time.dt.year actual = da.groupby( year=UniqueGrouper(), time=SeasonGrouper(["NDJFM", "AMJ"]) ).mean() # Expected if partial years are handled correctly expected = xr.DataArray( data=np.array([[1.38, 1.616667], [1.43333333, 1.5]]), dims=["year", "season"], coords={"year": [2001, 2002], "season": ["NDJFM", "AMJ"]}, ) assert_allclose(expected, actual) @pytest.mark.parametrize("calendar", ["standard"]) def test_season_grouper_with_single_month_seasons(self, calendar): time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar) # fmt: off data = np.array( [ 1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7, 1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75, ] ) # fmt: on da = DataArray(data, dims="time", coords={"time": time}) da["year"] = da.time.dt.year # TODO: Consider supporting this if needed # It does not work without flox, because the group labels are not unique, # and so the stack/unstack approach does not work. 
with pytest.raises(ValueError): da.groupby( year=UniqueGrouper(), time=SeasonGrouper( ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"] ), ).mean() # Expected if single month seasons are handled correctly # expected = xr.DataArray( # data=np.array( # [ # [1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7], # [1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75], # ] # ), # dims=["year", "season"], # coords={ # "year": [2001, 2002], # "season": ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"], # }, # ) # assert_allclose(expected, actual) @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_season_grouper_with_months_spanning_calendar_year_using_previous_year( self, calendar ): time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar) # fmt: off data = np.array( [ 1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7, 1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75, ] ) # fmt: on da = DataArray(data, dims="time", coords={"time": time}) gb = da.resample(time=SeasonResampler(["NDJFM", "AMJ"], drop_incomplete=False)) actual = gb.mean() # fmt: off new_time_da = xr.DataArray( dims="time", data=pd.DatetimeIndex( [ "2000-11-01", "2001-04-01", "2001-11-01", "2002-04-01", "2002-11-01" ] ), ) # fmt: on if calendar != "standard": new_time_da = new_time_da.convert_calendar( calendar=calendar, align_on="date" ) new_time = new_time_da.time.variable # Expected if the previous "ND" is used for seasonal grouping expected = xr.DataArray( data=np.array([1.25, 1.616667, 1.49, 1.5, 1.625]), dims="time", coords={"time": new_time}, ) assert_allclose(expected, actual) @pytest.mark.parametrize("calendar", _ALL_CALENDARS) def test_season_grouper_simple(self, calendar) -> None: time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar) da = DataArray(np.ones(time.size), dims="time", coords={"time": time}) expected = da.groupby("time.season").mean() # note season order matches expected actual = da.groupby( time=SeasonGrouper( ["DJF", "JJA", "MAM", "SON"], # drop_incomplete=False ) ).mean() assert_identical(expected, actual) @pytest.mark.parametrize("seasons", [["JJA", "MAM", "SON", "DJF"]]) def test_season_resampling_raises_unsorted_seasons(self, seasons): calendar = "standard" time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar) da = DataArray(np.ones(time.size), dims="time", coords={"time": time}) with pytest.raises(ValueError, match="sort"): da.resample(time=SeasonResampler(seasons)) @pytest.mark.parametrize( "use_cftime", [pytest.param(True, marks=requires_cftime), False] ) @pytest.mark.parametrize("drop_incomplete", [True, False]) @pytest.mark.parametrize( "seasons", [ pytest.param(["DJF", "MAM", "JJA", "SON"], id="standard"), pytest.param(["NDJ", "FMA", "MJJ", "ASO"], id="nov-first"), pytest.param(["MAM", "JJA", "SON", "DJF"], id="standard-diff-order"), pytest.param(["JFM", "AMJ", "JAS", "OND"], id="december-same-year"), pytest.param(["DJF", "MAM", "JJA", "ON"], id="skip-september"), pytest.param(["JJAS"], id="jjas-only"), ], ) def test_season_resampler( self, seasons: list[str], drop_incomplete: bool, use_cftime: bool ) -> None: calendar = "standard" time = date_range( "2001-01-01", "2002-12-30", freq="D", calendar=calendar, use_cftime=use_cftime, ) da = DataArray(np.ones(time.size), dims="time", coords={"time": time}) counts = da.resample(time="ME").count() seasons_as_ints = season_to_month_tuple(seasons) month = counts.time.dt.month.data year = counts.time.dt.year.data 
for season, as_ints in zip(seasons, seasons_as_ints, strict=True): if "DJ" in season: for imonth in as_ints[season.index("D") + 1 :]: year[month == imonth] -= 1 counts["time"] = ( "time", [pd.Timestamp(f"{y}-{m}-01") for y, m in zip(year, month, strict=True)], ) if has_cftime: counts = counts.convert_calendar(calendar, "time", align_on="date") expected_vals = [] expected_time = [] for year in [2001, 2002, 2003]: for season, as_ints in zip(seasons, seasons_as_ints, strict=True): out_year = year if "DJ" in season: out_year = year - 1 if out_year == 2003: # this is a dummy year added to make sure we cover 2002-DJF continue available = [ counts.sel(time=f"{out_year}-{month:02d}").data for month in as_ints ] if any(len(a) == 0 for a in available) and drop_incomplete: continue output_label = pd.Timestamp(f"{out_year}-{as_ints[0]:02d}-01") expected_time.append(output_label) # use concatenate to handle empty array when dec value does not exist expected_vals.append(np.concatenate(available).sum()) expected = ( # we construct expected in the standard calendar xr.DataArray(expected_vals, dims="time", coords={"time": expected_time}) ) if has_cftime: # and then convert to the expected calendar, expected = expected.convert_calendar( calendar, align_on="date", use_cftime=use_cftime ) # and finally sort since DJF will be out-of-order expected = expected.sortby("time") rs = SeasonResampler(seasons, drop_incomplete=drop_incomplete) # through resample actual = da.resample(time=rs).sum() assert_identical(actual, expected) @requires_cftime def test_season_resampler_errors(self): time = date_range("2001-01-01", "2002-12-30", freq="D", calendar="360_day") da = DataArray(np.ones(time.size), dims="time", coords={"time": time}) # non-datetime array with pytest.raises(ValueError): DataArray(np.ones(5), dims="time").groupby(time=SeasonResampler(["DJF"])) # ndim > 1 array with pytest.raises(ValueError): DataArray( np.ones((5, 5)), dims=("t", "x"), coords={"x": np.arange(5)} ).groupby(x=SeasonResampler(["DJF"])) # overlapping seasons with pytest.raises(ValueError): da.groupby(time=SeasonResampler(["DJFM", "MAMJ", "JJAS", "SOND"])).sum() @requires_cftime def test_season_resampler_groupby_identical(self): time = date_range("2001-01-01", "2002-12-30", freq="D") da = DataArray(np.ones(time.size), dims="time", coords={"time": time}) # through resample resampler = SeasonResampler(["DJF", "MAM", "JJA", "SON"]) rs = da.resample(time=resampler).sum() # through groupby gb = da.groupby(time=resampler).sum() assert_identical(rs, gb) # TODO: Possible property tests to add to this module # 1. lambda x: x # 2. grouped-reduce on unique coords is identical to array # 3. group_over == groupby-reduce along other dimensions # 4. 
result is equivalent for transposed input xarray-2025.09.0/xarray/tests/test_hashable.py000066400000000000000000000021321505620616400211720ustar00rootroot00000000000000from __future__ import annotations from enum import Enum from typing import TYPE_CHECKING, Union import pytest from xarray import DataArray, Dataset, Variable if TYPE_CHECKING: from xarray.core.types import TypeAlias DimT: TypeAlias = Union[int, tuple, "DEnum", "CustomHashable"] class DEnum(Enum): dim = "dim" class CustomHashable: def __init__(self, a: int) -> None: self.a = a def __hash__(self) -> int: return self.a parametrize_dim = pytest.mark.parametrize( "dim", [ pytest.param(5, id="int"), pytest.param(("a", "b"), id="tuple"), pytest.param(DEnum.dim, id="enum"), pytest.param(CustomHashable(3), id="HashableObject"), ], ) @parametrize_dim def test_hashable_dims(dim: DimT) -> None: v = Variable([dim], [1, 2, 3]) da = DataArray([1, 2, 3], dims=[dim]) Dataset({"a": ([dim], [1, 2, 3])}) # alternative constructors DataArray(v) Dataset({"a": v}) Dataset({"a": da}) @parametrize_dim def test_dataset_variable_hashable_names(dim: DimT) -> None: Dataset({dim: ("x", [1, 2, 3])}) xarray-2025.09.0/xarray/tests/test_indexes.py000066400000000000000000000676421505620616400211030ustar00rootroot00000000000000from __future__ import annotations import copy from datetime import datetime from typing import Any import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding.cftimeindex import CFTimeIndex from xarray.core.indexes import ( Hashable, Index, Indexes, PandasIndex, PandasMultiIndex, _asarray_tuplesafe, safe_cast_to_index, ) from xarray.core.variable import IndexVariable, Variable from xarray.tests import assert_array_equal, assert_identical, requires_cftime from xarray.tests.test_coding_times import _all_cftime_date_types def test_asarray_tuplesafe() -> None: res = _asarray_tuplesafe(("a", 1)) assert isinstance(res, np.ndarray) assert res.ndim == 0 assert res.item() == ("a", 1) res = _asarray_tuplesafe([(0,), (1,)]) assert res.shape == (2,) assert res[0] == (0,) assert res[1] == (1,) class CustomIndex(Index): def __init__(self, dims) -> None: self.dims = dims class TestIndex: @pytest.fixture def index(self) -> CustomIndex: return CustomIndex({"x": 2}) def test_from_variables(self) -> None: with pytest.raises(NotImplementedError): Index.from_variables({}, options={}) def test_concat(self) -> None: with pytest.raises(NotImplementedError): Index.concat([], "x") def test_stack(self) -> None: with pytest.raises(NotImplementedError): Index.stack({}, "x") def test_unstack(self, index) -> None: with pytest.raises(NotImplementedError): index.unstack() def test_create_variables(self, index) -> None: assert index.create_variables() == {} assert index.create_variables({"x": "var"}) == {"x": "var"} def test_to_pandas_index(self, index) -> None: with pytest.raises(TypeError): index.to_pandas_index() def test_isel(self, index) -> None: assert index.isel({}) is None def test_sel(self, index) -> None: with pytest.raises(NotImplementedError): index.sel({}) def test_join(self, index) -> None: with pytest.raises(NotImplementedError): index.join(CustomIndex({"y": 2})) def test_reindex_like(self, index) -> None: with pytest.raises(NotImplementedError): index.reindex_like(CustomIndex({"y": 2})) def test_equals(self, index) -> None: with pytest.raises(NotImplementedError): index.equals(CustomIndex({"y": 2})) def test_roll(self, index) -> None: assert index.roll({}) is None def test_rename(self, index) -> None: assert 
index.rename({}, {}) is index @pytest.mark.parametrize("deep", [True, False]) def test_copy(self, index, deep) -> None: copied = index.copy(deep=deep) assert isinstance(copied, CustomIndex) assert copied is not index copied.dims["x"] = 3 if deep: assert copied.dims != index.dims assert copied.dims != copy.deepcopy(index).dims else: assert copied.dims is index.dims assert copied.dims is copy.copy(index).dims def test_getitem(self, index) -> None: with pytest.raises(NotImplementedError): index[:] class TestPandasIndex: def test_constructor(self) -> None: pd_idx = pd.Index([1, 2, 3]) index = PandasIndex(pd_idx, "x") assert index.index.equals(pd_idx) # makes a shallow copy assert index.index is not pd_idx assert index.dim == "x" # test no name set for pd.Index pd_idx.name = None index = PandasIndex(pd_idx, "x") assert index.index.name == "x" def test_from_variables(self) -> None: # pandas has only Float64Index but variable dtype should be preserved data = np.array([1.1, 2.2, 3.3], dtype=np.float32) var = xr.Variable( "x", data, attrs={"unit": "m"}, encoding={"dtype": np.float64} ) index = PandasIndex.from_variables({"x": var}, options={}) assert index.dim == "x" assert index.index.equals(pd.Index(data)) assert index.coord_dtype == data.dtype var2 = xr.Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]]) with pytest.raises(ValueError, match=r".*only accepts one variable.*"): PandasIndex.from_variables({"x": var, "foo": var2}, options={}) with pytest.raises( ValueError, match=r".*cannot set a PandasIndex.*scalar variable.*" ): PandasIndex.from_variables({"foo": xr.Variable((), 1)}, options={}) with pytest.raises( ValueError, match=r".*only accepts a 1-dimensional variable.*" ): PandasIndex.from_variables({"foo": var2}, options={}) def test_from_variables_index_adapter(self) -> None: # test index type is preserved when variable wraps a pd.Index data = pd.Series(["foo", "bar"], dtype="category") pd_idx = pd.Index(data) var = xr.Variable("x", pd_idx) index = PandasIndex.from_variables({"x": var}, options={}) assert isinstance(index.index, pd.CategoricalIndex) def test_concat_periods(self): periods = pd.period_range("2000-01-01", periods=10) indexes = [PandasIndex(periods[:5], "t"), PandasIndex(periods[5:], "t")] expected = PandasIndex(periods, "t") actual = PandasIndex.concat(indexes, dim="t") assert actual.equals(expected) assert isinstance(actual.index, pd.PeriodIndex) positions = [list(range(5)), list(range(5, 10))] actual = PandasIndex.concat(indexes, dim="t", positions=positions) assert actual.equals(expected) assert isinstance(actual.index, pd.PeriodIndex) @pytest.mark.parametrize("dtype", [str, bytes]) def test_concat_str_dtype(self, dtype) -> None: a = PandasIndex(np.array(["a"], dtype=dtype), "x", coord_dtype=dtype) b = PandasIndex(np.array(["b"], dtype=dtype), "x", coord_dtype=dtype) expected = PandasIndex( np.array(["a", "b"], dtype=dtype), "x", coord_dtype=dtype ) actual = PandasIndex.concat([a, b], "x") assert actual.equals(expected) assert np.issubdtype(actual.coord_dtype, dtype) def test_concat_empty(self) -> None: idx = PandasIndex.concat([], "x") assert idx.coord_dtype is np.dtype("O") def test_concat_dim_error(self) -> None: indexes = [PandasIndex([0, 1], "x"), PandasIndex([2, 3], "y")] with pytest.raises(ValueError, match=r"Cannot concatenate.*dimensions.*"): PandasIndex.concat(indexes, "x") def test_create_variables(self) -> None: # pandas has only Float64Index but variable dtype should be preserved data = np.array([1.1, 2.2, 3.3], dtype=np.float32) pd_idx = pd.Index(data, name="foo") 
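        # Whatever dtype pandas uses to store these values internally, the
        # original float32 dtype travels separately via ``coord_dtype`` and is
        # expected to be restored on the variables created below.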
index = PandasIndex(pd_idx, "x", coord_dtype=data.dtype) index_vars = { "foo": IndexVariable( "x", data, attrs={"unit": "m"}, encoding={"fill_value": 0.0} ) } actual = index.create_variables(index_vars) assert_identical(actual["foo"], index_vars["foo"]) assert actual["foo"].dtype == index_vars["foo"].dtype assert actual["foo"].dtype == index.coord_dtype def test_to_pandas_index(self) -> None: pd_idx = pd.Index([1, 2, 3], name="foo") index = PandasIndex(pd_idx, "x") assert index.to_pandas_index() is index.index def test_sel(self) -> None: # TODO: add tests that aren't just for edge cases index = PandasIndex(pd.Index([1, 2, 3]), "x") with pytest.raises(KeyError, match=r"not all values found"): index.sel({"x": [0]}) with pytest.raises(KeyError): index.sel({"x": 0}) with pytest.raises(ValueError, match=r"does not have a MultiIndex"): index.sel({"x": {"one": 0}}) def test_sel_boolean(self) -> None: # index should be ignored and indexer dtype should not be coerced # see https://github.com/pydata/xarray/issues/5727 index = PandasIndex(pd.Index([0.0, 2.0, 1.0, 3.0]), "x") actual = index.sel({"x": [False, True, False, True]}) expected_dim_indexers = {"x": [False, True, False, True]} np.testing.assert_array_equal( actual.dim_indexers["x"], expected_dim_indexers["x"] ) def test_sel_datetime(self) -> None: index = PandasIndex( pd.to_datetime(["2000-01-01", "2001-01-01", "2002-01-01"]), "x" ) actual = index.sel({"x": "2001-01-01"}) expected_dim_indexers = {"x": 1} assert actual.dim_indexers == expected_dim_indexers actual = index.sel({"x": index.to_pandas_index().to_numpy()[1]}) assert actual.dim_indexers == expected_dim_indexers def test_sel_unsorted_datetime_index_raises(self) -> None: index = PandasIndex(pd.to_datetime(["2001", "2000", "2002"]), "x") with pytest.raises(KeyError): # pandas will try to convert this into an array indexer. We should # raise instead, so we can be sure the result of indexing with a # slice is always a view. 
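            # (An array indexer would force a copy of the underlying data,
            # whereas a basic slice is guaranteed to return a view.)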
index.sel({"x": slice("2001", "2002")}) def test_equals(self) -> None: index1 = PandasIndex([1, 2, 3], "x") index2 = PandasIndex([1, 2, 3], "x") assert index1.equals(index2) is True def test_join(self) -> None: index1 = PandasIndex(["a", "aa", "aaa"], "x", coord_dtype=" None: index1 = PandasIndex([0, 1, 2], "x") index2 = PandasIndex([1, 2, 3, 4], "x") expected = {"x": [1, 2, -1, -1]} actual = index1.reindex_like(index2) assert actual.keys() == expected.keys() np.testing.assert_array_equal(actual["x"], expected["x"]) index3 = PandasIndex([1, 1, 2], "x") with pytest.raises(ValueError, match=r".*index has duplicate values"): index3.reindex_like(index2) def test_rename(self) -> None: index = PandasIndex(pd.Index([1, 2, 3], name="a"), "x", coord_dtype=np.int32) # shortcut new_index = index.rename({}, {}) assert new_index is index new_index = index.rename({"a": "b"}, {}) assert new_index.index.name == "b" assert new_index.dim == "x" assert new_index.coord_dtype == np.int32 new_index = index.rename({}, {"x": "y"}) assert new_index.index.name == "a" assert new_index.dim == "y" assert new_index.coord_dtype == np.int32 def test_copy(self) -> None: expected = PandasIndex([1, 2, 3], "x", coord_dtype=np.int32) actual = expected.copy() assert actual.index.equals(expected.index) assert actual.index is not expected.index assert actual.dim == expected.dim assert actual.coord_dtype == expected.coord_dtype def test_getitem(self) -> None: pd_idx = pd.Index([1, 2, 3]) expected = PandasIndex(pd_idx, "x", coord_dtype=np.int32) actual = expected[1:] assert actual.index.equals(pd_idx[1:]) assert actual.dim == expected.dim assert actual.coord_dtype == expected.coord_dtype class TestPandasMultiIndex: def test_constructor(self) -> None: foo_data = np.array([0, 0, 1], dtype="int64") bar_data = np.array([1.1, 1.2, 1.3], dtype="float64") pd_idx = pd.MultiIndex.from_arrays([foo_data, bar_data], names=("foo", "bar")) index = PandasMultiIndex(pd_idx, "x") assert index.dim == "x" assert index.index.equals(pd_idx) assert index.index.names == ("foo", "bar") assert index.index.name == "x" assert index.level_coords_dtype == { "foo": foo_data.dtype, "bar": bar_data.dtype, } with pytest.raises(ValueError, match=".*conflicting multi-index level name.*"): PandasMultiIndex(pd_idx, "foo") # default level names pd_idx = pd.MultiIndex.from_arrays([foo_data, bar_data]) index = PandasMultiIndex(pd_idx, "x") assert list(index.index.names) == ["x_level_0", "x_level_1"] def test_from_variables(self) -> None: v_level1 = xr.Variable( "x", [1, 2, 3], attrs={"unit": "m"}, encoding={"dtype": np.int32} ) v_level2 = xr.Variable( "x", ["a", "b", "c"], attrs={"unit": "m"}, encoding={"dtype": "U"} ) index = PandasMultiIndex.from_variables( {"level1": v_level1, "level2": v_level2}, options={} ) expected_idx = pd.MultiIndex.from_arrays([v_level1.data, v_level2.data]) assert index.dim == "x" assert index.index.equals(expected_idx) assert index.index.name == "x" assert list(index.index.names) == ["level1", "level2"] var = xr.Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]]) with pytest.raises( ValueError, match=r".*only accepts 1-dimensional variables.*" ): PandasMultiIndex.from_variables({"var": var}, options={}) v_level3 = xr.Variable("y", [4, 5, 6]) with pytest.raises( ValueError, match=r"unmatched dimensions for multi-index variables.*" ): PandasMultiIndex.from_variables( {"level1": v_level1, "level3": v_level3}, options={} ) def test_concat(self) -> None: pd_midx = pd.MultiIndex.from_product( [[0, 1, 2], ["a", "b"]], names=("foo", "bar") ) 
level_coords_dtype = {"foo": np.int32, "bar": "=U1"} midx1 = PandasMultiIndex( pd_midx[:2], "x", level_coords_dtype=level_coords_dtype ) midx2 = PandasMultiIndex( pd_midx[2:], "x", level_coords_dtype=level_coords_dtype ) expected = PandasMultiIndex(pd_midx, "x", level_coords_dtype=level_coords_dtype) actual = PandasMultiIndex.concat([midx1, midx2], "x") assert actual.equals(expected) assert actual.level_coords_dtype == expected.level_coords_dtype def test_stack(self) -> None: prod_vars = { "x": xr.Variable("x", pd.Index(["b", "a"]), attrs={"foo": "bar"}), "y": xr.Variable("y", pd.Index([1, 3, 2])), } index_xr = PandasMultiIndex.stack(prod_vars, "z") assert index_xr.dim == "z" index_pd = index_xr.index assert isinstance(index_pd, pd.MultiIndex) # TODO: change to tuple when pandas 3 is minimum assert list(index_pd.names) == ["x", "y"] np.testing.assert_array_equal( index_pd.codes, [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] ) with pytest.raises( ValueError, match=r"conflicting dimensions for multi-index product.*" ): PandasMultiIndex.stack( {"x": xr.Variable("x", ["a", "b"]), "x2": xr.Variable("x", [1, 2])}, "z", ) def test_stack_non_unique(self) -> None: prod_vars = { "x": xr.Variable("x", pd.Index(["b", "a"]), attrs={"foo": "bar"}), "y": xr.Variable("y", pd.Index([1, 1, 2])), } index_xr = PandasMultiIndex.stack(prod_vars, "z") index_pd = index_xr.index assert isinstance(index_pd, pd.MultiIndex) np.testing.assert_array_equal( index_pd.codes, [[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1]] ) np.testing.assert_array_equal(index_pd.levels[0], ["b", "a"]) np.testing.assert_array_equal(index_pd.levels[1], [1, 2]) def test_unstack(self) -> None: pd_midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2, 3]], names=["one", "two"] ) index = PandasMultiIndex(pd_midx, "x") new_indexes, new_pd_idx = index.unstack() assert list(new_indexes) == ["one", "two"] assert new_indexes["one"].equals(PandasIndex(["a", "b"], "one")) assert new_indexes["two"].equals(PandasIndex([1, 2, 3], "two")) assert new_pd_idx.equals(pd_midx) def test_unstack_requires_unique(self) -> None: pd_midx = pd.MultiIndex.from_product([["a", "a"], [1, 2]], names=["one", "two"]) index = PandasMultiIndex(pd_midx, "x") with pytest.raises( ValueError, match="Cannot unstack MultiIndex containing duplicates" ): index.unstack() def test_create_variables(self) -> None: foo_data = np.array([0, 0, 1], dtype="int64") bar_data = np.array([1.1, 1.2, 1.3], dtype="float64") pd_idx = pd.MultiIndex.from_arrays([foo_data, bar_data], names=("foo", "bar")) index_vars = { "x": IndexVariable("x", pd_idx), "foo": IndexVariable("x", foo_data, attrs={"unit": "m"}), "bar": IndexVariable("x", bar_data, encoding={"fill_value": 0}), } index = PandasMultiIndex(pd_idx, "x") actual = index.create_variables(index_vars) for k, expected in index_vars.items(): assert_identical(actual[k], expected) assert actual[k].dtype == expected.dtype if k != "x": assert actual[k].dtype == index.level_coords_dtype[k] def test_sel(self) -> None: index = PandasMultiIndex( pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")), "x" ) # test tuples inside slice are considered as scalar indexer values actual = index.sel({"x": slice(("a", 1), ("b", 2))}) expected_dim_indexers = {"x": slice(0, 4)} assert actual.dim_indexers == expected_dim_indexers with pytest.raises(KeyError, match=r"not all values found"): index.sel({"x": [0]}) with pytest.raises(KeyError): index.sel({"x": 0}) with pytest.raises(ValueError, match=r"cannot provide labels for both.*"): index.sel({"one": 0, "x": "a"}) 
with pytest.raises( ValueError, match=r"multi-index level names \('three',\) not found in indexes", ): index.sel({"x": {"three": 0}}) with pytest.raises(IndexError): index.sel({"x": (slice(None), 1, "no_level")}) def test_join(self): midx = pd.MultiIndex.from_product([["a", "aa"], [1, 2]], names=("one", "two")) level_coords_dtype = {"one": "=U2", "two": "i"} index1 = PandasMultiIndex(midx, "x", level_coords_dtype=level_coords_dtype) index2 = PandasMultiIndex(midx[0:2], "x", level_coords_dtype=level_coords_dtype) actual = index1.join(index2) assert actual.equals(index2) assert actual.level_coords_dtype == level_coords_dtype actual = index1.join(index2, how="outer") assert actual.equals(index1) assert actual.level_coords_dtype == level_coords_dtype def test_rename(self) -> None: level_coords_dtype = {"one": " None: level_coords_dtype = {"one": "U<1", "two": np.int32} expected = PandasMultiIndex( pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")), "x", level_coords_dtype=level_coords_dtype, ) actual = expected.copy() assert actual.index.equals(expected.index) assert actual.index is not expected.index assert actual.dim == expected.dim assert actual.level_coords_dtype == expected.level_coords_dtype class TestIndexes: @pytest.fixture def indexes_and_vars(self) -> tuple[list[PandasIndex], dict[Hashable, Variable]]: x_idx = PandasIndex(pd.Index([1, 2, 3], name="x"), "x") y_idx = PandasIndex(pd.Index([4, 5, 6], name="y"), "y") z_pd_midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=["one", "two"] ) z_midx = PandasMultiIndex(z_pd_midx, "z") indexes = [x_idx, y_idx, z_midx] variables = {} for idx in indexes: variables.update(idx.create_variables()) return indexes, variables @pytest.fixture(params=["pd_index", "xr_index"]) def unique_indexes( self, request, indexes_and_vars ) -> list[PandasIndex] | list[pd.Index]: xr_indexes, _ = indexes_and_vars if request.param == "pd_index": return [idx.index for idx in xr_indexes] else: return xr_indexes @pytest.fixture def indexes( self, unique_indexes, indexes_and_vars ) -> Indexes[Index] | Indexes[pd.Index]: x_idx, y_idx, z_midx = unique_indexes indexes: dict[Any, Index] = { "x": x_idx, "y": y_idx, "z": z_midx, "one": z_midx, "two": z_midx, } _, variables = indexes_and_vars index_type = Index if isinstance(x_idx, Index) else pd.Index return Indexes(indexes, variables, index_type=index_type) def test_interface(self, unique_indexes, indexes) -> None: x_idx = unique_indexes[0] assert list(indexes) == ["x", "y", "z", "one", "two"] assert len(indexes) == 5 assert "x" in indexes assert indexes["x"] is x_idx def test_variables(self, indexes) -> None: assert tuple(indexes.variables) == ("x", "y", "z", "one", "two") def test_dims(self, indexes) -> None: assert indexes.dims == {"x": 3, "y": 3, "z": 4} def test_get_unique(self, unique_indexes, indexes) -> None: assert indexes.get_unique() == unique_indexes def test_is_multi(self, indexes) -> None: assert indexes.is_multi("one") is True assert indexes.is_multi("x") is False def test_get_all_coords(self, indexes) -> None: expected = { "z": indexes.variables["z"], "one": indexes.variables["one"], "two": indexes.variables["two"], } assert indexes.get_all_coords("one") == expected with pytest.raises(ValueError, match="errors must be.*"): indexes.get_all_coords("x", errors="invalid") with pytest.raises(ValueError, match="no index found.*"): indexes.get_all_coords("no_coord") assert indexes.get_all_coords("no_coord", errors="ignore") == {} def test_get_all_dims(self, indexes) -> None: expected = 
{"z": 4} assert indexes.get_all_dims("one") == expected def test_group_by_index(self, unique_indexes, indexes): expected = [ (unique_indexes[0], {"x": indexes.variables["x"]}), (unique_indexes[1], {"y": indexes.variables["y"]}), ( unique_indexes[2], { "z": indexes.variables["z"], "one": indexes.variables["one"], "two": indexes.variables["two"], }, ), ] assert indexes.group_by_index() == expected def test_to_pandas_indexes(self, indexes) -> None: pd_indexes = indexes.to_pandas_indexes() assert isinstance(pd_indexes, Indexes) assert all(isinstance(idx, pd.Index) for idx in pd_indexes.values()) assert indexes.variables == pd_indexes.variables def test_copy_indexes(self, indexes) -> None: copied, index_vars = indexes.copy_indexes() assert copied.keys() == indexes.keys() for new, original in zip(copied.values(), indexes.values(), strict=True): assert new.equals(original) # check unique index objects preserved assert copied["z"] is copied["one"] is copied["two"] assert index_vars.keys() == indexes.variables.keys() for new, original in zip( index_vars.values(), indexes.variables.values(), strict=True ): assert_identical(new, original) def test_safe_cast_to_index(): dates = pd.date_range("2000-01-01", periods=10) x = np.arange(5) td = x * np.timedelta64(1, "D") for expected, array in [ (dates, dates.values), (pd.Index(x, dtype=object), x.astype(object)), (pd.Index(td), td), (pd.Index(td, dtype=object), td.astype(object)), ]: actual = safe_cast_to_index(array) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype @requires_cftime def test_safe_cast_to_index_cftimeindex(): date_types = _all_cftime_date_types() for date_type in date_types.values(): dates = [date_type(1, 1, day) for day in range(1, 20)] expected = CFTimeIndex(dates) actual = safe_cast_to_index(np.array(dates)) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype assert isinstance(actual, type(expected)) # Test that datetime.datetime objects are never used in a CFTimeIndex @requires_cftime def test_safe_cast_to_index_datetime_datetime(): dates = [datetime(1, 1, day) for day in range(1, 20)] expected = pd.Index(dates) actual = safe_cast_to_index(np.array(dates)) assert_array_equal(expected, actual) assert isinstance(actual, pd.Index) @pytest.mark.parametrize("dtype", ["int32", "float32"]) def test_restore_dtype_on_multiindexes(dtype: str) -> None: foo = xr.Dataset(coords={"bar": ("bar", np.array([0, 1], dtype=dtype))}) foo = foo.stack(baz=("bar",)) assert str(foo["bar"].values.dtype) == dtype class IndexWithExtraVariables(Index): @classmethod def from_variables(cls, variables, *, options=None): return cls() def create_variables(self, variables=None): if variables is None: # For Coordinates.from_xindex(), return all variables the index can create return { "time": Variable(dims=("time",), data=[1, 2, 3]), "valid_time": Variable( dims=("time",), data=[2, 3, 4], # time + 1 attrs={"description": "time + 1"}, ), } result = dict(variables) if "time" in variables: result["valid_time"] = Variable( dims=("time",), data=variables["time"].data + 1, attrs={"description": "time + 1"}, ) return result def test_set_xindex_with_extra_variables() -> None: """Test that set_xindex raises an error when custom index creates extra variables.""" ds = xr.Dataset(coords={"time": [1, 2, 3]}).reset_index("time") # Test that set_xindex raises error for extra variables with pytest.raises(ValueError, match="extra variables 'valid_time'"): ds.set_xindex("time", IndexWithExtraVariables) def 
test_set_xindex_factory_method_pattern() -> None: ds = xr.Dataset(coords={"time": [1, 2, 3]}).reset_index("time") # Test the recommended factory method pattern coord_vars = {"time": ds._variables["time"]} index = IndexWithExtraVariables.from_variables(coord_vars) coords = xr.Coordinates.from_xindex(index) result = ds.assign_coords(coords) assert "time" in result.variables assert "valid_time" in result.variables assert_array_equal(result.valid_time.data, result.time.data + 1) xarray-2025.09.0/xarray/tests/test_indexing.py000066400000000000000000001135241505620616400212400ustar00rootroot00000000000000from __future__ import annotations import itertools from typing import Any import numpy as np import pandas as pd import pytest from xarray import DataArray, Dataset, Variable from xarray.core import indexing, nputils from xarray.core.indexes import PandasIndex, PandasMultiIndex from xarray.core.types import T_Xarray from xarray.tests import ( IndexerMaker, ReturnItem, assert_array_equal, assert_identical, raise_if_dask_computes, requires_dask, ) from xarray.tests.arrays import DuckArrayWrapper B = IndexerMaker(indexing.BasicIndexer) class TestIndexCallable: def test_getitem(self): def getter(key): return key * 2 indexer = indexing.IndexCallable(getter) assert indexer[3] == 6 assert indexer[0] == 0 assert indexer[-1] == -2 def test_setitem(self): def getter(key): return key * 2 def setter(key, value): raise NotImplementedError("Setter not implemented") indexer = indexing.IndexCallable(getter, setter) with pytest.raises(NotImplementedError): indexer[3] = 6 class TestIndexers: def set_to_zero(self, x, i): x = x.copy() x[i] = 0 return x def test_expanded_indexer(self) -> None: x = np.random.randn(10, 11, 12, 13, 14) y = np.arange(5) arr = ReturnItem() for i in [ arr[:], arr[...], arr[0, :, 10], arr[..., 10], arr[:5, ..., 0], arr[..., 0, :], arr[y], arr[y, y], arr[..., y, y], arr[..., 0, 1, 2, 3, 4], ]: j = indexing.expanded_indexer(i, x.ndim) assert_array_equal(x[i], x[j]) assert_array_equal(self.set_to_zero(x, i), self.set_to_zero(x, j)) with pytest.raises(IndexError, match=r"too many indices"): indexing.expanded_indexer(arr[1, 2, 3], 2) def test_stacked_multiindex_min_max(self) -> None: data = np.random.randn(3, 23, 4) da = DataArray( data, name="value", dims=["replicate", "rsample", "exp"], coords=dict( replicate=[0, 1, 2], exp=["a", "b", "c", "d"], rsample=list(range(23)) ), ) da2 = da.stack(sample=("replicate", "rsample")) s = da2.sample assert_array_equal(da2.loc["a", s.max()], data[2, 22, 0]) assert_array_equal(da2.loc["b", s.min()], data[0, 0, 1]) def test_group_indexers_by_index(self) -> None: mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) data = DataArray( np.zeros((4, 2, 2)), coords={"x": mindex, "y": [1, 2]}, dims=("x", "y", "z") ) data.coords["y2"] = ("y", [2.0, 3.0]) grouped_indexers = indexing.group_indexers_by_index( data, {"z": 0, "one": "a", "two": 1, "y": 0}, {} ) for idx, indexers in grouped_indexers: if idx is None: assert indexers == {"z": 0} elif idx.equals(data.xindexes["x"]): assert indexers == {"one": "a", "two": 1} elif idx.equals(data.xindexes["y"]): assert indexers == {"y": 0} assert len(grouped_indexers) == 3 with pytest.raises(KeyError, match=r"no index found for coordinate 'y2'"): indexing.group_indexers_by_index(data, {"y2": 2.0}, {}) with pytest.raises( KeyError, match=r"'w' is not a valid dimension or coordinate" ): indexing.group_indexers_by_index(data, {"w": "a"}, {}) with pytest.raises(ValueError, match=r"cannot supply.*"): 
indexing.group_indexers_by_index(data, {"z": 1}, {"method": "nearest"}) def test_map_index_queries(self) -> None: def create_sel_results( x_indexer, x_index, other_vars, drop_coords, drop_indexes, rename_dims, ): dim_indexers = {"x": x_indexer} index_vars = x_index.create_variables() indexes = dict.fromkeys(index_vars, x_index) variables = {} variables.update(index_vars) variables.update(other_vars) return indexing.IndexSelResult( dim_indexers=dim_indexers, indexes=indexes, variables=variables, drop_coords=drop_coords, drop_indexes=drop_indexes, rename_dims=rename_dims, ) def test_indexer( data: T_Xarray, x: Any, expected: indexing.IndexSelResult, ) -> None: results = indexing.map_index_queries(data, {"x": x}) assert results.dim_indexers.keys() == expected.dim_indexers.keys() assert_array_equal(results.dim_indexers["x"], expected.dim_indexers["x"]) assert results.indexes.keys() == expected.indexes.keys() for k in results.indexes: assert results.indexes[k].equals(expected.indexes[k]) assert results.variables.keys() == expected.variables.keys() for k in results.variables: assert_array_equal(results.variables[k], expected.variables[k]) assert set(results.drop_coords) == set(expected.drop_coords) assert set(results.drop_indexes) == set(expected.drop_indexes) assert results.rename_dims == expected.rename_dims data = Dataset({"x": ("x", [1, 2, 3])}) mindex = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three") ) mdata = DataArray(range(8), [("x", mindex)]) test_indexer(data, 1, indexing.IndexSelResult({"x": 0})) test_indexer(data, np.int32(1), indexing.IndexSelResult({"x": 0})) test_indexer(data, Variable([], 1), indexing.IndexSelResult({"x": 0})) test_indexer(mdata, ("a", 1, -1), indexing.IndexSelResult({"x": 0})) expected = create_sel_results( [True, True, False, False, False, False, False, False], PandasIndex(pd.Index([-1, -2]), "three"), {"one": Variable((), "a"), "two": Variable((), 1)}, ["x"], ["one", "two"], {"x": "three"}, ) test_indexer(mdata, ("a", 1), expected) expected = create_sel_results( slice(0, 4, None), PandasMultiIndex( pd.MultiIndex.from_product([[1, 2], [-1, -2]], names=("two", "three")), "x", ), {"one": Variable((), "a")}, [], ["one"], {}, ) test_indexer(mdata, "a", expected) expected = create_sel_results( [True, True, True, True, False, False, False, False], PandasMultiIndex( pd.MultiIndex.from_product([[1, 2], [-1, -2]], names=("two", "three")), "x", ), {"one": Variable((), "a")}, [], ["one"], {}, ) test_indexer(mdata, ("a",), expected) test_indexer( mdata, [("a", 1, -1), ("b", 2, -2)], indexing.IndexSelResult({"x": [0, 7]}) ) test_indexer( mdata, slice("a", "b"), indexing.IndexSelResult({"x": slice(0, 8, None)}) ) test_indexer( mdata, slice(("a", 1), ("b", 1)), indexing.IndexSelResult({"x": slice(0, 6, None)}), ) test_indexer( mdata, {"one": "a", "two": 1, "three": -1}, indexing.IndexSelResult({"x": 0}), ) expected = create_sel_results( [True, True, False, False, False, False, False, False], PandasIndex(pd.Index([-1, -2]), "three"), {"one": Variable((), "a"), "two": Variable((), 1)}, ["x"], ["one", "two"], {"x": "three"}, ) test_indexer(mdata, {"one": "a", "two": 1}, expected) expected = create_sel_results( [True, False, True, False, False, False, False, False], PandasIndex(pd.Index([1, 2]), "two"), {"one": Variable((), "a"), "three": Variable((), -1)}, ["x"], ["one", "three"], {"x": "two"}, ) test_indexer(mdata, {"one": "a", "three": -1}, expected) expected = create_sel_results( [True, True, True, True, False, False, False, False], 
PandasMultiIndex( pd.MultiIndex.from_product([[1, 2], [-1, -2]], names=("two", "three")), "x", ), {"one": Variable((), "a")}, [], ["one"], {}, ) test_indexer(mdata, {"one": "a"}, expected) def test_read_only_view(self) -> None: arr = DataArray( np.random.rand(3, 3), coords={"x": np.arange(3), "y": np.arange(3)}, dims=("x", "y"), ) # Create a 2D DataArray arr = arr.expand_dims({"z": 3}, -1) # New dimension 'z' arr["z"] = np.arange(3) # New coords to dimension 'z' with pytest.raises(ValueError, match="Do you want to .copy()"): arr.loc[0, 0, 0] = 999 class TestLazyArray: def test_slice_slice(self) -> None: arr = ReturnItem() for size in [100, 99]: # We test even/odd size cases x = np.arange(size) slices = [ arr[:3], arr[:4], arr[2:4], arr[:1], arr[:-1], arr[5:-1], arr[-5:-1], arr[::-1], arr[5::-1], arr[:3:-1], arr[:30:-1], arr[10:4:], arr[::4], arr[4:4:4], arr[:4:-4], arr[::-2], ] for i in slices: for j in slices: expected = x[i][j] new_slice = indexing.slice_slice(i, j, size=size) actual = x[new_slice] assert_array_equal(expected, actual) @pytest.mark.parametrize( ["old_slice", "array", "size"], ( (slice(None, 8), np.arange(2, 6), 10), (slice(2, None), np.arange(2, 6), 10), (slice(1, 10, 2), np.arange(1, 4), 15), (slice(10, None, -1), np.array([2, 5, 7]), 12), (slice(2, None, 2), np.array([3, -2, 5, -1]), 13), (slice(8, None), np.array([1, -2, 2, -1, -7]), 20), ), ) def test_slice_slice_by_array(self, old_slice, array, size): actual = indexing.slice_slice_by_array(old_slice, array, size) expected = np.arange(size)[old_slice][array] assert_array_equal(actual, expected) def test_lazily_indexed_array(self) -> None: original = np.random.rand(10, 20, 30) x = indexing.NumpyIndexingAdapter(original) v = Variable(["i", "j", "k"], original) lazy = indexing.LazilyIndexedArray(x) v_lazy = Variable(["i", "j", "k"], lazy) arr = ReturnItem() # test orthogonally applied indexers indexers = [arr[:], 0, -2, arr[:3], [0, 1, 2, 3], [0], np.arange(10) < 5] for i in indexers: for j in indexers: for k in indexers: if isinstance(j, np.ndarray) and j.dtype.kind == "b": j = np.arange(20) < 5 if isinstance(k, np.ndarray) and k.dtype.kind == "b": k = np.arange(30) < 5 expected = np.asarray(v[i, j, k]) for actual in [ v_lazy[i, j, k], v_lazy[:, j, k][i], v_lazy[:, :, k][:, j][i], ]: assert expected.shape == actual.shape assert_array_equal(expected, actual) assert isinstance(actual._data, indexing.LazilyIndexedArray) assert isinstance(v_lazy._data, indexing.LazilyIndexedArray) # make sure actual.key is appropriate type if all( isinstance(k, int | slice) for k in v_lazy._data.key.tuple ): assert isinstance(v_lazy._data.key, indexing.BasicIndexer) else: assert isinstance(v_lazy._data.key, indexing.OuterIndexer) # test sequentially applied indexers indexers = [ (3, 2), (arr[:], 0), (arr[:2], -1), (arr[:4], [0]), ([4, 5], 0), ([0, 1, 2], [0, 1]), ([0, 3, 5], arr[:2]), ] for i, j in indexers: expected_b = v[i][j] actual = v_lazy[i][j] assert expected_b.shape == actual.shape assert_array_equal(expected_b, actual) # test transpose if actual.ndim > 1: order = np.random.choice(actual.ndim, actual.ndim) order = np.array(actual.dims) transposed = actual.transpose(*order) assert_array_equal(expected_b.transpose(*order), transposed) assert isinstance( actual._data, indexing.LazilyVectorizedIndexedArray | indexing.LazilyIndexedArray, ) assert isinstance(actual._data, indexing.LazilyIndexedArray) assert isinstance(actual._data.array, indexing.NumpyIndexingAdapter) def test_vectorized_lazily_indexed_array(self) -> None: original = 
np.random.rand(10, 20, 30) x = indexing.NumpyIndexingAdapter(original) v_eager = Variable(["i", "j", "k"], x) lazy = indexing.LazilyIndexedArray(x) v_lazy = Variable(["i", "j", "k"], lazy) arr = ReturnItem() def check_indexing(v_eager, v_lazy, indexers): for indexer in indexers: actual = v_lazy[indexer] expected = v_eager[indexer] assert expected.shape == actual.shape assert isinstance( actual._data, indexing.LazilyVectorizedIndexedArray | indexing.LazilyIndexedArray, ) assert_array_equal(expected, actual) v_eager = expected v_lazy = actual # test orthogonal indexing indexers = [(arr[:], 0, 1), (Variable("i", [0, 1]),)] check_indexing(v_eager, v_lazy, indexers) # vectorized indexing indexers = [ (Variable("i", [0, 1]), Variable("i", [0, 1]), slice(None)), (slice(1, 3, 2), 0), ] check_indexing(v_eager, v_lazy, indexers) indexers = [ (slice(None, None, 2), 0, slice(None, 10)), (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])), (Variable(["i", "j"], [[0, 1], [1, 2]]),), ] check_indexing(v_eager, v_lazy, indexers) indexers = [ (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])), (Variable(["i", "j"], [[0, 1], [1, 2]]),), ] check_indexing(v_eager, v_lazy, indexers) def test_lazily_indexed_array_vindex_setitem(self) -> None: lazy = indexing.LazilyIndexedArray(np.random.rand(10, 20, 30)) # vectorized indexing indexer = indexing.VectorizedIndexer( (np.array([0, 1]), np.array([0, 1]), slice(None, None, None)) ) with pytest.raises( NotImplementedError, match=r"Lazy item assignment with the vectorized indexer is not yet", ): lazy.vindex[indexer] = 0 @pytest.mark.parametrize( "indexer_class, key, value", [ (indexing.OuterIndexer, (0, 1, slice(None, None, None)), 10), (indexing.BasicIndexer, (0, 1, slice(None, None, None)), 10), ], ) def test_lazily_indexed_array_setitem(self, indexer_class, key, value) -> None: original = np.random.rand(10, 20, 30) x = indexing.NumpyIndexingAdapter(original) lazy = indexing.LazilyIndexedArray(x) if indexer_class is indexing.BasicIndexer: indexer = indexer_class(key) lazy[indexer] = value elif indexer_class is indexing.OuterIndexer: indexer = indexer_class(key) lazy.oindex[indexer] = value assert_array_equal(original[key], value) class TestCopyOnWriteArray: def test_setitem(self) -> None: original = np.arange(10) wrapped = indexing.CopyOnWriteArray(original) wrapped[B[:]] = 0 assert_array_equal(original, np.arange(10)) assert_array_equal(wrapped, np.zeros(10)) def test_sub_array(self) -> None: original = np.arange(10) wrapped = indexing.CopyOnWriteArray(original) child = wrapped[B[:5]] assert isinstance(child, indexing.CopyOnWriteArray) child[B[:]] = 0 assert_array_equal(original, np.arange(10)) assert_array_equal(wrapped, np.arange(10)) assert_array_equal(child, np.zeros(5)) def test_index_scalar(self) -> None: # regression test for GH1374 x = indexing.CopyOnWriteArray(np.array(["foo", "bar"])) assert np.array(x[B[0]][B[()]]) == "foo" class TestMemoryCachedArray: def test_wrapper(self) -> None: original = indexing.LazilyIndexedArray(np.arange(10)) wrapped = indexing.MemoryCachedArray(original) assert_array_equal(wrapped, np.arange(10)) assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter) def test_sub_array(self) -> None: original = indexing.LazilyIndexedArray(np.arange(10)) wrapped = indexing.MemoryCachedArray(original) child = wrapped[B[:5]] assert isinstance(child, indexing.MemoryCachedArray) assert_array_equal(child, np.arange(5)) assert isinstance(child.array, indexing.NumpyIndexingAdapter) assert isinstance(wrapped.array, 
indexing.LazilyIndexedArray) @pytest.mark.asyncio async def test_async_wrapper(self) -> None: original = indexing.LazilyIndexedArray(np.arange(10)) wrapped = indexing.MemoryCachedArray(original) await wrapped.async_get_duck_array() assert_array_equal(wrapped, np.arange(10)) assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter) @pytest.mark.asyncio async def test_async_sub_array(self) -> None: original = indexing.LazilyIndexedArray(np.arange(10)) wrapped = indexing.MemoryCachedArray(original) child = wrapped[B[:5]] assert isinstance(child, indexing.MemoryCachedArray) await child.async_get_duck_array() assert_array_equal(child, np.arange(5)) assert isinstance(child.array, indexing.NumpyIndexingAdapter) assert isinstance(wrapped.array, indexing.LazilyIndexedArray) def test_setitem(self) -> None: original = np.arange(10) wrapped = indexing.MemoryCachedArray(original) wrapped[B[:]] = 0 assert_array_equal(original, np.zeros(10)) def test_index_scalar(self) -> None: # regression test for GH1374 x = indexing.MemoryCachedArray(np.array(["foo", "bar"])) assert np.array(x[B[0]][B[()]]) == "foo" def test_base_explicit_indexer() -> None: with pytest.raises(TypeError): indexing.ExplicitIndexer(()) class Subclass(indexing.ExplicitIndexer): pass value = Subclass((1, 2, 3)) assert value.tuple == (1, 2, 3) assert repr(value) == "Subclass((1, 2, 3))" @pytest.mark.parametrize( "indexer_cls", [indexing.BasicIndexer, indexing.OuterIndexer, indexing.VectorizedIndexer], ) def test_invalid_for_all(indexer_cls) -> None: with pytest.raises(TypeError): indexer_cls(None) with pytest.raises(TypeError): indexer_cls(([],)) with pytest.raises(TypeError): indexer_cls((None,)) with pytest.raises(TypeError): indexer_cls(("foo",)) with pytest.raises(TypeError): indexer_cls((1.0,)) with pytest.raises(TypeError): indexer_cls((slice("foo"),)) with pytest.raises(TypeError): indexer_cls((np.array(["foo"]),)) with pytest.raises(TypeError): indexer_cls(True) with pytest.raises(TypeError): indexer_cls(np.array(True)) def check_integer(indexer_cls): value = indexer_cls((1, np.uint64(2))).tuple assert all(isinstance(v, int) for v in value) assert value == (1, 2) def check_slice(indexer_cls): (value,) = indexer_cls((slice(1, None, np.int64(2)),)).tuple assert value == slice(1, None, 2) assert isinstance(value.step, int) def check_array1d(indexer_cls): (value,) = indexer_cls((np.arange(3, dtype=np.int32),)).tuple assert value.dtype == np.int64 np.testing.assert_array_equal(value, [0, 1, 2]) def check_array2d(indexer_cls): array = np.array([[1, 2], [3, 4]], dtype=np.int64) (value,) = indexer_cls((array,)).tuple assert value.dtype == np.int64 np.testing.assert_array_equal(value, array) def test_basic_indexer() -> None: check_integer(indexing.BasicIndexer) check_slice(indexing.BasicIndexer) with pytest.raises(TypeError): check_array1d(indexing.BasicIndexer) with pytest.raises(TypeError): check_array2d(indexing.BasicIndexer) def test_outer_indexer() -> None: check_integer(indexing.OuterIndexer) check_slice(indexing.OuterIndexer) check_array1d(indexing.OuterIndexer) with pytest.raises(TypeError): check_array2d(indexing.OuterIndexer) def test_vectorized_indexer() -> None: with pytest.raises(TypeError): check_integer(indexing.VectorizedIndexer) check_slice(indexing.VectorizedIndexer) check_array1d(indexing.VectorizedIndexer) check_array2d(indexing.VectorizedIndexer) with pytest.raises(ValueError, match=r"numbers of dimensions"): indexing.VectorizedIndexer( (np.array(1, dtype=np.int64), np.arange(5, dtype=np.int64)) ) class 
Test_vectorized_indexer: @pytest.fixture(autouse=True) def setup(self): self.data = indexing.NumpyIndexingAdapter(np.random.randn(10, 12, 13)) self.indexers = [ np.array([[0, 3, 2]]), np.array([[0, 3, 3], [4, 6, 7]]), slice(2, -2, 2), slice(2, -2, 3), slice(None), ] def test_arrayize_vectorized_indexer(self) -> None: for i, j, k in itertools.product(self.indexers, repeat=3): vindex = indexing.VectorizedIndexer((i, j, k)) vindex_array = indexing._arrayize_vectorized_indexer( vindex, self.data.shape ) np.testing.assert_array_equal( self.data.vindex[vindex], self.data.vindex[vindex_array] ) actual = indexing._arrayize_vectorized_indexer( indexing.VectorizedIndexer((slice(None),)), shape=(5,) ) np.testing.assert_array_equal(actual.tuple, [np.arange(5)]) actual = indexing._arrayize_vectorized_indexer( indexing.VectorizedIndexer((np.arange(5),) * 3), shape=(8, 10, 12) ) expected = np.stack([np.arange(5)] * 3) np.testing.assert_array_equal(np.stack(actual.tuple), expected) actual = indexing._arrayize_vectorized_indexer( indexing.VectorizedIndexer((np.arange(5), slice(None))), shape=(8, 10) ) a, b = actual.tuple np.testing.assert_array_equal(a, np.arange(5)[:, np.newaxis]) np.testing.assert_array_equal(b, np.arange(10)[np.newaxis, :]) actual = indexing._arrayize_vectorized_indexer( indexing.VectorizedIndexer((slice(None), np.arange(5))), shape=(8, 10) ) a, b = actual.tuple np.testing.assert_array_equal(a, np.arange(8)[np.newaxis, :]) np.testing.assert_array_equal(b, np.arange(5)[:, np.newaxis]) def get_indexers(shape, mode): if mode == "vectorized": indexed_shape = (3, 4) indexer = tuple(np.random.randint(0, s, size=indexed_shape) for s in shape) return indexing.VectorizedIndexer(indexer) elif mode == "outer": indexer = tuple(np.random.randint(0, s, s + 2) for s in shape) return indexing.OuterIndexer(indexer) elif mode == "outer_scalar": indexer = (np.random.randint(0, 3, 4), 0, slice(None, None, 2)) return indexing.OuterIndexer(indexer[: len(shape)]) elif mode == "outer_scalar2": indexer = (np.random.randint(0, 3, 4), -2, slice(None, None, 2)) return indexing.OuterIndexer(indexer[: len(shape)]) elif mode == "outer1vec": indexer = [slice(2, -3) for s in shape] indexer[1] = np.random.randint(0, shape[1], shape[1] + 2) return indexing.OuterIndexer(tuple(indexer)) elif mode == "basic": # basic indexer indexer = [slice(2, -3) for s in shape] indexer[0] = 3 return indexing.BasicIndexer(tuple(indexer)) elif mode == "basic1": # basic indexer return indexing.BasicIndexer((3,)) elif mode == "basic2": # basic indexer indexer = [0, 2, 4] return indexing.BasicIndexer(tuple(indexer[: len(shape)])) elif mode == "basic3": # basic indexer indexer = [slice(None) for s in shape] indexer[0] = slice(-2, 2, -2) indexer[1] = slice(1, -1, 2) return indexing.BasicIndexer(tuple(indexer[: len(shape)])) @pytest.mark.parametrize("size", [100, 99]) @pytest.mark.parametrize( "sl", [slice(1, -1, 1), slice(None, -1, 2), slice(-1, 1, -1), slice(-1, 1, -2)] ) def test_decompose_slice(size, sl) -> None: x = np.arange(size) slice1, slice2 = indexing._decompose_slice(sl, size) expected = x[sl] actual = x[slice1][slice2] assert_array_equal(expected, actual) @pytest.mark.parametrize("shape", [(10, 5, 8), (10, 3)]) @pytest.mark.parametrize( "indexer_mode", [ "vectorized", "outer", "outer_scalar", "outer_scalar2", "outer1vec", "basic", "basic1", "basic2", "basic3", ], ) @pytest.mark.parametrize( "indexing_support", [ indexing.IndexingSupport.BASIC, indexing.IndexingSupport.OUTER, indexing.IndexingSupport.OUTER_1VECTOR, 
indexing.IndexingSupport.VECTORIZED, ], ) def test_decompose_indexers(shape, indexer_mode, indexing_support) -> None: data = np.random.randn(*shape) indexer = get_indexers(shape, indexer_mode) backend_ind, np_ind = indexing.decompose_indexer(indexer, shape, indexing_support) indexing_adapter = indexing.NumpyIndexingAdapter(data) # Dispatch to appropriate indexing method if indexer_mode.startswith("vectorized"): expected = indexing_adapter.vindex[indexer] elif indexer_mode.startswith("outer"): expected = indexing_adapter.oindex[indexer] else: expected = indexing_adapter[indexer] # Basic indexing if isinstance(backend_ind, indexing.VectorizedIndexer): array = indexing_adapter.vindex[backend_ind] elif isinstance(backend_ind, indexing.OuterIndexer): array = indexing_adapter.oindex[backend_ind] else: array = indexing_adapter[backend_ind] if len(np_ind.tuple) > 0: array_indexing_adapter = indexing.NumpyIndexingAdapter(array) if isinstance(np_ind, indexing.VectorizedIndexer): array = array_indexing_adapter.vindex[np_ind] elif isinstance(np_ind, indexing.OuterIndexer): array = array_indexing_adapter.oindex[np_ind] else: array = array_indexing_adapter[np_ind] np.testing.assert_array_equal(expected, array) if not all(isinstance(k, indexing.integer_types) for k in np_ind.tuple): combined_ind = indexing._combine_indexers(backend_ind, shape, np_ind) assert isinstance(combined_ind, indexing.VectorizedIndexer) array = indexing_adapter.vindex[combined_ind] np.testing.assert_array_equal(expected, array) def test_implicit_indexing_adapter() -> None: array = np.arange(10, dtype=np.int64) implicit = indexing.ImplicitToExplicitIndexingAdapter( indexing.NumpyIndexingAdapter(array), indexing.BasicIndexer ) np.testing.assert_array_equal(array, np.asarray(implicit)) np.testing.assert_array_equal(array, implicit[:]) def test_implicit_indexing_adapter_copy_on_write() -> None: array = np.arange(10, dtype=np.int64) implicit = indexing.ImplicitToExplicitIndexingAdapter( indexing.CopyOnWriteArray(array) ) assert isinstance(implicit[:], indexing.ImplicitToExplicitIndexingAdapter) def test_outer_indexer_consistency_with_broadcast_indexes_vectorized() -> None: def nonzero(x): if isinstance(x, np.ndarray) and x.dtype.kind == "b": x = x.nonzero()[0] return x original = np.random.rand(10, 20, 30) v = Variable(["i", "j", "k"], original) arr = ReturnItem() # test orthogonally applied indexers indexers = [ arr[:], 0, -2, arr[:3], np.array([0, 1, 2, 3]), np.array([0]), np.arange(10) < 5, ] for i, j, k in itertools.product(indexers, repeat=3): if isinstance(j, np.ndarray) and j.dtype.kind == "b": # match size j = np.arange(20) < 4 if isinstance(k, np.ndarray) and k.dtype.kind == "b": k = np.arange(30) < 8 _, expected, new_order = v._broadcast_indexes_vectorized((i, j, k)) expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple] if new_order: old_order = range(len(new_order)) expected_data = np.moveaxis(expected_data, old_order, new_order) outer_index = indexing.OuterIndexer((nonzero(i), nonzero(j), nonzero(k))) actual = indexing._outer_to_numpy_indexer(outer_index, v.shape) actual_data = v.data[actual] np.testing.assert_array_equal(actual_data, expected_data) def test_create_mask_outer_indexer() -> None: indexer = indexing.OuterIndexer((np.array([0, -1, 2]),)) expected = np.array([False, True, False]) actual = indexing.create_mask(indexer, (5,)) np.testing.assert_array_equal(expected, actual) indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2]))) expected = np.array(2 * [[False, True, False]]) actual = 
indexing.create_mask(indexer, (5, 5, 5)) np.testing.assert_array_equal(expected, actual) def test_create_mask_vectorized_indexer() -> None: indexer = indexing.VectorizedIndexer((np.array([0, -1, 2]), np.array([0, 1, -1]))) expected = np.array([False, True, True]) actual = indexing.create_mask(indexer, (5,)) np.testing.assert_array_equal(expected, actual) indexer = indexing.VectorizedIndexer( (np.array([0, -1, 2]), slice(None), np.array([0, 1, -1])) ) expected = np.array([[False, True, True]] * 2).T actual = indexing.create_mask(indexer, (5, 2)) np.testing.assert_array_equal(expected, actual) def test_create_mask_basic_indexer() -> None: indexer = indexing.BasicIndexer((-1,)) actual = indexing.create_mask(indexer, (3,)) np.testing.assert_array_equal(True, actual) indexer = indexing.BasicIndexer((0,)) actual = indexing.create_mask(indexer, (3,)) np.testing.assert_array_equal(False, actual) def test_create_mask_dask() -> None: da = pytest.importorskip("dask.array") indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2]))) expected = np.array(2 * [[False, True, False]]) actual = indexing.create_mask( indexer, (5, 5, 5), da.empty((2, 3), chunks=((1, 1), (2, 1))) ) assert actual.chunks == ((1, 1), (2, 1)) np.testing.assert_array_equal(expected, actual) indexer_vec = indexing.VectorizedIndexer( (np.array([0, -1, 2]), slice(None), np.array([0, 1, -1])) ) expected = np.array([[False, True, True]] * 2).T actual = indexing.create_mask( indexer_vec, (5, 2), da.empty((3, 2), chunks=((3,), (2,))) ) assert isinstance(actual, da.Array) np.testing.assert_array_equal(expected, actual) with pytest.raises(ValueError): indexing.create_mask(indexer_vec, (5, 2), da.empty((5,), chunks=(1,))) def test_create_mask_error() -> None: with pytest.raises(TypeError, match=r"unexpected key type"): indexing.create_mask((1, 2), (3, 4)) # type: ignore[arg-type] @pytest.mark.parametrize( "indices, expected", [ (np.arange(5), np.arange(5)), (np.array([0, -1, -1]), np.array([0, 0, 0])), (np.array([-1, 1, -1]), np.array([1, 1, 1])), (np.array([-1, -1, 2]), np.array([2, 2, 2])), (np.array([-1]), np.array([0])), (np.array([0, -1, 1, -1, -1]), np.array([0, 0, 1, 1, 1])), (np.array([0, -1, -1, -1, 1]), np.array([0, 0, 0, 0, 1])), ], ) def test_posify_mask_subindexer(indices, expected) -> None: actual = indexing._posify_mask_subindexer(indices) np.testing.assert_array_equal(expected, actual) class ArrayWithNamespace: def __array_namespace__(self, version=None): pass class ArrayWithArrayFunction: def __array_function__(self, func, types, args, kwargs): pass class ArrayWithNamespaceAndArrayFunction: def __array_namespace__(self, version=None): pass def __array_function__(self, func, types, args, kwargs): pass def as_dask_array(arr, chunks): try: import dask.array as da except ImportError: return None return da.from_array(arr, chunks=chunks) @pytest.mark.parametrize( ["array", "expected_type"], ( pytest.param( indexing.CopyOnWriteArray(np.array([1, 2])), indexing.CopyOnWriteArray, id="ExplicitlyIndexed", ), pytest.param( np.array([1, 2]), indexing.NumpyIndexingAdapter, id="numpy.ndarray" ), pytest.param( pd.Index([1, 2]), indexing.PandasIndexingAdapter, id="pandas.Index" ), pytest.param( as_dask_array(np.array([1, 2]), chunks=(1,)), indexing.DaskIndexingAdapter, id="dask.array", marks=requires_dask, ), pytest.param( ArrayWithNamespace(), indexing.ArrayApiIndexingAdapter, id="array_api" ), pytest.param( ArrayWithArrayFunction(), indexing.NdArrayLikeIndexingAdapter, id="array_like", ), pytest.param( 
ArrayWithNamespaceAndArrayFunction(), indexing.ArrayApiIndexingAdapter, id="array_api_with_fallback", ), ), ) def test_as_indexable(array, expected_type): actual = indexing.as_indexable(array) assert isinstance(actual, expected_type) def test_indexing_1d_object_array() -> None: items = (np.arange(3), np.arange(6)) arr = DataArray(np.array(items, dtype=object)) actual = arr[0] expected_data = np.empty((), dtype=object) expected_data[()] = items[0] expected = DataArray(expected_data) assert [actual.data.item()] == [expected.data.item()] @requires_dask def test_indexing_dask_array() -> None: import dask.array da = DataArray( np.ones(10 * 3 * 3).reshape((10, 3, 3)), dims=("time", "x", "y"), ).chunk(dict(time=-1, x=1, y=1)) with raise_if_dask_computes(): actual = da.isel(time=dask.array.from_array([9], chunks=(1,))) expected = da.isel(time=[9]) assert_identical(actual, expected) @requires_dask def test_indexing_dask_array_scalar() -> None: # GH4276 import dask.array a = dask.array.from_array(np.linspace(0.0, 1.0)) da = DataArray(a, dims="x") x_selector = da.argmax(dim=...) assert not isinstance(x_selector, DataArray) with raise_if_dask_computes(): actual = da.isel(x_selector) expected = da.isel(x=-1) assert_identical(actual, expected) @requires_dask def test_vectorized_indexing_dask_array() -> None: # https://github.com/pydata/xarray/issues/2511#issuecomment-563330352 darr = DataArray(data=[0.2, 0.4, 0.6], coords={"z": range(3)}, dims=("z",)) indexer = DataArray( data=np.random.randint(0, 3, 8).reshape(4, 2).astype(int), coords={"y": range(4), "x": range(2)}, dims=("y", "x"), ) expected = darr[indexer] # fails because we can't index pd.Index lazily (yet). # We could make this succeed by auto-chunking the values # and constructing a lazy index variable, and not automatically # create an index for it. 
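    # Until that is supported, vectorized indexing with a chunked indexer is
    # expected to raise whenever the indexed dimension still carries a
    # pandas-backed index; dropping the "z" coordinate (and with it its index)
    # lets the fully lazy path below succeed without computing anything.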
with pytest.raises(ValueError, match="Cannot index with"): with raise_if_dask_computes(): darr.chunk()[indexer.chunk({"y": 2})] with pytest.raises(ValueError, match="Cannot index with"): with raise_if_dask_computes(): actual = darr[indexer.chunk({"y": 2})] with raise_if_dask_computes(): actual = darr.drop_vars("z").chunk()[indexer.chunk({"y": 2})] assert_identical(actual, expected.drop_vars("z")) with raise_if_dask_computes(): actual_variable = darr.variable.chunk()[indexer.variable.chunk({"y": 2})] assert_identical(actual_variable, expected.variable) @requires_dask def test_advanced_indexing_dask_array() -> None: # GH4663 import dask.array as da ds = Dataset( dict( a=("x", da.from_array(np.random.randint(0, 100, 100))), b=(("x", "y"), da.random.random((100, 10))), ) ) expected = ds.b.sel(x=ds.a.compute()) with raise_if_dask_computes(): actual = ds.b.sel(x=ds.a) assert_identical(expected, actual) with raise_if_dask_computes(): actual = ds.b.sel(x=ds.a.data) assert_identical(expected, actual) def test_backend_indexing_non_numpy() -> None: """This model indexing of a Zarr store that reads to GPU memory.""" array = DuckArrayWrapper(np.array([1, 2, 3])) indexed = indexing.explicit_indexing_adapter( indexing.BasicIndexer((slice(1),)), shape=array.shape, indexing_support=indexing.IndexingSupport.BASIC, raw_indexing_method=array.__getitem__, ) np.testing.assert_array_equal(indexed.array, np.array([1])) xarray-2025.09.0/xarray/tests/test_interp.py000066400000000000000000001124031505620616400207270ustar00rootroot00000000000000from __future__ import annotations import contextlib from itertools import combinations, permutations, product from typing import cast, get_args import numpy as np import pandas as pd import pytest import xarray as xr from xarray.coding.cftimeindex import _parse_array_of_cftime_strings from xarray.core.types import ( Interp1dOptions, InterpnOptions, InterpolantOptions, InterpOptions, ) from xarray.tests import ( assert_allclose, assert_equal, assert_identical, has_dask, has_scipy, has_scipy_ge_1_13, raise_if_dask_computes, requires_cftime, requires_dask, requires_scipy, ) from xarray.tests.test_dataset import create_test_data with contextlib.suppress(ImportError): import scipy ALL_1D = get_args(Interp1dOptions) + get_args(InterpolantOptions) def get_example_data(case: int) -> xr.DataArray: if case == 0: # 2D x = np.linspace(0, 1, 100) y = np.linspace(0, 0.1, 30) return xr.DataArray( np.sin(x[:, np.newaxis]) * np.cos(y), dims=["x", "y"], coords={"x": x, "y": y, "x2": ("x", x**2)}, ) elif case == 1: # 2D chunked single dim return get_example_data(0).chunk({"y": 3}) elif case == 2: # 2D chunked both dims return get_example_data(0).chunk({"x": 25, "y": 3}) elif case == 3: # 3D x = np.linspace(0, 1, 100) y = np.linspace(0, 0.1, 30) z = np.linspace(0.1, 0.2, 10) return xr.DataArray( np.sin(x[:, np.newaxis, np.newaxis]) * np.cos(y[:, np.newaxis]) * z, dims=["x", "y", "z"], coords={"x": x, "y": y, "x2": ("x", x**2), "z": z}, ) elif case == 4: # 3D chunked single dim # chunksize=5 lets us check whether we rechunk to 1 with quintic return get_example_data(3).chunk({"z": 5}) else: raise ValueError("case must be 1-4") @pytest.fixture def nd_interp_coords(): # interpolation indices for nd interpolation of da from case 3 of get_example_data da = get_example_data(case=3) coords = {} # grid -> grid coords["xdestnp"] = np.linspace(0.1, 1.0, 11) coords["ydestnp"] = np.linspace(0.0, 0.2, 10) coords["zdestnp"] = da.z.data # list of the points defined by the above mesh in C order mesh_x, mesh_y, 
mesh_z = np.meshgrid( coords["xdestnp"], coords["ydestnp"], coords["zdestnp"], indexing="ij" ) coords["grid_grid_points"] = np.column_stack( [mesh_x.ravel(), mesh_y.ravel(), mesh_z.ravel()] ) # grid -> oned coords["xdest"] = xr.DataArray(np.linspace(0.1, 1.0, 11), dims="y") # type: ignore[assignment] coords["ydest"] = xr.DataArray(np.linspace(0.0, 0.2, 11), dims="y") # type: ignore[assignment] coords["zdest"] = da.z # grid of the points defined by the oned gridded with zdest in C order coords["grid_oned_points"] = np.array( [ (a, b, c) for (a, b), c in product( zip(coords["xdest"].data, coords["ydest"].data, strict=False), coords["zdest"].data, ) ] ) return coords def test_keywargs(): if not has_scipy: pytest.skip("scipy is not installed.") da = get_example_data(0) assert_equal(da.interp(x=[0.5, 0.8]), da.interp({"x": [0.5, 0.8]})) @pytest.mark.parametrize("method", ["linear", "cubic"]) @pytest.mark.parametrize("dim", ["x", "y"]) @pytest.mark.parametrize( "case", [pytest.param(0, id="no_chunk"), pytest.param(1, id="chunk_y")] ) def test_interpolate_1d(method: InterpOptions, dim: str, case: int) -> None: if not has_scipy: pytest.skip("scipy is not installed.") if not has_dask and case == 1: pytest.skip("dask is not installed in the environment.") da = get_example_data(case) xdest = np.linspace(0.0, 0.9, 80) actual = da.interp(method=method, coords={dim: xdest}) # scipy interpolation for the reference def func(obj, new_x): return scipy.interpolate.interp1d( da[dim], obj.data, axis=obj.get_axis_num(dim), bounds_error=False, fill_value=np.nan, kind=method, # type: ignore[arg-type,unused-ignore] )(new_x) if dim == "x": coords = {"x": xdest, "y": da["y"], "x2": ("x", func(da["x2"], xdest))} else: # y coords = {"x": da["x"], "y": xdest, "x2": da["x2"]} expected = xr.DataArray(func(da, xdest), dims=["x", "y"], coords=coords) assert_allclose(actual, expected) @pytest.mark.parametrize("method", ["cubic", "zero"]) def test_interpolate_1d_methods(method: InterpOptions) -> None: if not has_scipy: pytest.skip("scipy is not installed.") da = get_example_data(0) dim = "x" xdest = np.linspace(0.0, 0.9, 80) actual = da.interp(method=method, coords={dim: xdest}) # scipy interpolation for the reference def func(obj, new_x): return scipy.interpolate.interp1d( da[dim], obj.data, axis=obj.get_axis_num(dim), bounds_error=False, fill_value=np.nan, kind=method, # type: ignore[arg-type,unused-ignore] )(new_x) coords = {"x": xdest, "y": da["y"], "x2": ("x", func(da["x2"], xdest))} expected = xr.DataArray(func(da, xdest), dims=["x", "y"], coords=coords) assert_allclose(actual, expected) @requires_scipy @pytest.mark.parametrize( "use_dask, method", ( (False, "linear"), (False, "akima"), pytest.param( False, "makima", marks=pytest.mark.skipif(not has_scipy_ge_1_13, reason="scipy too old"), ), pytest.param( True, "linear", marks=pytest.mark.skipif(not has_dask, reason="dask not available"), ), pytest.param( True, "akima", marks=pytest.mark.skipif(not has_dask, reason="dask not available"), ), ), ) def test_interpolate_vectorize(use_dask: bool, method: InterpOptions) -> None: # scipy interpolation for the reference def func(obj, dim, new_x, method): scipy_kwargs = {} interpolant_options = { "barycentric": scipy.interpolate.BarycentricInterpolator, "krogh": scipy.interpolate.KroghInterpolator, "pchip": scipy.interpolate.PchipInterpolator, "akima": scipy.interpolate.Akima1DInterpolator, "makima": scipy.interpolate.Akima1DInterpolator, } shape = [s for i, s in enumerate(obj.shape) if i != obj.get_axis_num(dim)] for s in 
new_x.shape[::-1]: shape.insert(obj.get_axis_num(dim), s) if method in interpolant_options: interpolant = interpolant_options[method] if method == "makima": scipy_kwargs["method"] = method return interpolant( da[dim], obj.data, axis=obj.get_axis_num(dim), **scipy_kwargs )(new_x).reshape(shape) else: return scipy.interpolate.interp1d( da[dim], obj.data, axis=obj.get_axis_num(dim), kind=method, # type: ignore[arg-type,unused-ignore] bounds_error=False, fill_value=np.nan, **scipy_kwargs, )(new_x).reshape(shape) da = get_example_data(0) if use_dask: da = da.chunk({"y": 5}) # xdest is 1d but has different dimension xdest = xr.DataArray( np.linspace(0.1, 0.9, 30), dims="z", coords={"z": np.random.randn(30), "z2": ("z", np.random.randn(30))}, ) actual = da.interp(x=xdest, method=method) expected = xr.DataArray( func(da, "x", xdest, method), dims=["z", "y"], coords={ "z": xdest["z"], "z2": xdest["z2"], "y": da["y"], "x": ("z", xdest.values), "x2": ("z", func(da["x2"], "x", xdest, method)), }, ) assert_allclose(actual, expected.transpose("z", "y", transpose_coords=True)) # xdest is 2d xdest = xr.DataArray( np.linspace(0.1, 0.9, 30).reshape(6, 5), dims=["z", "w"], coords={ "z": np.random.randn(6), "w": np.random.randn(5), "z2": ("z", np.random.randn(6)), }, ) actual = da.interp(x=xdest, method=method) expected = xr.DataArray( func(da, "x", xdest, method), dims=["z", "w", "y"], coords={ "z": xdest["z"], "w": xdest["w"], "z2": xdest["z2"], "y": da["y"], "x": (("z", "w"), xdest.data), "x2": (("z", "w"), func(da["x2"], "x", xdest, method)), }, ) assert_allclose(actual, expected.transpose("z", "w", "y", transpose_coords=True)) @requires_scipy @pytest.mark.parametrize("method", get_args(InterpnOptions)) @pytest.mark.parametrize( "case", [ pytest.param(3, id="no_chunk"), pytest.param( 4, id="chunked", marks=pytest.mark.skipif(not has_dask, reason="no dask") ), ], ) def test_interpolate_nd(case: int, method: InterpnOptions, nd_interp_coords) -> None: da = get_example_data(case) # grid -> grid xdestnp = nd_interp_coords["xdestnp"] ydestnp = nd_interp_coords["ydestnp"] zdestnp = nd_interp_coords["zdestnp"] grid_grid_points = nd_interp_coords["grid_grid_points"] # the presence/absence of z coordinate may affect nd interpolants, even when the # coordinate is unchanged # TODO: test this? 
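# Reference result for the grid -> grid case below: evaluate scipy.interpolate.interpn
# directly on the loaded data at the flattened destination points, then reshape back
# to the (x, y, z) destination grid before comparing against da.interp.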
actual = da.interp(x=xdestnp, y=ydestnp, z=zdestnp, method=method) expected_data = scipy.interpolate.interpn( points=(da.x, da.y, da.z), values=da.load().data, xi=grid_grid_points, method=method, bounds_error=False, ).reshape((len(xdestnp), len(ydestnp), len(zdestnp))) expected = xr.DataArray( expected_data, dims=["x", "y", "z"], coords={ "x": xdestnp, "y": ydestnp, "z": zdestnp, "x2": da["x2"].interp(x=xdestnp, method=method), }, ) assert_allclose(actual.transpose("x", "y", "z"), expected.transpose("x", "y", "z")) # grid -> 1d-sample xdest = nd_interp_coords["xdest"] ydest = nd_interp_coords["ydest"] zdest = nd_interp_coords["zdest"] grid_oned_points = nd_interp_coords["grid_oned_points"] actual = da.interp(x=xdest, y=ydest, z=zdest, method=method) expected_data_1d: np.ndarray = scipy.interpolate.interpn( points=(da.x, da.y, da.z), values=da.data, xi=grid_oned_points, method=method, bounds_error=False, ).reshape([len(xdest), len(zdest)]) expected = xr.DataArray( expected_data_1d, dims=["y", "z"], coords={ "y": ydest, "z": zdest, "x": ("y", xdest.values), "x2": da["x2"].interp(x=xdest, method=method), }, ) assert_allclose(actual.transpose("y", "z"), expected) # reversed order actual = da.interp(y=ydest, x=xdest, z=zdest, method=method) assert_allclose(actual.transpose("y", "z"), expected) @requires_scipy # omit cubic, pchip, quintic because not enough points @pytest.mark.parametrize("method", ("linear", "nearest", "slinear")) def test_interpolate_nd_nd(method: InterpnOptions) -> None: """Interpolate nd array with an nd indexer sharing coordinates.""" # Create original array a = [0, 2] x = [0, 1, 2] values = np.arange(6).reshape(2, 3) da = xr.DataArray(values, dims=("a", "x"), coords={"a": a, "x": x}) # Create indexer into `a` with dimensions (y, x) y = [10] a_targets = [1, 2, 2] c = {"x": x, "y": y} ia = xr.DataArray([a_targets], dims=("y", "x"), coords=c) out = da.interp(a=ia, method=method) expected_xi = list(zip(a_targets, x, strict=False)) expected_vec = scipy.interpolate.interpn( points=(a, x), values=values, xi=expected_xi, method=method ) expected = xr.DataArray([expected_vec], dims=("y", "x"), coords=c) xr.testing.assert_allclose(out.drop_vars("a"), expected) # If the *shared* indexing coordinates do not match, interp should fail. 
with pytest.raises(ValueError): c = {"x": [1], "y": y} ia = xr.DataArray([[1]], dims=("y", "x"), coords=c) da.interp(a=ia) with pytest.raises(ValueError): c = {"x": [5, 6, 7], "y": y} ia = xr.DataArray([[1]], dims=("y", "x"), coords=c) da.interp(a=ia) @requires_scipy @pytest.mark.filterwarnings("ignore:All-NaN slice") def test_interpolate_nd_with_nan() -> None: """Interpolate an array with an nd indexer and `NaN` values.""" # Create indexer into `a` with dimensions (y, x) x = [0, 1, 2] y = [10, 20] c = {"x": x, "y": y} a = np.arange(6, dtype=float).reshape(2, 3) a[0, 1] = np.nan ia = xr.DataArray(a, dims=("y", "x"), coords=c) da = xr.DataArray([1, 2, 2], dims=("a"), coords={"a": [0, 2, 4]}) out = da.interp(a=ia) expected = xr.DataArray( [[1.0, np.nan, 2.0], [2.0, 2.0, np.nan]], dims=("y", "x"), coords=c ) xr.testing.assert_allclose(out.drop_vars("a"), expected) db = 2 * da ds = xr.Dataset({"da": da, "db": db}) out2 = ds.interp(a=ia) expected_ds = xr.Dataset({"da": expected, "db": 2 * expected}) xr.testing.assert_allclose(out2.drop_vars("a"), expected_ds) @requires_scipy @pytest.mark.parametrize("method", ("linear",)) @pytest.mark.parametrize( "case", [pytest.param(0, id="no_chunk"), pytest.param(1, id="chunk_y")] ) def test_interpolate_scalar(method: InterpOptions, case: int) -> None: if not has_dask and case == 1: pytest.skip("dask is not installed in the environment.") da = get_example_data(case) xdest = 0.4 actual = da.interp(x=xdest, method=method) # scipy interpolation for the reference def func(obj, new_x): return scipy.interpolate.interp1d( da["x"], obj.data, axis=obj.get_axis_num("x"), bounds_error=False, fill_value=np.nan, kind=method, # type: ignore[arg-type,unused-ignore] )(new_x) coords = {"x": xdest, "y": da["y"], "x2": func(da["x2"], xdest)} expected = xr.DataArray(func(da, xdest), dims=["y"], coords=coords) assert_allclose(actual, expected) @requires_scipy @pytest.mark.parametrize("method", ("linear",)) @pytest.mark.parametrize( "case", [pytest.param(3, id="no_chunk"), pytest.param(4, id="chunked")] ) def test_interpolate_nd_scalar(method: InterpOptions, case: int) -> None: if not has_dask and case == 4: pytest.skip("dask is not installed in the environment.") da = get_example_data(case) xdest = 0.4 ydest = 0.05 zdest = da.get_index("z") actual = da.interp(x=xdest, y=ydest, z=zdest, method=method) # scipy interpolation for the reference expected_data = scipy.interpolate.RegularGridInterpolator( (da["x"], da["y"], da["z"]), da.transpose("x", "y", "z").values, method=method, # type: ignore[arg-type,unused-ignore] bounds_error=False, fill_value=np.nan, )(np.asarray([(xdest, ydest, z_val) for z_val in zdest])) coords = { "x": xdest, "y": ydest, "x2": da["x2"].interp(x=xdest, method=method), "z": da["z"], } expected = xr.DataArray(expected_data, dims=["z"], coords=coords) assert_allclose(actual, expected) @pytest.mark.parametrize("use_dask", [True, False]) def test_nans(use_dask: bool) -> None: if not has_scipy: pytest.skip("scipy is not installed.") da = xr.DataArray([0, 1, np.nan, 2], dims="x", coords={"x": range(4)}) if not has_dask and use_dask: pytest.skip("dask is not installed in the environment.") da = da.chunk() actual = da.interp(x=[0.5, 1.5]) # not all values are nan assert actual.count() > 0 @requires_scipy @pytest.mark.parametrize("use_dask", [True, False]) def test_errors(use_dask: bool) -> None: # spline is unavailable da = xr.DataArray([0, 1, np.nan, 2], dims="x", coords={"x": range(4)}) if not has_dask and use_dask: pytest.skip("dask is not installed in the 
environment.") da = da.chunk() for method in ["spline"]: with pytest.raises(ValueError), pytest.warns(PendingDeprecationWarning): da.interp(x=[0.5, 1.5], method=method) # type: ignore[arg-type] # not sorted if use_dask: da = get_example_data(3) else: da = get_example_data(0) result = da.interp(x=[-1, 1, 3], kwargs={"fill_value": 0.0}) assert not np.isnan(result.values).any() result = da.interp(x=[-1, 1, 3]) assert np.isnan(result.values).any() # invalid method with pytest.raises(ValueError): da.interp(x=[2, 0], method="boo") # type: ignore[arg-type] with pytest.raises(ValueError): da.interp(y=[2, 0], method="boo") # type: ignore[arg-type] # object-type DataArray cannot be interpolated da = xr.DataArray(["a", "b", "c"], dims="x", coords={"x": [0, 1, 2]}) with pytest.raises(TypeError): da.interp(x=0) @requires_scipy def test_dtype() -> None: data_vars = dict( a=("time", np.array([1, 1.25, 2])), b=("time", np.array([True, True, False], dtype=bool)), c=("time", np.array(["start", "start", "end"], dtype=str)), ) time = np.array([0, 0.25, 1], dtype=float) expected = xr.Dataset(data_vars, coords=dict(time=time)) actual = xr.Dataset( {k: (dim, arr[[0, -1]]) for k, (dim, arr) in data_vars.items()}, coords=dict(time=time[[0, -1]]), ) actual = actual.interp(time=time, method="linear") assert_identical(expected, actual) @requires_scipy def test_sorted() -> None: # unsorted non-uniform gridded data x = np.random.randn(100) y = np.random.randn(30) z = np.linspace(0.1, 0.2, 10) * 3.0 da = xr.DataArray( np.cos(x[:, np.newaxis, np.newaxis]) * np.cos(y[:, np.newaxis]) * z, dims=["x", "y", "z"], coords={"x": x, "y": y, "x2": ("x", x**2), "z": z}, ) x_new = np.linspace(0, 1, 30) y_new = np.linspace(0, 1, 20) da_sorted = da.sortby("x") assert_allclose(da.interp(x=x_new), da_sorted.interp(x=x_new, assume_sorted=True)) da_sorted = da.sortby(["x", "y"]) assert_allclose( da.interp(x=x_new, y=y_new), da_sorted.interp(x=x_new, y=y_new, assume_sorted=True), ) with pytest.raises(ValueError): da.interp(x=[0, 1, 2], assume_sorted=True) @requires_scipy def test_dimension_wo_coords() -> None: da = xr.DataArray( np.arange(12).reshape(3, 4), dims=["x", "y"], coords={"y": [0, 1, 2, 3]} ) da_w_coord = da.copy() da_w_coord["x"] = np.arange(3) assert_equal(da.interp(x=[0.1, 0.2, 0.3]), da_w_coord.interp(x=[0.1, 0.2, 0.3])) assert_equal( da.interp(x=[0.1, 0.2, 0.3], y=[0.5]), da_w_coord.interp(x=[0.1, 0.2, 0.3], y=[0.5]), ) @requires_scipy def test_dataset() -> None: ds = create_test_data() ds.attrs["foo"] = "var" ds["var1"].attrs["buz"] = "var2" new_dim2 = xr.DataArray([0.11, 0.21, 0.31], dims="z") interpolated = ds.interp(dim2=new_dim2) assert_allclose(interpolated["var1"], ds["var1"].interp(dim2=new_dim2)) assert interpolated["var3"].equals(ds["var3"]) # make sure modifying interpolated does not affect the original dataset interpolated["var1"][:, 1] = 1.0 interpolated["var2"][:, 1] = 1.0 interpolated["var3"][:, 1] = 1.0 assert not interpolated["var1"].equals(ds["var1"]) assert not interpolated["var2"].equals(ds["var2"]) assert not interpolated["var3"].equals(ds["var3"]) # attrs should be kept assert interpolated.attrs["foo"] == "var" assert interpolated["var1"].attrs["buz"] == "var2" @pytest.mark.parametrize("case", [pytest.param(0, id="2D"), pytest.param(3, id="3D")]) def test_interpolate_dimorder(case: int) -> None: """Make sure the resultant dimension order is consistent with .sel()""" if not has_scipy: pytest.skip("scipy is not installed.") da = get_example_data(case) new_x = xr.DataArray([0, 1, 2], dims="x") assert 
da.interp(x=new_x).dims == da.sel(x=new_x, method="nearest").dims new_y = xr.DataArray([0, 1, 2], dims="y") actual = da.interp(x=new_x, y=new_y).dims expected = da.sel(x=new_x, y=new_y, method="nearest").dims assert actual == expected # reversed order actual = da.interp(y=new_y, x=new_x).dims expected = da.sel(y=new_y, x=new_x, method="nearest").dims assert actual == expected new_x = xr.DataArray([0, 1, 2], dims="a") assert da.interp(x=new_x).dims == da.sel(x=new_x, method="nearest").dims assert da.interp(y=new_x).dims == da.sel(y=new_x, method="nearest").dims new_y = xr.DataArray([0, 1, 2], dims="a") actual = da.interp(x=new_x, y=new_y).dims expected = da.sel(x=new_x, y=new_y, method="nearest").dims assert actual == expected new_x = xr.DataArray([[0], [1], [2]], dims=["a", "b"]) assert da.interp(x=new_x).dims == da.sel(x=new_x, method="nearest").dims assert da.interp(y=new_x).dims == da.sel(y=new_x, method="nearest").dims if case == 3: new_x = xr.DataArray([[0], [1], [2]], dims=["a", "b"]) new_z = xr.DataArray([[0], [1], [2]], dims=["a", "b"]) actual = da.interp(x=new_x, z=new_z).dims expected = da.sel(x=new_x, z=new_z, method="nearest").dims assert actual == expected actual = da.interp(z=new_z, x=new_x).dims expected = da.sel(z=new_z, x=new_x, method="nearest").dims assert actual == expected actual = da.interp(x=0.5, z=new_z).dims expected = da.sel(x=0.5, z=new_z, method="nearest").dims assert actual == expected @requires_scipy def test_interp_like() -> None: ds = create_test_data() ds.attrs["foo"] = "var" ds["var1"].attrs["buz"] = "var2" other = xr.DataArray(np.random.randn(3), dims=["dim2"], coords={"dim2": [0, 1, 2]}) interpolated = ds.interp_like(other) assert_allclose(interpolated["var1"], ds["var1"].interp(dim2=other["dim2"])) assert_allclose(interpolated["var1"], ds["var1"].interp_like(other)) assert interpolated["var3"].equals(ds["var3"]) # attrs should be kept assert interpolated.attrs["foo"] == "var" assert interpolated["var1"].attrs["buz"] == "var2" other = xr.DataArray( np.random.randn(3), dims=["dim3"], coords={"dim3": ["a", "b", "c"]} ) actual = ds.interp_like(other) expected = ds.reindex_like(other) assert_allclose(actual, expected) @requires_scipy @pytest.mark.parametrize( "x_new, expected", [ (pd.date_range("2000-01-02", periods=3), [1, 2, 3]), ( np.array( [np.datetime64("2000-01-01T12:00"), np.datetime64("2000-01-02T12:00")] ), [0.5, 1.5], ), (["2000-01-01T12:00", "2000-01-02T12:00"], [0.5, 1.5]), (["2000-01-01T12:00", "2000-01-02T12:00", "NaT"], [0.5, 1.5, np.nan]), (["2000-01-01T12:00"], 0.5), pytest.param("2000-01-01T12:00", 0.5, marks=pytest.mark.xfail), ], ) def test_datetime(x_new, expected) -> None: da = xr.DataArray( np.arange(24), dims="time", coords={"time": pd.date_range("2000-01-01", periods=24)}, ) actual = da.interp(time=x_new) expected_da = xr.DataArray( np.atleast_1d(expected), dims=["time"], coords={"time": (np.atleast_1d(x_new).astype("datetime64[ns]"))}, ) assert_allclose(actual, expected_da) @requires_scipy def test_datetime_single_string() -> None: da = xr.DataArray( np.arange(24), dims="time", coords={"time": pd.date_range("2000-01-01", periods=24)}, ) actual = da.interp(time="2000-01-01T12:00") expected = xr.DataArray(0.5) assert_allclose(actual.drop_vars("time"), expected) @requires_cftime @requires_scipy def test_cftime() -> None: times = xr.date_range("2000", periods=24, freq="D", use_cftime=True) da = xr.DataArray(np.arange(24), coords=[times], dims="time") times_new = xr.date_range( "2000-01-01T12:00:00", periods=3, freq="D", use_cftime=True 
) actual = da.interp(time=times_new) expected = xr.DataArray([0.5, 1.5, 2.5], coords=[times_new], dims=["time"]) assert_allclose(actual, expected) @requires_cftime @requires_scipy def test_cftime_type_error() -> None: times = xr.date_range("2000", periods=24, freq="D", use_cftime=True) da = xr.DataArray(np.arange(24), coords=[times], dims="time") times_new = xr.date_range( "2000-01-01T12:00:00", periods=3, freq="D", calendar="noleap", use_cftime=True ) with pytest.raises(TypeError): da.interp(time=times_new) @requires_cftime @requires_scipy def test_cftime_list_of_strings() -> None: from cftime import DatetimeProlepticGregorian times = xr.date_range( "2000", periods=24, freq="D", calendar="proleptic_gregorian", use_cftime=True ) da = xr.DataArray(np.arange(24), coords=[times], dims="time") times_new = ["2000-01-01T12:00", "2000-01-02T12:00", "2000-01-03T12:00"] actual = da.interp(time=times_new) times_new_array = _parse_array_of_cftime_strings( np.array(times_new), DatetimeProlepticGregorian ) expected = xr.DataArray([0.5, 1.5, 2.5], coords=[times_new_array], dims=["time"]) assert_allclose(actual, expected) @requires_cftime @requires_scipy def test_cftime_single_string() -> None: from cftime import DatetimeProlepticGregorian times = xr.date_range( "2000", periods=24, freq="D", calendar="proleptic_gregorian", use_cftime=True ) da = xr.DataArray(np.arange(24), coords=[times], dims="time") times_new = "2000-01-01T12:00" actual = da.interp(time=times_new) times_new_array = _parse_array_of_cftime_strings( np.array(times_new), DatetimeProlepticGregorian ) expected = xr.DataArray(0.5, coords={"time": times_new_array}) assert_allclose(actual, expected) @requires_scipy def test_datetime_to_non_datetime_error() -> None: da = xr.DataArray( np.arange(24), dims="time", coords={"time": pd.date_range("2000-01-01", periods=24)}, ) with pytest.raises(TypeError): da.interp(time=0.5) @requires_cftime @requires_scipy def test_cftime_to_non_cftime_error() -> None: times = xr.date_range("2000", periods=24, freq="D", use_cftime=True) da = xr.DataArray(np.arange(24), coords=[times], dims="time") with pytest.raises(TypeError): da.interp(time=0.5) @requires_scipy def test_datetime_interp_noerror() -> None: # GH:2667 a = xr.DataArray( np.arange(21).reshape(3, 7), dims=["x", "time"], coords={ "x": [1, 2, 3], "time": pd.date_range("01-01-2001", periods=7, freq="D"), }, ) xi = xr.DataArray( np.linspace(1, 3, 50), dims=["time"], coords={"time": pd.date_range("01-01-2001", periods=50, freq="h")}, ) a.interp(x=xi, time=xi.time) # should not raise an error @requires_cftime @requires_scipy def test_3641() -> None: times = xr.date_range("0001", periods=3, freq="500YE", use_cftime=True) da = xr.DataArray(range(3), dims=["time"], coords=[times]) da.interp(time=["0002-05-01"]) @requires_scipy # cubic, quintic, pchip omitted because not enough points @pytest.mark.parametrize("method", ("linear", "nearest", "slinear")) def test_decompose(method: InterpOptions) -> None: da = xr.DataArray( np.arange(6).reshape(3, 2), dims=["x", "y"], coords={"x": [0, 1, 2], "y": [-0.1, -0.3]}, ) x_new = xr.DataArray([0.5, 1.5, 2.5], dims=["x1"]) y_new = xr.DataArray([-0.15, -0.25], dims=["y1"]) x_broadcast, y_broadcast = xr.broadcast(x_new, y_new) assert x_broadcast.ndim == 2 actual = da.interp(x=x_new, y=y_new, method=method).drop_vars(("x", "y")) expected = da.interp(x=x_broadcast, y=y_broadcast, method=method).drop_vars( ("x", "y") ) assert_allclose(actual, expected) @requires_scipy @requires_dask @pytest.mark.parametrize("method", ("linear", 
"nearest", "cubic", "pchip", "quintic")) @pytest.mark.parametrize("chunked", [True, False]) @pytest.mark.parametrize( "data_ndim,interp_ndim,nscalar", [ (data_ndim, interp_ndim, nscalar) for data_ndim in range(1, 4) for interp_ndim in range(1, data_ndim + 1) for nscalar in range(interp_ndim + 1) ], ) @pytest.mark.filterwarnings("ignore:Increasing number of chunks") def test_interpolate_chunk_1d( method: InterpOptions, data_ndim, interp_ndim, nscalar, chunked: bool ) -> None: """Interpolate nd array with multiple independent indexers It should do a series of 1d interpolation """ if method in ["cubic", "pchip", "quintic"] and interp_ndim == 3: pytest.skip("Too slow.") # 3d non chunked data x = np.linspace(0, 1, 6) y = np.linspace(2, 4, 7) z = np.linspace(-0.5, 0.5, 8) da = xr.DataArray( data=np.sin(x[:, np.newaxis, np.newaxis]) * np.cos(y[:, np.newaxis]) * np.exp(z), coords=[("x", x), ("y", y), ("z", z)], ) # choose the data dimensions for data_dims in permutations(da.dims, data_ndim): # select only data_ndim dim da = da.isel( # take the middle line {dim: len(da.coords[dim]) // 2 for dim in da.dims if dim not in data_dims} ) # chunk data da = da.chunk(chunks={dim: i + 1 for i, dim in enumerate(da.dims)}) # choose the interpolation dimensions for interp_dims in permutations(da.dims, interp_ndim): # choose the scalar interpolation dimensions for scalar_dims in combinations(interp_dims, nscalar): dest = {} for dim in interp_dims: if dim in scalar_dims: # take the middle point dest[dim] = 0.5 * (da.coords[dim][0] + da.coords[dim][-1]) else: # pick some points, including outside the domain before = 2 * da.coords[dim][0] - da.coords[dim][1] after = 2 * da.coords[dim][-1] - da.coords[dim][-2] dest[dim] = cast( xr.DataArray, np.linspace( before.item(), after.item(), len(da.coords[dim]) * 13 ), ) if chunked: dest[dim] = xr.DataArray(data=dest[dim], dims=[dim]) dest[dim] = dest[dim].chunk(2) actual = da.interp(method=method, **dest) expected = da.compute().interp(method=method, **dest) assert_identical(actual, expected) # all the combinations are usually not necessary break break break @requires_scipy @requires_dask # quintic omitted because not enough points @pytest.mark.parametrize("method", ("linear", "nearest", "slinear", "cubic", "pchip")) @pytest.mark.filterwarnings("ignore:Increasing number of chunks") def test_interpolate_chunk_advanced(method: InterpOptions) -> None: """Interpolate nd array with an nd indexer sharing coordinates.""" # Create original array x = np.linspace(-1, 1, 5) y = np.linspace(-1, 1, 7) z = np.linspace(-1, 1, 11) t = np.linspace(0, 1, 13) q = np.linspace(0, 1, 17) da = xr.DataArray( data=np.sin(x[:, np.newaxis, np.newaxis, np.newaxis, np.newaxis]) * np.cos(y[:, np.newaxis, np.newaxis, np.newaxis]) * np.exp(z[:, np.newaxis, np.newaxis]) * t[:, np.newaxis] + q, dims=("x", "y", "z", "t", "q"), coords={"x": x, "y": y, "z": z, "t": t, "q": q, "label": "dummy_attr"}, ) # Create indexer into `da` with shared coordinate ("full-twist" MΓΆbius strip) theta = np.linspace(0, 2 * np.pi, 5) w = np.linspace(-0.25, 0.25, 7) r = xr.DataArray( data=1 + w[:, np.newaxis] * np.cos(theta), coords=[("w", w), ("theta", theta)], ) xda = r * np.cos(theta) yda = r * np.sin(theta) zda = xr.DataArray( data=w[:, np.newaxis] * np.sin(theta), coords=[("w", w), ("theta", theta)], ) kwargs = {"fill_value": None} expected = da.interp(t=0.5, x=xda, y=yda, z=zda, kwargs=kwargs, method=method) da = da.chunk(2) xda = xda.chunk(1) zda = zda.chunk(3) actual = da.interp(t=0.5, x=xda, y=yda, z=zda, kwargs=kwargs, 
method=method) assert_identical(actual, expected) @requires_scipy def test_interp1d_bounds_error() -> None: """Ensure exception on bounds error is raised if requested""" da = xr.DataArray( np.sin(0.3 * np.arange(4)), [("time", np.arange(4))], ) with pytest.raises(ValueError): da.interp(time=3.5, kwargs=dict(bounds_error=True)) # default is to fill with nans, so this should pass da.interp(time=3.5) @requires_scipy @pytest.mark.parametrize( "x, expect_same_attrs", [ (2.5, True), (np.array([2.5, 5]), True), (("x", np.array([0, 0.5, 1, 2]), dict(unit="s")), False), ], ) def test_coord_attrs( x, expect_same_attrs: bool, ) -> None: base_attrs = dict(foo="bar") ds = xr.Dataset( data_vars=dict(a=2 * np.arange(5)), coords={"x": ("x", np.arange(5), base_attrs)}, ) has_same_attrs = ds.interp(x=x).x.attrs == base_attrs assert expect_same_attrs == has_same_attrs @requires_scipy def test_interp1d_complex_out_of_bounds() -> None: """Ensure complex nans are used by default""" da = xr.DataArray( np.exp(0.3j * np.arange(4)), [("time", np.arange(4))], ) expected = da.interp(time=3.5, kwargs=dict(fill_value=np.nan + np.nan * 1j)) actual = da.interp(time=3.5) assert_identical(actual, expected) @requires_scipy def test_interp_non_numeric_scalar() -> None: ds = xr.Dataset( { "non_numeric": ("time", np.array(["a"])), }, coords={"time": (np.array([0]))}, ) actual = ds.interp(time=np.linspace(0, 3, 3)) expected = xr.Dataset( { "non_numeric": ("time", np.array(["a", "a", "a"])), }, coords={"time": np.linspace(0, 3, 3)}, ) xr.testing.assert_identical(actual, expected) # Make sure the array is a copy: assert actual["non_numeric"].data.base is None @requires_scipy def test_interp_non_numeric_1d() -> None: ds = xr.Dataset( { "numeric": ("time", 1 + np.arange(0, 4, 1)), "non_numeric": ("time", np.array(["a", "b", "c", "d"])), }, coords={"time": (np.arange(0, 4, 1))}, ) actual = ds.interp(time=np.linspace(0, 3, 7)) expected = xr.Dataset( { "numeric": ("time", 1 + np.linspace(0, 3, 7)), "non_numeric": ("time", np.array(["a", "b", "b", "c", "c", "d", "d"])), }, coords={"time": np.linspace(0, 3, 7)}, ) xr.testing.assert_identical(actual, expected) @requires_scipy def test_interp_non_numeric_nd() -> None: # regression test for GH8099, GH9839 ds = xr.Dataset({"x": ("a", np.arange(4))}, coords={"a": (np.arange(4) - 1.5)}) t = xr.DataArray( np.random.randn(6).reshape((2, 3)) * 0.5, dims=["r", "s"], coords={"r": np.arange(2) - 0.5, "s": np.arange(3) - 1}, ) ds["m"] = ds.x > 1 actual = ds.interp(a=t, method="linear") # with numeric only expected = ds[["x"]].interp(a=t, method="linear") assert_identical(actual[["x"]], expected) @requires_dask @requires_scipy def test_interp_vectorized_dask() -> None: # Synthetic dataset chunked in the two interpolation dimensions import dask.array as da nt = 10 nlat = 20 nlon = 10 nq = 21 ds = xr.Dataset( data_vars={ "foo": ( ("lat", "lon", "dayofyear", "q"), da.random.random((nlat, nlon, nt, nq), chunks=(10, 10, 10, -1)), ), "bar": (("lat", "lon"), da.random.random((nlat, nlon), chunks=(10, 10))), }, coords={ "lat": np.linspace(-89.5, 89.6, nlat), "lon": np.linspace(-179.5, 179.6, nlon), "dayofyear": np.arange(0, nt), "q": np.linspace(0, 1, nq), }, ) # Interpolate along non-chunked dimension with raise_if_dask_computes(): actual = ds.interp(q=ds["bar"], kwargs={"fill_value": None}) expected = ds.compute().interp(q=ds["bar"], kwargs={"fill_value": None}) assert_identical(actual, expected) @requires_scipy @pytest.mark.parametrize( "chunk", [ pytest.param( True, marks=pytest.mark.skipif(not has_dask, 
reason="requires_dask") ), False, ], ) def test_interp_vectorized_shared_dims(chunk: bool) -> None: # GH4463 da = xr.DataArray( [[[1, 2, 3], [2, 3, 4]], [[1, 2, 3], [2, 3, 4]]], dims=("t", "x", "y"), coords={"x": [1, 2], "y": [1, 2, 3], "t": [10, 12]}, ) dy = xr.DataArray([1.5, 2.5], dims=("u",), coords={"u": [45, 55]}) dx = xr.DataArray( [[1.5, 1.5], [1.5, 1.5]], dims=("t", "u"), coords={"u": [45, 55], "t": [10, 12]} ) if chunk: da = da.chunk(t=1) with raise_if_dask_computes(): actual = da.interp(y=dy, x=dx, method="linear") expected = xr.DataArray( [[2, 3], [2, 3]], dims=("t", "u"), coords={"u": [45, 55], "t": [10, 12], "x": dx, "y": dy}, ) assert_identical(actual, expected) xarray-2025.09.0/xarray/tests/test_merge.py000066400000000000000000000561501505620616400205330ustar00rootroot00000000000000from __future__ import annotations import numpy as np import pytest import xarray as xr from xarray.core import dtypes from xarray.core.options import set_options from xarray.structure import merge from xarray.structure.merge import MergeError from xarray.testing import assert_equal, assert_identical from xarray.tests.test_dataset import create_test_data class TestMergeInternals: def test_broadcast_dimension_size(self): actual = merge.broadcast_dimension_size( [xr.Variable("x", [1]), xr.Variable("y", [2, 1])] ) assert actual == {"x": 1, "y": 2} actual = merge.broadcast_dimension_size( [xr.Variable(("x", "y"), [[1, 2]]), xr.Variable("y", [2, 1])] ) assert actual == {"x": 1, "y": 2} with pytest.raises(ValueError): merge.broadcast_dimension_size( [xr.Variable(("x", "y"), [[1, 2]]), xr.Variable("y", [2])] ) class TestMergeFunction: def test_merge_arrays(self): data = create_test_data(add_attrs=False) actual = xr.merge([data.var1, data.var2]) expected = data[["var1", "var2"]] assert_identical(actual, expected) @pytest.mark.parametrize("use_new_combine_kwarg_defaults", [True, False]) def test_merge_datasets(self, use_new_combine_kwarg_defaults): with set_options(use_new_combine_kwarg_defaults=use_new_combine_kwarg_defaults): data = create_test_data(add_attrs=False, use_extension_array=True) actual = xr.merge([data[["var1"]], data[["var2"]]]) expected = data[["var1", "var2"]] assert_identical(actual, expected) actual = xr.merge([data, data]) assert_identical(actual, data) def test_merge_dataarray_unnamed(self): data = xr.DataArray([1, 2], dims="x") with pytest.raises(ValueError, match=r"without providing an explicit name"): xr.merge([data]) def test_merge_arrays_attrs_default(self): var1_attrs = {"a": 1, "b": 2} var2_attrs = {"a": 1, "c": 3} expected_attrs = {"a": 1, "b": 2} data = create_test_data(add_attrs=False) expected = data[["var1", "var2"]].copy() expected.var1.attrs = var1_attrs expected.var2.attrs = var2_attrs expected.attrs = expected_attrs data.var1.attrs = var1_attrs data.var2.attrs = var2_attrs actual = xr.merge([data.var1, data.var2]) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( 
"override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 1, "b": 2, "c": 3}, {"b": 1, "c": 3, "d": 4}, {"a": 1, "c": 3, "d": 4}, False, ), ( "drop_conflicts", {"a": 1, "b": np.array([2]), "c": np.array([3])}, {"b": 1, "c": np.array([3]), "d": 4}, {"a": 1, "c": np.array([3]), "d": 4}, False, ), ( lambda attrs, context: attrs[1], {"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 3, "c": 1}, {"a": 4, "b": 3, "c": 1}, False, ), ], ) def test_merge_arrays_attrs( self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception ): data1 = xr.Dataset(attrs=var1_attrs) data2 = xr.Dataset(attrs=var2_attrs) if expect_exception: with pytest.raises(MergeError, match="combine_attrs"): actual = xr.merge([data1, data2], combine_attrs=combine_attrs) else: actual = xr.merge([data1, data2], combine_attrs=combine_attrs) expected = xr.Dataset(attrs=expected_attrs) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 1, "b": 2, "c": 3}, {"b": 1, "c": 3, "d": 4}, {"a": 1, "c": 3, "d": 4}, False, ), ( lambda attrs, context: attrs[1], {"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 3, "c": 1}, {"a": 4, "b": 3, "c": 1}, False, ), ], ) def test_merge_arrays_attrs_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" input_attrs1 = attrs1.copy() data1 = xr.Dataset( {"var1": ("dim1", [], attrs1)}, coords={"dim1": ("dim1", [], attrs1)} ) input_attrs2 = attrs2.copy() data2 = xr.Dataset( {"var1": ("dim1", [], attrs2)}, coords={"dim1": ("dim1", [], attrs2)} ) if expect_exception: with pytest.raises(MergeError, match="combine_attrs"): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): actual = xr.merge([data1, data2], combine_attrs=combine_attrs) else: actual = xr.merge( [data1, data2], compat="no_conflicts", combine_attrs=combine_attrs ) expected = xr.Dataset( {"var1": ("dim1", [], expected_attrs)}, coords={"dim1": ("dim1", [], expected_attrs)}, ) assert_identical(actual, expected) # Check also that input attributes weren't modified assert data1["var1"].attrs == input_attrs1 assert data1.coords["dim1"].attrs == input_attrs1 assert data2["var1"].attrs == input_attrs2 assert data2.coords["dim1"].attrs == input_attrs2 def test_merge_attrs_override_copy(self): ds1 = xr.Dataset(attrs={"x": 0}) ds2 = xr.Dataset(attrs={"x": 1}) ds3 = xr.merge([ds1, ds2], combine_attrs="override") ds3.attrs["x"] = 2 assert ds1.x == 0 def test_merge_attrs_drop_conflicts(self): ds1 = xr.Dataset(attrs={"a": 0, "b": 0, "c": 0}) ds2 = xr.Dataset(attrs={"b": 0, "c": 1, "d": 0}) ds3 = xr.Dataset(attrs={"a": 0, "b": 1, "c": 0, "e": 0}) actual = xr.merge([ds1, ds2, ds3], combine_attrs="drop_conflicts") expected = xr.Dataset(attrs={"a": 0, "d": 0, "e": 0}) 
assert_identical(actual, expected) def test_merge_attrs_no_conflicts_compat_minimal(self): """make sure compat="minimal" does not silence errors""" ds1 = xr.Dataset({"a": ("x", [], {"a": 0})}) ds2 = xr.Dataset({"a": ("x", [], {"a": 1})}) with pytest.raises(xr.MergeError, match="combine_attrs"): xr.merge([ds1, ds2], combine_attrs="no_conflicts", compat="minimal") def test_merge_dicts_simple(self): actual = xr.merge([{"foo": 0}, {"bar": "one"}, {"baz": 3.5}]) expected = xr.Dataset({"foo": 0, "bar": "one", "baz": 3.5}) assert_identical(actual, expected) def test_merge_dicts_dims(self): actual = xr.merge([{"y": ("x", [13])}, {"x": [12]}]) expected = xr.Dataset({"x": [12], "y": ("x", [13])}) assert_identical(actual, expected) def test_merge_coordinates(self): coords1 = xr.Coordinates({"x": ("x", [0, 1, 2])}) coords2 = xr.Coordinates({"y": ("y", [3, 4, 5])}) expected = xr.Dataset(coords={"x": [0, 1, 2], "y": [3, 4, 5]}) actual = xr.merge([coords1, coords2]) assert_identical(actual, expected) def test_merge_error(self): ds = xr.Dataset({"x": 0}) with pytest.raises(xr.MergeError): xr.merge([ds, ds + 1]) def test_merge_alignment_error(self): ds = xr.Dataset(coords={"x": [1, 2]}) other = xr.Dataset(coords={"x": [2, 3]}) with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"): xr.merge([ds, other], join="exact") def test_merge_wrong_input_error(self): with pytest.raises(TypeError, match=r"objects must be an iterable"): xr.merge([1]) ds = xr.Dataset(coords={"x": [1, 2]}) with pytest.raises(TypeError, match=r"objects must be an iterable"): xr.merge({"a": ds}) with pytest.raises(TypeError, match=r"objects must be an iterable"): xr.merge([ds, 1]) def test_merge_no_conflicts_single_var(self): ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]}) expected = xr.Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]}) assert expected.identical( xr.merge([ds1, ds2], compat="no_conflicts", join="outer") ) assert expected.identical( xr.merge([ds2, ds1], compat="no_conflicts", join="outer") ) assert ds1.identical(xr.merge([ds1, ds2], compat="no_conflicts", join="left")) assert ds2.identical(xr.merge([ds1, ds2], compat="no_conflicts", join="right")) expected = xr.Dataset({"a": ("x", [2]), "x": [1]}) assert expected.identical( xr.merge([ds1, ds2], compat="no_conflicts", join="inner") ) with pytest.raises(xr.MergeError): ds3 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]}) xr.merge([ds1, ds3], compat="no_conflicts", join="outer") with pytest.raises(xr.MergeError): ds3 = xr.Dataset({"a": ("y", [2, 3]), "y": [1, 2]}) xr.merge([ds1, ds3], compat="no_conflicts", join="outer") def test_merge_no_conflicts_multi_var(self): data = create_test_data(add_attrs=False) data1 = data.copy(deep=True) data2 = data.copy(deep=True) expected = data[["var1", "var2"]] actual = xr.merge([data1.var1, data2.var2], compat="no_conflicts") assert_identical(expected, actual) data1["var1"][:, :5] = np.nan data2["var1"][:, 5:] = np.nan data1["var2"][:4, :] = np.nan data2["var2"][4:, :] = np.nan del data2["var3"] actual = xr.merge([data1, data2], compat="no_conflicts") assert_equal(data, actual) def test_merge_no_conflicts_preserve_attrs(self): data = xr.Dataset({"x": ([], 0, {"foo": "bar"})}) actual = xr.merge([data, data], combine_attrs="no_conflicts") assert_identical(data, actual) def test_merge_no_conflicts_broadcast(self): datasets = [xr.Dataset({"x": ("y", [0])}), xr.Dataset({"x": np.nan})] actual = xr.merge(datasets, compat="no_conflicts") expected = xr.Dataset({"x": ("y", 
[0])}) assert_identical(expected, actual) datasets = [xr.Dataset({"x": ("y", [np.nan])}), xr.Dataset({"x": 0})] actual = xr.merge(datasets, compat="no_conflicts") assert_identical(expected, actual) class TestMergeMethod: def test_merge(self): data = create_test_data() ds1 = data[["var1"]] ds2 = data[["var3"]] expected = data[["var1", "var3"]] actual = ds1.merge(ds2) assert_identical(expected, actual) actual = ds2.merge(ds1) assert_identical(expected, actual) actual = data.merge(data) assert_identical(data, actual) actual = data.reset_coords(drop=True).merge(data) assert_identical(data, actual) actual = data.merge(data.reset_coords(drop=True)) assert_identical(data, actual) with pytest.raises(ValueError, match="conflicting values for variable"): ds1.merge(ds2.rename({"var3": "var1"})) with pytest.raises(ValueError, match=r"should be coordinates or not"): data.reset_coords().merge(data) with pytest.raises(ValueError, match=r"should be coordinates or not"): data.merge(data.reset_coords()) @pytest.mark.parametrize( "join", ["outer", "inner", "left", "right", "exact", "override"] ) def test_merge_drop_attrs(self, join): data = create_test_data() ds1 = data[["var1"]] ds2 = data[["var3"]] ds1.coords["dim2"].attrs["keep me"] = "example" ds2.coords["numbers"].attrs["foo"] = "bar" actual = ds1.merge(ds2, combine_attrs="drop", join=join) assert actual.coords["dim2"].attrs == {} assert actual.coords["numbers"].attrs == {} assert ds1.coords["dim2"].attrs["keep me"] == "example" assert ds2.coords["numbers"].attrs["foo"] == "bar" def test_merge_compat_broadcast_equals(self): ds1 = xr.Dataset({"x": 0}) ds2 = xr.Dataset({"x": ("y", [0, 0])}) actual = ds1.merge(ds2, compat="broadcast_equals") assert_identical(ds2, actual) actual = ds2.merge(ds1, compat="broadcast_equals") assert_identical(ds2, actual) actual = ds1.copy() actual.update(ds2) assert_identical(ds2, actual) ds1 = xr.Dataset({"x": np.nan}) ds2 = xr.Dataset({"x": ("y", [np.nan, np.nan])}) actual = ds1.merge(ds2, compat="broadcast_equals") assert_identical(ds2, actual) def test_merge_compat(self): ds1 = xr.Dataset({"x": 0}) ds2 = xr.Dataset({"x": 1}) for compat in ["broadcast_equals", "equals", "identical", "no_conflicts"]: with pytest.raises(xr.MergeError): ds1.merge(ds2, compat=compat) ds2 = xr.Dataset({"x": [0, 0]}) for compat in ["equals", "identical"]: with pytest.raises(ValueError, match=r"should be coordinates or not"): ds1.merge(ds2, compat=compat) ds2 = xr.Dataset({"x": ((), 0, {"foo": "bar"})}) with pytest.raises(xr.MergeError): ds1.merge(ds2, compat="identical") with pytest.raises(ValueError, match=r"compat=.* invalid"): ds1.merge(ds2, compat="foobar") assert ds1.identical(ds1.merge(ds2, compat="override")) def test_merge_compat_minimal(self) -> None: """Test that we drop the conflicting bar coordinate.""" # https://github.com/pydata/xarray/issues/7405 # https://github.com/pydata/xarray/issues/7588 ds1 = xr.Dataset(coords={"foo": [1, 2, 3], "bar": 4}) ds2 = xr.Dataset(coords={"foo": [1, 2, 3], "bar": 5}) actual = xr.merge([ds1, ds2], compat="minimal") expected = xr.Dataset(coords={"foo": [1, 2, 3]}) assert_identical(actual, expected) def test_merge_join_outer(self): ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]}) expected = xr.Dataset( {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]} ) assert expected.identical(ds1.merge(ds2, join="outer")) assert expected.identical(ds2.merge(ds1, join="outer")) expected = expected.isel(x=slice(2)) assert 
expected.identical(ds1.merge(ds2, join="left")) assert expected.identical(ds2.merge(ds1, join="right")) expected = expected.isel(x=slice(1, 2)) assert expected.identical(ds1.merge(ds2, join="inner")) assert expected.identical(ds2.merge(ds1, join="inner")) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}]) def test_merge_fill_value(self, fill_value): ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]}) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_a = fill_value_b = np.nan elif isinstance(fill_value, dict): fill_value_a = fill_value["a"] fill_value_b = fill_value["b"] else: fill_value_a = fill_value_b = fill_value expected = xr.Dataset( {"a": ("x", [1, 2, fill_value_a]), "b": ("x", [fill_value_b, 3, 4])}, {"x": [0, 1, 2]}, ) assert expected.identical(ds1.merge(ds2, join="outer", fill_value=fill_value)) assert expected.identical(ds2.merge(ds1, join="outer", fill_value=fill_value)) assert expected.identical( xr.merge([ds1, ds2], join="outer", fill_value=fill_value) ) def test_merge_no_conflicts(self): ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]}) expected = xr.Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]}) assert expected.identical(ds1.merge(ds2, compat="no_conflicts", join="outer")) assert expected.identical(ds2.merge(ds1, compat="no_conflicts", join="outer")) assert ds1.identical(ds1.merge(ds2, compat="no_conflicts", join="left")) assert ds2.identical(ds1.merge(ds2, compat="no_conflicts", join="right")) expected2 = xr.Dataset({"a": ("x", [2]), "x": [1]}) assert expected2.identical(ds1.merge(ds2, compat="no_conflicts", join="inner")) with pytest.raises(xr.MergeError): ds3 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]}) ds1.merge(ds3, compat="no_conflicts", join="outer") with pytest.raises(xr.MergeError): ds3 = xr.Dataset({"a": ("y", [2, 3]), "y": [1, 2]}) ds1.merge(ds3, compat="no_conflicts", join="outer") def test_merge_dataarray(self): ds = xr.Dataset({"a": 0}) da = xr.DataArray(data=1, name="b") assert_identical(ds.merge(da), xr.merge([ds, da])) @pytest.mark.parametrize( ["combine_attrs", "attrs1", "attrs2", "expected_attrs", "expect_error"], # don't need to test thoroughly ( ("drop", {"a": 0, "b": 1, "c": 2}, {"a": 1, "b": 2, "c": 3}, {}, False), ( "drop_conflicts", {"a": 0, "b": 1, "c": 2}, {"b": 2, "c": 2, "d": 3}, {"a": 0, "c": 2, "d": 3}, False, ), ("override", {"a": 0, "b": 1}, {"a": 1, "b": 2}, {"a": 0, "b": 1}, False), ("no_conflicts", {"a": 0, "b": 1}, {"a": 0, "b": 2}, None, True), ("identical", {"a": 0, "b": 1}, {"a": 0, "b": 2}, None, True), ), ) def test_merge_combine_attrs( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_error ): ds1 = xr.Dataset(attrs=attrs1) ds2 = xr.Dataset(attrs=attrs2) if expect_error: with pytest.raises(xr.MergeError): ds1.merge(ds2, combine_attrs=combine_attrs) else: actual = ds1.merge(ds2, combine_attrs=combine_attrs) expected = xr.Dataset(attrs=expected_attrs) assert_identical(actual, expected) class TestNewDefaults: def test_merge_datasets_false_warning(self): data = create_test_data(add_attrs=False, use_extension_array=True) with set_options(use_new_combine_kwarg_defaults=False): old = xr.merge([data, data]) with set_options(use_new_combine_kwarg_defaults=True): new = xr.merge([data, data]) assert_identical(old, new) def test_merge(self): data = create_test_data() ds1 = data[["var1"]] ds2 = data[["var3"]] expected = data[["var1", 
"var3"]] with set_options(use_new_combine_kwarg_defaults=True): actual = ds1.merge(ds2) assert_identical(expected, actual) actual = ds2.merge(ds1) assert_identical(expected, actual) actual = data.merge(data) assert_identical(data, actual) ds1.merge(ds2.rename({"var3": "var1"})) with pytest.raises(ValueError, match=r"should be coordinates or not"): data.reset_coords().merge(data) with pytest.raises(ValueError, match=r"should be coordinates or not"): data.merge(data.reset_coords()) def test_merge_broadcast_equals(self): ds1 = xr.Dataset({"x": 0}) ds2 = xr.Dataset({"x": ("y", [0, 0])}) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from compat='no_conflicts' to compat='override'", ): old = ds1.merge(ds2) with set_options(use_new_combine_kwarg_defaults=True): new = ds1.merge(ds2) assert_identical(ds2, old) with pytest.raises(AssertionError): assert_identical(old, new) def test_merge_auto_align(self): ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]}) expected = xr.Dataset( {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]} ) with set_options(use_new_combine_kwarg_defaults=False): with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): assert expected.identical(ds1.merge(ds2)) with pytest.warns( FutureWarning, match="will change from join='outer' to join='exact'" ): assert expected.identical(ds2.merge(ds1)) with set_options(use_new_combine_kwarg_defaults=True): with pytest.raises(ValueError, match="might be related to new default"): expected.identical(ds2.merge(ds1)) xarray-2025.09.0/xarray/tests/test_missing.py000066400000000000000000000625461505620616400211130ustar00rootroot00000000000000from __future__ import annotations import itertools from typing import Any from unittest import mock import numpy as np import pandas as pd import pytest import xarray as xr from xarray.core import indexing from xarray.core.missing import ( NumpyInterpolator, ScipyInterpolator, SplineInterpolator, _get_nan_block_lengths, get_clean_interp_index, ) from xarray.namedarray.pycompat import array_type from xarray.tests import ( _CFTIME_CALENDARS, assert_allclose, assert_array_equal, assert_equal, raise_if_dask_computes, requires_bottleneck, requires_cftime, requires_dask, requires_numbagg, requires_numbagg_or_bottleneck, requires_scipy, ) dask_array_type = array_type("dask") @pytest.fixture def da(): return xr.DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time") @pytest.fixture def cf_da(): def _cf_da(calendar, freq="1D"): times = xr.date_range( start="1970-01-01", freq=freq, periods=10, calendar=calendar, use_cftime=True, ) values = np.arange(10) return xr.DataArray(values, dims=("time",), coords={"time": times}) return _cf_da @pytest.fixture def ds(): ds = xr.Dataset() ds["var1"] = xr.DataArray( [0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time" ) ds["var2"] = xr.DataArray( [10, np.nan, 11, 12, np.nan, 13, 14, 15, np.nan, 16, 17], dims="x" ) return ds def make_interpolate_example_data(shape, frac_nan, seed=12345, non_uniform=False): rs = np.random.default_rng(seed) vals = rs.normal(size=shape) if frac_nan == 1: vals[:] = np.nan elif frac_nan == 0: pass else: n_missing = int(vals.size * frac_nan) ys = np.arange(shape[0]) xs = np.arange(shape[1]) if n_missing: np.random.shuffle(ys) ys = ys[:n_missing] np.random.shuffle(xs) xs = xs[:n_missing] vals[ys, xs] = np.nan if non_uniform: # construct a datetime index that 
has irregular spacing deltas = pd.to_timedelta(rs.normal(size=shape[0], scale=10), unit="D") coords = {"time": (pd.Timestamp("2000-01-01") + deltas).sort_values()} else: coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])} da = xr.DataArray(vals, dims=("time", "x"), coords=coords) df = da.to_pandas() return da, df @pytest.mark.parametrize("fill_value", [None, np.nan, 47.11]) @pytest.mark.parametrize( "method", ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"] ) @requires_scipy def test_interpolate_pd_compat(method, fill_value) -> None: shapes = [(8, 8), (1, 20), (20, 1), (100, 100)] frac_nans = [0, 0.5, 1] for shape, frac_nan in itertools.product(shapes, frac_nans): da, df = make_interpolate_example_data(shape, frac_nan) for dim in ["time", "x"]: actual = da.interpolate_na(method=method, dim=dim, fill_value=fill_value) # need limit_direction="both" here, to let pandas fill # in both directions instead of default forward direction only expected = df.interpolate( method=method, axis=da.get_axis_num(dim), limit_direction="both", fill_value=fill_value, ) if method == "linear": # Note, Pandas does not take left/right fill_value into account # for the numpy linear methods. # see https://github.com/pandas-dev/pandas/issues/55144 # This aligns the pandas output with the xarray output fixed = expected.values.copy() fixed[pd.isnull(actual.values)] = np.nan fixed[actual.values == fill_value] = fill_value else: fixed = expected.values np.testing.assert_allclose(actual.values, fixed) @requires_scipy @pytest.mark.parametrize("method", ["barycentric", "krogh", "pchip", "spline", "akima"]) def test_scipy_methods_function(method) -> None: # Note: Pandas does some wacky things with these methods and the full # integration tests won't work. da, _ = make_interpolate_example_data((25, 25), 0.4, non_uniform=True) if method == "spline": with pytest.warns(PendingDeprecationWarning): actual = da.interpolate_na(method=method, dim="time") else: actual = da.interpolate_na(method=method, dim="time") assert (da.count("time") <= actual.count("time")).all() @requires_scipy def test_interpolate_pd_compat_non_uniform_index(): shapes = [(8, 8), (1, 20), (20, 1), (100, 100)] frac_nans = [0, 0.5, 1] methods = ["time", "index", "values"] for shape, frac_nan, method in itertools.product(shapes, frac_nans, methods): da, df = make_interpolate_example_data(shape, frac_nan, non_uniform=True) for dim in ["time", "x"]: if method == "time" and dim != "time": continue actual = da.interpolate_na( method="linear", dim=dim, use_coordinate=True, fill_value=np.nan ) expected = df.interpolate( method=method, axis=da.get_axis_num(dim), ) # Note, Pandas does some odd things with the left/right fill_value # for the linear methods. This next line inforces the xarray # fill_value convention on the pandas output. 
Therefore, this test # only checks that interpolated values are the same (not nans) expected_values = expected.values.copy() expected_values[pd.isnull(actual.values)] = np.nan np.testing.assert_allclose(actual.values, expected_values) @requires_scipy def test_interpolate_pd_compat_polynomial(): shapes = [(8, 8), (1, 20), (20, 1), (100, 100)] frac_nans = [0, 0.5, 1] orders = [1, 2, 3] for shape, frac_nan, order in itertools.product(shapes, frac_nans, orders): da, df = make_interpolate_example_data(shape, frac_nan) for dim in ["time", "x"]: actual = da.interpolate_na( method="polynomial", order=order, dim=dim, use_coordinate=False ) expected = df.interpolate( method="polynomial", order=order, axis=da.get_axis_num(dim) ) np.testing.assert_allclose(actual.values, expected.values) @requires_scipy def test_interpolate_unsorted_index_raises(): vals = np.array([1, 2, 3], dtype=np.float64) expected = xr.DataArray(vals, dims="x", coords={"x": [2, 1, 3]}) with pytest.raises(ValueError, match=r"Index 'x' must be monotonically increasing"): expected.interpolate_na(dim="x", method="index") # type: ignore[arg-type] def test_interpolate_no_dim_raises(): da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x") with pytest.raises(NotImplementedError, match=r"dim is a required argument"): da.interpolate_na(method="linear") def test_interpolate_invalid_interpolator_raises(): da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x") with pytest.raises(ValueError, match=r"not a valid"): da.interpolate_na(dim="x", method="foo") # type: ignore[arg-type] def test_interpolate_duplicate_values_raises(): data = np.random.randn(2, 3) da = xr.DataArray(data, coords=[("x", ["a", "a"]), ("y", [0, 1, 2])]) with pytest.raises(ValueError, match=r"Index 'x' has duplicate values"): da.interpolate_na(dim="x", method="foo") # type: ignore[arg-type] def test_interpolate_multiindex_raises(): data = np.random.randn(2, 3) data[1, 1] = np.nan da = xr.DataArray(data, coords=[("x", ["a", "b"]), ("y", [0, 1, 2])]) das = da.stack(z=("x", "y")) with pytest.raises(TypeError, match=r"Index 'z' must be castable to float64"): das.interpolate_na(dim="z") def test_interpolate_2d_coord_raises(): coords = { "x": xr.Variable(("a", "b"), np.arange(6).reshape(2, 3)), "y": xr.Variable(("a", "b"), np.arange(6).reshape(2, 3)) * 2, } data = np.random.randn(2, 3) data[1, 1] = np.nan da = xr.DataArray(data, dims=("a", "b"), coords=coords) with pytest.raises(ValueError, match=r"interpolation must be 1D"): da.interpolate_na(dim="a", use_coordinate="x") @requires_scipy def test_interpolate_kwargs(): da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x") expected = xr.DataArray(np.array([4, 5, 6], dtype=np.float64), dims="x") actual = da.interpolate_na(dim="x", fill_value="extrapolate") assert_equal(actual, expected) expected = xr.DataArray(np.array([4, 5, -999], dtype=np.float64), dims="x") actual = da.interpolate_na(dim="x", fill_value=-999) assert_equal(actual, expected) def test_interpolate_keep_attrs(): vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64) mvals = vals.copy() mvals[2] = np.nan missing = xr.DataArray(mvals, dims="x") missing.attrs = {"test": "value"} actual = missing.interpolate_na(dim="x", keep_attrs=True) assert actual.attrs == {"test": "value"} def test_interpolate(): vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64) expected = xr.DataArray(vals, dims="x") mvals = vals.copy() mvals[2] = np.nan missing = xr.DataArray(mvals, dims="x") actual = missing.interpolate_na(dim="x") 
assert_equal(actual, expected) @requires_scipy @pytest.mark.parametrize( "method,vals", [ pytest.param(method, vals, id=f"{desc}:{method}") for method in [ "linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", ] for (desc, vals) in [ ("no nans", np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)), ("one nan", np.array([1, np.nan, np.nan], dtype=np.float64)), ("all nans", np.full(6, np.nan, dtype=np.float64)), ] ], ) def test_interp1d_fastrack(method, vals): expected = xr.DataArray(vals, dims="x") actual = expected.interpolate_na(dim="x", method=method) assert_equal(actual, expected) @requires_bottleneck def test_interpolate_limits(): da = xr.DataArray( np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64), dims="x" ) actual = da.interpolate_na(dim="x", limit=None) assert actual.isnull().sum() == 0 actual = da.interpolate_na(dim="x", limit=2) expected = xr.DataArray( np.array([1, 2, 3, 4, np.nan, 6], dtype=np.float64), dims="x" ) assert_equal(actual, expected) @requires_scipy def test_interpolate_methods(): for method in ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]: kwargs: dict[str, Any] = {} da = xr.DataArray( np.array([0, 1, 2, np.nan, np.nan, np.nan, 6, 7, 8], dtype=np.float64), dims="x", ) actual = da.interpolate_na("x", method=method, **kwargs) # type: ignore[arg-type] assert actual.isnull().sum() == 0 actual = da.interpolate_na("x", method=method, limit=2, **kwargs) # type: ignore[arg-type] assert actual.isnull().sum() == 1 @requires_scipy def test_interpolators(): for method, interpolator in [ ("linear", NumpyInterpolator), ("linear", ScipyInterpolator), ("spline", SplineInterpolator), ]: xi = np.array([-1, 0, 1, 2, 5], dtype=np.float64) yi = np.array([-10, 0, 10, 20, 50], dtype=np.float64) x = np.array([3, 4], dtype=np.float64) f = interpolator(xi, yi, method=method) out = f(x) assert pd.isnull(out).sum() == 0 def test_interpolate_use_coordinate(): xc = xr.Variable("x", [100, 200, 300, 400, 500, 600]) da = xr.DataArray( np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64), dims="x", coords={"xc": xc}, ) # use_coordinate == False is same as using the default index actual = da.interpolate_na(dim="x", use_coordinate=False) expected = da.interpolate_na(dim="x") assert_equal(actual, expected) # possible to specify non index coordinate actual = da.interpolate_na(dim="x", use_coordinate="xc") expected = da.interpolate_na(dim="x") assert_equal(actual, expected) # possible to specify index coordinate by name actual = da.interpolate_na(dim="x", use_coordinate="x") expected = da.interpolate_na(dim="x") assert_equal(actual, expected) @requires_dask def test_interpolate_dask(): da, _ = make_interpolate_example_data((40, 40), 0.5) da = da.chunk({"x": 5}) actual = da.interpolate_na("time") expected = da.load().interpolate_na("time") assert isinstance(actual.data, dask_array_type) assert_equal(actual.compute(), expected) # with limit da = da.chunk({"x": 5}) actual = da.interpolate_na("time", limit=3) expected = da.load().interpolate_na("time", limit=3) assert isinstance(actual.data, dask_array_type) assert_equal(actual, expected) @requires_dask def test_interpolate_dask_raises_for_invalid_chunk_dim(): da, _ = make_interpolate_example_data((40, 40), 0.5) da = da.chunk({"time": 5}) # this checks for ValueError in dask.array.apply_gufunc with pytest.raises(ValueError, match=r"consists of multiple chunks"): da.interpolate_na("time") @requires_dask @requires_scipy @pytest.mark.parametrize("dtype, method", [(int, "linear"), (int, "nearest")]) def 
test_interpolate_dask_expected_dtype(dtype, method): da = xr.DataArray( data=np.array([0, 1], dtype=dtype), dims=["time"], coords=dict(time=np.array([0, 1])), ).chunk(dict(time=2)) da = da.interp(time=np.array([0, 0.5, 1, 2]), method=method) assert da.dtype == da.compute().dtype @requires_numbagg_or_bottleneck def test_ffill(): da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x") expected = xr.DataArray(np.array([4, 5, 5], dtype=np.float64), dims="x") actual = da.ffill("x") assert_equal(actual, expected) @pytest.mark.parametrize("compute_backend", [None], indirect=True) @pytest.mark.parametrize("method", ["ffill", "bfill"]) def test_b_ffill_use_bottleneck_numbagg(method, compute_backend): """ bfill & ffill fail if both bottleneck and numba are disabled """ da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x") with pytest.raises(RuntimeError): getattr(da, method)("x") @requires_dask @pytest.mark.parametrize("compute_backend", [None], indirect=True) @pytest.mark.parametrize("method", ["ffill", "bfill"]) def test_b_ffill_use_bottleneck_dask(method, compute_backend): """ ffill fails if both bottleneck and numba are disabled, on dask arrays """ da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x") with pytest.raises(RuntimeError): getattr(da, method)("x") @requires_numbagg @requires_dask @pytest.mark.parametrize("compute_backend", ["numbagg"], indirect=True) def test_ffill_use_numbagg_dask(compute_backend): da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x") da = da.chunk(x=-1) # Succeeds with a single chunk: _ = da.ffill("x").compute() @requires_bottleneck @requires_dask @pytest.mark.parametrize("method", ["ffill", "bfill"]) def test_ffill_bfill_dask(method): da, _ = make_interpolate_example_data((40, 40), 0.5) da = da.chunk({"x": 5}) dask_method = getattr(da, method) numpy_method = getattr(da.compute(), method) # unchunked axis with raise_if_dask_computes(): actual = dask_method("time") expected = numpy_method("time") assert_equal(actual, expected) # chunked axis with raise_if_dask_computes(): actual = dask_method("x") expected = numpy_method("x") assert_equal(actual, expected) # with limit with raise_if_dask_computes(): actual = dask_method("time", limit=3) expected = numpy_method("time", limit=3) assert_equal(actual, expected) # limit < axis size with raise_if_dask_computes(): actual = dask_method("x", limit=2) expected = numpy_method("x", limit=2) assert_equal(actual, expected) # limit > axis size with raise_if_dask_computes(): actual = dask_method("x", limit=41) expected = numpy_method("x", limit=41) assert_equal(actual, expected) @requires_bottleneck def test_ffill_bfill_nonans(): vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64) expected = xr.DataArray(vals, dims="x") actual = expected.ffill(dim="x") assert_equal(actual, expected) actual = expected.bfill(dim="x") assert_equal(actual, expected) @requires_bottleneck def test_ffill_bfill_allnans(): vals = np.full(6, np.nan, dtype=np.float64) expected = xr.DataArray(vals, dims="x") actual = expected.ffill(dim="x") assert_equal(actual, expected) actual = expected.bfill(dim="x") assert_equal(actual, expected) @requires_bottleneck def test_ffill_functions(da): result = da.ffill("time") assert result.isnull().sum() == 0 @requires_bottleneck def test_ffill_limit(): da = xr.DataArray( [0, np.nan, np.nan, np.nan, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time" ) result = da.ffill("time") expected = xr.DataArray([0, 0, 0, 0, 0, 3, 4, 5, 5, 6, 7], dims="time") 
assert_array_equal(result, expected) result = da.ffill("time", limit=1) expected = xr.DataArray( [0, 0, np.nan, np.nan, np.nan, 3, 4, 5, 5, 6, 7], dims="time" ) assert_array_equal(result, expected) def test_interpolate_dataset(ds): actual = ds.interpolate_na(dim="time") # no missing values in var1 assert actual["var1"].count("time") == actual.sizes["time"] # var2 should be the same as it was assert_array_equal(actual["var2"], ds["var2"]) @requires_bottleneck def test_ffill_dataset(ds): ds.ffill(dim="time") @requires_bottleneck def test_bfill_dataset(ds): ds.bfill(dim="time") @requires_bottleneck @pytest.mark.parametrize( "y, lengths_expected", [ [np.arange(9), [[1, 0, 7, 7, 7, 7, 7, 7, 0], [3, 3, 3, 0, 3, 3, 0, 2, 2]]], [ np.arange(9) * 3, [[3, 0, 21, 21, 21, 21, 21, 21, 0], [9, 9, 9, 0, 9, 9, 0, 6, 6]], ], [ [0, 2, 5, 6, 7, 8, 10, 12, 14], [[2, 0, 12, 12, 12, 12, 12, 12, 0], [6, 6, 6, 0, 4, 4, 0, 4, 4]], ], ], ) def test_interpolate_na_nan_block_lengths(y, lengths_expected): arr = [ [np.nan, 1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 4], [np.nan, np.nan, np.nan, 1, np.nan, np.nan, 4, np.nan, np.nan], ] da = xr.DataArray(arr, dims=["x", "y"], coords={"x": [0, 1], "y": y}) index = get_clean_interp_index(da, dim="y", use_coordinate=True) actual = _get_nan_block_lengths(da, dim="y", index=index) expected = da.copy(data=lengths_expected) assert_equal(actual, expected) @requires_cftime @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS) def test_get_clean_interp_index_cf_calendar(cf_da, calendar): """The index for CFTimeIndex is in units of days. This means that if two series using a 360 and 365 days calendar each have a trend of .01C/year, the linear regression coefficients will be different because they have different number of days. Another option would be to have an index in units of years, but this would likely create other difficulties.
""" i = get_clean_interp_index(cf_da(calendar), dim="time") np.testing.assert_array_equal(i, np.arange(10) * 1e9 * 86400) @requires_cftime @pytest.mark.parametrize("calendar", ["gregorian", "proleptic_gregorian"]) @pytest.mark.parametrize("freq", ["1D", "1ME", "1YE"]) def test_get_clean_interp_index_dt(cf_da, calendar, freq) -> None: """In the gregorian case, the index should be proportional to normal datetimes.""" g = cf_da(calendar, freq=freq) g["stime"] = xr.Variable( data=g.time.to_index().to_datetimeindex(time_unit="ns"), dims=("time",) ) gi = get_clean_interp_index(g, "time") si = get_clean_interp_index(g, "time", use_coordinate="stime") np.testing.assert_array_equal(gi, si) @requires_cftime def test_get_clean_interp_index_potential_overflow(): da = xr.DataArray( [0, 1, 2], dims=("time",), coords={ "time": xr.date_range( "0000-01-01", periods=3, calendar="360_day", use_cftime=True ) }, ) get_clean_interp_index(da, "time") @pytest.mark.parametrize("index", ([0, 2, 1], [0, 1, 1])) def test_get_clean_interp_index_strict(index): da = xr.DataArray([0, 1, 2], dims=("x",), coords={"x": index}) with pytest.raises(ValueError): get_clean_interp_index(da, "x") clean = get_clean_interp_index(da, "x", strict=False) np.testing.assert_array_equal(index, clean) assert clean.dtype == np.float64 @pytest.fixture def da_time(): return xr.DataArray( [np.nan, 1, 2, np.nan, np.nan, 5, np.nan, np.nan, np.nan, np.nan, 10], dims=["t"], ) def test_interpolate_na_max_gap_errors(da_time): with pytest.raises( NotImplementedError, match=r"max_gap not implemented for unlabeled coordinates" ): da_time.interpolate_na("t", max_gap=1) with pytest.raises(ValueError, match=r"max_gap must be a scalar."): da_time.interpolate_na("t", max_gap=(1,)) da_time["t"] = pd.date_range("2001-01-01", freq="h", periods=11) with pytest.raises(TypeError, match=r"Expected value of type str"): da_time.interpolate_na("t", max_gap=1) with pytest.raises(TypeError, match=r"Expected integer or floating point"): da_time.interpolate_na("t", max_gap="1h", use_coordinate=False) with pytest.raises(ValueError, match=r"Could not convert 'huh' to timedelta64"): da_time.interpolate_na("t", max_gap="huh") @requires_bottleneck @pytest.mark.parametrize( "use_cftime", [False, pytest.param(True, marks=requires_cftime)], ) @pytest.mark.parametrize("transform", [lambda x: x, lambda x: x.to_dataset(name="a")]) @pytest.mark.parametrize( "max_gap", ["3h", np.timedelta64(3, "h"), pd.to_timedelta("3h")] ) def test_interpolate_na_max_gap_time_specifier(da_time, max_gap, transform, use_cftime): da_time["t"] = xr.date_range( "2001-01-01", freq="h", periods=11, use_cftime=use_cftime ) expected = transform( da_time.copy(data=[np.nan, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan, 10]) ) actual = transform(da_time).interpolate_na("t", max_gap=max_gap) assert_allclose(actual, expected) @requires_bottleneck @pytest.mark.parametrize( "coords", [ pytest.param(None, marks=pytest.mark.xfail()), {"x": np.arange(4), "y": np.arange(12)}, ], ) def test_interpolate_na_2d(coords): n = np.nan da = xr.DataArray( [ [1, 2, 3, 4, n, 6, n, n, n, 10, 11, n], [n, n, 3, n, n, 6, n, n, n, 10, n, n], [n, n, 3, n, n, 6, n, n, n, 10, n, n], [n, 2, 3, 4, n, 6, n, n, n, 10, 11, n], ], dims=["x", "y"], coords=coords, ) actual = da.interpolate_na("y", max_gap=2) expected_y = da.copy( data=[ [1, 2, 3, 4, 5, 6, n, n, n, 10, 11, n], [n, n, 3, n, n, 6, n, n, n, 10, n, n], [n, n, 3, n, n, 6, n, n, n, 10, n, n], [n, 2, 3, 4, 5, 6, n, n, n, 10, 11, n], ] ) assert_equal(actual, expected_y) actual = 
da.interpolate_na("y", max_gap=1, fill_value="extrapolate") expected_y_extra = da.copy( data=[ [1, 2, 3, 4, n, 6, n, n, n, 10, 11, 12], [n, n, 3, n, n, 6, n, n, n, 10, n, n], [n, n, 3, n, n, 6, n, n, n, 10, n, n], [1, 2, 3, 4, n, 6, n, n, n, 10, 11, 12], ] ) assert_equal(actual, expected_y_extra) actual = da.interpolate_na("x", max_gap=3) expected_x = xr.DataArray( [ [1, 2, 3, 4, n, 6, n, n, n, 10, 11, n], [n, 2, 3, 4, n, 6, n, n, n, 10, 11, n], [n, 2, 3, 4, n, 6, n, n, n, 10, 11, n], [n, 2, 3, 4, n, 6, n, n, n, 10, 11, n], ], dims=["x", "y"], coords=coords, ) assert_equal(actual, expected_x) @requires_scipy def test_interpolators_complex_out_of_bounds(): """Ensure complex nans are used for complex data""" xi = np.array([-1, 0, 1, 2, 5], dtype=np.float64) yi = np.exp(1j * xi) x = np.array([-2, 1, 6], dtype=np.float64) expected = np.array( [np.nan + np.nan * 1j, np.exp(1j), np.nan + np.nan * 1j], dtype=yi.dtype ) for method, interpolator in [ ("linear", NumpyInterpolator), ("linear", ScipyInterpolator), ]: f = interpolator(xi, yi, method=method) actual = f(x) assert_array_equal(actual, expected) @requires_scipy def test_indexing_localize(): # regression test for GH10287 ds = xr.Dataset( { "sigma_a": xr.DataArray( data=np.ones((16, 8, 36811)), dims=["p", "t", "w"], coords={"w": np.linspace(0, 30000, 36811)}, ) } ) original_func = indexing.NumpyIndexingAdapter.__getitem__ def wrapper(self, indexer): return original_func(self, indexer) with mock.patch.object( indexing.NumpyIndexingAdapter, "__getitem__", side_effect=wrapper, autospec=True ) as mock_func: ds["sigma_a"].interp(w=15000.5) actual_indexer = mock_func.mock_calls[0].args[1]._key assert actual_indexer == (slice(None), slice(None), slice(18404, 18408)) xarray-2025.09.0/xarray/tests/test_namedarray.py000066400000000000000000000532461505620616400215620ustar00rootroot00000000000000from __future__ import annotations import copy import sys from abc import abstractmethod from collections.abc import Mapping from typing import TYPE_CHECKING, Any, Generic, cast, overload import numpy as np import pytest from packaging.version import Version from xarray.core.indexing import ExplicitlyIndexed from xarray.namedarray._typing import ( _arrayfunction_or_api, _default, _DType_co, _ShapeType_co, ) from xarray.namedarray.core import NamedArray, from_array if TYPE_CHECKING: from types import ModuleType from numpy.typing import ArrayLike, DTypeLike, NDArray from xarray.namedarray._typing import ( Default, _AttrsLike, _Dim, _DimsLike, _DType, _IndexKeyLike, _IntOrUnknown, _Shape, _ShapeLike, duckarray, ) class CustomArrayBase(Generic[_ShapeType_co, _DType_co]): def __init__(self, array: duckarray[Any, _DType_co]) -> None: self.array: duckarray[Any, _DType_co] = array @property def dtype(self) -> _DType_co: return self.array.dtype @property def shape(self) -> _Shape: return self.array.shape class CustomArray( CustomArrayBase[_ShapeType_co, _DType_co], Generic[_ShapeType_co, _DType_co] ): def __array__( self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None ) -> np.ndarray[Any, np.dtype[np.generic]]: if Version(np.__version__) >= Version("2.0.0"): return np.asarray(self.array, dtype=dtype, copy=copy) else: return np.asarray(self.array, dtype=dtype) class CustomArrayIndexable( CustomArrayBase[_ShapeType_co, _DType_co], ExplicitlyIndexed, Generic[_ShapeType_co, _DType_co], ): def __getitem__( self, key: _IndexKeyLike | CustomArrayIndexable[Any, Any], / ) -> CustomArrayIndexable[Any, _DType_co]: if isinstance(key, CustomArrayIndexable): if 
isinstance(key.array, type(self.array)): # TODO: key.array is duckarray here, can it be narrowed down further? # an _arrayapi cannot be used on a _arrayfunction for example. return type(self)(array=self.array[key.array]) # type: ignore[index] else: raise TypeError("key must have the same array type as self") else: return type(self)(array=self.array[key]) def __array_namespace__(self) -> ModuleType: return np def check_duck_array_typevar(a: duckarray[Any, _DType]) -> duckarray[Any, _DType]: # Mypy checks a is valid: b: duckarray[Any, _DType] = a # Runtime check if valid: if isinstance(b, _arrayfunction_or_api): return b else: missing_attrs = "" actual_attrs = set(dir(b)) for t in _arrayfunction_or_api: if sys.version_info >= (3, 13): # https://github.com/python/cpython/issues/104873 from typing import get_protocol_members expected_attrs = get_protocol_members(t) elif sys.version_info >= (3, 12): expected_attrs = t.__protocol_attrs__ else: from typing import _get_protocol_attrs # type: ignore[attr-defined] expected_attrs = _get_protocol_attrs(t) missing_attrs_ = expected_attrs - actual_attrs if missing_attrs_: missing_attrs += f"{t.__name__} - {missing_attrs_}\n" raise TypeError( f"a ({type(a)}) is not a valid _arrayfunction or _arrayapi. " "Missing following attrs:\n" f"{missing_attrs}" ) class NamedArraySubclassobjects: @pytest.fixture def target(self, data: np.ndarray[Any, Any]) -> Any: """Fixture that needs to be overridden""" raise NotImplementedError @abstractmethod def cls(self, *args: Any, **kwargs: Any) -> Any: """Method that needs to be overridden""" raise NotImplementedError @pytest.fixture def data(self) -> np.ndarray[Any, np.dtype[Any]]: return 0.5 * np.arange(10).reshape(2, 5) @pytest.fixture def random_inputs(self) -> np.ndarray[Any, np.dtype[np.float32]]: return np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) def test_properties(self, target: Any, data: Any) -> None: assert target.dims == ("x", "y") assert np.array_equal(target.data, data) assert target.dtype == float assert target.shape == (2, 5) assert target.ndim == 2 assert target.sizes == {"x": 2, "y": 5} assert target.size == 10 assert target.nbytes == 80 assert len(target) == 2 def test_attrs(self, target: Any) -> None: assert target.attrs == {} attrs = {"foo": "bar"} target.attrs = attrs assert target.attrs == attrs assert isinstance(target.attrs, dict) target.attrs["foo"] = "baz" assert target.attrs["foo"] == "baz" @pytest.mark.parametrize( "expected", [np.array([1, 2], dtype=np.dtype(np.int8)), [1, 2]] ) def test_init(self, expected: Any) -> None: actual = self.cls(("x",), expected) assert np.array_equal(np.asarray(actual.data), expected) actual = self.cls(("x",), expected) assert np.array_equal(np.asarray(actual.data), expected) def test_data(self, random_inputs: Any) -> None: expected = self.cls(["x", "y", "z"], random_inputs) assert np.array_equal(np.asarray(expected.data), random_inputs) with pytest.raises(ValueError): expected.data = np.random.random((3, 4)).astype(np.float64) d2 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) expected.data = d2 assert np.array_equal(np.asarray(expected.data), d2) class TestNamedArray(NamedArraySubclassobjects): def cls(self, *args: Any, **kwargs: Any) -> NamedArray[Any, Any]: return NamedArray(*args, **kwargs) @pytest.fixture def target(self, data: np.ndarray[Any, Any]) -> NamedArray[Any, Any]: return NamedArray(["x", "y"], data) @pytest.mark.parametrize( "expected", [ np.array([1, 2], dtype=np.dtype(np.int8)), pytest.param( [1, 2], 
marks=pytest.mark.xfail( reason="NamedArray only supports array-like objects" ), ), ], ) def test_init(self, expected: Any) -> None: super().test_init(expected) @pytest.mark.parametrize( "dims, data, expected, raise_error", [ (("x",), [1, 2, 3], np.array([1, 2, 3]), False), ((1,), np.array([4, 5, 6]), np.array([4, 5, 6]), False), ((), 2, np.array(2), False), # Fail: ( ("x",), NamedArray("time", np.array([1, 2, 3], dtype=np.dtype(np.int64))), np.array([1, 2, 3]), True, ), ], ) def test_from_array( self, dims: _DimsLike, data: ArrayLike, expected: np.ndarray[Any, Any], raise_error: bool, ) -> None: actual: NamedArray[Any, Any] if raise_error: with pytest.raises(TypeError, match="already a Named array"): actual = from_array(dims, data) # Named arrays are not allowed: from_array(actual) # type: ignore[call-overload] else: actual = from_array(dims, data) assert np.array_equal(np.asarray(actual.data), expected) def test_from_array_with_masked_array(self) -> None: masked_array: np.ndarray[Any, np.dtype[np.generic]] masked_array = np.ma.array([1, 2, 3], mask=[False, True, False]) # type: ignore[no-untyped-call] with pytest.raises(NotImplementedError): from_array(("x",), masked_array) def test_from_array_with_0d_object(self) -> None: data = np.empty((), dtype=object) data[()] = (10, 12, 12) narr = from_array((), data) np.array_equal(np.asarray(narr.data), data) # TODO: Make xr.core.indexing.ExplicitlyIndexed pass as a subclass of_arrayfunction_or_api # and remove this test. def test_from_array_with_explicitly_indexed( self, random_inputs: np.ndarray[Any, Any] ) -> None: array: CustomArray[Any, Any] array = CustomArray(random_inputs) output: NamedArray[Any, Any] output = from_array(("x", "y", "z"), array) assert isinstance(output.data, np.ndarray) array2: CustomArrayIndexable[Any, Any] array2 = CustomArrayIndexable(random_inputs) output2: NamedArray[Any, Any] output2 = from_array(("x", "y", "z"), array2) assert isinstance(output2.data, CustomArrayIndexable) def test_real_and_imag(self) -> None: expected_real: np.ndarray[Any, np.dtype[np.float64]] expected_real = np.arange(3, dtype=np.float64) expected_imag: np.ndarray[Any, np.dtype[np.float64]] expected_imag = -np.arange(3, dtype=np.float64) arr: np.ndarray[Any, np.dtype[np.complex128]] arr = expected_real + 1j * expected_imag named_array: NamedArray[Any, np.dtype[np.complex128]] named_array = NamedArray(["x"], arr) actual_real: duckarray[Any, np.dtype[np.float64]] = named_array.real.data assert np.array_equal(np.asarray(actual_real), expected_real) assert actual_real.dtype == expected_real.dtype actual_imag: duckarray[Any, np.dtype[np.float64]] = named_array.imag.data assert np.array_equal(np.asarray(actual_imag), expected_imag) assert actual_imag.dtype == expected_imag.dtype # Additional tests as per your original class-based code @pytest.mark.parametrize( "data, dtype", [ ("foo", np.dtype("U3")), (b"foo", np.dtype("S3")), ], ) def test_from_array_0d_string(self, data: Any, dtype: DTypeLike) -> None: named_array: NamedArray[Any, Any] named_array = from_array([], data) assert named_array.data == data assert named_array.dims == () assert named_array.sizes == {} assert named_array.attrs == {} assert named_array.ndim == 0 assert named_array.size == 1 assert named_array.dtype == dtype def test_from_array_0d_object(self) -> None: named_array: NamedArray[Any, Any] named_array = from_array([], (10, 12, 12)) expected_data = np.empty((), dtype=object) expected_data[()] = (10, 12, 12) assert np.array_equal(np.asarray(named_array.data), expected_data) assert 
named_array.dims == () assert named_array.sizes == {} assert named_array.attrs == {} assert named_array.ndim == 0 assert named_array.size == 1 assert named_array.dtype == np.dtype("O") def test_from_array_0d_datetime(self) -> None: named_array: NamedArray[Any, Any] named_array = from_array([], np.datetime64("2000-01-01")) assert named_array.dtype == np.dtype("datetime64[D]") @pytest.mark.parametrize( "timedelta, expected_dtype", [ (np.timedelta64(1, "D"), np.dtype("timedelta64[D]")), (np.timedelta64(1, "s"), np.dtype("timedelta64[s]")), (np.timedelta64(1, "m"), np.dtype("timedelta64[m]")), (np.timedelta64(1, "h"), np.dtype("timedelta64[h]")), (np.timedelta64(1, "us"), np.dtype("timedelta64[us]")), (np.timedelta64(1, "ns"), np.dtype("timedelta64[ns]")), (np.timedelta64(1, "ps"), np.dtype("timedelta64[ps]")), (np.timedelta64(1, "fs"), np.dtype("timedelta64[fs]")), (np.timedelta64(1, "as"), np.dtype("timedelta64[as]")), ], ) def test_from_array_0d_timedelta( self, timedelta: np.timedelta64, expected_dtype: np.dtype[np.timedelta64] ) -> None: named_array: NamedArray[Any, Any] named_array = from_array([], timedelta) assert named_array.dtype == expected_dtype assert named_array.data == timedelta @pytest.mark.parametrize( "dims, data_shape, new_dims, raises", [ (["x", "y", "z"], (2, 3, 4), ["a", "b", "c"], False), (["x", "y", "z"], (2, 3, 4), ["a", "b"], True), (["x", "y", "z"], (2, 4, 5), ["a", "b", "c", "d"], True), ([], [], (), False), ([], [], ("x",), True), ], ) def test_dims_setter( self, dims: Any, data_shape: Any, new_dims: Any, raises: bool ) -> None: named_array: NamedArray[Any, Any] named_array = NamedArray(dims, np.asarray(np.random.random(data_shape))) assert named_array.dims == tuple(dims) if raises: with pytest.raises(ValueError): named_array.dims = new_dims else: named_array.dims = new_dims assert named_array.dims == tuple(new_dims) def test_duck_array_class(self) -> None: numpy_a: NDArray[np.int64] numpy_a = np.array([2.1, 4], dtype=np.dtype(np.int64)) check_duck_array_typevar(numpy_a) masked_a: np.ma.MaskedArray[Any, np.dtype[np.int64]] masked_a = np.ma.asarray([2.1, 4], dtype=np.dtype(np.int64)) # type: ignore[no-untyped-call] check_duck_array_typevar(masked_a) custom_a: CustomArrayIndexable[Any, np.dtype[np.int64]] custom_a = CustomArrayIndexable(numpy_a) check_duck_array_typevar(custom_a) def test_duck_array_class_array_api(self) -> None: # Test numpy's array api: nxp = pytest.importorskip("array_api_strict", minversion="1.0") # TODO: nxp doesn't use dtype typevars, so can only use Any for the moment: arrayapi_a: duckarray[Any, Any] # duckarray[Any, np.dtype[np.int64]] arrayapi_a = nxp.asarray([2.1, 4], dtype=nxp.int64) check_duck_array_typevar(arrayapi_a) def test_new_namedarray(self) -> None: dtype_float = np.dtype(np.float32) narr_float: NamedArray[Any, np.dtype[np.float32]] narr_float = NamedArray(("x",), np.array([1.5, 3.2], dtype=dtype_float)) assert narr_float.dtype == dtype_float dtype_int = np.dtype(np.int8) narr_int: NamedArray[Any, np.dtype[np.int8]] narr_int = narr_float._new(("x",), np.array([1, 3], dtype=dtype_int)) assert narr_int.dtype == dtype_int class Variable( NamedArray[_ShapeType_co, _DType_co], Generic[_ShapeType_co, _DType_co] ): @overload def _new( self, dims: _DimsLike | Default = ..., data: duckarray[Any, _DType] = ..., attrs: _AttrsLike | Default = ..., ) -> Variable[Any, _DType]: ... @overload def _new( self, dims: _DimsLike | Default = ..., data: Default = ..., attrs: _AttrsLike | Default = ..., ) -> Variable[_ShapeType_co, _DType_co]: ... 
def _new( self, dims: _DimsLike | Default = _default, data: duckarray[Any, _DType] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> Variable[Any, _DType] | Variable[_ShapeType_co, _DType_co]: dims_ = copy.copy(self._dims) if dims is _default else dims attrs_: Mapping[Any, Any] | None if attrs is _default: attrs_ = None if self._attrs is None else self._attrs.copy() else: attrs_ = attrs if data is _default: return type(self)(dims_, copy.copy(self._data), attrs_) cls_ = cast("type[Variable[Any, _DType]]", type(self)) return cls_(dims_, data, attrs_) var_float: Variable[Any, np.dtype[np.float32]] var_float = Variable(("x",), np.array([1.5, 3.2], dtype=dtype_float)) assert var_float.dtype == dtype_float var_int: Variable[Any, np.dtype[np.int8]] var_int = var_float._new(("x",), np.array([1, 3], dtype=dtype_int)) assert var_int.dtype == dtype_int def test_replace_namedarray(self) -> None: dtype_float = np.dtype(np.float32) np_val: np.ndarray[Any, np.dtype[np.float32]] np_val = np.array([1.5, 3.2], dtype=dtype_float) np_val2: np.ndarray[Any, np.dtype[np.float32]] np_val2 = 2 * np_val narr_float: NamedArray[Any, np.dtype[np.float32]] narr_float = NamedArray(("x",), np_val) assert narr_float.dtype == dtype_float narr_float2: NamedArray[Any, np.dtype[np.float32]] narr_float2 = NamedArray(("x",), np_val2) assert narr_float2.dtype == dtype_float class Variable( NamedArray[_ShapeType_co, _DType_co], Generic[_ShapeType_co, _DType_co] ): @overload def _new( self, dims: _DimsLike | Default = ..., data: duckarray[Any, _DType] = ..., attrs: _AttrsLike | Default = ..., ) -> Variable[Any, _DType]: ... @overload def _new( self, dims: _DimsLike | Default = ..., data: Default = ..., attrs: _AttrsLike | Default = ..., ) -> Variable[_ShapeType_co, _DType_co]: ... 
def _new( self, dims: _DimsLike | Default = _default, data: duckarray[Any, _DType] | Default = _default, attrs: _AttrsLike | Default = _default, ) -> Variable[Any, _DType] | Variable[_ShapeType_co, _DType_co]: dims_ = copy.copy(self._dims) if dims is _default else dims attrs_: Mapping[Any, Any] | None if attrs is _default: attrs_ = None if self._attrs is None else self._attrs.copy() else: attrs_ = attrs if data is _default: return type(self)(dims_, copy.copy(self._data), attrs_) cls_ = cast("type[Variable[Any, _DType]]", type(self)) return cls_(dims_, data, attrs_) var_float: Variable[Any, np.dtype[np.float32]] var_float = Variable(("x",), np_val) assert var_float.dtype == dtype_float var_float2: Variable[Any, np.dtype[np.float32]] var_float2 = var_float._replace(("x",), np_val2) assert var_float2.dtype == dtype_float @pytest.mark.parametrize( "dim,expected_ndim,expected_shape,expected_dims", [ (None, 3, (1, 2, 5), (None, "x", "y")), (_default, 3, (1, 2, 5), ("dim_2", "x", "y")), ("z", 3, (1, 2, 5), ("z", "x", "y")), ], ) def test_expand_dims( self, target: NamedArray[Any, np.dtype[np.float32]], dim: _Dim | Default, expected_ndim: int, expected_shape: _ShapeLike, expected_dims: _DimsLike, ) -> None: result = target.expand_dims(dim=dim) assert result.ndim == expected_ndim assert result.shape == expected_shape assert result.dims == expected_dims @pytest.mark.parametrize( "dims, expected_sizes", [ ((), {"y": 5, "x": 2}), (["y", "x"], {"y": 5, "x": 2}), (["y", ...], {"y": 5, "x": 2}), ], ) def test_permute_dims( self, target: NamedArray[Any, np.dtype[np.float32]], dims: _DimsLike, expected_sizes: dict[_Dim, _IntOrUnknown], ) -> None: actual = target.permute_dims(*dims) assert actual.sizes == expected_sizes def test_permute_dims_errors( self, target: NamedArray[Any, np.dtype[np.float32]], ) -> None: with pytest.raises(ValueError, match=r"'y'.*permuted list"): dims = ["y"] target.permute_dims(*dims) @pytest.mark.parametrize( "broadcast_dims,expected_ndim", [ ({"x": 2, "y": 5}, 2), ({"x": 2, "y": 5, "z": 2}, 3), ({"w": 1, "x": 2, "y": 5}, 3), ], ) def test_broadcast_to( self, target: NamedArray[Any, np.dtype[np.float32]], broadcast_dims: Mapping[_Dim, int], expected_ndim: int, ) -> None: expand_dims = set(broadcast_dims.keys()) - set(target.dims) # loop over expand_dims and call .expand_dims(dim=dim) in a loop for dim in expand_dims: target = target.expand_dims(dim=dim) result = target.broadcast_to(broadcast_dims) assert result.ndim == expected_ndim assert result.sizes == broadcast_dims def test_broadcast_to_errors( self, target: NamedArray[Any, np.dtype[np.float32]] ) -> None: with pytest.raises( ValueError, match=r"operands could not be broadcast together with remapped shapes", ): target.broadcast_to({"x": 2, "y": 2}) with pytest.raises(ValueError, match=r"Cannot add new dimensions"): target.broadcast_to({"x": 2, "y": 2, "z": 2}) def test_warn_on_repeated_dimension_names(self) -> None: with pytest.warns(UserWarning, match="Duplicate dimension names"): NamedArray(("x", "x"), np.arange(4).reshape(2, 2)) def test_aggregation(self) -> None: x: NamedArray[Any, np.dtype[np.int64]] x = NamedArray(("x", "y"), np.arange(4).reshape(2, 2)) result = x.sum() assert isinstance(result.data, np.ndarray) def test_repr() -> None: x: NamedArray[Any, np.dtype[np.uint64]] x = NamedArray(("x",), np.array([0], dtype=np.uint64)) # Reprs should not crash: r = x.__repr__() x._repr_html_() # Basic comparison: assert r == " Size: 8B\narray([0], dtype=uint64)" 
xarray-2025.09.0/xarray/tests/test_nd_point_index.py000066400000000000000000000141431505620616400224310ustar00rootroot00000000000000import numpy as np import pytest import xarray as xr from xarray.indexes import NDPointIndex from xarray.tests import assert_identical pytest.importorskip("scipy") def test_tree_index_init() -> None: from xarray.indexes.nd_point_index import ScipyKDTreeAdapter xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}) ds_indexed1 = ds.set_xindex(("xx", "yy"), NDPointIndex) assert "xx" in ds_indexed1.xindexes assert "yy" in ds_indexed1.xindexes assert isinstance(ds_indexed1.xindexes["xx"], NDPointIndex) assert ds_indexed1.xindexes["xx"] is ds_indexed1.xindexes["yy"] ds_indexed2 = ds.set_xindex( ("xx", "yy"), NDPointIndex, tree_adapter_cls=ScipyKDTreeAdapter ) assert ds_indexed1.xindexes["xx"].equals(ds_indexed2.xindexes["yy"]) def test_tree_index_init_errors() -> None: xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}) with pytest.raises(ValueError, match="number of variables"): ds.set_xindex("xx", NDPointIndex) ds2 = ds.assign_coords(yy=(("u", "v"), [[3.0, 3.0], [4.0, 4.0]])) with pytest.raises(ValueError, match="same dimensions"): ds2.set_xindex(("xx", "yy"), NDPointIndex) def test_tree_index_sel() -> None: xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex( ("xx", "yy"), NDPointIndex ) # 1-dimensional labels actual = ds.sel( xx=xr.Variable("u", [1.1, 1.1, 1.1]), yy=xr.Variable("u", [3.1, 3.1, 3.1]), method="nearest", ) expected = xr.Dataset( coords={"xx": ("u", [1.0, 1.0, 1.0]), "yy": ("u", [3.0, 3.0, 3.0])} ) assert_identical(actual, expected) # 2-dimensional labels actual = ds.sel( xx=xr.Variable(("u", "v"), [[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]]), yy=xr.Variable(("u", "v"), [[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]]), method="nearest", ) expected = xr.Dataset( coords={ "xx": (("u", "v"), [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), "yy": (("u", "v"), [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]), }, ) assert_identical(actual, expected) # all scalar labels actual = ds.sel(xx=1.1, yy=3.1, method="nearest") expected = xr.Dataset(coords={"xx": 1.0, "yy": 3.0}) assert_identical(actual, expected) # broadcast scalar to label shape and dimensions actual = ds.sel(xx=1.1, yy=xr.Variable("u", [3.1, 3.1, 3.1]), method="nearest") expected = ds.sel( xx=xr.Variable("u", [1.1, 1.1, 1.1]), yy=xr.Variable("u", [3.1, 3.1, 3.1]), method="nearest", ) assert_identical(actual, expected) # broadcast orthogonal 1-dimensional labels actual = ds.sel( xx=xr.Variable("u", [1.1, 1.1]), yy=xr.Variable("v", [3.1, 3.1]), method="nearest", ) expected = xr.Dataset( coords={ "xx": (("u", "v"), [[1.0, 1.0], [1.0, 1.0]]), "yy": (("u", "v"), [[3.0, 3.0], [3.0, 3.0]]), }, ) assert_identical(actual, expected) # implicit dimension array-like labels actual = ds.sel( xx=[[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]], yy=[[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]], method="nearest", ) expected = ds.sel( xx=xr.Variable(ds.xx.dims, [[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]]), yy=xr.Variable(ds.yy.dims, [[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]]), method="nearest", ) assert_identical(actual, expected) def test_tree_index_sel_errors() -> None: xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex( ("xx", "yy"), NDPointIndex ) with pytest.raises(ValueError, match="method='nearest'"): 
ds.sel(xx=1.1, yy=3.1) with pytest.raises(ValueError, match="missing labels"): ds.sel(xx=1.1, method="nearest") with pytest.raises(ValueError, match="invalid label value"): # invalid array-like dimensions ds.sel(xx=[1.1, 1.9], yy=[3.1, 3.9], method="nearest") # error while trying to broadcast labels with pytest.raises(xr.AlignmentError, match=".*conflicting dimension sizes"): ds.sel( xx=xr.Variable("u", [1.1, 1.1, 1.1]), yy=xr.Variable("u", [3.1, 3.1]), method="nearest", ) def test_tree_index_equals() -> None: xx1, yy1 = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds1 = xr.Dataset( coords={"xx": (("y", "x"), xx1), "yy": (("y", "x"), yy1)} ).set_xindex(("xx", "yy"), NDPointIndex) xx2, yy2 = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds2 = xr.Dataset( coords={"xx": (("y", "x"), xx2), "yy": (("y", "x"), yy2)} ).set_xindex(("xx", "yy"), NDPointIndex) xx3, yy3 = np.meshgrid([10.0, 20.0], [30.0, 40.0]) ds3 = xr.Dataset( coords={"xx": (("y", "x"), xx3), "yy": (("y", "x"), yy3)} ).set_xindex(("xx", "yy"), NDPointIndex) assert ds1.xindexes["xx"].equals(ds2.xindexes["xx"]) assert not ds1.xindexes["xx"].equals(ds3.xindexes["xx"]) def test_tree_index_rename() -> None: xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0]) ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex( ("xx", "yy"), NDPointIndex ) ds_renamed = ds.rename_dims(y="u").rename_vars(yy="uu") assert "uu" in ds_renamed.xindexes assert isinstance(ds_renamed.xindexes["uu"], NDPointIndex) assert ds_renamed.xindexes["xx"] is ds_renamed.xindexes["uu"] # test via sel() with implicit dimension array-like labels, which relies on # NDPointIndex._coord_names and NDPointIndex._dims internal attrs actual = ds_renamed.sel( xx=[[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]], uu=[[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]], method="nearest", ) expected = ds_renamed.sel( xx=xr.Variable(ds_renamed.xx.dims, [[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]]), uu=xr.Variable(ds_renamed.uu.dims, [[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]]), method="nearest", ) assert_identical(actual, expected) xarray-2025.09.0/xarray/tests/test_nputils.py000066400000000000000000000017351505620616400211310ustar00rootroot00000000000000from __future__ import annotations import numpy as np from numpy.testing import assert_array_equal from xarray.core.nputils import NumpyVIndexAdapter, _is_contiguous def test_is_contiguous() -> None: assert _is_contiguous([1]) assert _is_contiguous([1, 2, 3]) assert not _is_contiguous([1, 3]) def test_vindex() -> None: x = np.arange(3 * 4 * 5).reshape((3, 4, 5)) vindex = NumpyVIndexAdapter(x) # getitem assert_array_equal(vindex[0], x[0]) assert_array_equal(vindex[[1, 2], [1, 2]], x[([1, 2], [1, 2])]) assert vindex[[0, 1], [0, 1], :].shape == (2, 5) assert vindex[[0, 1], :, [0, 1]].shape == (2, 4) assert vindex[:, [0, 1], [0, 1]].shape == (2, 3) # setitem vindex[:] = 0 assert_array_equal(x, np.zeros_like(x)) # assignment should not raise vindex[[0, 1], [0, 1], :] = vindex[[0, 1], [0, 1], :] vindex[[0, 1], :, [0, 1]] = vindex[[0, 1], :, [0, 1]] vindex[:, [0, 1], [0, 1]] = vindex[:, [0, 1], [0, 1]] xarray-2025.09.0/xarray/tests/test_options.py000066400000000000000000000167371505620616400211360ustar00rootroot00000000000000from __future__ import annotations import pytest import xarray from xarray import concat, merge from xarray.backends.file_manager import FILE_CACHE from xarray.core.options import OPTIONS, _get_keep_attrs from xarray.tests.test_dataset import create_test_data def test_invalid_option_raises() -> None: with pytest.raises(ValueError): 
xarray.set_options(not_a_valid_options=True) def test_display_width() -> None: with pytest.raises(ValueError): xarray.set_options(display_width=0) with pytest.raises(ValueError): xarray.set_options(display_width=-10) with pytest.raises(ValueError): xarray.set_options(display_width=3.5) def test_arithmetic_join() -> None: with pytest.raises(ValueError): xarray.set_options(arithmetic_join="invalid") with xarray.set_options(arithmetic_join="exact"): assert OPTIONS["arithmetic_join"] == "exact" def test_enable_cftimeindex() -> None: with pytest.raises(ValueError): xarray.set_options(enable_cftimeindex=None) with pytest.warns(FutureWarning, match="no-op"): with xarray.set_options(enable_cftimeindex=True): assert OPTIONS["enable_cftimeindex"] def test_file_cache_maxsize() -> None: with pytest.raises(ValueError): xarray.set_options(file_cache_maxsize=0) original_size = FILE_CACHE.maxsize with xarray.set_options(file_cache_maxsize=123): assert FILE_CACHE.maxsize == 123 assert FILE_CACHE.maxsize == original_size def test_keep_attrs() -> None: with pytest.raises(ValueError): xarray.set_options(keep_attrs="invalid_str") with xarray.set_options(keep_attrs=True): assert OPTIONS["keep_attrs"] with xarray.set_options(keep_attrs=False): assert not OPTIONS["keep_attrs"] with xarray.set_options(keep_attrs="default"): assert _get_keep_attrs(default=True) assert not _get_keep_attrs(default=False) def test_nested_options() -> None: original = OPTIONS["display_width"] with xarray.set_options(display_width=1): assert OPTIONS["display_width"] == 1 with xarray.set_options(display_width=2): assert OPTIONS["display_width"] == 2 assert OPTIONS["display_width"] == 1 assert OPTIONS["display_width"] == original def test_display_style() -> None: original = "html" assert OPTIONS["display_style"] == original with pytest.raises(ValueError): xarray.set_options(display_style="invalid_str") with xarray.set_options(display_style="text"): assert OPTIONS["display_style"] == "text" assert OPTIONS["display_style"] == original def create_test_dataset_attrs(seed=0): ds = create_test_data(seed) ds.attrs = {"attr1": 5, "attr2": "history", "attr3": {"nested": "more_info"}} return ds def create_test_dataarray_attrs(seed=0, var="var1"): da = create_test_data(seed)[var] da.attrs = {"attr1": 5, "attr2": "history", "attr3": {"nested": "more_info"}} return da class TestAttrRetention: def test_dataset_attr_retention(self) -> None: # Use .mean() for all tests: a typical reduction operation ds = create_test_dataset_attrs() original_attrs = ds.attrs # Test default behaviour result = ds.mean() assert result.attrs == {} with xarray.set_options(keep_attrs="default"): result = ds.mean() assert result.attrs == {} with xarray.set_options(keep_attrs=True): result = ds.mean() assert result.attrs == original_attrs with xarray.set_options(keep_attrs=False): result = ds.mean() assert result.attrs == {} def test_dataarray_attr_retention(self) -> None: # Use .mean() for all tests: a typical reduction operation da = create_test_dataarray_attrs() original_attrs = da.attrs # Test default behaviour result = da.mean() assert result.attrs == {} with xarray.set_options(keep_attrs="default"): result = da.mean() assert result.attrs == {} with xarray.set_options(keep_attrs=True): result = da.mean() assert result.attrs == original_attrs with xarray.set_options(keep_attrs=False): result = da.mean() assert result.attrs == {} def test_groupby_attr_retention(self) -> None: da = xarray.DataArray([1, 2, 3], [("x", [1, 1, 2])]) da.attrs = {"attr1": 5, "attr2": "history", 
"attr3": {"nested": "more_info"}} original_attrs = da.attrs # Test default behaviour result = da.groupby("x").sum(keep_attrs=True) assert result.attrs == original_attrs with xarray.set_options(keep_attrs="default"): result = da.groupby("x").sum(keep_attrs=True) assert result.attrs == original_attrs with xarray.set_options(keep_attrs=True): result1 = da.groupby("x") result = result1.sum() assert result.attrs == original_attrs with xarray.set_options(keep_attrs=False): result = da.groupby("x").sum() assert result.attrs == {} def test_concat_attr_retention(self) -> None: ds1 = create_test_dataset_attrs() ds2 = create_test_dataset_attrs() ds2.attrs = {"wrong": "attributes"} original_attrs = ds1.attrs # Test default behaviour of keeping the attrs of the first # dataset in the supplied list # global keep_attrs option current doesn't affect concat result = concat([ds1, ds2], dim="dim1") assert result.attrs == original_attrs def test_merge_attr_retention(self) -> None: da1 = create_test_dataarray_attrs(var="var1") da2 = create_test_dataarray_attrs(var="var2") da2.attrs = {"wrong": "attributes"} original_attrs = da1.attrs # merge currently discards attrs, and the global keep_attrs # option doesn't affect this result = merge([da1, da2]) assert result.attrs == original_attrs def test_display_style_text(self) -> None: ds = create_test_dataset_attrs() with xarray.set_options(display_style="text"): text = ds._repr_html_() assert text.startswith("
    ")
                assert "'nested'" in text
                assert "<xarray.Dataset>" in text
    
        def test_display_style_html(self) -> None:
            ds = create_test_dataset_attrs()
            with xarray.set_options(display_style="html"):
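                # Note: with display_style="html", _repr_html_ should return the rich
                # HTML repr rather than a <pre>-wrapped, HTML-escaped text repr.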
                html = ds._repr_html_()
                assert html.startswith("
    ") assert "'nested'" in html def test_display_dataarray_style_text(self) -> None: da = create_test_dataarray_attrs() with xarray.set_options(display_style="text"): text = da._repr_html_() assert text.startswith("
    ")
                assert "<xarray.DataArray 'var1'" in text
    
        def test_display_dataarray_style_html(self) -> None:
            da = create_test_dataarray_attrs()
            with xarray.set_options(display_style="html"):
                html = da._repr_html_()
                assert html.startswith("
    ") assert "#x27;nested'" in html @pytest.mark.parametrize( "set_value", [("left"), ("exact")], ) def test_get_options_retention(set_value): """Test to check if get_options will return changes made by set_options""" with xarray.set_options(arithmetic_join=set_value): get_options = xarray.get_options() assert get_options["arithmetic_join"] == set_value xarray-2025.09.0/xarray/tests/test_pandas_to_xarray.py000066400000000000000000000202761505620616400227720ustar00rootroot00000000000000# This file contains code vendored from pandas # For reference, here is a copy of the pandas copyright notice: # BSD 3-Clause License # Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team # All rights reserved. # Copyright (c) 2011-2025, Open source contributors. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import numpy as np import pandas as pd import pandas._testing as tm import pytest from packaging.version import Version from pandas import ( Categorical, CategoricalIndex, DataFrame, Index, IntervalIndex, MultiIndex, RangeIndex, Series, date_range, period_range, timedelta_range, ) indices_dict: dict[str, Index] = { "object": Index([f"pandas_{i}" for i in range(10)], dtype=object), "string": Index([f"pandas_{i}" for i in range(10)], dtype="str"), "datetime": date_range("2020-01-01", periods=10), "datetime-tz": date_range("2020-01-01", periods=10, tz="US/Pacific"), "period": period_range("2020-01-01", periods=10, freq="D"), "timedelta": timedelta_range(start="1 day", periods=10, freq="D"), "range": RangeIndex(10), "int8": Index(np.arange(10), dtype="int8"), "int16": Index(np.arange(10), dtype="int16"), "int32": Index(np.arange(10), dtype="int32"), "int64": Index(np.arange(10), dtype="int64"), "uint8": Index(np.arange(10), dtype="uint8"), "uint16": Index(np.arange(10), dtype="uint16"), "uint32": Index(np.arange(10), dtype="uint32"), "uint64": Index(np.arange(10), dtype="uint64"), "float32": Index(np.arange(10), dtype="float32"), "float64": Index(np.arange(10), dtype="float64"), "bool-object": Index([True, False] * 5, dtype=object), "bool-dtype": Index([True, False] * 5, dtype=bool), "complex64": Index( np.arange(10, dtype="complex64") + 1.0j * np.arange(10, dtype="complex64") ), "complex128": Index( np.arange(10, dtype="complex128") + 1.0j * np.arange(10, dtype="complex128") ), "categorical": CategoricalIndex(list("abcd") * 2), "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=11, dtype="int")), "empty": Index([]), # "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), # "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), # "multi": _create_multiindex(), "repeats": Index([0, 0, 1, 1, 2, 2]), "nullable_int": Index(np.arange(10), dtype="Int64"), "nullable_uint": Index(np.arange(10), dtype="UInt16"), "nullable_float": Index(np.arange(10), dtype="Float32"), "nullable_bool": Index(np.arange(10).astype(bool), dtype="boolean"), "string-python": Index( pd.array([f"pandas_{i}" for i in range(10)], dtype="string[python]") ), } @pytest.fixture( params=[ key for key, value in indices_dict.items() if not isinstance(value, MultiIndex) ] ) def index_flat(request): """ index fixture, but excluding MultiIndex cases. 
""" key = request.param return indices_dict[key].copy() class TestDataFrameToXArray: @pytest.fixture def df(self): return DataFrame( { "a": list("abcd"), "b": list(range(1, 5)), "c": np.arange(3, 7).astype("u1"), "d": np.arange(4.0, 8.0, dtype="float64"), "e": [True, False, True, False], "f": Categorical(list("abcd")), "g": date_range("20130101", periods=4), "h": date_range("20130101", periods=4, tz="US/Eastern"), } ) def test_to_xarray_index_types(self, index_flat, df): index = index_flat # MultiIndex is tested in test_to_xarray_with_multiindex if len(index) == 0: pytest.skip("Test doesn't make sense for empty index") from xarray import Dataset df.index = index[:4] df.index.name = "foo" df.columns.name = "bar" result = df.to_xarray() assert result.sizes["foo"] == 4 assert len(result.coords) == 1 assert len(result.data_vars) == 8 tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) assert isinstance(result, Dataset) # idempotency # datetimes w/tz are preserved # column names are lost expected = df.copy() expected.columns.name = None tm.assert_frame_equal(result.to_dataframe(), expected) def test_to_xarray_empty(self, df): from xarray import Dataset df.index.name = "foo" result = df[0:0].to_xarray() assert result.sizes["foo"] == 0 assert isinstance(result, Dataset) def test_to_xarray_with_multiindex(self, df): from xarray import Dataset # MultiIndex df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"]) result = df.to_xarray() assert result.sizes["one"] == 1 assert result.sizes["two"] == 4 assert len(result.coords) == 2 assert len(result.data_vars) == 8 tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) assert isinstance(result, Dataset) result = result.to_dataframe() expected = df.copy() expected["f"] = expected["f"].astype( object if Version(pd.__version__) < Version("3.0.0dev0") else str ) expected.columns.name = None tm.assert_frame_equal(result, expected) class TestSeriesToXArray: def test_to_xarray_index_types(self, index_flat): index = index_flat # MultiIndex is tested in test_to_xarray_with_multiindex from xarray import DataArray ser = Series(range(len(index)), index=index, dtype="int64") ser.index.name = "foo" result = ser.to_xarray() repr(result) assert len(result) == len(index) assert len(result.coords) == 1 tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) assert isinstance(result, DataArray) # idempotency tm.assert_series_equal(result.to_series(), ser) def test_to_xarray_empty(self): from xarray import DataArray ser = Series([], dtype=object) ser.index.name = "foo" result = ser.to_xarray() assert len(result) == 0 assert len(result.coords) == 1 tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) assert isinstance(result, DataArray) def test_to_xarray_with_multiindex(self): from xarray import DataArray mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"]) ser = Series(range(6), dtype="int64", index=mi) result = ser.to_xarray() assert len(result) == 2 tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) assert isinstance(result, DataArray) res = result.to_series() tm.assert_series_equal(res, ser) xarray-2025.09.0/xarray/tests/test_parallelcompat.py000066400000000000000000000206711505620616400224330ustar00rootroot00000000000000from __future__ import annotations from importlib.metadata import EntryPoint from typing import Any import numpy as np import pytest from xarray import set_options from xarray.core.types import T_Chunks, T_DuckArray, T_NormalizedChunks from 
xarray.namedarray._typing import _Chunks from xarray.namedarray.daskmanager import DaskManager from xarray.namedarray.parallelcompat import ( KNOWN_CHUNKMANAGERS, ChunkManagerEntrypoint, get_chunked_array_type, guess_chunkmanager, list_chunkmanagers, load_chunkmanagers, ) from xarray.tests import requires_dask class DummyChunkedArray(np.ndarray): """ Mock-up of a chunked array class. Adds a (non-functional) .chunks attribute by following this example in the numpy docs https://numpy.org/doc/stable/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray """ chunks: T_NormalizedChunks def __new__( cls, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, chunks=None, ): obj = super().__new__(cls, shape, dtype, buffer, offset, strides, order) obj.chunks = chunks return obj def __array_finalize__(self, obj): if obj is None: return self.chunks = getattr(obj, "chunks", None) # type: ignore[assignment] def rechunk(self, chunks, **kwargs): copied = self.copy() copied.chunks = chunks return copied class DummyChunkManager(ChunkManagerEntrypoint): """Mock-up of ChunkManager class for DummyChunkedArray""" def __init__(self): self.array_cls = DummyChunkedArray def is_chunked_array(self, data: Any) -> bool: return isinstance(data, DummyChunkedArray) def chunks(self, data: DummyChunkedArray) -> T_NormalizedChunks: return data.chunks def normalize_chunks( self, chunks: T_Chunks | T_NormalizedChunks, shape: tuple[int, ...] | None = None, limit: int | None = None, dtype: np.dtype | None = None, previous_chunks: T_NormalizedChunks | None = None, ) -> T_NormalizedChunks: from dask.array.core import normalize_chunks return normalize_chunks(chunks, shape, limit, dtype, previous_chunks) def from_array( self, data: T_DuckArray | np.typing.ArrayLike, chunks: _Chunks, **kwargs ) -> DummyChunkedArray: from dask import array as da return da.from_array(data, chunks, **kwargs) def rechunk(self, data: DummyChunkedArray, chunks, **kwargs) -> DummyChunkedArray: return data.rechunk(chunks, **kwargs) def compute(self, *data: DummyChunkedArray, **kwargs) -> tuple[np.ndarray, ...]: from dask.array import compute return compute(*data, **kwargs) def apply_gufunc( self, func, signature, *args, axes=None, axis=None, keepdims=False, output_dtypes=None, output_sizes=None, vectorize=None, allow_rechunk=False, meta=None, **kwargs, ): from dask.array.gufunc import apply_gufunc return apply_gufunc( func, signature, *args, axes=axes, axis=axis, keepdims=keepdims, output_dtypes=output_dtypes, output_sizes=output_sizes, vectorize=vectorize, allow_rechunk=allow_rechunk, meta=meta, **kwargs, ) @pytest.fixture def register_dummy_chunkmanager(monkeypatch): """ Mocks the registering of an additional ChunkManagerEntrypoint. This preserves the presence of the existing DaskManager, so a test that relies on this and DaskManager both being returned from list_chunkmanagers() at once would still work. The monkeypatching changes the behavior of list_chunkmanagers when called inside xarray.namedarray.parallelcompat, but not when called from this tests file. 
""" # Should include DaskManager iff dask is available to be imported preregistered_chunkmanagers = list_chunkmanagers() monkeypatch.setattr( "xarray.namedarray.parallelcompat.list_chunkmanagers", lambda: {"dummy": DummyChunkManager()} | preregistered_chunkmanagers, ) yield class TestGetChunkManager: def test_get_chunkmanger(self, register_dummy_chunkmanager) -> None: chunkmanager = guess_chunkmanager("dummy") assert isinstance(chunkmanager, DummyChunkManager) def test_get_chunkmanger_via_set_options(self, register_dummy_chunkmanager) -> None: with set_options(chunk_manager="dummy"): chunkmanager = guess_chunkmanager(None) assert isinstance(chunkmanager, DummyChunkManager) def test_fail_on_known_but_missing_chunkmanager( self, register_dummy_chunkmanager, monkeypatch ) -> None: monkeypatch.setitem(KNOWN_CHUNKMANAGERS, "test", "test-package") with pytest.raises( ImportError, match="chunk manager 'test' is not available.+test-package" ): guess_chunkmanager("test") def test_fail_on_nonexistent_chunkmanager( self, register_dummy_chunkmanager ) -> None: with pytest.raises(ValueError, match="unrecognized chunk manager 'foo'"): guess_chunkmanager("foo") @requires_dask def test_get_dask_if_installed(self) -> None: chunkmanager = guess_chunkmanager(None) assert isinstance(chunkmanager, DaskManager) def test_no_chunk_manager_available(self, monkeypatch) -> None: monkeypatch.setattr("xarray.namedarray.parallelcompat.list_chunkmanagers", dict) with pytest.raises(ImportError, match="no chunk managers available"): guess_chunkmanager("foo") def test_no_chunk_manager_available_but_known_manager_requested( self, monkeypatch ) -> None: monkeypatch.setattr("xarray.namedarray.parallelcompat.list_chunkmanagers", dict) with pytest.raises(ImportError, match="chunk manager 'dask' is not available"): guess_chunkmanager("dask") @requires_dask def test_choose_dask_over_other_chunkmanagers( self, register_dummy_chunkmanager ) -> None: chunk_manager = guess_chunkmanager(None) assert isinstance(chunk_manager, DaskManager) class TestGetChunkedArrayType: def test_detect_chunked_arrays(self, register_dummy_chunkmanager) -> None: dummy_arr = DummyChunkedArray([1, 2, 3]) chunk_manager = get_chunked_array_type(dummy_arr) assert isinstance(chunk_manager, DummyChunkManager) def test_ignore_inmemory_arrays(self, register_dummy_chunkmanager) -> None: dummy_arr = DummyChunkedArray([1, 2, 3]) chunk_manager = get_chunked_array_type(*[dummy_arr, 1.0, np.array([5, 6])]) assert isinstance(chunk_manager, DummyChunkManager) with pytest.raises(TypeError, match="Expected a chunked array"): get_chunked_array_type(5.0) def test_raise_if_no_arrays_chunked(self, register_dummy_chunkmanager) -> None: with pytest.raises(TypeError, match="Expected a chunked array "): get_chunked_array_type(*[1.0, np.array([5, 6])]) def test_raise_if_no_matching_chunkmanagers(self) -> None: dummy_arr = DummyChunkedArray([1, 2, 3]) with pytest.raises( TypeError, match="Could not find a Chunk Manager which recognises" ): get_chunked_array_type(dummy_arr) @requires_dask def test_detect_dask_if_installed(self) -> None: import dask.array as da dask_arr = da.from_array([1, 2, 3], chunks=(1,)) chunk_manager = get_chunked_array_type(dask_arr) assert isinstance(chunk_manager, DaskManager) @requires_dask def test_raise_on_mixed_array_types(self, register_dummy_chunkmanager) -> None: import dask.array as da dummy_arr = DummyChunkedArray([1, 2, 3]) dask_arr = da.from_array([1, 2, 3], chunks=(1,)) with pytest.raises(TypeError, match="received multiple types"): 
get_chunked_array_type(*[dask_arr, dummy_arr]) def test_bogus_entrypoint() -> None: # Create a bogus entry-point as if the user broke their setup.cfg # or is actively developing their new chunk manager entry_point = EntryPoint( "bogus", "xarray.bogus.doesnotwork", "xarray.chunkmanagers" ) with pytest.warns(UserWarning, match="Failed to load chunk manager"): assert len(load_chunkmanagers([entry_point])) == 0 xarray-2025.09.0/xarray/tests/test_plot.py000066400000000000000000003770501505620616400204170ustar00rootroot00000000000000from __future__ import annotations import contextlib import inspect import math from collections.abc import Callable, Generator, Hashable from copy import copy from datetime import date, timedelta from typing import Any, Literal, cast import numpy as np import pandas as pd import pytest import xarray as xr import xarray.plot as xplt from xarray import DataArray, Dataset from xarray.namedarray.utils import module_available from xarray.plot.dataarray_plot import _infer_interval_breaks from xarray.plot.dataset_plot import _infer_meta_data from xarray.plot.utils import ( _assert_valid_xy, _build_discrete_cmap, _color_palette, _determine_cmap_params, _maybe_gca, get_axis, label_from_attrs, ) from xarray.tests import ( assert_array_equal, assert_equal, assert_no_warnings, requires_cartopy, requires_cftime, requires_dask, requires_matplotlib, requires_seaborn, ) # this should not be imported to test if the automatic lazy import works has_nc_time_axis = module_available("nc_time_axis") # import mpl and change the backend before other mpl imports try: import matplotlib as mpl import matplotlib.dates import matplotlib.pyplot as plt import mpl_toolkits except ImportError: pass with contextlib.suppress(ImportError): import cartopy @contextlib.contextmanager def figure_context(*args, **kwargs): """context manager which autocloses a figure (even if the test failed)""" try: yield None finally: plt.close("all") @pytest.fixture(autouse=True) def test_all_figures_closed(): """meta-test to ensure all figures are closed at the end of a test Notes: Scope is kept to module (only invoke this function once per test module) else tests cannot be run in parallel (locally). Disadvantage: only catches one open figure per run. May still give a false positive if tests are run in parallel. """ yield None open_figs = len(plt.get_fignums()) if open_figs: raise RuntimeError( f"tests did not close all figures ({open_figs} figures open)" ) @pytest.mark.flaky @pytest.mark.skip(reason="maybe flaky") def text_in_fig() -> set[str]: """ Return the set of all text in the figure """ return {t.get_text() for t in plt.gcf().findobj(mpl.text.Text)} def find_possible_colorbars() -> list[mpl.collections.QuadMesh]: # nb. 
this function also matches meshes from pcolormesh return plt.gcf().findobj(mpl.collections.QuadMesh) def substring_in_axes(substring: str, ax: mpl.axes.Axes) -> bool: """ Return True if a substring is found anywhere in an axes """ alltxt: set[str] = {t.get_text() for t in ax.findobj(mpl.text.Text)} return any(substring in txt for txt in alltxt) def substring_not_in_axes(substring: str, ax: mpl.axes.Axes) -> bool: """ Return True if a substring is not found anywhere in an axes """ alltxt: set[str] = {t.get_text() for t in ax.findobj(mpl.text.Text)} check = [(substring not in txt) for txt in alltxt] return all(check) def property_in_axes_text( property, property_str, target_txt, ax: mpl.axes.Axes ) -> bool: """ Return True if the specified text in an axes has the property assigned to property_str """ alltxt: list[mpl.text.Text] = ax.findobj(mpl.text.Text) return all( plt.getp(t, property) == property_str for t in alltxt if t.get_text() == target_txt ) def easy_array(shape: tuple[int, ...], start: float = 0, stop: float = 1) -> np.ndarray: """ Make an array with desired shape using np.linspace shape is a tuple like (2, 3) """ a = np.linspace(start, stop, num=math.prod(shape)) return a.reshape(shape) def get_colorbar_label(colorbar) -> str: if colorbar.orientation == "vertical": return colorbar.ax.get_ylabel() else: return colorbar.ax.get_xlabel() @requires_matplotlib class PlotTestCase: @pytest.fixture(autouse=True) def setup(self) -> Generator: yield # Remove all matplotlib figures plt.close("all") def pass_in_axis(self, plotmethod, subplot_kw=None) -> None: fig, axs = plt.subplots(ncols=2, subplot_kw=subplot_kw, squeeze=False) ax = axs[0, 0] plotmethod(ax=ax) assert ax.has_data() @pytest.mark.slow def imshow_called(self, plotmethod) -> bool: plotmethod() images = plt.gca().findobj(mpl.image.AxesImage) return len(images) > 0 def contourf_called(self, plotmethod) -> bool: plotmethod() # Compatible with mpl before (PathCollection) and after (QuadContourSet) 3.8 def matchfunc(x) -> bool: return isinstance( x, mpl.collections.PathCollection | mpl.contour.QuadContourSet ) paths = plt.gca().findobj(matchfunc) return len(paths) > 0 class TestPlot(PlotTestCase): @pytest.fixture(autouse=True) def setup_array(self) -> None: self.darray = DataArray(easy_array((2, 3, 4))) def test_accessor(self) -> None: from xarray.plot.accessor import DataArrayPlotAccessor assert DataArray.plot is DataArrayPlotAccessor assert isinstance(self.darray.plot, DataArrayPlotAccessor) def test_label_from_attrs(self) -> None: da = self.darray.copy() assert "" == label_from_attrs(da) da.name = 0 assert "0" == label_from_attrs(da) da.name = "a" da.attrs["units"] = "a_units" da.attrs["long_name"] = "a_long_name" da.attrs["standard_name"] = "a_standard_name" assert "a_long_name [a_units]" == label_from_attrs(da) da.attrs.pop("long_name") assert "a_standard_name [a_units]" == label_from_attrs(da) da.attrs.pop("units") assert "a_standard_name" == label_from_attrs(da) da.attrs["units"] = "a_units" da.attrs.pop("standard_name") assert "a [a_units]" == label_from_attrs(da) da.attrs.pop("units") assert "a" == label_from_attrs(da) # Latex strings can be longer without needing a new line: long_latex_name = r"$Ra_s = \mathrm{mean}(\epsilon_k) / \mu M^2_\infty$" da.attrs = dict(long_name=long_latex_name) assert label_from_attrs(da) == long_latex_name def test1d(self) -> None: self.darray[:, 0, 0].plot() # type: ignore[call-arg] with pytest.raises(ValueError, match=r"x must be one of None, 'dim_0'"): self.darray[:, 0, 0].plot(x="dim_1") # 
type: ignore[call-arg] with pytest.raises(TypeError, match=r"complex128"): (self.darray[:, 0, 0] + 1j).plot() # type: ignore[call-arg] def test_1d_bool(self) -> None: xr.ones_like(self.darray[:, 0, 0], dtype=bool).plot() # type: ignore[call-arg] def test_1d_x_y_kw(self) -> None: z = np.arange(10) da = DataArray(np.cos(z), dims=["z"], coords=[z], name="f") xy: list[list[str | None]] = [[None, None], [None, "z"], ["z", None]] f, axs = plt.subplots(3, 1, squeeze=False) for aa, (x, y) in enumerate(xy): da.plot(x=x, y=y, ax=axs.flat[aa]) # type: ignore[call-arg] with pytest.raises(ValueError, match=r"Cannot specify both"): da.plot(x="z", y="z") # type: ignore[call-arg] error_msg = "must be one of None, 'z'" with pytest.raises(ValueError, match=rf"x {error_msg}"): da.plot(x="f") # type: ignore[call-arg] with pytest.raises(ValueError, match=rf"y {error_msg}"): da.plot(y="f") # type: ignore[call-arg] def test_multiindex_level_as_coord(self) -> None: da = xr.DataArray( np.arange(5), dims="x", coords=dict(a=("x", np.arange(5)), b=("x", np.arange(5, 10))), ) da = da.set_index(x=["a", "b"]) for x in ["a", "b"]: h = da.plot(x=x)[0] # type: ignore[call-arg] assert_array_equal(h.get_xdata(), da[x].values) for y in ["a", "b"]: h = da.plot(y=y)[0] # type: ignore[call-arg] assert_array_equal(h.get_ydata(), da[y].values) # Test for bug in GH issue #2725 def test_infer_line_data(self) -> None: current = DataArray( name="I", data=np.array([5, 8]), dims=["t"], coords={ "t": (["t"], np.array([0.1, 0.2])), "V": (["t"], np.array([100, 200])), }, ) # Plot current against voltage line = current.plot.line(x="V")[0] assert_array_equal(line.get_xdata(), current.coords["V"].values) # Plot current against time line = current.plot.line()[0] assert_array_equal(line.get_xdata(), current.coords["t"].values) def test_line_plot_along_1d_coord(self) -> None: # Test for bug in GH #3334 x_coord = xr.DataArray(data=[0.1, 0.2], dims=["x"]) t_coord = xr.DataArray(data=[10, 20], dims=["t"]) da = xr.DataArray( data=np.array([[0, 1], [5, 9]]), dims=["x", "t"], coords={"x": x_coord, "time": t_coord}, ) line = da.plot(x="time", hue="x")[0] # type: ignore[call-arg] assert_array_equal(line.get_xdata(), da.coords["time"].values) line = da.plot(y="time", hue="x")[0] # type: ignore[call-arg] assert_array_equal(line.get_ydata(), da.coords["time"].values) def test_line_plot_wrong_hue(self) -> None: da = xr.DataArray( data=np.array([[0, 1], [5, 9]]), dims=["x", "t"], ) with pytest.raises(ValueError, match="hue must be one of"): da.plot(x="t", hue="wrong_coord") # type: ignore[call-arg] def test_2d_line(self) -> None: with pytest.raises(ValueError, match=r"hue"): self.darray[:, :, 0].plot.line() self.darray[:, :, 0].plot.line(hue="dim_1") self.darray[:, :, 0].plot.line(x="dim_1") self.darray[:, :, 0].plot.line(y="dim_1") self.darray[:, :, 0].plot.line(x="dim_0", hue="dim_1") self.darray[:, :, 0].plot.line(y="dim_0", hue="dim_1") with pytest.raises(ValueError, match=r"Cannot"): self.darray[:, :, 0].plot.line(x="dim_1", y="dim_0", hue="dim_1") def test_2d_line_accepts_legend_kw(self) -> None: self.darray[:, :, 0].plot.line(x="dim_0", add_legend=False) assert not plt.gca().get_legend() plt.cla() self.darray[:, :, 0].plot.line(x="dim_0", add_legend=True) legend = plt.gca().get_legend() assert legend is not None # check whether legend title is set assert legend.get_title().get_text() == "dim_1" def test_2d_line_accepts_x_kw(self) -> None: self.darray[:, :, 0].plot.line(x="dim_0") assert plt.gca().get_xlabel() == "dim_0" plt.cla() self.darray[:, :, 
0].plot.line(x="dim_1") assert plt.gca().get_xlabel() == "dim_1" def test_2d_line_accepts_hue_kw(self) -> None: self.darray[:, :, 0].plot.line(hue="dim_0") legend = plt.gca().get_legend() assert legend is not None assert legend.get_title().get_text() == "dim_0" plt.cla() self.darray[:, :, 0].plot.line(hue="dim_1") legend = plt.gca().get_legend() assert legend is not None assert legend.get_title().get_text() == "dim_1" def test_2d_coords_line_plot(self) -> None: lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4)) lon += lat / 10 lat += lon / 10 da = xr.DataArray( np.arange(20).reshape(4, 5), dims=["y", "x"], coords={"lat": (("y", "x"), lat), "lon": (("y", "x"), lon)}, ) with figure_context(): hdl = da.plot.line(x="lon", hue="x") assert len(hdl) == 5 with figure_context(): hdl = da.plot.line(x="lon", hue="y") assert len(hdl) == 4 with pytest.raises(ValueError, match="For 2D inputs, hue must be a dimension"): da.plot.line(x="lon", hue="lat") def test_2d_coord_line_plot_coords_transpose_invariant(self) -> None: # checks for bug reported in GH #3933 x = np.arange(10) y = np.arange(20) ds = xr.Dataset(coords={"x": x, "y": y}) for z in [ds.y + ds.x, ds.x + ds.y]: ds = ds.assign_coords(z=z) ds["v"] = ds.x + ds.y ds["v"].plot.line(y="z", hue="x") def test_2d_before_squeeze(self) -> None: a = DataArray(easy_array((1, 5))) a.plot() # type: ignore[call-arg] def test2d_uniform_calls_imshow(self) -> None: assert self.imshow_called(self.darray[:, :, 0].plot.imshow) @pytest.mark.slow def test2d_nonuniform_calls_contourf(self) -> None: a = self.darray[:, :, 0] a.coords["dim_1"] = [2, 1, 89] assert self.contourf_called(a.plot.contourf) def test2d_1d_2d_coordinates_contourf(self) -> None: sz = (20, 10) depth = easy_array(sz) a = DataArray( easy_array(sz), dims=["z", "time"], coords={"depth": (["z", "time"], depth), "time": np.linspace(0, 1, sz[1])}, ) a.plot.contourf(x="time", y="depth") a.plot.contourf(x="depth", y="time") def test2d_1d_2d_coordinates_pcolormesh(self) -> None: # Test with equal coordinates to catch bug from #5097 sz = 10 y2d, x2d = np.meshgrid(np.arange(sz), np.arange(sz)) a = DataArray( easy_array((sz, sz)), dims=["x", "y"], coords={"x2d": (["x", "y"], x2d), "y2d": (["x", "y"], y2d)}, ) for x, y in [ ("x", "y"), ("y", "x"), ("x2d", "y"), ("y", "x2d"), ("x", "y2d"), ("y2d", "x"), ("x2d", "y2d"), ("y2d", "x2d"), ]: p = a.plot.pcolormesh(x=x, y=y) v = p.get_paths()[0].vertices assert isinstance(v, np.ndarray) # Check all vertices are different, except last vertex which should be the # same as the first _, unique_counts = np.unique(v[:-1], axis=0, return_counts=True) assert np.all(unique_counts == 1) def test_str_coordinates_pcolormesh(self) -> None: # test for #6775 x = DataArray( [[1, 2, 3], [4, 5, 6]], dims=("a", "b"), coords={"a": [1, 2], "b": ["a", "b", "c"]}, ) x.plot.pcolormesh() x.T.plot.pcolormesh() def test_contourf_cmap_set(self) -> None: a = DataArray(easy_array((4, 4)), dims=["z", "time"]) cmap_expected = mpl.colormaps["viridis"] # use copy to ensure cmap is not changed by contourf() # Set vmin and vmax so that _build_discrete_colormap is called with # extend='both'. 
extend is passed to # mpl.colors.from_levels_and_colors(), which returns a result with # sensible under and over values if extend='both', but not if # extend='neither' (but if extend='neither' the under and over values # would not be used because the data would all be within the plotted # range) pl = a.plot.contourf(cmap=copy(cmap_expected), vmin=0.1, vmax=0.9) # check the set_bad color cmap = pl.cmap assert cmap is not None assert_array_equal( cmap(np.ma.masked_invalid([np.nan]))[0], cmap_expected(np.ma.masked_invalid([np.nan]))[0], ) # check the set_under color assert cmap(-np.inf) == cmap_expected(-np.inf) # check the set_over color assert cmap(np.inf) == cmap_expected(np.inf) def test_contourf_cmap_set_with_bad_under_over(self) -> None: a = DataArray(easy_array((4, 4)), dims=["z", "time"]) # make a copy here because we want a local cmap that we will modify. cmap_expected = copy(mpl.colormaps["viridis"]) cmap_expected.set_bad("w") # check we actually changed the set_bad color assert np.all( cmap_expected(np.ma.masked_invalid([np.nan]))[0] != mpl.colormaps["viridis"](np.ma.masked_invalid([np.nan]))[0] ) cmap_expected.set_under("r") # check we actually changed the set_under color assert cmap_expected(-np.inf) != mpl.colormaps["viridis"](-np.inf) cmap_expected.set_over("g") # check we actually changed the set_over color assert cmap_expected(np.inf) != mpl.colormaps["viridis"](-np.inf) # copy to ensure cmap is not changed by contourf() pl = a.plot.contourf(cmap=copy(cmap_expected)) cmap = pl.cmap assert cmap is not None # check the set_bad color has been kept assert_array_equal( cmap(np.ma.masked_invalid([np.nan]))[0], cmap_expected(np.ma.masked_invalid([np.nan]))[0], ) # check the set_under color has been kept assert cmap(-np.inf) == cmap_expected(-np.inf) # check the set_over color has been kept assert cmap(np.inf) == cmap_expected(np.inf) def test3d(self) -> None: self.darray.plot() # type: ignore[call-arg] def test_can_pass_in_axis(self) -> None: self.pass_in_axis(self.darray.plot) def test__infer_interval_breaks(self) -> None: assert_array_equal([-0.5, 0.5, 1.5], _infer_interval_breaks([0, 1])) assert_array_equal( [-0.5, 0.5, 5.0, 9.5, 10.5], _infer_interval_breaks([0, 1, 9, 10]) ) assert_array_equal( pd.date_range("20000101", periods=4) - np.timedelta64(12, "h"), # type: ignore[operator] _infer_interval_breaks(pd.date_range("20000101", periods=3)), ) # make a bounded 2D array that we will center and re-infer xref, yref = np.meshgrid(np.arange(6), np.arange(5)) cx = (xref[1:, 1:] + xref[:-1, :-1]) / 2 cy = (yref[1:, 1:] + yref[:-1, :-1]) / 2 x = _infer_interval_breaks(cx, axis=1) x = _infer_interval_breaks(x, axis=0) y = _infer_interval_breaks(cy, axis=1) y = _infer_interval_breaks(y, axis=0) np.testing.assert_allclose(xref, x) np.testing.assert_allclose(yref, y) # test that ValueError is raised for non-monotonic 1D inputs with pytest.raises(ValueError): _infer_interval_breaks(np.array([0, 2, 1]), check_monotonic=True) def test__infer_interval_breaks_logscale(self) -> None: """ Check if interval breaks are defined in the logspace if scale="log" """ # Check for 1d arrays x = np.logspace(-4, 3, 8) expected_interval_breaks = 10 ** np.linspace(-4.5, 3.5, 9) np.testing.assert_allclose( _infer_interval_breaks(x, scale="log"), expected_interval_breaks ) # Check for 2d arrays x = np.logspace(-4, 3, 8) y = np.linspace(-5, 5, 11) x, y = np.meshgrid(x, y) expected_interval_breaks = np.vstack([10 ** np.linspace(-4.5, 3.5, 9)] * 12) x = _infer_interval_breaks(x, axis=1, scale="log") x = 
_infer_interval_breaks(x, axis=0, scale="log") np.testing.assert_allclose(x, expected_interval_breaks) def test__infer_interval_breaks_logscale_invalid_coords(self) -> None: """ Check error is raised when passing non-positive coordinates with logscale """ # Check if error is raised after a zero value in the array x = np.linspace(0, 5, 6) with pytest.raises(ValueError): _infer_interval_breaks(x, scale="log") # Check if error is raised after negative values in the array x = np.linspace(-5, 5, 11) with pytest.raises(ValueError): _infer_interval_breaks(x, scale="log") def test_geo_data(self) -> None: # Regression test for gh2250 # Realistic coordinates taken from the example dataset lat = np.array( [ [16.28, 18.48, 19.58, 19.54, 18.35], [28.07, 30.52, 31.73, 31.68, 30.37], [39.65, 42.27, 43.56, 43.51, 42.11], [50.52, 53.22, 54.55, 54.50, 53.06], ] ) lon = np.array( [ [-126.13, -113.69, -100.92, -88.04, -75.29], [-129.27, -115.62, -101.54, -87.32, -73.26], [-133.10, -118.00, -102.31, -86.42, -70.76], [-137.85, -120.99, -103.28, -85.28, -67.62], ] ) data = np.hypot(lon, lat) da = DataArray( data, dims=("y", "x"), coords={"lon": (("y", "x"), lon), "lat": (("y", "x"), lat)}, ) da.plot(x="lon", y="lat") # type: ignore[call-arg] ax = plt.gca() assert ax.has_data() da.plot(x="lat", y="lon") # type: ignore[call-arg] ax = plt.gca() assert ax.has_data() def test_datetime_dimension(self) -> None: nrow = 3 ncol = 4 time = pd.date_range("2000-01-01", periods=nrow) a = DataArray( easy_array((nrow, ncol)), coords=[("time", time), ("y", range(ncol))] ) a.plot() # type: ignore[call-arg] ax = plt.gca() assert ax.has_data() def test_date_dimension(self) -> None: nrow = 3 ncol = 4 start = date(2000, 1, 1) time = [start + timedelta(days=i) for i in range(nrow)] a = DataArray( easy_array((nrow, ncol)), coords=[("time", time), ("y", range(ncol))] ) a.plot() # type: ignore[call-arg] ax = plt.gca() assert ax.has_data() @pytest.mark.slow @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid(self) -> None: a = easy_array((10, 15, 4)) d = DataArray(a, dims=["y", "x", "z"]) d.coords["z"] = list("abcd") g = d.plot(x="x", y="y", col="z", col_wrap=2, cmap="cool") # type: ignore[call-arg] assert_array_equal(g.axs.shape, [2, 2]) for ax in g.axs.flat: assert ax.has_data() with pytest.raises(ValueError, match=r"[Ff]acet"): d.plot(x="x", y="y", col="z", ax=plt.gca()) # type: ignore[call-arg] with pytest.raises(ValueError, match=r"[Ff]acet"): d[0].plot(x="x", y="y", col="z", ax=plt.gca()) # type: ignore[call-arg] @pytest.mark.slow def test_subplot_kws(self) -> None: a = easy_array((10, 15, 4)) d = DataArray(a, dims=["y", "x", "z"]) d.coords["z"] = list("abcd") g = d.plot( # type: ignore[call-arg] x="x", y="y", col="z", col_wrap=2, cmap="cool", subplot_kws=dict(facecolor="r"), ) for ax in g.axs.flat: # mpl V2 assert ax.get_facecolor()[0:3] == mpl.colors.to_rgb("r") @pytest.mark.slow def test_plot_size(self) -> None: self.darray[:, 0, 0].plot(figsize=(13, 5)) # type: ignore[call-arg] assert tuple(plt.gcf().get_size_inches()) == (13, 5) self.darray.plot(figsize=(13, 5)) # type: ignore[call-arg] assert tuple(plt.gcf().get_size_inches()) == (13, 5) self.darray.plot(size=5) # type: ignore[call-arg] assert plt.gcf().get_size_inches()[1] == 5 self.darray.plot(size=5, aspect=2) # type: ignore[call-arg] assert tuple(plt.gcf().get_size_inches()) == (10, 5) with pytest.raises(ValueError, match=r"cannot provide both"): self.darray.plot(ax=plt.gca(), figsize=(3, 4)) # type: ignore[call-arg] with 
pytest.raises(ValueError, match=r"cannot provide both"): self.darray.plot(size=5, figsize=(3, 4)) # type: ignore[call-arg] with pytest.raises(ValueError, match=r"cannot provide both"): self.darray.plot(size=5, ax=plt.gca()) # type: ignore[call-arg] with pytest.raises(ValueError, match=r"cannot provide `aspect`"): self.darray.plot(aspect=1) # type: ignore[call-arg] @pytest.mark.slow @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid_4d(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = d.plot(x="x", y="y", col="columns", row="rows") # type: ignore[call-arg] assert_array_equal(g.axs.shape, [3, 2]) for ax in g.axs.flat: assert ax.has_data() with pytest.raises(ValueError, match=r"[Ff]acet"): d.plot(x="x", y="y", col="columns", ax=plt.gca()) # type: ignore[call-arg] def test_coord_with_interval(self) -> None: """Test line plot with intervals.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).plot() # type: ignore[call-arg] def test_coord_with_interval_x(self) -> None: """Test line plot with intervals explicitly on x axis.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).plot(x="dim_0_bins") # type: ignore[call-arg] def test_coord_with_interval_y(self) -> None: """Test line plot with intervals explicitly on y axis.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).plot(y="dim_0_bins") # type: ignore[call-arg] def test_coord_with_interval_xy(self) -> None: """Test line plot with intervals on both x and y axes.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).dim_0_bins.plot() @pytest.mark.parametrize("dim", ("x", "y")) def test_labels_with_units_with_interval(self, dim) -> None: """Test line plot with intervals and a units attribute.""" bins = [-1, 0, 1, 2] arr = self.darray.groupby_bins("dim_0", bins).mean(...) 
arr.dim_0_bins.attrs["units"] = "m" (mappable,) = arr.plot(**{dim: "dim_0_bins"}) # type: ignore[arg-type] ax = mappable.figure.gca() actual = getattr(ax, f"get_{dim}label")() expected = "dim_0_bins_center [m]" assert actual == expected def test_multiplot_over_length_one_dim(self) -> None: a = easy_array((3, 1, 1, 1)) d = DataArray(a, dims=("x", "col", "row", "hue")) d.plot(col="col") # type: ignore[call-arg] d.plot(row="row") # type: ignore[call-arg] d.plot(hue="hue") # type: ignore[call-arg] class TestPlot1D(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: d = [0, 1.1, 0, 2] self.darray = DataArray(d, coords={"period": range(len(d))}, dims="period") self.darray.period.attrs["units"] = "s" def test_xlabel_is_index_name(self) -> None: self.darray.plot() # type: ignore[call-arg] assert "period [s]" == plt.gca().get_xlabel() def test_no_label_name_on_x_axis(self) -> None: self.darray.plot(y="period") # type: ignore[call-arg] assert "" == plt.gca().get_xlabel() def test_no_label_name_on_y_axis(self) -> None: self.darray.plot() # type: ignore[call-arg] assert "" == plt.gca().get_ylabel() def test_ylabel_is_data_name(self) -> None: self.darray.name = "temperature" self.darray.attrs["units"] = "degrees_Celsius" self.darray.plot() # type: ignore[call-arg] assert "temperature [degrees_Celsius]" == plt.gca().get_ylabel() def test_xlabel_is_data_name(self) -> None: self.darray.name = "temperature" self.darray.attrs["units"] = "degrees_Celsius" self.darray.plot(y="period") # type: ignore[call-arg] assert "temperature [degrees_Celsius]" == plt.gca().get_xlabel() def test_format_string(self) -> None: self.darray.plot.line("ro") def test_can_pass_in_axis(self) -> None: self.pass_in_axis(self.darray.plot.line) def test_nonnumeric_index(self) -> None: a = DataArray([1, 2, 3], {"letter": ["a", "b", "c"]}, dims="letter") a.plot.line() def test_primitive_returned(self) -> None: p = self.darray.plot.line() assert isinstance(p[0], mpl.lines.Line2D) @pytest.mark.slow def test_plot_nans(self) -> None: self.darray[1] = np.nan self.darray.plot.line() def test_dates_are_concise(self) -> None: import matplotlib.dates as mdates time = pd.date_range("2000-01-01", "2000-01-10") a = DataArray(np.arange(len(time)), [("t", time)]) a.plot.line() ax = plt.gca() assert isinstance(ax.xaxis.get_major_locator(), mdates.AutoDateLocator) assert isinstance(ax.xaxis.get_major_formatter(), mdates.ConciseDateFormatter) def test_xyincrease_false_changes_axes(self) -> None: self.darray.plot.line(xincrease=False, yincrease=False) xlim = plt.gca().get_xlim() ylim = plt.gca().get_ylim() diffs = xlim[1] - xlim[0], ylim[1] - ylim[0] assert all(x < 0 for x in diffs) def test_slice_in_title(self) -> None: self.darray.coords["d"] = 10.009 self.darray.plot.line() title = plt.gca().get_title() assert "d = 10.01" == title def test_slice_in_title_single_item_array(self) -> None: """Edge case for data of shape (1, N) or (N, 1).""" darray = self.darray.expand_dims({"d": np.array([10.009])}) darray.plot.line(x="period") title = plt.gca().get_title() assert "d = [10.009]" == title class TestPlotStep(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: self.darray = DataArray(easy_array((2, 3, 4))) def test_step(self) -> None: hdl = self.darray[0, 0].plot.step() assert "steps" in hdl[0].get_drawstyle() @pytest.mark.parametrize("where", ["pre", "post", "mid"]) def test_step_with_where(self, where) -> None: hdl = self.darray[0, 0].plot.step(where=where) assert hdl[0].get_drawstyle() == f"steps-{where}" def 
test_step_with_hue(self) -> None: hdl = self.darray[0].plot.step(hue="dim_2") assert hdl[0].get_drawstyle() == "steps-pre" @pytest.mark.parametrize("where", ["pre", "post", "mid"]) def test_step_with_hue_and_where(self, where) -> None: hdl = self.darray[0].plot.step(hue="dim_2", where=where) assert hdl[0].get_drawstyle() == f"steps-{where}" def test_drawstyle_steps(self) -> None: hdl = self.darray[0].plot(hue="dim_2", drawstyle="steps") # type: ignore[call-arg] assert hdl[0].get_drawstyle() == "steps" @pytest.mark.parametrize("where", ["pre", "post", "mid"]) def test_drawstyle_steps_with_where(self, where) -> None: hdl = self.darray[0].plot(hue="dim_2", drawstyle=f"steps-{where}") # type: ignore[call-arg] assert hdl[0].get_drawstyle() == f"steps-{where}" def test_coord_with_interval_step(self) -> None: """Test step plot with intervals.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).plot.step() line = plt.gca().lines[0] assert isinstance(line, mpl.lines.Line2D) assert len(np.asarray(line.get_xdata())) == ((len(bins) - 1) * 2) def test_coord_with_interval_step_x(self) -> None: """Test step plot with intervals explicitly on x axis.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).plot.step(x="dim_0_bins") line = plt.gca().lines[0] assert isinstance(line, mpl.lines.Line2D) assert len(np.asarray(line.get_xdata())) == ((len(bins) - 1) * 2) def test_coord_with_interval_step_y(self) -> None: """Test step plot with intervals explicitly on y axis.""" bins = [-1, 0, 1, 2] self.darray.groupby_bins("dim_0", bins).mean(...).plot.step(y="dim_0_bins") line = plt.gca().lines[0] assert isinstance(line, mpl.lines.Line2D) assert len(np.asarray(line.get_xdata())) == ((len(bins) - 1) * 2) def test_coord_with_interval_step_x_and_y_raises_valueeerror(self) -> None: """Test that step plot with intervals both on x and y axes raises an error.""" arr = xr.DataArray( [pd.Interval(0, 1), pd.Interval(1, 2)], coords=[("x", [pd.Interval(0, 1), pd.Interval(1, 2)])], ) with pytest.raises(TypeError, match="intervals against intervals"): arr.plot.step() class TestPlotHistogram(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: self.darray = DataArray(easy_array((2, 3, 4))) def test_3d_array(self) -> None: self.darray.plot.hist() # type: ignore[call-arg] def test_xlabel_uses_name(self) -> None: self.darray.name = "testpoints" self.darray.attrs["units"] = "testunits" self.darray.plot.hist() # type: ignore[call-arg] assert "testpoints [testunits]" == plt.gca().get_xlabel() def test_title_is_histogram(self) -> None: self.darray.coords["d"] = 10 self.darray.plot.hist() # type: ignore[call-arg] assert "d = 10" == plt.gca().get_title() def test_can_pass_in_kwargs(self) -> None: nbins = 5 self.darray.plot.hist(bins=nbins) # type: ignore[call-arg] assert nbins == len(plt.gca().patches) def test_can_pass_in_axis(self) -> None: self.pass_in_axis(self.darray.plot.hist) def test_primitive_returned(self) -> None: n, bins, patches = self.darray.plot.hist() # type: ignore[call-arg] assert isinstance(n, np.ndarray) assert isinstance(bins, np.ndarray) assert isinstance(patches, mpl.container.BarContainer) assert isinstance(patches[0], mpl.patches.Rectangle) @pytest.mark.slow def test_plot_nans(self) -> None: self.darray[0, 0, 0] = np.nan self.darray.plot.hist() # type: ignore[call-arg] def test_hist_coord_with_interval(self) -> None: ( self.darray.groupby_bins("dim_0", [-1, 0, 1, 2]) # type: ignore[call-arg] .mean(...) 
.plot.hist(range=(-1, 2)) ) @requires_matplotlib class TestDetermineCmapParams: @pytest.fixture(autouse=True) def setUp(self) -> None: self.data = np.linspace(0, 1, num=100) def test_robust(self) -> None: cmap_params = _determine_cmap_params(self.data, robust=True) assert cmap_params["vmin"] == np.percentile(self.data, 2) assert cmap_params["vmax"] == np.percentile(self.data, 98) assert cmap_params["cmap"] == "viridis" assert cmap_params["extend"] == "both" assert cmap_params["levels"] is None assert cmap_params["norm"] is None def test_center(self) -> None: cmap_params = _determine_cmap_params(self.data, center=0.5) assert cmap_params["vmax"] - 0.5 == 0.5 - cmap_params["vmin"] assert cmap_params["cmap"] == "RdBu_r" assert cmap_params["extend"] == "neither" assert cmap_params["levels"] is None assert cmap_params["norm"] is None def test_cmap_sequential_option(self) -> None: with xr.set_options(cmap_sequential="magma"): cmap_params = _determine_cmap_params(self.data) assert cmap_params["cmap"] == "magma" def test_cmap_sequential_explicit_option(self) -> None: with xr.set_options(cmap_sequential=mpl.colormaps["magma"]): cmap_params = _determine_cmap_params(self.data) assert cmap_params["cmap"] == mpl.colormaps["magma"] def test_cmap_divergent_option(self) -> None: with xr.set_options(cmap_divergent="magma"): cmap_params = _determine_cmap_params(self.data, center=0.5) assert cmap_params["cmap"] == "magma" def test_nan_inf_are_ignored(self) -> None: cmap_params1 = _determine_cmap_params(self.data) data = self.data data[50:55] = np.nan data[56:60] = np.inf cmap_params2 = _determine_cmap_params(data) assert cmap_params1["vmin"] == cmap_params2["vmin"] assert cmap_params1["vmax"] == cmap_params2["vmax"] @pytest.mark.slow def test_integer_levels(self) -> None: data = self.data + 1 # default is to cover full data range but with no guarantee on Nlevels for level in np.arange(2, 10, dtype=int): cmap_params = _determine_cmap_params(data, levels=level) assert cmap_params["vmin"] is None assert cmap_params["vmax"] is None assert cmap_params["norm"].vmin == cmap_params["levels"][0] assert cmap_params["norm"].vmax == cmap_params["levels"][-1] assert cmap_params["extend"] == "neither" # with min max we are more strict cmap_params = _determine_cmap_params( data, levels=5, vmin=0, vmax=5, cmap="Blues" ) assert cmap_params["vmin"] is None assert cmap_params["vmax"] is None assert cmap_params["norm"].vmin == 0 assert cmap_params["norm"].vmax == 5 assert cmap_params["norm"].vmin == cmap_params["levels"][0] assert cmap_params["norm"].vmax == cmap_params["levels"][-1] assert cmap_params["cmap"].name == "Blues" assert cmap_params["extend"] == "neither" assert cmap_params["cmap"].N == 4 assert cmap_params["norm"].N == 5 cmap_params = _determine_cmap_params(data, levels=5, vmin=0.5, vmax=1.5) assert cmap_params["cmap"].name == "viridis" assert cmap_params["extend"] == "max" cmap_params = _determine_cmap_params(data, levels=5, vmin=1.5) assert cmap_params["cmap"].name == "viridis" assert cmap_params["extend"] == "min" cmap_params = _determine_cmap_params(data, levels=5, vmin=1.3, vmax=1.5) assert cmap_params["cmap"].name == "viridis" assert cmap_params["extend"] == "both" def test_list_levels(self) -> None: data = self.data + 1 orig_levels = [0, 1, 2, 3, 4, 5] # vmin and vmax should be ignored if levels are explicitly provided cmap_params = _determine_cmap_params(data, levels=orig_levels, vmin=0, vmax=3) assert cmap_params["vmin"] is None assert cmap_params["vmax"] is None assert cmap_params["norm"].vmin == 0 assert 
cmap_params["norm"].vmax == 5 assert cmap_params["cmap"].N == 5 assert cmap_params["norm"].N == 6 for wrap_levels in cast( list[Callable[[Any], dict[Any, Any]]], [list, np.array, pd.Index, DataArray] ): cmap_params = _determine_cmap_params(data, levels=wrap_levels(orig_levels)) assert_array_equal(cmap_params["levels"], orig_levels) def test_divergentcontrol(self) -> None: neg = self.data - 0.1 pos = self.data # Default with positive data will be a normal cmap cmap_params = _determine_cmap_params(pos) assert cmap_params["vmin"] == 0 assert cmap_params["vmax"] == 1 assert cmap_params["cmap"] == "viridis" # Default with negative data will be a divergent cmap cmap_params = _determine_cmap_params(neg) assert cmap_params["vmin"] == -0.9 assert cmap_params["vmax"] == 0.9 assert cmap_params["cmap"] == "RdBu_r" # Setting vmin or vmax should prevent this only if center is false cmap_params = _determine_cmap_params(neg, vmin=-0.1, center=False) assert cmap_params["vmin"] == -0.1 assert cmap_params["vmax"] == 0.9 assert cmap_params["cmap"] == "viridis" cmap_params = _determine_cmap_params(neg, vmax=0.5, center=False) assert cmap_params["vmin"] == -0.1 assert cmap_params["vmax"] == 0.5 assert cmap_params["cmap"] == "viridis" # Setting center=False too cmap_params = _determine_cmap_params(neg, center=False) assert cmap_params["vmin"] == -0.1 assert cmap_params["vmax"] == 0.9 assert cmap_params["cmap"] == "viridis" # However, I should still be able to set center and have a div cmap cmap_params = _determine_cmap_params(neg, center=0) assert cmap_params["vmin"] == -0.9 assert cmap_params["vmax"] == 0.9 assert cmap_params["cmap"] == "RdBu_r" # Setting vmin or vmax alone will force symmetric bounds around center cmap_params = _determine_cmap_params(neg, vmin=-0.1) assert cmap_params["vmin"] == -0.1 assert cmap_params["vmax"] == 0.1 assert cmap_params["cmap"] == "RdBu_r" cmap_params = _determine_cmap_params(neg, vmax=0.5) assert cmap_params["vmin"] == -0.5 assert cmap_params["vmax"] == 0.5 assert cmap_params["cmap"] == "RdBu_r" cmap_params = _determine_cmap_params(neg, vmax=0.6, center=0.1) assert cmap_params["vmin"] == -0.4 assert cmap_params["vmax"] == 0.6 assert cmap_params["cmap"] == "RdBu_r" # But this is only true if vmin or vmax are negative cmap_params = _determine_cmap_params(pos, vmin=-0.1) assert cmap_params["vmin"] == -0.1 assert cmap_params["vmax"] == 0.1 assert cmap_params["cmap"] == "RdBu_r" cmap_params = _determine_cmap_params(pos, vmin=0.1) assert cmap_params["vmin"] == 0.1 assert cmap_params["vmax"] == 1 assert cmap_params["cmap"] == "viridis" cmap_params = _determine_cmap_params(pos, vmax=0.5) assert cmap_params["vmin"] == 0 assert cmap_params["vmax"] == 0.5 assert cmap_params["cmap"] == "viridis" # If both vmin and vmax are provided, output is non-divergent cmap_params = _determine_cmap_params(neg, vmin=-0.2, vmax=0.6) assert cmap_params["vmin"] == -0.2 assert cmap_params["vmax"] == 0.6 assert cmap_params["cmap"] == "viridis" # regression test for GH3524 # infer diverging colormap from divergent levels cmap_params = _determine_cmap_params(pos, levels=[-0.1, 0, 1]) # specifying levels makes cmap a Colormap object assert cmap_params["cmap"].name == "RdBu_r" def test_norm_sets_vmin_vmax(self) -> None: vmin = self.data.min() vmax = self.data.max() for norm, extend, levels in zip( [ mpl.colors.Normalize(), mpl.colors.Normalize(), mpl.colors.Normalize(vmin + 0.1, vmax - 0.1), mpl.colors.Normalize(None, vmax - 0.1), mpl.colors.Normalize(vmin + 0.1, None), ], ["neither", "neither", "both", "max", 
"min"], [7, None, None, None, None], strict=True, ): test_min = vmin if norm.vmin is None else norm.vmin test_max = vmax if norm.vmax is None else norm.vmax cmap_params = _determine_cmap_params(self.data, norm=norm, levels=levels) assert cmap_params["vmin"] is None assert cmap_params["vmax"] is None assert cmap_params["norm"].vmin == test_min assert cmap_params["norm"].vmax == test_max assert cmap_params["extend"] == extend assert cmap_params["norm"] == norm @requires_matplotlib class TestDiscreteColorMap: @pytest.fixture(autouse=True) def setUp(self): x = np.arange(start=0, stop=10, step=2) y = np.arange(start=9, stop=-7, step=-3) xy = np.dstack(np.meshgrid(x, y)) distance = np.linalg.norm(xy, axis=2) self.darray = DataArray(distance, list(zip(("y", "x"), (y, x), strict=True))) self.data_min = distance.min() self.data_max = distance.max() yield # Remove all matplotlib figures plt.close("all") @pytest.mark.slow def test_recover_from_seaborn_jet_exception(self) -> None: pal = _color_palette("jet", 4) assert type(pal) is np.ndarray assert len(pal) == 4 @pytest.mark.slow def test_build_discrete_cmap(self) -> None: for cmap, levels, extend, filled in [ ("jet", [0, 1], "both", False), ("hot", [-4, 4], "max", True), ]: ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled) assert ncmap.N == len(levels) - 1 assert len(ncmap.colors) == len(levels) - 1 assert cnorm.N == len(levels) assert_array_equal(cnorm.boundaries, levels) assert max(levels) == cnorm.vmax assert min(levels) == cnorm.vmin if filled: assert ncmap.colorbar_extend == extend else: assert ncmap.colorbar_extend == "max" @pytest.mark.slow def test_discrete_colormap_list_of_levels(self) -> None: for extend, levels in [ ("max", [-1, 2, 4, 8, 10]), ("both", [2, 5, 10, 11]), ("neither", [0, 5, 10, 15]), ("min", [2, 5, 10, 15]), ]: for kind in ["imshow", "pcolormesh", "contourf", "contour"]: primitive = getattr(self.darray.plot, kind)(levels=levels) assert_array_equal(levels, primitive.norm.boundaries) assert max(levels) == primitive.norm.vmax assert min(levels) == primitive.norm.vmin if kind != "contour": assert extend == primitive.cmap.colorbar_extend else: assert "max" == primitive.cmap.colorbar_extend assert len(levels) - 1 == len(primitive.cmap.colors) @pytest.mark.slow def test_discrete_colormap_int_levels(self) -> None: for extend, levels, vmin, vmax, cmap in [ ("neither", 7, None, None, None), ("neither", 7, None, 20, mpl.colormaps["RdBu"]), ("both", 7, 4, 8, None), ("min", 10, 4, 15, None), ]: for kind in ["imshow", "pcolormesh", "contourf", "contour"]: primitive = getattr(self.darray.plot, kind)( levels=levels, vmin=vmin, vmax=vmax, cmap=cmap ) assert levels >= len(primitive.norm.boundaries) - 1 if vmax is None: assert primitive.norm.vmax >= self.data_max else: assert primitive.norm.vmax >= vmax if vmin is None: assert primitive.norm.vmin <= self.data_min else: assert primitive.norm.vmin <= vmin if kind != "contour": assert extend == primitive.cmap.colorbar_extend else: assert "max" == primitive.cmap.colorbar_extend assert levels >= len(primitive.cmap.colors) def test_discrete_colormap_list_levels_and_vmin_or_vmax(self) -> None: levels = [0, 5, 10, 15] primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20) # type: ignore[call-arg] assert primitive.norm.vmax == max(levels) assert primitive.norm.vmin == min(levels) def test_discrete_colormap_provided_boundary_norm(self) -> None: norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4) primitive = self.darray.plot.contourf(norm=norm) 
np.testing.assert_allclose(list(primitive.levels), norm.boundaries) def test_discrete_colormap_provided_boundary_norm_matching_cmap_levels( self, ) -> None: norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4) primitive = self.darray.plot.contourf(norm=norm) cbar = primitive.colorbar assert cbar is not None assert cbar.norm.Ncmap == cbar.norm.N # type: ignore[attr-defined] # Exists, debatable if public though. class Common2dMixin: """ Common tests for 2d plotting go here. These tests assume that a staticmethod for `self.plotfunc` exists. Should have the same name as the method. """ darray: DataArray plotfunc: staticmethod pass_in_axis: Callable # Needs to be overridden in TestSurface for facet grid plots subplot_kws: dict[Any, Any] | None = None @pytest.fixture(autouse=True) def setUp(self) -> None: da = DataArray( easy_array((10, 15), start=-1), dims=["y", "x"], coords={"y": np.arange(10), "x": np.arange(15)}, ) # add 2d coords ds = da.to_dataset(name="testvar") x, y = np.meshgrid(da.x.values, da.y.values) ds["x2d"] = DataArray(x, dims=["y", "x"]) ds["y2d"] = DataArray(y, dims=["y", "x"]) ds = ds.set_coords(["x2d", "y2d"]) # set darray and plot method self.darray: DataArray = ds.testvar # Add CF-compliant metadata self.darray.attrs["long_name"] = "a_long_name" self.darray.attrs["units"] = "a_units" self.darray.x.attrs["long_name"] = "x_long_name" self.darray.x.attrs["units"] = "x_units" self.darray.y.attrs["long_name"] = "y_long_name" self.darray.y.attrs["units"] = "y_units" self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__) def test_label_names(self) -> None: self.plotmethod() assert "x_long_name [x_units]" == plt.gca().get_xlabel() assert "y_long_name [y_units]" == plt.gca().get_ylabel() def test_1d_raises_valueerror(self) -> None: with pytest.raises(ValueError, match=r"DataArray must be 2d"): self.plotfunc(self.darray[0, :]) def test_bool(self) -> None: xr.ones_like(self.darray, dtype=bool).plot() # type: ignore[call-arg] def test_complex_raises_typeerror(self) -> None: with pytest.raises(TypeError, match=r"complex128"): (self.darray + 1j).plot() # type: ignore[call-arg] def test_3d_raises_valueerror(self) -> None: a = DataArray(easy_array((2, 3, 4))) if self.plotfunc.__name__ == "imshow": pytest.skip() with pytest.raises(ValueError, match=r"DataArray must be 2d"): self.plotfunc(a) def test_nonnumeric_index(self) -> None: a = DataArray(easy_array((3, 2)), coords=[["a", "b", "c"], ["d", "e"]]) if self.plotfunc.__name__ == "surface": # ax.plot_surface errors with nonnumerics: with pytest.raises(TypeError, match="not supported for the input types"): self.plotfunc(a) else: self.plotfunc(a) def test_multiindex_raises_typeerror(self) -> None: a = DataArray( easy_array((3, 2)), dims=("x", "y"), coords=dict(x=("x", [0, 1, 2]), a=("y", [0, 1]), b=("y", [2, 3])), ) a = a.set_index(y=("a", "b")) with pytest.raises(TypeError, match=r"[Pp]lot"): self.plotfunc(a) def test_can_pass_in_axis(self) -> None: self.pass_in_axis(self.plotmethod) def test_xyincrease_defaults(self) -> None: # With default settings the axis must be ordered regardless # of the coords order. 
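        # Passing xincrease/yincrease explicitly is expected to override this
        # default ordering; illustrative call only, exercised for real in
        # test_xyincrease_false_changes_axes below:
        #
        #     darray.plot.pcolormesh(xincrease=False, yincrease=False)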
self.plotfunc(DataArray(easy_array((3, 2)), coords=[[1, 2, 3], [1, 2]])) bounds = plt.gca().get_ylim() assert bounds[0] < bounds[1] bounds = plt.gca().get_xlim() assert bounds[0] < bounds[1] # Inverted coords self.plotfunc(DataArray(easy_array((3, 2)), coords=[[3, 2, 1], [2, 1]])) bounds = plt.gca().get_ylim() assert bounds[0] < bounds[1] bounds = plt.gca().get_xlim() assert bounds[0] < bounds[1] def test_xyincrease_false_changes_axes(self) -> None: self.plotmethod(xincrease=False, yincrease=False) xlim = plt.gca().get_xlim() ylim = plt.gca().get_ylim() diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0 assert all(abs(x) < 1 for x in diffs) def test_xyincrease_true_changes_axes(self) -> None: self.plotmethod(xincrease=True, yincrease=True) xlim = plt.gca().get_xlim() ylim = plt.gca().get_ylim() diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9 assert all(abs(x) < 1 for x in diffs) def test_dates_are_concise(self) -> None: import matplotlib.dates as mdates time = pd.date_range("2000-01-01", "2000-01-10") a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)]) self.plotfunc(a, x="t") ax = plt.gca() assert isinstance(ax.xaxis.get_major_locator(), mdates.AutoDateLocator) assert isinstance(ax.xaxis.get_major_formatter(), mdates.ConciseDateFormatter) def test_plot_nans(self) -> None: x1 = self.darray[:5] x2 = self.darray.copy() x2[5:] = np.nan clim1 = self.plotfunc(x1).get_clim() clim2 = self.plotfunc(x2).get_clim() assert clim1 == clim2 @pytest.mark.filterwarnings("ignore::UserWarning") @pytest.mark.filterwarnings("ignore:invalid value encountered") def test_can_plot_all_nans(self) -> None: # regression test for issue #1780 self.plotfunc(DataArray(np.full((2, 2), np.nan))) @pytest.mark.filterwarnings("ignore: Attempting to set") def test_can_plot_axis_size_one(self) -> None: if self.plotfunc.__name__ not in ("contour", "contourf"): self.plotfunc(DataArray(np.ones((1, 1)))) def test_disallows_rgb_arg(self) -> None: with pytest.raises(ValueError): # Always invalid for most plots. Invalid for imshow with 2D data. 
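            # For reference (hedged, comments only): rgb= is only meaningful
            # for imshow on 3-D data whose extra dimension holds the colour
            # bands, e.g. dims ["y", "x", "band"] as in test_plot_rgb_image
            # further down; with the 2-D data constructed below it must raise.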
self.plotfunc(DataArray(np.ones((2, 2))), rgb="not None") def test_viridis_cmap(self) -> None: cmap_name = self.plotmethod(cmap="viridis").get_cmap().name assert "viridis" == cmap_name def test_default_cmap(self) -> None: cmap_name = self.plotmethod().get_cmap().name assert "RdBu_r" == cmap_name cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name assert "viridis" == cmap_name @requires_seaborn def test_seaborn_palette_as_cmap(self) -> None: cmap_name = self.plotmethod(levels=2, cmap="husl").get_cmap().name assert "husl" == cmap_name def test_can_change_default_cmap(self) -> None: cmap_name = self.plotmethod(cmap="Blues").get_cmap().name assert "Blues" == cmap_name def test_diverging_color_limits(self) -> None: artist = self.plotmethod() vmin, vmax = artist.get_clim() assert round(abs(-vmin - vmax), 7) == 0 def test_xy_strings(self) -> None: self.plotmethod(x="y", y="x") ax = plt.gca() assert "y_long_name [y_units]" == ax.get_xlabel() assert "x_long_name [x_units]" == ax.get_ylabel() def test_positional_coord_string(self) -> None: self.plotmethod(y="x") ax = plt.gca() assert "x_long_name [x_units]" == ax.get_ylabel() assert "y_long_name [y_units]" == ax.get_xlabel() self.plotmethod(x="x") ax = plt.gca() assert "x_long_name [x_units]" == ax.get_xlabel() assert "y_long_name [y_units]" == ax.get_ylabel() def test_bad_x_string_exception(self) -> None: with pytest.raises(ValueError, match=r"x and y cannot be equal."): self.plotmethod(x="y", y="y") error_msg = "must be one of None, 'x', 'x2d', 'y', 'y2d'" with pytest.raises(ValueError, match=rf"x {error_msg}"): self.plotmethod(x="not_a_real_dim", y="y") with pytest.raises(ValueError, match=rf"x {error_msg}"): self.plotmethod(x="not_a_real_dim") with pytest.raises(ValueError, match=rf"y {error_msg}"): self.plotmethod(y="not_a_real_dim") self.darray.coords["z"] = 100 def test_coord_strings(self) -> None: # 1d coords (same as dims) assert {"x", "y"} == set(self.darray.dims) self.plotmethod(y="y", x="x") def test_non_linked_coords(self) -> None: # plot with coordinate names that are not dimensions self.darray.coords["newy"] = self.darray.y + 150 # Normal case, without transpose self.plotfunc(self.darray, x="x", y="newy") ax = plt.gca() assert "x_long_name [x_units]" == ax.get_xlabel() assert "newy" == ax.get_ylabel() # ax limits might change between plotfuncs # simply ensure that these high coords were passed over assert np.min(ax.get_ylim()) > 100.0 def test_non_linked_coords_transpose(self) -> None: # plot with coordinate names that are not dimensions, # and with transposed y and x axes # This used to raise an error with pcolormesh and contour # https://github.com/pydata/xarray/issues/788 self.darray.coords["newy"] = self.darray.y + 150 self.plotfunc(self.darray, x="newy", y="x") ax = plt.gca() assert "newy" == ax.get_xlabel() assert "x_long_name [x_units]" == ax.get_ylabel() # ax limits might change between plotfuncs # simply ensure that these high coords were passed over assert np.min(ax.get_xlim()) > 100.0 def test_multiindex_level_as_coord(self) -> None: da = DataArray( easy_array((3, 2)), dims=("x", "y"), coords=dict(x=("x", [0, 1, 2]), a=("y", [0, 1]), b=("y", [2, 3])), ) da = da.set_index(y=["a", "b"]) for x, y in (("a", "x"), ("b", "x"), ("x", "a"), ("x", "b")): self.plotfunc(da, x=x, y=y) ax = plt.gca() assert x == ax.get_xlabel() assert y == ax.get_ylabel() with pytest.raises(ValueError, match=r"levels of the same MultiIndex"): self.plotfunc(da, x="a", y="b") with pytest.raises(ValueError, match=r"y must be one of None, 'a', 'b', 
'x'"): self.plotfunc(da, x="a", y="y") def test_default_title(self) -> None: a = DataArray(easy_array((4, 3, 2)), dims=["a", "b", "c"]) a.coords["c"] = [0, 1] a.coords["d"] = "foo" self.plotfunc(a.isel(c=1)) title = plt.gca().get_title() assert title in {"c = 1, d = foo", "d = foo, c = 1"} def test_colorbar_default_label(self) -> None: self.plotmethod(add_colorbar=True) assert "a_long_name [a_units]" in text_in_fig() def test_no_labels(self) -> None: self.darray.name = "testvar" self.darray.attrs["units"] = "test_units" self.plotmethod(add_labels=False) alltxt = text_in_fig() for string in [ "x_long_name [x_units]", "y_long_name [y_units]", "testvar [test_units]", ]: assert string not in alltxt def test_colorbar_kwargs(self) -> None: # replace label self.darray.attrs.pop("long_name") self.darray.attrs["units"] = "test_units" # check default colorbar label self.plotmethod(add_colorbar=True) alltxt = text_in_fig() assert "testvar [test_units]" in alltxt self.darray.attrs.pop("units") self.darray.name = "testvar" self.plotmethod(add_colorbar=True, cbar_kwargs={"label": "MyLabel"}) alltxt = text_in_fig() assert "MyLabel" in alltxt assert "testvar" not in alltxt # you can use anything accepted by the dict constructor as well self.plotmethod(add_colorbar=True, cbar_kwargs=(("label", "MyLabel"),)) alltxt = text_in_fig() assert "MyLabel" in alltxt assert "testvar" not in alltxt # change cbar ax fig, axs = plt.subplots(1, 2, squeeze=False) ax = axs[0, 0] cax = axs[0, 1] self.plotmethod( ax=ax, cbar_ax=cax, add_colorbar=True, cbar_kwargs={"label": "MyBar"} ) assert ax.has_data() assert cax.has_data() alltxt = text_in_fig() assert "MyBar" in alltxt assert "testvar" not in alltxt # note that there are two ways to achieve this fig, axs = plt.subplots(1, 2, squeeze=False) ax = axs[0, 0] cax = axs[0, 1] self.plotmethod( ax=ax, add_colorbar=True, cbar_kwargs={"label": "MyBar", "cax": cax} ) assert ax.has_data() assert cax.has_data() alltxt = text_in_fig() assert "MyBar" in alltxt assert "testvar" not in alltxt # see that no colorbar is respected self.plotmethod(add_colorbar=False) assert "testvar" not in text_in_fig() # check that error is raised pytest.raises( ValueError, self.plotmethod, add_colorbar=False, cbar_kwargs={"label": "label"}, ) def test_verbose_facetgrid(self) -> None: a = easy_array((10, 15, 3)) d = DataArray(a, dims=["y", "x", "z"]) g = xplt.FacetGrid(d, col="z", subplot_kws=self.subplot_kws) g.map_dataarray(self.plotfunc, "x", "y") for ax in g.axs.flat: assert ax.has_data() def test_2d_function_and_method_signature_same(self) -> None: func_sig = inspect.signature(self.plotfunc) method_sig = inspect.signature(self.plotmethod) for argname, param in method_sig.parameters.items(): assert func_sig.parameters[argname] == param @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid(self) -> None: a = easy_array((10, 15, 4)) d = DataArray(a, dims=["y", "x", "z"]) g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2) assert_array_equal(g.axs.shape, [2, 2]) for (y, x), ax in np.ndenumerate(g.axs): assert ax.has_data() if x == 0: assert "y" == ax.get_ylabel() else: assert "" == ax.get_ylabel() if y == 1: assert "x" == ax.get_xlabel() else: assert "" == ax.get_xlabel() # Inferring labels g = self.plotfunc(d, col="z", col_wrap=2) assert_array_equal(g.axs.shape, [2, 2]) for (y, x), ax in np.ndenumerate(g.axs): assert ax.has_data() if x == 0: assert "y" == ax.get_ylabel() else: assert "" == ax.get_ylabel() if y == 1: assert "x" == ax.get_xlabel() else: assert "" == 
ax.get_xlabel() @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid_4d(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = self.plotfunc(d, x="x", y="y", col="columns", row="rows") assert_array_equal(g.axs.shape, [3, 2]) for ax in g.axs.flat: assert ax.has_data() @pytest.mark.filterwarnings("ignore:This figure includes") def test_facetgrid_map_only_appends_mappables(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = self.plotfunc(d, x="x", y="y", col="columns", row="rows") expected = g._mappables g.map(lambda: plt.plot(1, 1)) actual = g._mappables assert expected == actual def test_facetgrid_cmap(self) -> None: # Regression test for GH592 data = np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12) d = DataArray(data, dims=["x", "y", "time"]) fg = d.plot.pcolormesh(col="time") # check that all color limits are the same assert len({m.get_clim() for m in fg._mappables}) == 1 # check that all colormaps are the same assert len({m.get_cmap().name for m in fg._mappables}) == 1 def test_facetgrid_cbar_kwargs(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) g = self.plotfunc( d, x="x", y="y", col="columns", row="rows", cbar_kwargs={"label": "test_label"}, ) # catch contour case if g.cbar is not None: assert get_colorbar_label(g.cbar) == "test_label" def test_facetgrid_no_cbar_ax(self) -> None: a = easy_array((10, 15, 2, 3)) d = DataArray(a, dims=["y", "x", "columns", "rows"]) with pytest.raises(ValueError): self.plotfunc(d, x="x", y="y", col="columns", row="rows", cbar_ax=1) def test_cmap_and_color_both(self) -> None: with pytest.raises(ValueError): self.plotmethod(colors="k", cmap="RdBu") def test_2d_coord_with_interval(self) -> None: for dim in self.darray.dims: gp = self.darray.groupby_bins(dim, range(15), restore_coord_dims=True).mean( [dim] ) for kind in ["imshow", "pcolormesh", "contourf", "contour"]: getattr(gp.plot, kind)() def test_colormap_error_norm_and_vmin_vmax(self) -> None: norm = mpl.colors.LogNorm(0.1, 1e1) with pytest.raises(ValueError): self.darray.plot(norm=norm, vmin=2) # type: ignore[call-arg] with pytest.raises(ValueError): self.darray.plot(norm=norm, vmax=2) # type: ignore[call-arg] @pytest.mark.slow class TestContourf(Common2dMixin, PlotTestCase): plotfunc = staticmethod(xplt.contourf) @pytest.mark.slow def test_contourf_called(self) -> None: # Having both statements ensures the test works properly assert not self.contourf_called(self.darray.plot.imshow) assert self.contourf_called(self.darray.plot.contourf) def test_primitive_artist_returned(self) -> None: artist = self.plotmethod() assert isinstance(artist, mpl.contour.QuadContourSet) @pytest.mark.slow def test_extend(self) -> None: artist = self.plotmethod() assert artist.extend == "neither" self.darray[0, 0] = -100 self.darray[-1, -1] = 100 artist = self.plotmethod(robust=True) assert artist.extend == "both" self.darray[0, 0] = 0 self.darray[-1, -1] = 0 artist = self.plotmethod(vmin=-0, vmax=10) assert artist.extend == "min" artist = self.plotmethod(vmin=-10, vmax=0) assert artist.extend == "max" @pytest.mark.slow def test_2d_coord_names(self) -> None: self.plotmethod(x="x2d", y="y2d") # make sure labels came out ok ax = plt.gca() assert "x2d" == ax.get_xlabel() assert "y2d" == ax.get_ylabel() @pytest.mark.slow def test_levels(self) -> None: artist = self.plotmethod(levels=[-0.5, -0.4, 0.1]) assert artist.extend == "both" 
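        # Rough rationale (illustrative, values approximate): the fixture data
        # spans roughly [-1, 1], so the explicit level list [-0.5, -0.4, 0.1]
        # leaves values uncovered on both sides, hence extend == "both".  An
        # integer level count, as below, is spread over the full data range,
        # so nothing needs extending and extend stays "neither".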
artist = self.plotmethod(levels=3) assert artist.extend == "neither" def test_colormap_norm(self) -> None: # Using a norm should plot a nice colorbar and look consistent with pcolormesh. norm = mpl.colors.LogNorm(0.1, 1e1) with pytest.warns(UserWarning): artist = self.plotmethod(norm=norm, add_colorbar=True) actual = artist.colorbar.locator() expected = np.array([0.01, 0.1, 1.0, 10.0]) np.testing.assert_allclose(actual, expected) @pytest.mark.slow class TestContour(Common2dMixin, PlotTestCase): plotfunc = staticmethod(xplt.contour) # matplotlib cmap.colors gives an rgbA ndarray # when seaborn is used, instead we get an rgb tuple @staticmethod def _color_as_tuple(c: Any) -> tuple[Any, Any, Any]: return c[0], c[1], c[2] def test_colors(self) -> None: # with single color, we don't want rgb array artist = self.plotmethod(colors="k") assert artist.cmap.colors[0] == "k" # 2 colors, will repeat every other tick: artist = self.plotmethod(colors=["k", "b"]) assert artist.cmap.colors[:2] == ["k", "b"] # 4 colors, will repeat every 4th tick: artist = self.darray.plot.contour( levels=[-0.5, 0.0, 0.5, 1.0], colors=["k", "r", "w", "b"] ) assert artist.cmap.colors[:5] == ["k", "r", "w", "b"] # type: ignore[attr-defined,unused-ignore] # the last color is now under "over" assert self._color_as_tuple(artist.cmap.get_over()) == (0.0, 0.0, 1.0) def test_colors_np_levels(self) -> None: # https://github.com/pydata/xarray/issues/3284 levels = np.array([-0.5, 0.0, 0.5, 1.0]) artist = self.darray.plot.contour(levels=levels, colors=["k", "r", "w", "b"]) cmap = artist.cmap assert isinstance(cmap, mpl.colors.ListedColormap) assert artist.cmap.colors[:5] == ["k", "r", "w", "b"] # type: ignore[attr-defined,unused-ignore] # the last color is now under "over" assert self._color_as_tuple(cmap.get_over()) == (0.0, 0.0, 1.0) def test_cmap_and_color_both(self) -> None: with pytest.raises(ValueError): self.plotmethod(colors="k", cmap="RdBu") def list_of_colors_in_cmap_raises_error(self) -> None: with pytest.raises(ValueError, match=r"list of colors"): self.plotmethod(cmap=["k", "b"]) @pytest.mark.slow def test_2d_coord_names(self) -> None: self.plotmethod(x="x2d", y="y2d") # make sure labels came out ok ax = plt.gca() assert "x2d" == ax.get_xlabel() assert "y2d" == ax.get_ylabel() def test_single_level(self) -> None: # this used to raise an error, but not anymore since # add_colorbar defaults to false self.plotmethod(levels=[0.1]) self.plotmethod(levels=1) def test_colormap_norm(self) -> None: # Using a norm should plot a nice colorbar and look consistent with pcolormesh. 
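        # Sketch of what is asserted below (comments only): with a LogNorm the
        # colorbar locator is expected to produce decade ticks, roughly
        #
        #     artist = darray.plot.contour(norm=mpl.colors.LogNorm(0.1, 1e1),
        #                                  add_colorbar=True)
        #     artist.colorbar.locator()   # approximately [0.01, 0.1, 1, 10]
        #
        # The call is wrapped in pytest.warns(UserWarning) further down.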
norm = mpl.colors.LogNorm(0.1, 1e1) with pytest.warns(UserWarning): artist = self.plotmethod(norm=norm, add_colorbar=True) actual = artist.colorbar.locator() expected = np.array([0.01, 0.1, 1.0, 10.0]) np.testing.assert_allclose(actual, expected) class TestPcolormesh(Common2dMixin, PlotTestCase): plotfunc = staticmethod(xplt.pcolormesh) def test_primitive_artist_returned(self) -> None: artist = self.plotmethod() assert isinstance(artist, mpl.collections.QuadMesh) def test_everything_plotted(self) -> None: artist = self.plotmethod() assert artist.get_array().size == self.darray.size @pytest.mark.slow def test_2d_coord_names(self) -> None: self.plotmethod(x="x2d", y="y2d") # make sure labels came out ok ax = plt.gca() assert "x2d" == ax.get_xlabel() assert "y2d" == ax.get_ylabel() def test_dont_infer_interval_breaks_for_cartopy(self) -> None: # Regression for GH 781 ax = plt.gca() # Simulate a Cartopy Axis ax.projection = True # type: ignore[attr-defined] artist = self.plotmethod(x="x2d", y="y2d", ax=ax) assert isinstance(artist, mpl.collections.QuadMesh) # Let cartopy handle the axis limits and artist size arr = artist.get_array() assert arr is not None assert arr.size <= self.darray.size class TestPcolormeshLogscale(PlotTestCase): """ Test pcolormesh axes when x and y are in logscale """ plotfunc = staticmethod(xplt.pcolormesh) @pytest.fixture(autouse=True) def setUp(self) -> None: self.boundaries = (-1, 9, -4, 3) shape = (8, 11) x = np.logspace(self.boundaries[0], self.boundaries[1], shape[1]) y = np.logspace(self.boundaries[2], self.boundaries[3], shape[0]) da = DataArray( easy_array(shape, start=-1), dims=["y", "x"], coords={"y": y, "x": x}, name="testvar", ) self.darray = da def test_interval_breaks_logspace(self) -> None: """ Check if the outer vertices of the pcolormesh are the expected values Checks bugfix for #5333 """ artist = self.darray.plot.pcolormesh(xscale="log", yscale="log") # Grab the coordinates of the vertices of the Patches x_vertices = [p.vertices[:, 0] for p in artist.properties()["paths"]] y_vertices = [p.vertices[:, 1] for p in artist.properties()["paths"]] # Get the maximum and minimum values for each set of vertices xmin, xmax = np.min(x_vertices), np.max(x_vertices) ymin, ymax = np.min(y_vertices), np.max(y_vertices) # Check if they are equal to 10 to the power of the outer value of its # corresponding axis plus or minus the interval in the logspace log_interval = 0.5 np.testing.assert_allclose(xmin, 10 ** (self.boundaries[0] - log_interval)) np.testing.assert_allclose(xmax, 10 ** (self.boundaries[1] + log_interval)) np.testing.assert_allclose(ymin, 10 ** (self.boundaries[2] - log_interval)) np.testing.assert_allclose(ymax, 10 ** (self.boundaries[3] + log_interval)) @pytest.mark.slow class TestImshow(Common2dMixin, PlotTestCase): plotfunc = staticmethod(xplt.imshow) @pytest.mark.xfail( reason=( "Failing inside matplotlib. Should probably be fixed upstream because " "other plot functions can handle it. 
" "Remove this test when it works, already in Common2dMixin" ) ) def test_dates_are_concise(self) -> None: import matplotlib.dates as mdates time = pd.date_range("2000-01-01", "2000-01-10") a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)]) self.plotfunc(a, x="t") ax = plt.gca() assert isinstance(ax.xaxis.get_major_locator(), mdates.AutoDateLocator) assert isinstance(ax.xaxis.get_major_formatter(), mdates.ConciseDateFormatter) @pytest.mark.slow def test_imshow_called(self) -> None: # Having both statements ensures the test works properly assert not self.imshow_called(self.darray.plot.contourf) assert self.imshow_called(self.darray.plot.imshow) def test_xy_pixel_centered(self) -> None: self.darray.plot.imshow(yincrease=False) assert np.allclose([-0.5, 14.5], plt.gca().get_xlim()) assert np.allclose([9.5, -0.5], plt.gca().get_ylim()) def test_default_aspect_is_auto(self) -> None: self.darray.plot.imshow() assert "auto" == plt.gca().get_aspect() @pytest.mark.slow def test_cannot_change_mpl_aspect(self) -> None: with pytest.raises(ValueError, match=r"not available in xarray"): self.darray.plot.imshow(aspect="equal") # with numbers we fall back to fig control self.darray.plot.imshow(size=5, aspect=2) assert "auto" == plt.gca().get_aspect() assert tuple(plt.gcf().get_size_inches()) == (10, 5) @pytest.mark.slow def test_primitive_artist_returned(self) -> None: artist = self.plotmethod() assert isinstance(artist, mpl.image.AxesImage) @pytest.mark.slow @requires_seaborn def test_seaborn_palette_needs_levels(self) -> None: with pytest.raises(ValueError): self.plotmethod(cmap="husl") def test_2d_coord_names(self) -> None: with pytest.raises(ValueError, match=r"requires 1D coordinates"): self.plotmethod(x="x2d", y="y2d") def test_plot_rgb_image(self) -> None: DataArray( easy_array((10, 15, 3), start=0), dims=["y", "x", "band"] ).plot.imshow() assert 0 == len(find_possible_colorbars()) def test_plot_rgb_image_explicit(self) -> None: DataArray( easy_array((10, 15, 3), start=0), dims=["y", "x", "band"] ).plot.imshow(y="y", x="x", rgb="band") assert 0 == len(find_possible_colorbars()) def test_plot_rgb_faceted(self) -> None: DataArray( easy_array((2, 2, 10, 15, 3), start=0), dims=["a", "b", "y", "x", "band"] ).plot.imshow(row="a", col="b") assert 0 == len(find_possible_colorbars()) def test_plot_rgba_image_transposed(self) -> None: # We can handle the color axis being in any position DataArray( easy_array((4, 10, 15), start=0), dims=["band", "y", "x"] ).plot.imshow() def test_warns_ambiguous_dim(self) -> None: arr = DataArray(easy_array((3, 3, 3)), dims=["y", "x", "band"]) with pytest.warns(UserWarning): arr.plot.imshow() # but doesn't warn if dimensions specified arr.plot.imshow(rgb="band") arr.plot.imshow(x="x", y="y") def test_rgb_errors_too_many_dims(self) -> None: arr = DataArray(easy_array((3, 3, 3, 3)), dims=["y", "x", "z", "band"]) with pytest.raises(ValueError): arr.plot.imshow(rgb="band") def test_rgb_errors_bad_dim_sizes(self) -> None: arr = DataArray(easy_array((5, 5, 5)), dims=["y", "x", "band"]) with pytest.raises(ValueError): arr.plot.imshow(rgb="band") @pytest.mark.parametrize( ["vmin", "vmax", "robust"], [ (-1, None, False), (None, 2, False), (-1, 1, False), (0, 0, False), (0, None, True), (None, -1, True), ], ) def test_normalize_rgb_imshow( self, vmin: float | None, vmax: float | None, robust: bool ) -> None: da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4)) arr = da.plot.imshow(vmin=vmin, vmax=vmax, robust=robust).get_array() assert arr is not None 
assert 0 <= arr.min() <= arr.max() <= 1 def test_normalize_rgb_one_arg_error(self) -> None: da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4)) # If passed one bound that implies all out of range, error: for vmin, vmax in ((None, -1), (2, None)): with pytest.raises(ValueError): da.plot.imshow(vmin=vmin, vmax=vmax) # If passed two that's just moving the range, *not* an error: for vmin2, vmax2 in ((-1.2, -1), (2, 2.1)): da.plot.imshow(vmin=vmin2, vmax=vmax2) @pytest.mark.parametrize("dtype", [np.uint8, np.int8, np.int16]) def test_imshow_rgb_values_in_valid_range(self, dtype) -> None: da = DataArray(np.arange(75, dtype=dtype).reshape((5, 5, 3))) _, ax = plt.subplots() out = da.plot.imshow(ax=ax).get_array() assert out is not None actual_dtype = out.dtype assert actual_dtype is not None assert actual_dtype == np.uint8 assert (out[..., :3] == da.values).all() # Compare without added alpha assert (out[..., -1] == 255).all() # Compare alpha @pytest.mark.filterwarnings("ignore:Several dimensions of this array") def test_regression_rgb_imshow_dim_size_one(self) -> None: # Regression: https://github.com/pydata/xarray/issues/1966 da = DataArray(easy_array((1, 3, 3), start=0.0, stop=1.0)) da.plot.imshow() def test_origin_overrides_xyincrease(self) -> None: da = DataArray(easy_array((3, 2)), coords=[[-2, 0, 2], [-1, 1]]) with figure_context(): da.plot.imshow(origin="upper") assert plt.xlim()[0] < 0 assert plt.ylim()[1] < 0 with figure_context(): da.plot.imshow(origin="lower") assert plt.xlim()[0] < 0 assert plt.ylim()[0] < 0 class TestSurface(Common2dMixin, PlotTestCase): plotfunc = staticmethod(xplt.surface) subplot_kws = {"projection": "3d"} @pytest.mark.xfail( reason=( "Failing inside matplotlib. Should probably be fixed upstream because " "other plot functions can handle it. 
" "Remove this test when it works, already in Common2dMixin" ) ) def test_dates_are_concise(self) -> None: import matplotlib.dates as mdates time = pd.date_range("2000-01-01", "2000-01-10") a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)]) self.plotfunc(a, x="t") ax = plt.gca() assert isinstance(ax.xaxis.get_major_locator(), mdates.AutoDateLocator) assert isinstance(ax.xaxis.get_major_formatter(), mdates.ConciseDateFormatter) def test_primitive_artist_returned(self) -> None: artist = self.plotmethod() assert isinstance(artist, mpl_toolkits.mplot3d.art3d.Poly3DCollection) @pytest.mark.slow def test_2d_coord_names(self) -> None: self.plotmethod(x="x2d", y="y2d") # make sure labels came out ok ax = plt.gca() assert isinstance(ax, mpl_toolkits.mplot3d.axes3d.Axes3D) assert "x2d" == ax.get_xlabel() assert "y2d" == ax.get_ylabel() assert f"{self.darray.long_name} [{self.darray.units}]" == ax.get_zlabel() def test_xyincrease_false_changes_axes(self) -> None: # Does not make sense for surface plots pytest.skip("does not make sense for surface plots") def test_xyincrease_true_changes_axes(self) -> None: # Does not make sense for surface plots pytest.skip("does not make sense for surface plots") def test_can_pass_in_axis(self) -> None: self.pass_in_axis(self.plotmethod, subplot_kw={"projection": "3d"}) def test_default_cmap(self) -> None: # Does not make sense for surface plots with default arguments pytest.skip("does not make sense for surface plots") def test_diverging_color_limits(self) -> None: # Does not make sense for surface plots with default arguments pytest.skip("does not make sense for surface plots") def test_colorbar_kwargs(self) -> None: # Does not make sense for surface plots with default arguments pytest.skip("does not make sense for surface plots") def test_cmap_and_color_both(self) -> None: # Does not make sense for surface plots with default arguments pytest.skip("does not make sense for surface plots") def test_seaborn_palette_as_cmap(self) -> None: # seaborn does not work with mpl_toolkits.mplot3d with pytest.raises(ValueError): super().test_seaborn_palette_as_cmap() # Need to modify this test for surface(), because all subplots should have labels, # not just left and bottom @pytest.mark.filterwarnings("ignore:tight_layout cannot") def test_convenient_facetgrid(self) -> None: a = easy_array((10, 15, 4)) d = DataArray(a, dims=["y", "x", "z"]) g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2) # type: ignore[arg-type] # https://github.com/python/mypy/issues/15015 assert_array_equal(g.axs.shape, [2, 2]) for (_y, _x), ax in np.ndenumerate(g.axs): assert ax.has_data() assert "y" == ax.get_ylabel() assert "x" == ax.get_xlabel() # Inferring labels g = self.plotfunc(d, col="z", col_wrap=2) # type: ignore[arg-type] # https://github.com/python/mypy/issues/15015 assert_array_equal(g.axs.shape, [2, 2]) for (_y, _x), ax in np.ndenumerate(g.axs): assert ax.has_data() assert "y" == ax.get_ylabel() assert "x" == ax.get_xlabel() def test_viridis_cmap(self) -> None: return super().test_viridis_cmap() def test_can_change_default_cmap(self) -> None: return super().test_can_change_default_cmap() def test_colorbar_default_label(self) -> None: return super().test_colorbar_default_label() def test_facetgrid_map_only_appends_mappables(self) -> None: return super().test_facetgrid_map_only_appends_mappables() class TestFacetGrid(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: d = easy_array((10, 15, 3)) self.darray = DataArray(d, dims=["y", "x", "z"], 
coords={"z": ["a", "b", "c"]}) self.g = xplt.FacetGrid(self.darray, col="z") @pytest.mark.slow def test_no_args(self) -> None: self.g.map_dataarray(xplt.contourf, "x", "y") # Don't want colorbar labeled with 'None' alltxt = text_in_fig() assert "None" not in alltxt for ax in self.g.axs.flat: assert ax.has_data() @pytest.mark.slow def test_names_appear_somewhere(self) -> None: self.darray.name = "testvar" self.g.map_dataarray(xplt.contourf, "x", "y") for k, ax in zip("abc", self.g.axs.flat, strict=True): assert f"z = {k}" == ax.get_title() alltxt = text_in_fig() assert self.darray.name in alltxt for label in ["x", "y"]: assert label in alltxt @pytest.mark.slow def test_text_not_super_long(self) -> None: self.darray.coords["z"] = [100 * letter for letter in "abc"] g = xplt.FacetGrid(self.darray, col="z") g.map_dataarray(xplt.contour, "x", "y") alltxt = text_in_fig() maxlen = max(len(txt) for txt in alltxt) assert maxlen < 50 t0 = g.axs[0, 0].get_title() assert t0.endswith("...") @pytest.mark.slow def test_colorbar(self) -> None: vmin = self.darray.values.min() vmax = self.darray.values.max() expected = np.array((vmin, vmax)) self.g.map_dataarray(xplt.imshow, "x", "y") for image in plt.gcf().findobj(mpl.image.AxesImage): assert isinstance(image, mpl.image.AxesImage) clim = np.array(image.get_clim()) assert np.allclose(expected, clim) assert 1 == len(find_possible_colorbars()) def test_colorbar_scatter(self) -> None: ds = Dataset({"a": (("x", "y"), np.arange(4).reshape(2, 2))}) fg: xplt.FacetGrid = ds.plot.scatter(x="a", y="a", row="x", hue="a") cbar = fg.cbar assert cbar is not None assert hasattr(cbar, "vmin") assert cbar.vmin == 0 assert hasattr(cbar, "vmax") assert cbar.vmax == 3 @pytest.mark.slow def test_empty_cell(self) -> None: g = xplt.FacetGrid(self.darray, col="z", col_wrap=2) g.map_dataarray(xplt.imshow, "x", "y") bottomright = g.axs[-1, -1] assert not bottomright.has_data() assert not bottomright.get_visible() @pytest.mark.slow def test_norow_nocol_error(self) -> None: with pytest.raises(ValueError, match=r"[Rr]ow"): xplt.FacetGrid(self.darray) @pytest.mark.slow def test_groups(self) -> None: self.g.map_dataarray(xplt.imshow, "x", "y") upperleft_dict = self.g.name_dicts[0, 0] upperleft_array = self.darray.loc[upperleft_dict] z0 = self.darray.isel(z=0) assert_equal(upperleft_array, z0) @pytest.mark.slow def test_float_index(self) -> None: self.darray.coords["z"] = [0.1, 0.2, 0.4] g = xplt.FacetGrid(self.darray, col="z") g.map_dataarray(xplt.imshow, "x", "y") @pytest.mark.slow def test_nonunique_index_error(self) -> None: self.darray.coords["z"] = [0.1, 0.2, 0.2] with pytest.raises(ValueError, match=r"[Uu]nique"): xplt.FacetGrid(self.darray, col="z") @pytest.mark.slow def test_robust(self) -> None: z = np.zeros((20, 20, 2)) darray = DataArray(z, dims=["y", "x", "z"]) darray[:, :, 1] = 1 darray[2, 0, 0] = -1000 darray[3, 0, 0] = 1000 g = xplt.FacetGrid(darray, col="z") g.map_dataarray(xplt.imshow, "x", "y", robust=True) # Color limits should be 0, 1 # The largest number displayed in the figure should be less than 21 numbers = set() alltxt = text_in_fig() for txt in alltxt: with contextlib.suppress(ValueError): numbers.add(float(txt)) largest = max(abs(x) for x in numbers) assert largest < 21 @pytest.mark.slow def test_can_set_vmin_vmax(self) -> None: vmin, vmax = 50.0, 1000.0 expected = np.array((vmin, vmax)) self.g.map_dataarray(xplt.imshow, "x", "y", vmin=vmin, vmax=vmax) for image in plt.gcf().findobj(mpl.image.AxesImage): assert isinstance(image, mpl.image.AxesImage) clim = 
np.array(image.get_clim()) assert np.allclose(expected, clim) @pytest.mark.slow def test_vmin_vmax_equal(self) -> None: # regression test for GH3734 fg = self.g.map_dataarray(xplt.imshow, "x", "y", vmin=50, vmax=50) for mappable in fg._mappables: assert mappable.norm.vmin != mappable.norm.vmax @pytest.mark.slow @pytest.mark.filterwarnings("ignore") def test_can_set_norm(self) -> None: norm = mpl.colors.SymLogNorm(0.1) self.g.map_dataarray(xplt.imshow, "x", "y", norm=norm) for image in plt.gcf().findobj(mpl.image.AxesImage): assert isinstance(image, mpl.image.AxesImage) assert image.norm is norm @pytest.mark.slow def test_figure_size(self) -> None: assert_array_equal(self.g.fig.get_size_inches(), (10, 3)) g = xplt.FacetGrid(self.darray, col="z", size=6) assert_array_equal(g.fig.get_size_inches(), (19, 6)) g = self.darray.plot.imshow(col="z", size=6) assert_array_equal(g.fig.get_size_inches(), (19, 6)) g = xplt.FacetGrid(self.darray, col="z", size=4, aspect=0.5) assert_array_equal(g.fig.get_size_inches(), (7, 4)) g = xplt.FacetGrid(self.darray, col="z", figsize=(9, 4)) assert_array_equal(g.fig.get_size_inches(), (9, 4)) with pytest.raises(ValueError, match=r"cannot provide both"): g = xplt.plot(self.darray, row=2, col="z", figsize=(6, 4), size=6) with pytest.raises(ValueError, match=r"Can't use"): g = xplt.plot(self.darray, row=2, col="z", ax=plt.gca(), size=6) @pytest.mark.slow def test_num_ticks(self) -> None: nticks = 99 maxticks = nticks + 1 self.g.map_dataarray(xplt.imshow, "x", "y") self.g.set_ticks(max_xticks=nticks, max_yticks=nticks) for ax in self.g.axs.flat: xticks = len(ax.get_xticks()) yticks = len(ax.get_yticks()) assert xticks <= maxticks assert yticks <= maxticks assert xticks >= nticks / 2.0 assert yticks >= nticks / 2.0 @pytest.mark.slow def test_map(self) -> None: assert self.g._finalized is False self.g.map(plt.contourf, "x", "y", ...) assert self.g._finalized is True self.g.map(lambda: None) @pytest.mark.slow def test_map_dataset(self) -> None: g = xplt.FacetGrid(self.darray.to_dataset(name="foo"), col="z") g.map(plt.contourf, "x", "y", "foo") alltxt = text_in_fig() for label in ["x", "y"]: assert label in alltxt # everything has a label assert "None" not in alltxt # colorbar can't be inferred automatically assert "foo" not in alltxt assert 0 == len(find_possible_colorbars()) g.add_colorbar(label="colors!") assert "colors!" 
in text_in_fig() assert 1 == len(find_possible_colorbars()) @pytest.mark.slow def test_set_axis_labels(self) -> None: g = self.g.map_dataarray(xplt.contourf, "x", "y") g.set_axis_labels("longitude", "latitude") alltxt = text_in_fig() for label in ["longitude", "latitude"]: assert label in alltxt @pytest.mark.slow def test_facetgrid_colorbar(self) -> None: a = easy_array((10, 15, 4)) d = DataArray(a, dims=["y", "x", "z"], name="foo") d.plot.imshow(x="x", y="y", col="z") assert 1 == len(find_possible_colorbars()) d.plot.imshow(x="x", y="y", col="z", add_colorbar=True) assert 1 == len(find_possible_colorbars()) d.plot.imshow(x="x", y="y", col="z", add_colorbar=False) assert 0 == len(find_possible_colorbars()) @pytest.mark.slow def test_facetgrid_polar(self) -> None: # test if polar projection in FacetGrid does not raise an exception self.darray.plot.pcolormesh( col="z", subplot_kws=dict(projection="polar"), sharex=False, sharey=False ) @pytest.mark.slow def test_units_appear_somewhere(self) -> None: # assign coordinates to all dims so we can test for units darray = self.darray.assign_coords( {"x": np.arange(self.darray.x.size), "y": np.arange(self.darray.y.size)} ) darray.x.attrs["units"] = "x_unit" darray.y.attrs["units"] = "y_unit" g = xplt.FacetGrid(darray, col="z") g.map_dataarray(xplt.contourf, "x", "y") alltxt = text_in_fig() # unit should appear as e.g. 'x [x_unit]' for unit_name in ["x_unit", "y_unit"]: assert unit_name in "".join(alltxt) @pytest.mark.filterwarnings("ignore:tight_layout cannot") class TestFacetGrid4d(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: a = easy_array((10, 15, 3, 2)) darray = DataArray(a, dims=["y", "x", "col", "row"]) darray.coords["col"] = np.array( ["col" + str(x) for x in darray.coords["col"].values] ) darray.coords["row"] = np.array( ["row" + str(x) for x in darray.coords["row"].values] ) self.darray = darray def test_title_kwargs(self) -> None: g = xplt.FacetGrid(self.darray, col="col", row="row") g.set_titles(template="{value}", weight="bold") # Rightmost column titles should be bold for label, ax in zip( self.darray.coords["row"].values, g.axs[:, -1], strict=True ): assert property_in_axes_text("weight", "bold", label, ax) # Top row titles should be bold for label, ax in zip( self.darray.coords["col"].values, g.axs[0, :], strict=True ): assert property_in_axes_text("weight", "bold", label, ax) @pytest.mark.slow def test_default_labels(self) -> None: g = xplt.FacetGrid(self.darray, col="col", row="row") assert (2, 3) == g.axs.shape g.map_dataarray(xplt.imshow, "x", "y") # Rightmost column should be labeled for label, ax in zip( self.darray.coords["row"].values, g.axs[:, -1], strict=True ): assert substring_in_axes(label, ax) # Top row should be labeled for label, ax in zip( self.darray.coords["col"].values, g.axs[0, :], strict=True ): assert substring_in_axes(label, ax) # ensure that row & col labels can be changed g.set_titles("abc={value}") for label, ax in zip( self.darray.coords["row"].values, g.axs[:, -1], strict=True ): assert substring_in_axes(f"abc={label}", ax) # previous labels were "row=row0" etc. assert substring_not_in_axes("row=", ax) for label, ax in zip( self.darray.coords["col"].values, g.axs[0, :], strict=True ): assert substring_in_axes(f"abc={label}", ax) # previous labels were "col=row0" etc. 
assert substring_not_in_axes("col=", ax) @pytest.mark.filterwarnings("ignore:tight_layout cannot") class TestFacetedLinePlotsLegend(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: self.darray = xr.tutorial.scatter_example_dataset() def test_legend_labels(self) -> None: fg = self.darray.A.plot.line(col="x", row="w", hue="z") all_legend_labels = [t.get_text() for t in fg.figlegend.texts] # labels in legend should be ['0', '1', '2', '3'] assert sorted(all_legend_labels) == ["0", "1", "2", "3"] @pytest.mark.filterwarnings("ignore:tight_layout cannot") class TestFacetedLinePlots(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: self.darray = DataArray( np.random.randn(10, 6, 3, 4), dims=["hue", "x", "col", "row"], coords=[range(10), range(6), range(3), ["A", "B", "C", "C++"]], name="Cornelius Ortega the 1st", ) self.darray.hue.name = "huename" self.darray.hue.attrs["units"] = "hunits" self.darray.x.attrs["units"] = "xunits" self.darray.col.attrs["units"] = "colunits" self.darray.row.attrs["units"] = "rowunits" def test_facetgrid_shape(self) -> None: g = self.darray.plot(row="row", col="col", hue="hue") # type: ignore[call-arg] assert g.axs.shape == (len(self.darray.row), len(self.darray.col)) g = self.darray.plot(row="col", col="row", hue="hue") # type: ignore[call-arg] assert g.axs.shape == (len(self.darray.col), len(self.darray.row)) def test_unnamed_args(self) -> None: g = self.darray.plot.line("o--", row="row", col="col", hue="hue") lines = [ q for q in g.axs.flat[0].get_children() if isinstance(q, mpl.lines.Line2D) ] # passing 'o--' as argument should set marker and linestyle assert lines[0].get_marker() == "o" assert lines[0].get_linestyle() == "--" def test_default_labels(self) -> None: g = self.darray.plot(row="row", col="col", hue="hue") # type: ignore[call-arg] # Rightmost column should be labeled for label, ax in zip( self.darray.coords["row"].values, g.axs[:, -1], strict=True ): assert substring_in_axes(label, ax) # Top row should be labeled for label, ax in zip( self.darray.coords["col"].values, g.axs[0, :], strict=True ): assert substring_in_axes(str(label), ax) # Leftmost column should have array name for ax in g.axs[:, 0]: assert substring_in_axes(str(self.darray.name), ax) def test_test_empty_cell(self) -> None: g = ( self.darray.isel(row=1) # type: ignore[call-arg] .drop_vars("row") .plot(col="col", hue="hue", col_wrap=2) ) bottomright = g.axs[-1, -1] assert not bottomright.has_data() assert not bottomright.get_visible() def test_set_axis_labels(self) -> None: g = self.darray.plot(row="row", col="col", hue="hue") # type: ignore[call-arg] g.set_axis_labels("longitude", "latitude") alltxt = text_in_fig() assert "longitude" in alltxt assert "latitude" in alltxt def test_axes_in_faceted_plot(self) -> None: with pytest.raises(ValueError): self.darray.plot.line(row="row", col="col", x="x", ax=plt.axes()) def test_figsize_and_size(self) -> None: with pytest.raises(ValueError): self.darray.plot.line(row="row", col="col", x="x", size=3, figsize=(4, 3)) def test_wrong_num_of_dimensions(self) -> None: with pytest.raises(ValueError): self.darray.plot(row="row", hue="hue") # type: ignore[call-arg] self.darray.plot.line(row="row", hue="hue") @requires_matplotlib class TestDatasetQuiverPlots(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: das = [ DataArray( np.random.randn(3, 3, 4, 4), dims=["x", "y", "row", "col"], coords=[range(k) for k in [3, 3, 4, 4]], ) for _ in [1, 2] ] ds = Dataset({"u": das[0], "v": das[1]}) 
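# (Descriptive note added for clarity; not part of the upstream fixture.) The
# remaining setup attaches units metadata and a derived magnitude,
# ds["mag"] = np.hypot(ds.u, ds.v), which the quiver tests below pass as the
# hue variable. Hedged sketch of the call they exercise:
#     ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u", v="v", hue="mag")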
ds.x.attrs["units"] = "xunits" ds.y.attrs["units"] = "yunits" ds.col.attrs["units"] = "colunits" ds.row.attrs["units"] = "rowunits" ds.u.attrs["units"] = "uunits" ds.v.attrs["units"] = "vunits" ds["mag"] = np.hypot(ds.u, ds.v) self.ds = ds def test_quiver(self) -> None: with figure_context(): hdl = self.ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u", v="v") assert isinstance(hdl, mpl.quiver.Quiver) with pytest.raises(ValueError, match=r"specify x, y, u, v"): self.ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u") with pytest.raises(ValueError, match=r"hue_style"): self.ds.isel(row=0, col=0).plot.quiver( x="x", y="y", u="u", v="v", hue="mag", hue_style="discrete" ) def test_facetgrid(self) -> None: with figure_context(): fg = self.ds.plot.quiver( x="x", y="y", u="u", v="v", row="row", col="col", scale=1, hue="mag" ) for handle in fg._mappables: assert isinstance(handle, mpl.quiver.Quiver) assert fg.quiverkey is not None assert "uunits" in fg.quiverkey.text.get_text() with figure_context(): fg = self.ds.plot.quiver( x="x", y="y", u="u", v="v", row="row", col="col", scale=1, hue="mag", add_guide=False, ) assert fg.quiverkey is None with pytest.raises(ValueError, match=r"Please provide scale"): self.ds.plot.quiver(x="x", y="y", u="u", v="v", row="row", col="col") @pytest.mark.parametrize( "add_guide, hue_style, legend, colorbar", [ (None, None, False, True), (False, None, False, False), (True, None, False, True), (True, "continuous", False, True), ], ) def test_add_guide(self, add_guide, hue_style, legend, colorbar) -> None: meta_data = _infer_meta_data( self.ds, x="x", y="y", hue="mag", hue_style=hue_style, add_guide=add_guide, funcname="quiver", ) assert meta_data["add_legend"] is legend assert meta_data["add_colorbar"] is colorbar @requires_matplotlib class TestDatasetStreamplotPlots(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: das = [ DataArray( np.random.randn(3, 4, 2, 2), dims=["x", "y", "row", "col"], coords=[range(k) for k in [3, 4, 2, 2]], ) for _ in [1, 2] ] ds = Dataset({"u": das[0], "v": das[1]}) ds.x.attrs["units"] = "xunits" ds.y.attrs["units"] = "yunits" ds.col.attrs["units"] = "colunits" ds.row.attrs["units"] = "rowunits" ds.u.attrs["units"] = "uunits" ds.v.attrs["units"] = "vunits" ds["mag"] = np.hypot(ds.u, ds.v) self.ds = ds def test_streamline(self) -> None: with figure_context(): hdl = self.ds.isel(row=0, col=0).plot.streamplot(x="x", y="y", u="u", v="v") assert isinstance(hdl, mpl.collections.LineCollection) with pytest.raises(ValueError, match=r"specify x, y, u, v"): self.ds.isel(row=0, col=0).plot.streamplot(x="x", y="y", u="u") with pytest.raises(ValueError, match=r"hue_style"): self.ds.isel(row=0, col=0).plot.streamplot( x="x", y="y", u="u", v="v", hue="mag", hue_style="discrete" ) def test_facetgrid(self) -> None: with figure_context(): fg = self.ds.plot.streamplot( x="x", y="y", u="u", v="v", row="row", col="col", hue="mag" ) for handle in fg._mappables: assert isinstance(handle, mpl.collections.LineCollection) with figure_context(): fg = self.ds.plot.streamplot( x="x", y="y", u="u", v="v", row="row", col="col", hue="mag", add_guide=False, ) @requires_matplotlib class TestDatasetScatterPlots(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: das = [ DataArray( np.random.randn(3, 3, 4, 4), dims=["x", "row", "col", "hue"], coords=[range(k) for k in [3, 3, 4, 4]], ) for _ in [1, 2] ] ds = Dataset({"A": das[0], "B": das[1]}) ds.hue.name = "huename" ds.hue.attrs["units"] = "hunits" ds.x.attrs["units"] = "xunits" 
ds.col.attrs["units"] = "colunits" ds.row.attrs["units"] = "rowunits" ds.A.attrs["units"] = "Aunits" ds.B.attrs["units"] = "Bunits" self.ds = ds def test_accessor(self) -> None: from xarray.plot.accessor import DatasetPlotAccessor assert Dataset.plot is DatasetPlotAccessor assert isinstance(self.ds.plot, DatasetPlotAccessor) @pytest.mark.parametrize( "add_guide, hue_style, legend, colorbar", [ (None, None, False, True), (False, None, False, False), (True, None, False, True), (True, "continuous", False, True), (False, "discrete", False, False), (True, "discrete", True, False), ], ) def test_add_guide( self, add_guide: bool | None, hue_style: Literal["continuous", "discrete"] | None, legend: bool, colorbar: bool, ) -> None: meta_data = _infer_meta_data( self.ds, x="A", y="B", hue="hue", hue_style=hue_style, add_guide=add_guide, funcname="scatter", ) assert meta_data["add_legend"] is legend assert meta_data["add_colorbar"] is colorbar def test_facetgrid_shape(self) -> None: g = self.ds.plot.scatter(x="A", y="B", row="row", col="col") assert g.axs.shape == (len(self.ds.row), len(self.ds.col)) g = self.ds.plot.scatter(x="A", y="B", row="col", col="row") assert g.axs.shape == (len(self.ds.col), len(self.ds.row)) def test_default_labels(self) -> None: g = self.ds.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") # Top row should be labeled for label, ax in zip(self.ds.coords["col"].values, g.axs[0, :], strict=True): assert substring_in_axes(str(label), ax) # Bottom row should have name of x array name and units for ax in g.axs[-1, :]: assert ax.get_xlabel() == "A [Aunits]" # Leftmost column should have name of y array name and units for ax in g.axs[:, 0]: assert ax.get_ylabel() == "B [Bunits]" def test_axes_in_faceted_plot(self) -> None: with pytest.raises(ValueError): self.ds.plot.scatter(x="A", y="B", row="row", ax=plt.axes()) def test_figsize_and_size(self) -> None: with pytest.raises(ValueError): self.ds.plot.scatter(x="A", y="B", row="row", size=3, figsize=(4, 3)) @pytest.mark.parametrize( "x, y, hue, add_legend, add_colorbar, error_type", [ pytest.param( "A", "The Spanish Inquisition", None, None, None, KeyError, id="bad_y" ), pytest.param( "The Spanish Inquisition", "B", None, None, True, ValueError, id="bad_x" ), ], ) def test_bad_args( self, x: Hashable, y: Hashable, hue: Hashable | None, add_legend: bool | None, add_colorbar: bool | None, error_type: type[Exception], ) -> None: with pytest.raises(error_type): self.ds.plot.scatter( x=x, y=y, hue=hue, add_legend=add_legend, add_colorbar=add_colorbar ) def test_datetime_hue(self) -> None: ds2 = self.ds.copy() # TODO: Currently plots as categorical, should it behave as numerical? ds2["hue"] = pd.date_range("2000-1-1", periods=4) ds2.plot.scatter(x="A", y="B", hue="hue") ds2["hue"] = pd.timedelta_range("-1D", periods=4, freq="D") ds2.plot.scatter(x="A", y="B", hue="hue") def test_facetgrid_hue_style(self) -> None: ds2 = self.ds.copy() # Numbers plots as continuous: g = ds2.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") assert isinstance(g._mappables[-1], mpl.collections.PathCollection) # Datetimes plots as categorical: # TODO: Currently plots as categorical, should it behave as numerical? 
ds2["hue"] = pd.date_range("2000-1-1", periods=4) g = ds2.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") assert isinstance(g._mappables[-1], mpl.collections.PathCollection) # Strings plots as categorical: ds2["hue"] = ["a", "a", "b", "b"] g = ds2.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") assert isinstance(g._mappables[-1], mpl.collections.PathCollection) @pytest.mark.parametrize( ["x", "y", "hue", "markersize"], [("A", "B", "x", "col"), ("x", "row", "A", "B")], ) def test_scatter( self, x: Hashable, y: Hashable, hue: Hashable, markersize: Hashable ) -> None: self.ds.plot.scatter(x=x, y=y, hue=hue, markersize=markersize) with pytest.raises(ValueError, match=r"u, v"): self.ds.plot.scatter(x=x, y=y, u="col", v="row") def test_non_numeric_legend(self) -> None: ds2 = self.ds.copy() ds2["hue"] = ["a", "b", "c", "d"] pc = ds2.plot.scatter(x="A", y="B", markersize="hue") axes = pc.axes assert axes is not None # should make a discrete legend assert hasattr(axes, "legend_") assert axes.legend_ is not None def test_legend_labels(self) -> None: # regression test for #4126: incorrect legend labels ds2 = self.ds.copy() ds2["hue"] = ["a", "a", "b", "b"] pc = ds2.plot.scatter(x="A", y="B", markersize="hue") axes = pc.axes assert axes is not None legend = axes.get_legend() assert legend is not None actual = [t.get_text() for t in legend.texts] expected = ["hue", "a", "b"] assert actual == expected def test_legend_labels_facetgrid(self) -> None: ds2 = self.ds.copy() ds2["hue"] = ["d", "a", "c", "b"] g = ds2.plot.scatter(x="A", y="B", hue="hue", markersize="x", col="col") legend = g.figlegend assert legend is not None actual = tuple(t.get_text() for t in legend.texts) expected = ( "x [xunits]", "$\\mathdefault{0}$", "$\\mathdefault{1}$", "$\\mathdefault{2}$", ) assert actual == expected def test_add_legend_by_default(self) -> None: sc = self.ds.plot.scatter(x="A", y="B", hue="hue") fig = sc.figure assert fig is not None assert len(fig.axes) == 2 class TestDatetimePlot(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: """ Create a DataArray with a time-axis that contains datetime objects. 
""" month = np.arange(1, 13, 1) data = np.sin(2 * np.pi * month / 12.0) times = pd.date_range(start="2017-01-01", freq="MS", periods=12) darray = DataArray(data, dims=["time"], coords=[times]) self.darray = darray def test_datetime_line_plot(self) -> None: # test if line plot raises no Exception self.darray.plot.line() def test_datetime_units(self) -> None: # test that matplotlib-native datetime works: fig, ax = plt.subplots() ax.plot(self.darray["time"], self.darray) # Make sure only mpl converters are used, use type() so only # mpl.dates.AutoDateLocator passes and no other subclasses: assert type(ax.xaxis.get_major_locator()) is mpl.dates.AutoDateLocator def test_datetime_plot1d(self) -> None: # Test that matplotlib-native datetime works: p = self.darray.plot.line() ax = p[0].axes # Make sure only mpl converters are used, use type() so only # mpl.dates.AutoDateLocator passes and no other subclasses: assert type(ax.xaxis.get_major_locator()) is mpl.dates.AutoDateLocator def test_datetime_plot2d(self) -> None: # Test that matplotlib-native datetime works: da = DataArray( np.arange(3 * 4).reshape(3, 4), dims=("x", "y"), coords={ "x": [1, 2, 3], "y": [np.datetime64(f"2000-01-{x:02d}") for x in range(1, 5)], }, ) p = da.plot.pcolormesh() ax = p.axes assert ax is not None # Make sure only mpl converters are used, use type() so only # mpl.dates.AutoDateLocator passes and no other subclasses: assert type(ax.xaxis.get_major_locator()) is mpl.dates.AutoDateLocator @pytest.mark.filterwarnings("ignore:setting an array element with a sequence") @requires_cftime @pytest.mark.skipif(not has_nc_time_axis, reason="nc_time_axis is not installed") class TestCFDatetimePlot(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: """ Create a DataArray with a time-axis that contains cftime.datetime objects. """ # case for 1d array data = np.random.rand(4, 12) time = xr.date_range( start="2017", periods=12, freq="1ME", calendar="noleap", use_cftime=True ) darray = DataArray(data, dims=["x", "time"]) darray.coords["time"] = time self.darray = darray def test_cfdatetime_line_plot(self) -> None: self.darray.isel(x=0).plot.line() def test_cfdatetime_pcolormesh_plot(self) -> None: self.darray.plot.pcolormesh() def test_cfdatetime_contour_plot(self) -> None: self.darray.plot.contour() @requires_cftime @pytest.mark.skipif(has_nc_time_axis, reason="nc_time_axis is installed") class TestNcAxisNotInstalled(PlotTestCase): @pytest.fixture(autouse=True) def setUp(self) -> None: """ Create a DataArray with a time-axis that contains cftime.datetime objects. 
""" month = np.arange(1, 13, 1) data = np.sin(2 * np.pi * month / 12.0) darray = DataArray(data, dims=["time"]) darray.coords["time"] = xr.date_range( start="2017", periods=12, freq="1ME", calendar="noleap", use_cftime=True ) self.darray = darray def test_ncaxis_notinstalled_line_plot(self) -> None: with pytest.raises(ImportError, match=r"optional `nc-time-axis`"): self.darray.plot.line() @requires_matplotlib class TestAxesKwargs: @pytest.fixture(params=[1, 2, 3]) def data_array(self, request) -> DataArray: """ Return a simple DataArray """ dims = request.param if dims == 1: return DataArray(easy_array((10,))) elif dims == 2: return DataArray(easy_array((10, 3))) elif dims == 3: return DataArray(easy_array((10, 3, 2))) else: raise ValueError(f"No DataArray implemented for {dims=}.") @pytest.fixture(params=[1, 2]) def data_array_logspaced(self, request) -> DataArray: """ Return a simple DataArray with logspaced coordinates """ dims = request.param if dims == 1: return DataArray( np.arange(7), dims=("x",), coords={"x": np.logspace(-3, 3, 7)} ) elif dims == 2: return DataArray( np.arange(16).reshape(4, 4), dims=("y", "x"), coords={"x": np.logspace(-1, 2, 4), "y": np.logspace(-5, -1, 4)}, ) else: raise ValueError(f"No DataArray implemented for {dims=}.") @pytest.mark.parametrize("xincrease", [True, False]) def test_xincrease_kwarg(self, data_array, xincrease) -> None: with figure_context(): data_array.plot(xincrease=xincrease) assert plt.gca().xaxis_inverted() == (not xincrease) @pytest.mark.parametrize("yincrease", [True, False]) def test_yincrease_kwarg(self, data_array, yincrease) -> None: with figure_context(): data_array.plot(yincrease=yincrease) assert plt.gca().yaxis_inverted() == (not yincrease) @pytest.mark.parametrize("xscale", ["linear", "logit", "symlog"]) def test_xscale_kwarg(self, data_array, xscale) -> None: with figure_context(): data_array.plot(xscale=xscale) assert plt.gca().get_xscale() == xscale @pytest.mark.parametrize("yscale", ["linear", "logit", "symlog"]) def test_yscale_kwarg(self, data_array, yscale) -> None: with figure_context(): data_array.plot(yscale=yscale) assert plt.gca().get_yscale() == yscale def test_xscale_log_kwarg(self, data_array_logspaced) -> None: xscale = "log" with figure_context(): data_array_logspaced.plot(xscale=xscale) assert plt.gca().get_xscale() == xscale def test_yscale_log_kwarg(self, data_array_logspaced) -> None: yscale = "log" with figure_context(): data_array_logspaced.plot(yscale=yscale) assert plt.gca().get_yscale() == yscale def test_xlim_kwarg(self, data_array) -> None: with figure_context(): expected = (0.0, 1000.0) data_array.plot(xlim=[0, 1000]) assert plt.gca().get_xlim() == expected def test_ylim_kwarg(self, data_array) -> None: with figure_context(): data_array.plot(ylim=[0, 1000]) expected = (0.0, 1000.0) assert plt.gca().get_ylim() == expected def test_xticks_kwarg(self, data_array) -> None: with figure_context(): data_array.plot(xticks=np.arange(5)) expected = np.arange(5).tolist() assert_array_equal(plt.gca().get_xticks(), expected) def test_yticks_kwarg(self, data_array) -> None: with figure_context(): data_array.plot(yticks=np.arange(5)) expected = np.arange(5) assert_array_equal(plt.gca().get_yticks(), expected) @requires_matplotlib @pytest.mark.parametrize("plotfunc", ["pcolormesh", "contourf", "contour"]) def test_plot_transposed_nondim_coord(plotfunc) -> None: x = np.linspace(0, 10, 101) h = np.linspace(3, 7, 101) s = np.linspace(0, 1, 51) z = s[:, np.newaxis] * h[np.newaxis, :] da = xr.DataArray( np.sin(x) * 
np.cos(z), dims=["s", "x"], coords={"x": x, "s": s, "z": (("s", "x"), z), "zt": (("x", "s"), z.T)}, ) with figure_context(): getattr(da.plot, plotfunc)(x="x", y="zt") with figure_context(): getattr(da.plot, plotfunc)(x="zt", y="x") @requires_matplotlib @pytest.mark.parametrize("plotfunc", ["pcolormesh", "imshow"]) def test_plot_transposes_properly(plotfunc) -> None: # test that we aren't mistakenly transposing when the 2 dimensions have equal sizes. da = xr.DataArray([np.sin(2 * np.pi / 10 * np.arange(10))] * 10, dims=("y", "x")) with figure_context(): hdl = getattr(da.plot, plotfunc)(x="x", y="y") # get_array doesn't work for contour, contourf. It returns the colormap intervals. # pcolormesh returns 1D array but imshow returns a 2D array so it is necessary # to ravel() on the LHS assert_array_equal(hdl.get_array().ravel(), da.to_masked_array().ravel()) @requires_matplotlib def test_facetgrid_single_contour() -> None: # regression test for GH3569 x, y = np.meshgrid(np.arange(12), np.arange(12)) z = xr.DataArray(np.hypot(x, y)) z2 = xr.DataArray(np.hypot(x, y) + 1) ds = xr.concat([z, z2], dim="time") ds["time"] = [0, 1] with figure_context(): ds.plot.contour(col="time", levels=[4], colors=["k"]) @requires_matplotlib def test_get_axis_raises() -> None: # test get_axis raises an error if trying to do invalid things # cannot provide both ax and figsize with pytest.raises(ValueError, match="both `figsize` and `ax`"): get_axis(figsize=[4, 4], size=None, aspect=None, ax="something") # type: ignore[arg-type] # cannot provide both ax and size with pytest.raises(ValueError, match="both `size` and `ax`"): get_axis(figsize=None, size=200, aspect=4 / 3, ax="something") # type: ignore[arg-type] # cannot provide both size and figsize with pytest.raises(ValueError, match="both `figsize` and `size`"): get_axis(figsize=[4, 4], size=200, aspect=None, ax=None) # cannot provide aspect and size with pytest.raises(ValueError, match="`aspect` argument without `size`"): get_axis(figsize=None, size=None, aspect=4 / 3, ax=None) # cannot provide axis and subplot_kws with pytest.raises(ValueError, match="cannot use subplot_kws with existing ax"): get_axis(figsize=None, size=None, aspect=None, ax=1, something_else=5) # type: ignore[arg-type] @requires_matplotlib @pytest.mark.parametrize( ["figsize", "size", "aspect", "ax", "kwargs"], [ pytest.param((3, 2), None, None, False, {}, id="figsize"), pytest.param( (3.5, 2.5), None, None, False, {"label": "test"}, id="figsize_kwargs" ), pytest.param(None, 5, None, False, {}, id="size"), pytest.param(None, 5.5, None, False, {"label": "test"}, id="size_kwargs"), pytest.param(None, 5, 1, False, {}, id="size+aspect"), pytest.param(None, 5, "auto", False, {}, id="auto_aspect"), pytest.param(None, 5, "equal", False, {}, id="equal_aspect"), pytest.param(None, None, None, True, {}, id="ax"), pytest.param(None, None, None, False, {}, id="default"), pytest.param(None, None, None, False, {"label": "test"}, id="default_kwargs"), ], ) def test_get_axis( figsize: tuple[float, float] | None, size: float | None, aspect: float | None, ax: bool, kwargs: dict[str, Any], ) -> None: with figure_context(): inp_ax = plt.axes() if ax else None out_ax = get_axis( figsize=figsize, size=size, aspect=aspect, ax=inp_ax, **kwargs ) assert isinstance(out_ax, mpl.axes.Axes) @requires_matplotlib @requires_cartopy @pytest.mark.parametrize( ["figsize", "size", "aspect"], [ pytest.param((3, 2), None, None, id="figsize"), pytest.param(None, 5, None, id="size"), pytest.param(None, 5, 1, id="size+aspect"), 
pytest.param(None, None, None, id="default"), ], ) def test_get_axis_cartopy( figsize: tuple[float, float] | None, size: float | None, aspect: float | None ) -> None: kwargs = {"projection": cartopy.crs.PlateCarree()} with figure_context(): out_ax = get_axis(figsize=figsize, size=size, aspect=aspect, **kwargs) assert isinstance(out_ax, cartopy.mpl.geoaxes.GeoAxesSubplot) @requires_matplotlib def test_get_axis_current() -> None: with figure_context(): _, ax = plt.subplots() out_ax = get_axis() assert ax is out_ax @requires_matplotlib def test_maybe_gca() -> None: with figure_context(): ax = _maybe_gca(aspect=1) assert isinstance(ax, mpl.axes.Axes) assert ax.get_aspect() == 1 with figure_context(): # create figure without axes plt.figure() ax = _maybe_gca(aspect=1) assert isinstance(ax, mpl.axes.Axes) assert ax.get_aspect() == 1 with figure_context(): existing_axes = plt.axes() ax = _maybe_gca(aspect=1) # reuses the existing axes assert existing_axes == ax # kwargs are ignored when reusing axes assert ax.get_aspect() == "auto" @requires_matplotlib @pytest.mark.parametrize( "x, y, z, hue, markersize, row, col, add_legend, add_colorbar", [ ("A", "B", None, None, None, None, None, None, None), ("B", "A", None, "w", None, None, None, True, None), ("A", "B", None, "y", "x", None, None, True, True), ("A", "B", "z", None, None, None, None, None, None), ("B", "A", "z", "w", None, None, None, True, None), ("A", "B", "z", "y", "x", None, None, True, True), ("A", "B", "z", "y", "x", "w", None, True, True), ], ) def test_datarray_scatter( x, y, z, hue, markersize, row, col, add_legend, add_colorbar ) -> None: """Test datarray scatter. Merge with TestPlot1D eventually.""" ds = xr.tutorial.scatter_example_dataset() extra_coords = [v for v in [x, hue, markersize] if v is not None] # Base coords: coords = dict(ds.coords) # Add extra coords to the DataArray: coords.update({v: ds[v] for v in extra_coords}) darray = xr.DataArray(ds[y], coords=coords) with figure_context(): darray.plot.scatter( x=x, z=z, hue=hue, markersize=markersize, add_legend=add_legend, add_colorbar=add_colorbar, ) @requires_dask @requires_matplotlib @pytest.mark.parametrize( "plotfunc", ["scatter"], ) def test_dataarray_not_loading_inplace(plotfunc: str) -> None: ds = xr.tutorial.scatter_example_dataset() ds = ds.chunk() with figure_context(): getattr(ds.A.plot, plotfunc)(x="x") from dask.array import Array assert isinstance(ds.A.data, Array) @requires_matplotlib def test_assert_valid_xy() -> None: ds = xr.tutorial.scatter_example_dataset() darray = ds.A # x is valid and should not error: _assert_valid_xy(darray=darray, xy="x", name="x") # None should be valid as well even though it isn't in the valid list: _assert_valid_xy(darray=darray, xy=None, name="x") # A hashable that is not valid should error: with pytest.raises(ValueError, match="x must be one of"): _assert_valid_xy(darray=darray, xy="error_now", name="x") @requires_matplotlib @pytest.mark.parametrize( "val", [pytest.param([], id="empty"), pytest.param(0, id="scalar")] ) @pytest.mark.parametrize( "method", [ "__call__", "line", "step", "contour", "contourf", "hist", "imshow", "pcolormesh", "scatter", "surface", ], ) def test_plot_empty_raises(val: list | float, method: str) -> None: da = xr.DataArray(val) with pytest.raises(TypeError, match="No numeric data"): getattr(da.plot, method)() @requires_matplotlib def test_facetgrid_axes_raises_deprecation_warning() -> None: with pytest.warns( DeprecationWarning, match=( "self.axes is deprecated since 2022.11 in order to align with " 
"matplotlibs plt.subplots, use self.axs instead." ), ): with figure_context(): ds = xr.tutorial.scatter_example_dataset() g = ds.plot.scatter(x="A", y="B", col="x") _ = g.axes @requires_matplotlib def test_plot1d_default_rcparams() -> None: import matplotlib as mpl ds = xr.tutorial.scatter_example_dataset(seed=42) with figure_context(): # scatter markers should by default have white edgecolor to better # see overlapping markers: fig, ax = plt.subplots(1, 1) ds.plot.scatter(x="A", y="B", marker="o", ax=ax) actual: np.ndarray = mpl.colors.to_rgba_array("w") expected: np.ndarray = ax.collections[0].get_edgecolor() # type: ignore[assignment] np.testing.assert_allclose(actual, expected) # Facetgrids should have the default value as well: fg = ds.plot.scatter(x="A", y="B", col="x", marker="o") ax = fg.axs.ravel()[0] actual = mpl.colors.to_rgba_array("w") expected = ax.collections[0].get_edgecolor() # type: ignore[assignment,unused-ignore] np.testing.assert_allclose(actual, expected) # scatter should not emit any warnings when using unfilled markers: with assert_no_warnings(): fig, ax = plt.subplots(1, 1) ds.plot.scatter(x="A", y="B", ax=ax, marker="x") # Prioritize edgecolor argument over default plot1d values: fig, ax = plt.subplots(1, 1) ds.plot.scatter(x="A", y="B", marker="o", ax=ax, edgecolor="k") actual = mpl.colors.to_rgba_array("k") expected = ax.collections[0].get_edgecolor() # type: ignore[assignment] np.testing.assert_allclose(actual, expected) @requires_matplotlib def test_plot1d_filtered_nulls() -> None: ds = xr.tutorial.scatter_example_dataset(seed=42) y = ds.y.where(ds.y > 0.2) expected = y.notnull().sum().item() with figure_context(): pc = y.plot.scatter() actual = pc.get_offsets().shape[0] assert expected == actual @requires_matplotlib def test_9155() -> None: # A test for types from issue #9155 with figure_context(): data = xr.DataArray([1, 2, 3], dims=["x"]) fig, ax = plt.subplots(ncols=1, nrows=1) data.plot(ax=ax) # type: ignore[call-arg] @requires_matplotlib def test_temp_dataarray() -> None: from xarray.plot.dataset_plot import _temp_dataarray x = np.arange(1, 4) y = np.arange(4, 6) var1 = np.arange(x.size * y.size).reshape((x.size, y.size)) var2 = np.arange(x.size * y.size).reshape((x.size, y.size)) ds = xr.Dataset( { "var1": (["x", "y"], var1), "var2": (["x", "y"], 2 * var2), "var3": (["x"], 3 * x), }, coords={ "x": x, "y": y, "model": np.arange(7), }, ) # No broadcasting: y_ = "var1" locals_ = {"x": "var2"} da = _temp_dataarray(ds, y_, locals_) assert da.shape == (3, 2) # Broadcast from 1 to 2dim: y_ = "var3" locals_ = {"x": "var1"} da = _temp_dataarray(ds, y_, locals_) assert da.shape == (3, 2) # Ignore non-valid coord kwargs: y_ = "var3" locals_ = dict(x="x", extend="var2") da = _temp_dataarray(ds, y_, locals_) assert da.shape == (3,) xarray-2025.09.0/xarray/tests/test_plugins.py000066400000000000000000000233111505620616400211060ustar00rootroot00000000000000from __future__ import annotations import sys from importlib.metadata import EntryPoint, EntryPoints from itertools import starmap from unittest import mock import pytest from xarray.backends import common, plugins from xarray.tests import ( has_h5netcdf, has_netCDF4, has_pydap, has_scipy, has_zarr, ) # Do not import list_engines here, this will break the lazy tests importlib_metadata_mock = "importlib.metadata" class DummyBackendEntrypointArgs(common.BackendEntrypoint): def open_dataset(filename_or_obj, *args): # type: ignore[override] pass class DummyBackendEntrypointKwargs(common.BackendEntrypoint): def 
open_dataset(filename_or_obj, **kwargs): # type: ignore[override] pass class DummyBackendEntrypoint1(common.BackendEntrypoint): def open_dataset(self, filename_or_obj, *, decoder): # type: ignore[override] pass class DummyBackendEntrypoint2(common.BackendEntrypoint): def open_dataset(self, filename_or_obj, *, decoder): # type: ignore[override] pass @pytest.fixture def dummy_duplicated_entrypoints(): specs = [ ["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"], ["engine1", "xarray.tests.test_plugins:backend_2", "xarray.backends"], ["engine2", "xarray.tests.test_plugins:backend_1", "xarray.backends"], ["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"], ] eps = list(starmap(EntryPoint, specs)) return eps @pytest.mark.filterwarnings("ignore:Found") def test_remove_duplicates(dummy_duplicated_entrypoints) -> None: with pytest.warns(RuntimeWarning): entrypoints = plugins.remove_duplicates(dummy_duplicated_entrypoints) assert len(entrypoints) == 2 def test_broken_plugin() -> None: broken_backend = EntryPoint( "broken_backend", "xarray.tests.test_plugins:backend_1", "xarray.backends", ) with pytest.warns(RuntimeWarning) as record: _ = plugins.build_engines(EntryPoints([broken_backend])) assert len(record) == 1 message = str(record[0].message) assert "Engine 'broken_backend'" in message def test_remove_duplicates_warnings(dummy_duplicated_entrypoints) -> None: with pytest.warns(RuntimeWarning) as record: _ = plugins.remove_duplicates(dummy_duplicated_entrypoints) assert len(record) == 2 message0 = str(record[0].message) message1 = str(record[1].message) assert "entrypoints" in message0 assert "entrypoints" in message1 @mock.patch( f"{importlib_metadata_mock}.EntryPoint.load", mock.MagicMock(return_value=None) ) def test_backends_dict_from_pkg() -> None: specs = [ ["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"], ["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"], ] entrypoints = list(starmap(EntryPoint, specs)) engines = plugins.backends_dict_from_pkg(entrypoints) assert len(engines) == 2 assert engines.keys() == {"engine1", "engine2"} def test_set_missing_parameters() -> None: backend_1 = DummyBackendEntrypoint1 backend_2 = DummyBackendEntrypoint2 backend_2.open_dataset_parameters = ("filename_or_obj",) engines = {"engine_1": backend_1, "engine_2": backend_2} plugins.set_missing_parameters(engines) assert len(engines) == 2 assert backend_1.open_dataset_parameters == ("filename_or_obj", "decoder") assert backend_2.open_dataset_parameters == ("filename_or_obj",) backend_kwargs = DummyBackendEntrypointKwargs backend_kwargs.open_dataset_parameters = ("filename_or_obj", "decoder") plugins.set_missing_parameters({"engine": backend_kwargs}) assert backend_kwargs.open_dataset_parameters == ("filename_or_obj", "decoder") backend_args = DummyBackendEntrypointArgs backend_args.open_dataset_parameters = ("filename_or_obj", "decoder") plugins.set_missing_parameters({"engine": backend_args}) assert backend_args.open_dataset_parameters == ("filename_or_obj", "decoder") # reset backend_1.open_dataset_parameters = None backend_1.open_dataset_parameters = None backend_kwargs.open_dataset_parameters = None backend_args.open_dataset_parameters = None def test_set_missing_parameters_raise_error() -> None: backend = DummyBackendEntrypointKwargs with pytest.raises(TypeError): plugins.set_missing_parameters({"engine": backend}) backend_args = DummyBackendEntrypointArgs with pytest.raises(TypeError): plugins.set_missing_parameters({"engine": 
backend_args}) @mock.patch( f"{importlib_metadata_mock}.EntryPoint.load", mock.MagicMock(return_value=DummyBackendEntrypoint1), ) def test_build_engines() -> None: dummy_pkg_entrypoint = EntryPoint( "dummy", "xarray.tests.test_plugins:backend_1", "xarray_backends" ) backend_entrypoints = plugins.build_engines(EntryPoints([dummy_pkg_entrypoint])) assert isinstance(backend_entrypoints["dummy"], DummyBackendEntrypoint1) assert backend_entrypoints["dummy"].open_dataset_parameters == ( "filename_or_obj", "decoder", ) @mock.patch( f"{importlib_metadata_mock}.EntryPoint.load", mock.MagicMock(return_value=DummyBackendEntrypoint1), ) def test_build_engines_sorted() -> None: dummy_pkg_entrypoints = EntryPoints( [ EntryPoint( "dummy2", "xarray.tests.test_plugins:backend_1", "xarray.backends" ), EntryPoint( "dummy1", "xarray.tests.test_plugins:backend_1", "xarray.backends" ), ] ) backend_entrypoints = list(plugins.build_engines(dummy_pkg_entrypoints)) indices = [] for be in plugins.NETCDF_BACKENDS_ORDER: try: index = backend_entrypoints.index(be) backend_entrypoints.pop(index) indices.append(index) except ValueError: pass assert set(indices) < {0, -1} assert list(backend_entrypoints) == sorted(backend_entrypoints) @mock.patch( "xarray.backends.plugins.list_engines", mock.MagicMock(return_value={"dummy": DummyBackendEntrypointArgs()}), ) def test_no_matching_engine_found() -> None: with pytest.raises(ValueError, match=r"did not find a match in any"): plugins.guess_engine("not-valid") with pytest.raises(ValueError, match=r"found the following matches with the input"): plugins.guess_engine("foo.nc") @mock.patch( "xarray.backends.plugins.list_engines", mock.MagicMock(return_value={}), ) def test_engines_not_installed() -> None: with pytest.raises(ValueError, match=r"xarray is unable to open"): plugins.guess_engine("not-valid") with pytest.raises(ValueError, match=r"found the following matches with the input"): plugins.guess_engine("foo.nc") def test_lazy_import() -> None: """Test that some modules are imported in a lazy manner. When importing xarray these should not be imported as well. Only when running code for the first time that requires them. 
""" deny_list = [ "cubed", "cupy", # "dask", # TODO: backends.locks is not lazy yet :( "dask.array", "dask.distributed", "flox", "h5netcdf", "matplotlib", "nc_time_axis", "netCDF4", "numbagg", "pint", "pydap", "scipy", "sparse", "zarr", ] # ensure that none of the above modules has been imported before modules_backup = {} for pkg in list(sys.modules.keys()): for mod in deny_list + ["xarray"]: if pkg.startswith(mod): modules_backup[pkg] = sys.modules[pkg] del sys.modules[pkg] break try: import xarray # noqa: F401 from xarray.backends import list_engines list_engines() # ensure that none of the modules that are supposed to be # lazy loaded are loaded when importing xarray is_imported = set() for pkg in sys.modules: for mod in deny_list: if pkg.startswith(mod): is_imported.add(mod) break assert len(is_imported) == 0, ( f"{is_imported} have been imported but should be lazy" ) finally: # restore original sys.modules.update(modules_backup) def test_list_engines() -> None: from xarray.backends import list_engines engines = list_engines() assert list_engines.cache_info().currsize == 1 assert ("scipy" in engines) == has_scipy assert ("h5netcdf" in engines) == has_h5netcdf assert ("netcdf4" in engines) == has_netCDF4 assert ("pydap" in engines) == has_pydap assert ("zarr" in engines) == has_zarr assert "store" in engines def test_refresh_engines() -> None: from xarray.backends import list_engines, refresh_engines EntryPointMock1 = mock.MagicMock() EntryPointMock1.name = "test1" EntryPointMock1.load.return_value = DummyBackendEntrypoint1 return_value = EntryPoints([EntryPointMock1]) with mock.patch("xarray.backends.plugins.entry_points", return_value=return_value): list_engines.cache_clear() engines = list_engines() assert "test1" in engines assert isinstance(engines["test1"], DummyBackendEntrypoint1) EntryPointMock2 = mock.MagicMock() EntryPointMock2.name = "test2" EntryPointMock2.load.return_value = DummyBackendEntrypoint2 return_value2 = EntryPoints([EntryPointMock2]) with mock.patch("xarray.backends.plugins.entry_points", return_value=return_value2): refresh_engines() engines = list_engines() assert "test1" not in engines assert "test2" in engines assert isinstance(engines["test2"], DummyBackendEntrypoint2) # reset to original refresh_engines() xarray-2025.09.0/xarray/tests/test_print_versions.py000066400000000000000000000003101505620616400225030ustar00rootroot00000000000000from __future__ import annotations import io import xarray def test_show_versions() -> None: f = io.StringIO() xarray.show_versions(file=f) assert "INSTALLED VERSIONS" in f.getvalue() xarray-2025.09.0/xarray/tests/test_range_index.py000066400000000000000000000250171505620616400217150ustar00rootroot00000000000000import numpy as np import pandas as pd import pytest import xarray as xr from xarray.indexes import PandasIndex, RangeIndex from xarray.tests import assert_allclose, assert_equal, assert_identical def create_dataset_arange( start: float, stop: float, step: float, dim: str = "x" ) -> xr.Dataset: index = RangeIndex.arange(start, stop, step, dim=dim) return xr.Dataset(coords=xr.Coordinates.from_xindex(index)) @pytest.mark.parametrize( "args,kwargs", [ ((10.0,), {}), ((), {"stop": 10.0}), ( ( 2.0, 10.0, ), {}, ), ((2.0,), {"stop": 10.0}), ((), {"start": 2.0, "stop": 10.0}), ((2.0, 10.0, 2.0), {}), ((), {"start": 2.0, "stop": 10.0, "step": 2.0}), ], ) def test_range_index_arange(args, kwargs) -> None: index = RangeIndex.arange(*args, **kwargs, dim="x") actual = xr.Coordinates.from_xindex(index) expected = 
xr.Coordinates({"x": np.arange(*args, **kwargs)}) assert_equal(actual, expected, check_default_indexes=False) def test_range_index_arange_error() -> None: with pytest.raises(TypeError, match=".*requires stop to be specified"): RangeIndex.arange(dim="x") def test_range_index_arange_start_as_stop() -> None: # Weird although probably very unlikely case where only `start` is given # as keyword argument, which is interpreted as `stop`. # This has been fixed in numpy (https://github.com/numpy/numpy/pull/17878) # using Python C API. In pure Python it's more tricky as there's no easy way to know # whether a value has been passed as positional or keyword argument. # Note: `pandas.RangeIndex` constructor still has this weird behavior. index = RangeIndex.arange(start=10.0, dim="x") actual = xr.Coordinates.from_xindex(index) expected = xr.Coordinates({"x": np.arange(10.0)}) assert_equal(actual, expected, check_default_indexes=False) def test_range_index_arange_properties() -> None: index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x") assert index.start == 0.0 assert index.stop == 1.0 assert index.step == 0.1 def test_range_index_linspace() -> None: index = RangeIndex.linspace(0.0, 1.0, num=10, endpoint=False, dim="x") actual = xr.Coordinates.from_xindex(index) expected = xr.Coordinates({"x": np.linspace(0.0, 1.0, num=10, endpoint=False)}) assert_equal(actual, expected, check_default_indexes=False) assert index.start == 0.0 assert index.stop == 1.0 assert index.step == 0.1 index = RangeIndex.linspace(0.0, 1.0, num=11, endpoint=True, dim="x") actual = xr.Coordinates.from_xindex(index) expected = xr.Coordinates({"x": np.linspace(0.0, 1.0, num=11, endpoint=True)}) assert_allclose(actual, expected, check_default_indexes=False) assert index.start == 0.0 assert index.stop == 1.1 assert index.step == 0.1 def test_range_index_dtype() -> None: index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x", dtype=np.float32) coords = xr.Coordinates.from_xindex(index) assert coords["x"].dtype == np.dtype(np.float32) def test_range_index_set_xindex() -> None: coords = xr.Coordinates({"x": np.arange(0.0, 1.0, 0.1)}, indexes={}) ds = xr.Dataset(coords=coords) with pytest.raises( NotImplementedError, match="cannot create.*RangeIndex.*existing coordinate" ): ds.set_xindex("x", RangeIndex) def test_range_index_isel() -> None: ds = create_dataset_arange(0.0, 1.0, 0.1) # slicing actual = ds.isel(x=slice(None)) assert_identical(actual, ds, check_default_indexes=False) actual = ds.isel(x=slice(1, None)) expected = create_dataset_arange(0.1, 1.0, 0.1) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(None, 2)) expected = create_dataset_arange(0.0, 0.2, 0.1) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(1, 3)) expected = create_dataset_arange(0.1, 0.3, 0.1) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(None, None, 2)) expected = create_dataset_arange(0.0, 1.0, 0.2) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(None, None, -1)) expected = create_dataset_arange(0.9, -0.1, -0.1) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(None, 4, -1)) expected = create_dataset_arange(0.9, 0.4, -0.1) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(8, 4, -1)) expected = create_dataset_arange(0.8, 0.4, -0.1) assert_identical(actual, expected, check_default_indexes=False) actual = ds.isel(x=slice(8, None, -1)) 
expected = create_dataset_arange(0.8, -0.1, -0.1) assert_identical(actual, expected, check_default_indexes=False) # https://github.com/pydata/xarray/issues/10441 ds2 = create_dataset_arange(0.0, 3.0, 0.1) actual = ds2.isel(x=slice(4, None, 3)) expected = create_dataset_arange(0.4, 3.0, 0.3) assert_identical(actual, expected, check_default_indexes=False) # scalar actual = ds.isel(x=0) expected = xr.Dataset(coords={"x": 0.0}) assert_identical(actual, expected) # outer indexing with arbitrary array values actual = ds.isel(x=[0, 2]) expected = xr.Dataset(coords={"x": [0.0, 0.2]}) assert_identical(actual, expected) assert isinstance(actual.xindexes["x"], PandasIndex) # fancy indexing with 1-d Variable actual = ds.isel(x=xr.Variable("y", [0, 2])) expected = xr.Dataset(coords={"x": ("y", [0.0, 0.2])}).set_xindex("x") assert_identical(actual, expected, check_default_indexes=False) assert isinstance(actual.xindexes["x"], PandasIndex) # fancy indexing with n-d Variable actual = ds.isel(x=xr.Variable(("u", "v"), [[0, 0], [2, 2]])) expected = xr.Dataset(coords={"x": (("u", "v"), [[0.0, 0.0], [0.2, 0.2]])}) assert_identical(actual, expected) def test_range_index_empty_slice() -> None: """Test that empty slices of RangeIndex are printable and preserve step. Regression test for https://github.com/pydata/xarray/issues/10547 """ # Test with linspace n = 30 step = 1 da = xr.DataArray(np.zeros(n), dims=["x"]) da = da.assign_coords( xr.Coordinates.from_xindex(RangeIndex.linspace(0, (n - 1) * step, n, dim="x")) ) # This should not raise ZeroDivisionError sub = da.isel(x=slice(0)) assert sub.sizes["x"] == 0 # Test that it's printable repr_str = repr(sub) assert "RangeIndex" in repr_str assert "step=1" in repr_str # Test with different step values index = RangeIndex.arange(0, 10, 2.5, dim="y") da2 = xr.DataArray(np.zeros(4), dims=["y"]) da2 = da2.assign_coords(xr.Coordinates.from_xindex(index)) empty = da2.isel(y=slice(0)) # Should preserve step assert empty.sizes["y"] == 0 range_index_y = empty._indexes["y"] assert isinstance(range_index_y, RangeIndex) assert range_index_y.step == 2.5 # Test that it's printable repr_str2 = repr(empty) assert "RangeIndex" in repr_str2 assert "step=2.5" in repr_str2 # Test negative step index3 = RangeIndex.arange(10, 0, -1, dim="z") da3 = xr.DataArray(np.zeros(10), dims=["z"]) da3 = da3.assign_coords(xr.Coordinates.from_xindex(index3)) empty3 = da3.isel(z=slice(0)) assert empty3.sizes["z"] == 0 range_index_z = empty3._indexes["z"] assert isinstance(range_index_z, RangeIndex) assert range_index_z.step == -1.0 # Test that it's printable repr_str3 = repr(empty3) assert "RangeIndex" in repr_str3 assert "step=-1" in repr_str3 def test_range_index_sel() -> None: ds = create_dataset_arange(0.0, 1.0, 0.1) # start-stop slice actual = ds.sel(x=slice(0.12, 0.28), method="nearest") expected = create_dataset_arange(0.1, 0.3, 0.1) assert_identical(actual, expected, check_default_indexes=False) # start-stop-step slice actual = ds.sel(x=slice(0.0, 1.0, 0.2), method="nearest") expected = ds.isel(x=range(0, 10, 2)) assert_identical(actual, expected, check_default_indexes=False) # basic indexing actual = ds.sel(x=0.52, method="nearest") expected = xr.Dataset(coords={"x": 0.5}) assert_allclose(actual, expected) actual = ds.sel(x=0.58, method="nearest") expected = xr.Dataset(coords={"x": 0.6}) assert_allclose(actual, expected) # 1-d array indexing actual = ds.sel(x=[0.52, 0.58], method="nearest") expected = xr.Dataset(coords={"x": [0.5, 0.6]}) assert_allclose(actual, expected) actual = 
ds.sel(x=xr.Variable("y", [0.52, 0.58]), method="nearest") expected = xr.Dataset(coords={"x": ("y", [0.5, 0.6])}).set_xindex("x") assert_allclose(actual, expected, check_default_indexes=False) actual = ds.sel(x=xr.DataArray([0.52, 0.58], dims="y"), method="nearest") expected = xr.Dataset(coords={"x": ("y", [0.5, 0.6])}).set_xindex("x") assert_allclose(actual, expected, check_default_indexes=False) with pytest.raises(ValueError, match="RangeIndex only supports.*method.*nearest"): ds.sel(x=0.1) with pytest.raises(ValueError, match="RangeIndex doesn't support.*tolerance"): ds.sel(x=0.1, method="nearest", tolerance=1e-3) def test_range_index_to_pandas_index() -> None: ds = create_dataset_arange(0.0, 1.0, 0.1) actual = ds.indexes["x"] expected = pd.Index(np.arange(0.0, 1.0, 0.1)) assert actual.equals(expected) def test_range_index_rename() -> None: index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x") ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index)) actual = ds.rename_vars(x="y") idx = RangeIndex.arange(0.0, 1.0, 0.1, coord_name="y", dim="x") expected = xr.Dataset(coords=xr.Coordinates.from_xindex(idx)) assert_identical(actual, expected, check_default_indexes=False) actual = ds.rename_dims(x="y") idx = RangeIndex.arange(0.0, 1.0, 0.1, coord_name="x", dim="y") expected = xr.Dataset(coords=xr.Coordinates.from_xindex(idx)) assert_identical(actual, expected, check_default_indexes=False) def test_range_index_repr() -> None: index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x") actual = repr(index) expected = ( "RangeIndex (start=0, stop=1, step=0.1, size=10, coord_name='x', dim='x')" ) assert actual == expected def test_range_index_repr_inline() -> None: index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x") actual = index._repr_inline_(max_width=70) expected = "RangeIndex (start=0, stop=1, step=0.1)" assert actual == expected xarray-2025.09.0/xarray/tests/test_rolling.py000066400000000000000000001071051505620616400210770ustar00rootroot00000000000000from __future__ import annotations from typing import Any import numpy as np import pandas as pd import pytest import xarray as xr from xarray import DataArray, Dataset, set_options from xarray.tests import ( assert_allclose, assert_equal, assert_identical, has_dask, requires_dask, requires_dask_ge_2024_11_0, requires_numbagg, ) pytestmark = [ pytest.mark.filterwarnings("error:Mean of empty slice"), pytest.mark.filterwarnings("error:All-NaN (slice|axis) encountered"), ] @pytest.mark.parametrize("func", ["mean", "sum"]) @pytest.mark.parametrize("min_periods", [1, 10]) def test_cumulative(d, func, min_periods) -> None: # One dim result = getattr(d.cumulative("z", min_periods=min_periods), func)() expected = getattr(d.rolling(z=d["z"].size, min_periods=min_periods), func)() assert_identical(result, expected) # Multiple dim result = getattr(d.cumulative(["z", "x"], min_periods=min_periods), func)() expected = getattr( d.rolling(z=d["z"].size, x=d["x"].size, min_periods=min_periods), func, )() assert_identical(result, expected) def test_cumulative_vs_cum(d) -> None: result = d.cumulative("z").sum() expected = d.cumsum("z") # cumsum drops the coord of the dimension; cumulative doesn't expected = expected.assign_coords(z=result["z"]) assert_identical(result, expected) class TestDataArrayRolling: @pytest.mark.parametrize("da", (1, 2), indirect=True) @pytest.mark.parametrize("center", [True, False]) @pytest.mark.parametrize("size", [1, 2, 3, 7]) def test_rolling_iter(self, da: DataArray, center: bool, size: int) -> None: rolling_obj = da.rolling(time=size, 
center=center) rolling_obj_mean = rolling_obj.mean() assert len(rolling_obj.window_labels) == len(da["time"]) assert_identical(rolling_obj.window_labels, da["time"]) for i, (label, window_da) in enumerate(rolling_obj): assert label == da["time"].isel(time=i) actual = rolling_obj_mean.isel(time=i) expected = window_da.mean("time") np.testing.assert_allclose(actual.values, expected.values) @pytest.mark.parametrize("da", (1,), indirect=True) def test_rolling_repr(self, da) -> None: rolling_obj = da.rolling(time=7) assert repr(rolling_obj) == "DataArrayRolling [time->7]" rolling_obj = da.rolling(time=7, center=True) assert repr(rolling_obj) == "DataArrayRolling [time->7(center)]" rolling_obj = da.rolling(time=7, x=3, center=True) assert repr(rolling_obj) == "DataArrayRolling [time->7(center),x->3(center)]" @requires_dask def test_repeated_rolling_rechunks(self) -> None: # regression test for GH3277, GH2514 dat = DataArray(np.random.rand(7653, 300), dims=("day", "item")) dat_chunk = dat.chunk({"item": 20}) dat_chunk.rolling(day=10).mean().rolling(day=250).std() def test_rolling_doc(self, da) -> None: rolling_obj = da.rolling(time=7) # argument substitution worked assert "`mean`" in rolling_obj.mean.__doc__ def test_rolling_properties(self, da) -> None: rolling_obj = da.rolling(time=4) assert rolling_obj.obj.get_axis_num("time") == 1 # catching invalid args with pytest.raises(ValueError, match="window must be > 0"): da.rolling(time=-2) with pytest.raises(ValueError, match="min_periods must be greater than zero"): da.rolling(time=2, min_periods=0) with pytest.raises( KeyError, match=r"\('foo',\) not found in DataArray dimensions", ): da.rolling(foo=2) @requires_dask @pytest.mark.parametrize( "name", ("sum", "mean", "std", "min", "max", "median", "argmin", "argmax") ) @pytest.mark.parametrize("center", (True, False, None)) @pytest.mark.parametrize("min_periods", (1, None)) @pytest.mark.parametrize("backend", ["numpy", "dask"], indirect=True) def test_rolling_wrapped_bottleneck( self, da, name, center, min_periods, compute_backend ) -> None: bn = pytest.importorskip("bottleneck", minversion="1.1") # Test all bottleneck functions rolling_obj = da.rolling(time=7, min_periods=min_periods) func_name = f"move_{name}" actual = getattr(rolling_obj, name)() window = 7 expected = getattr(bn, func_name)( da.values, window=window, axis=1, min_count=min_periods ) # index 0 is at the rightmost edge of the window # need to reverse index here # see GH #8541 if func_name in ["move_argmin", "move_argmax"]: expected = window - 1 - expected # Using assert_allclose because we get tiny (1e-17) differences in numbagg. np.testing.assert_allclose(actual.values, expected) with pytest.warns(DeprecationWarning, match="Reductions are applied"): getattr(rolling_obj, name)(dim="time") # Test center rolling_obj = da.rolling(time=7, center=center) actual = getattr(rolling_obj, name)()["time"] # Using assert_allclose because we get tiny (1e-17) differences in numbagg. 
assert_allclose(actual, da["time"]) @requires_dask @pytest.mark.parametrize("name", ("mean", "count")) @pytest.mark.parametrize("center", (True, False, None)) @pytest.mark.parametrize("min_periods", (1, None)) @pytest.mark.parametrize("window", (7, 8)) @pytest.mark.parametrize("backend", ["dask"], indirect=True) def test_rolling_wrapped_dask(self, da, name, center, min_periods, window) -> None: # dask version rolling_obj = da.rolling(time=window, min_periods=min_periods, center=center) actual = getattr(rolling_obj, name)().load() if name != "count": with pytest.warns(DeprecationWarning, match="Reductions are applied"): getattr(rolling_obj, name)(dim="time") # numpy version rolling_obj = da.load().rolling( time=window, min_periods=min_periods, center=center ) expected = getattr(rolling_obj, name)() # using all-close because rolling over ghost cells introduces some # precision errors assert_allclose(actual, expected) # with zero chunked array GH:2113 rolling_obj = da.chunk().rolling( time=window, min_periods=min_periods, center=center ) actual = getattr(rolling_obj, name)().load() assert_allclose(actual, expected) @pytest.mark.parametrize("center", (True, None)) def test_rolling_wrapped_dask_nochunk(self, center) -> None: # GH:2113 pytest.importorskip("dask.array") da_day_clim = xr.DataArray( np.arange(1, 367), coords=[np.arange(1, 367)], dims="dayofyear" ) expected = da_day_clim.rolling(dayofyear=31, center=center).mean() actual = da_day_clim.chunk().rolling(dayofyear=31, center=center).mean() assert_allclose(actual, expected) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1, 2, 3)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) def test_rolling_pandas_compat( self, center, window, min_periods, compute_backend ) -> None: s = pd.Series(np.arange(10)) da = DataArray.from_series(s) if min_periods is not None and window < min_periods: min_periods = window s_rolling = s.rolling(window, center=center, min_periods=min_periods).mean() da_rolling = da.rolling( index=window, center=center, min_periods=min_periods ).mean() da_rolling_np = da.rolling( index=window, center=center, min_periods=min_periods ).reduce(np.nanmean) np.testing.assert_allclose(np.asarray(s_rolling.values), da_rolling.values) np.testing.assert_allclose(s_rolling.index, da_rolling["index"]) np.testing.assert_allclose(np.asarray(s_rolling.values), da_rolling_np.values) np.testing.assert_allclose(s_rolling.index, da_rolling_np["index"]) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) def test_rolling_construct(self, center: bool, window: int) -> None: s = pd.Series(np.arange(10)) da = DataArray.from_series(s) s_rolling = s.rolling(window, center=center, min_periods=1).mean() da_rolling = da.rolling(index=window, center=center, min_periods=1) da_rolling_mean = da_rolling.construct("window").mean("window") np.testing.assert_allclose(np.asarray(s_rolling.values), da_rolling_mean.values) np.testing.assert_allclose(s_rolling.index, da_rolling_mean["index"]) # with stride da_rolling_mean = da_rolling.construct("window", stride=2).mean("window") np.testing.assert_allclose( np.asarray(s_rolling.values[::2]), da_rolling_mean.values ) np.testing.assert_allclose(s_rolling.index[::2], da_rolling_mean["index"]) # with fill_value da_rolling_mean = da_rolling.construct("window", stride=2, fill_value=0.0).mean( "window" ) assert da_rolling_mean.isnull().sum() == 0 assert (da_rolling_mean == 0.0).sum() >= 0 @pytest.mark.parametrize("da", (1, 2), 
indirect=True) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1, 2, 3)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "max")) def test_rolling_reduce( self, da, center, min_periods, window, name, compute_backend ) -> None: if min_periods is not None and window < min_periods: min_periods = window if da.isnull().sum() > 1 and window == 1: # this causes all nan slices window = 2 rolling_obj = da.rolling(time=window, center=center, min_periods=min_periods) # add nan prefix to numpy methods to get similar # behavior as bottleneck actual = rolling_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(rolling_obj, name)() assert_allclose(actual, expected) assert actual.sizes == expected.sizes @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1, 2, 3)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "max")) def test_rolling_reduce_nonnumeric( self, center, min_periods, window, name, compute_backend ) -> None: da = DataArray( [0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time" ).isnull() if min_periods is not None and window < min_periods: min_periods = window rolling_obj = da.rolling(time=window, center=center, min_periods=min_periods) # add nan prefix to numpy methods to get similar behavior as bottleneck actual = rolling_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(rolling_obj, name)() assert_allclose(actual, expected) assert actual.sizes == expected.sizes def test_rolling_count_correct(self, compute_backend) -> None: da = DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time") kwargs: list[dict[str, Any]] = [ {"time": 11, "min_periods": 1}, {"time": 11, "min_periods": None}, {"time": 7, "min_periods": 2}, ] expecteds = [ DataArray([1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8], dims="time"), DataArray( [ np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, ], dims="time", ), DataArray([np.nan, np.nan, 2, 3, 3, 4, 5, 5, 5, 5, 5], dims="time"), ] for kwarg, expected in zip(kwargs, expecteds, strict=True): result = da.rolling(**kwarg).count() assert_equal(result, expected) result = da.to_dataset(name="var1").rolling(**kwarg).count()["var1"] assert_equal(result, expected) @pytest.mark.parametrize("da", (1,), indirect=True) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1)) @pytest.mark.parametrize("name", ("sum", "mean", "max")) def test_ndrolling_reduce( self, da, center, min_periods, name, compute_backend ) -> None: rolling_obj = da.rolling(time=3, x=2, center=center, min_periods=min_periods) actual = getattr(rolling_obj, name)() expected = getattr( getattr( da.rolling(time=3, center=center, min_periods=min_periods), name )().rolling(x=2, center=center, min_periods=min_periods), name, )() assert_allclose(actual, expected) assert actual.sizes == expected.sizes if name == "mean": # test our reimplementation of nanmean using np.nanmean expected = getattr(rolling_obj.construct({"time": "tw", "x": "xw"}), name)( ["tw", "xw"] ) count = rolling_obj.count() if min_periods is None: min_periods = 1 assert_allclose(actual, expected.where(count >= min_periods)) @pytest.mark.parametrize("center", (True, False, (True, False))) @pytest.mark.parametrize("fill_value", (np.nan, 0.0)) def test_ndrolling_construct(self, center, fill_value) -> None: da = DataArray( np.arange(5 * 6 * 7).reshape(5, 6, 
7).astype(float), dims=["x", "y", "z"], coords={"x": ["a", "b", "c", "d", "e"], "y": np.arange(6)}, ) actual = da.rolling(x=3, z=2, center=center).construct( x="x1", z="z1", fill_value=fill_value ) if not isinstance(center, tuple): center = (center, center) expected = ( da.rolling(x=3, center=center[0]) .construct(x="x1", fill_value=fill_value) .rolling(z=2, center=center[1]) .construct(z="z1", fill_value=fill_value) ) assert_allclose(actual, expected) @pytest.mark.parametrize( "funcname, argument", [ ("reduce", (np.mean,)), ("mean", ()), ("construct", ("window_dim",)), ("count", ()), ], ) def test_rolling_keep_attrs(self, funcname, argument) -> None: attrs_da = {"da_attr": "test"} data = np.linspace(10, 15, 100) coords = np.linspace(1, 10, 100) da = DataArray( data, dims=("coord"), coords={"coord": coords}, attrs=attrs_da, name="name" ) # attrs are now kept per default func = getattr(da.rolling(dim={"coord": 5}), funcname) result = func(*argument) assert result.attrs == attrs_da assert result.name == "name" # discard attrs func = getattr(da.rolling(dim={"coord": 5}), funcname) result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.name == "name" # test discard attrs using global option func = getattr(da.rolling(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument) assert result.attrs == {} assert result.name == "name" # keyword takes precedence over global option func = getattr(da.rolling(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument, keep_attrs=True) assert result.attrs == attrs_da assert result.name == "name" func = getattr(da.rolling(dim={"coord": 5}), funcname) with set_options(keep_attrs=True): result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.name == "name" @requires_dask @pytest.mark.parametrize("dtype", ["int", "float32", "float64"]) def test_rolling_dask_dtype(self, dtype) -> None: data = DataArray( np.array([1, 2, 3], dtype=dtype), dims="x", coords={"x": [1, 2, 3]} ) unchunked_result = data.rolling(x=3, min_periods=1).mean() chunked_result = data.chunk({"x": 1}).rolling(x=3, min_periods=1).mean() assert chunked_result.dtype == unchunked_result.dtype def test_rolling_mean_bool(self) -> None: bool_raster = DataArray( data=[0, 1, 1, 0, 1, 0], dims=("x"), ).astype(bool) expected = DataArray( data=[np.nan, 2 / 3, 2 / 3, 2 / 3, 1 / 3, np.nan], dims=("x"), ) result = bool_raster.rolling(x=3, center=True).mean() assert_allclose(result, expected) @requires_numbagg class TestDataArrayRollingExp: @pytest.mark.parametrize("dim", ["time", "x"]) @pytest.mark.parametrize( "window_type, window", [["span", 5], ["alpha", 0.5], ["com", 0.5], ["halflife", 5]], ) @pytest.mark.parametrize("backend", ["numpy"], indirect=True) @pytest.mark.parametrize("func", ["mean", "sum", "var", "std"]) def test_rolling_exp_runs(self, da, dim, window_type, window, func) -> None: da = da.where(da > 0.2) rolling_exp = da.rolling_exp(window_type=window_type, **{dim: window}) result = getattr(rolling_exp, func)() assert isinstance(result, DataArray) @pytest.mark.parametrize("dim", ["time", "x"]) @pytest.mark.parametrize( "window_type, window", [["span", 5], ["alpha", 0.5], ["com", 0.5], ["halflife", 5]], ) @pytest.mark.parametrize("backend", ["numpy"], indirect=True) def test_rolling_exp_mean_pandas(self, da, dim, window_type, window) -> None: da = da.isel(a=0).where(lambda x: x > 0.2) result = da.rolling_exp(window_type=window_type, **{dim: window}).mean() assert isinstance(result, 
DataArray) pandas_array = da.to_pandas() assert pandas_array.index.name == "time" if dim == "x": pandas_array = pandas_array.T expected = xr.DataArray( pandas_array.ewm(**{window_type: window}).mean() ).transpose(*da.dims) assert_allclose(expected.variable, result.variable) @pytest.mark.parametrize("backend", ["numpy"], indirect=True) @pytest.mark.parametrize("func", ["mean", "sum"]) def test_rolling_exp_keep_attrs(self, da, func) -> None: attrs = {"attrs": "da"} da.attrs = attrs # Equivalent of `da.rolling_exp(time=10).mean` rolling_exp_func = getattr(da.rolling_exp(time=10), func) # attrs are kept per default result = rolling_exp_func() assert result.attrs == attrs # discard attrs result = rolling_exp_func(keep_attrs=False) assert result.attrs == {} # test discard attrs using global option with set_options(keep_attrs=False): result = rolling_exp_func() assert result.attrs == {} # keyword takes precedence over global option with set_options(keep_attrs=False): result = rolling_exp_func(keep_attrs=True) assert result.attrs == attrs with set_options(keep_attrs=True): result = rolling_exp_func(keep_attrs=False) assert result.attrs == {} with pytest.warns( UserWarning, match="Passing ``keep_attrs`` to ``rolling_exp`` has no effect.", ): da.rolling_exp(time=10, keep_attrs=True) class TestDatasetRolling: @pytest.mark.parametrize( "funcname, argument", [ ("reduce", (np.mean,)), ("mean", ()), ("construct", ("window_dim",)), ("count", ()), ], ) def test_rolling_keep_attrs(self, funcname, argument) -> None: global_attrs = {"units": "test", "long_name": "testing"} da_attrs = {"da_attr": "test"} da_not_rolled_attrs = {"da_not_rolled_attr": "test"} data = np.linspace(10, 15, 100) coords = np.linspace(1, 10, 100) ds = Dataset( data_vars={"da": ("coord", data), "da_not_rolled": ("no_coord", data)}, coords={"coord": coords}, attrs=global_attrs, ) ds.da.attrs = da_attrs ds.da_not_rolled.attrs = da_not_rolled_attrs # attrs are now kept per default func = getattr(ds.rolling(dim={"coord": 5}), funcname) result = func(*argument) assert result.attrs == global_attrs assert result.da.attrs == da_attrs assert result.da_not_rolled.attrs == da_not_rolled_attrs assert result.da.name == "da" assert result.da_not_rolled.name == "da_not_rolled" # discard attrs func = getattr(ds.rolling(dim={"coord": 5}), funcname) result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_rolled.attrs == {} assert result.da.name == "da" assert result.da_not_rolled.name == "da_not_rolled" # test discard attrs using global option func = getattr(ds.rolling(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_rolled.attrs == {} assert result.da.name == "da" assert result.da_not_rolled.name == "da_not_rolled" # keyword takes precedence over global option func = getattr(ds.rolling(dim={"coord": 5}), funcname) with set_options(keep_attrs=False): result = func(*argument, keep_attrs=True) assert result.attrs == global_attrs assert result.da.attrs == da_attrs assert result.da_not_rolled.attrs == da_not_rolled_attrs assert result.da.name == "da" assert result.da_not_rolled.name == "da_not_rolled" func = getattr(ds.rolling(dim={"coord": 5}), funcname) with set_options(keep_attrs=True): result = func(*argument, keep_attrs=False) assert result.attrs == {} assert result.da.attrs == {} assert result.da_not_rolled.attrs == {} assert result.da.name == "da" assert 
result.da_not_rolled.name == "da_not_rolled" def test_rolling_properties(self, ds) -> None: # catching invalid args with pytest.raises(ValueError, match="window must be > 0"): ds.rolling(time=-2) with pytest.raises(ValueError, match="min_periods must be greater than zero"): ds.rolling(time=2, min_periods=0) with pytest.raises(KeyError, match="time2"): ds.rolling(time2=2) with pytest.raises( KeyError, match=r"\('foo',\) not found in Dataset dimensions", ): ds.rolling(foo=2) @requires_dask_ge_2024_11_0 def test_rolling_construct_automatic_rechunk(self): import dask # Construct dataset with chunk size of (400, 400, 1) or 1.22 MiB da = DataArray( dims=["latitude", "longitude", "time"], data=dask.array.random.random((400, 400, 400), chunks=(-1, -1, 1)), name="foo", ) for obj in [da, da.to_dataset()]: # Dataset now has chunks of size (400, 400, 100 100) or 11.92 GiB rechunked = obj.rolling(time=100, center=True).construct( "window", sliding_window_view_kwargs=dict( automatic_rechunk=True, writeable=False ), ) not_rechunked = obj.rolling(time=100, center=True).construct( "window", sliding_window_view_kwargs=dict( automatic_rechunk=False, writeable=True ), ) assert rechunked.chunksizes != not_rechunked.chunksizes roller = obj.isel(time=slice(30)).rolling(time=10, center=True) one = roller.reduce( np.sum, sliding_window_view_kwargs=dict(automatic_rechunk=True) ) two = roller.reduce( np.sum, sliding_window_view_kwargs=dict(automatic_rechunk=False) ) assert_identical(one, two) @pytest.mark.parametrize( "name", ("sum", "mean", "std", "var", "min", "max", "median") ) @pytest.mark.parametrize("center", (True, False, None)) @pytest.mark.parametrize("min_periods", (1, None)) @pytest.mark.parametrize("key", ("z1", "z2")) @pytest.mark.parametrize("backend", ["numpy"], indirect=True) def test_rolling_wrapped_bottleneck( self, ds, name, center, min_periods, key, compute_backend ) -> None: bn = pytest.importorskip("bottleneck", minversion="1.1") # Test all bottleneck functions rolling_obj = ds.rolling(time=7, min_periods=min_periods) func_name = f"move_{name}" actual = getattr(rolling_obj, name)() if key == "z1": # z1 does not depend on 'Time' axis. Stored as it is. 
expected = ds[key] elif key == "z2": expected = getattr(bn, func_name)( ds[key].values, window=7, axis=0, min_count=min_periods ) else: raise ValueError np.testing.assert_allclose(actual[key].values, expected) # Test center rolling_obj = ds.rolling(time=7, center=center) actual = getattr(rolling_obj, name)()["time"] assert_allclose(actual, ds["time"]) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1, 2, 3)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) def test_rolling_pandas_compat(self, center, window, min_periods) -> None: df = pd.DataFrame( { "x": np.random.randn(20), "y": np.random.randn(20), "time": np.linspace(0, 1, 20), } ) ds = Dataset.from_dataframe(df) if min_periods is not None and window < min_periods: min_periods = window df_rolling = df.rolling(window, center=center, min_periods=min_periods).mean() ds_rolling = ds.rolling( index=window, center=center, min_periods=min_periods ).mean() np.testing.assert_allclose( np.asarray(df_rolling["x"].values), ds_rolling["x"].values ) np.testing.assert_allclose(df_rolling.index, ds_rolling["index"]) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) def test_rolling_construct(self, center: bool, window: int) -> None: df = pd.DataFrame( { "x": np.random.randn(20), "y": np.random.randn(20), "time": np.linspace(0, 1, 20), } ) ds = Dataset.from_dataframe(df) df_rolling = df.rolling(window, center=center, min_periods=1).mean() ds_rolling = ds.rolling(index=window, center=center) ds_rolling_mean = ds_rolling.construct("window").mean("window") np.testing.assert_allclose( np.asarray(df_rolling["x"].values), ds_rolling_mean["x"].values ) np.testing.assert_allclose(df_rolling.index, ds_rolling_mean["index"]) # with fill_value ds_rolling_mean = ds_rolling.construct("window", stride=2, fill_value=0.0).mean( "window" ) assert (ds_rolling_mean.isnull().sum() == 0).to_dataarray(dim="vars").all() assert (ds_rolling_mean["x"] == 0.0).sum() >= 0 @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) def test_rolling_construct_stride(self, center: bool, window: int) -> None: df = pd.DataFrame( { "x": np.random.randn(20), "y": np.random.randn(20), "time": np.linspace(0, 1, 20), } ) ds = Dataset.from_dataframe(df) df_rolling_mean = df.rolling(window, center=center, min_periods=1).mean() # With an index (dimension coordinate) ds_rolling = ds.rolling(index=window, center=center) ds_rolling_mean = ds_rolling.construct("w", stride=2).mean("w") np.testing.assert_allclose( np.asarray(df_rolling_mean["x"][::2].values), ds_rolling_mean["x"].values ) np.testing.assert_allclose(df_rolling_mean.index[::2], ds_rolling_mean["index"]) # Without index (https://github.com/pydata/xarray/issues/7021) ds2 = ds.drop_vars("index") ds2_rolling = ds2.rolling(index=window, center=center) ds2_rolling_mean = ds2_rolling.construct("w", stride=2).mean("w") np.testing.assert_allclose( np.asarray(df_rolling_mean["x"][::2].values), ds2_rolling_mean["x"].values ) # Mixed coordinates, indexes and 2D coordinates ds3 = xr.Dataset( {"x": ("t", range(20)), "x2": ("y", range(5))}, { "t": range(20), "y": ("y", range(5)), "t2": ("t", range(20)), "y2": ("y", range(5)), "yt": (["t", "y"], np.ones((20, 5))), }, ) ds3_rolling = ds3.rolling(t=window, center=center) ds3_rolling_mean = ds3_rolling.construct("w", stride=2).mean("w") for coord in ds3.coords: assert coord in ds3_rolling_mean.coords @pytest.mark.slow @pytest.mark.parametrize("ds", (1, 2), 
indirect=True) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1, 2, 3)) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize( "name", ("sum", "mean", "std", "var", "min", "max", "median") ) def test_rolling_reduce(self, ds, center, min_periods, window, name) -> None: if min_periods is not None and window < min_periods: min_periods = window if name == "std" and window == 1: pytest.skip("std with window == 1 is unstable in bottleneck") rolling_obj = ds.rolling(time=window, center=center, min_periods=min_periods) # add nan prefix to numpy methods to get similar behavior as bottleneck actual = rolling_obj.reduce(getattr(np, f"nan{name}")) expected = getattr(rolling_obj, name)() assert_allclose(actual, expected) assert ds.sizes == actual.sizes # make sure the order of data_var are not changed. assert list(ds.data_vars.keys()) == list(actual.data_vars.keys()) # Make sure the dimension order is restored for key, src_var in ds.data_vars.items(): assert src_var.dims == actual[key].dims @pytest.mark.parametrize("ds", (2,), indirect=True) @pytest.mark.parametrize("center", (True, False)) @pytest.mark.parametrize("min_periods", (None, 1)) @pytest.mark.parametrize("name", ("sum", "max")) @pytest.mark.parametrize("dask", (True, False)) def test_ndrolling_reduce(self, ds, center, min_periods, name, dask) -> None: if dask and has_dask: ds = ds.chunk({"x": 4}) rolling_obj = ds.rolling(time=4, x=3, center=center, min_periods=min_periods) actual = getattr(rolling_obj, name)() expected = getattr( getattr( ds.rolling(time=4, center=center, min_periods=min_periods), name )().rolling(x=3, center=center, min_periods=min_periods), name, )() assert_allclose(actual, expected) assert actual.sizes == expected.sizes # Do it in the opposite order expected = getattr( getattr( ds.rolling(x=3, center=center, min_periods=min_periods), name )().rolling(time=4, center=center, min_periods=min_periods), name, )() assert_allclose(actual, expected) assert actual.sizes == expected.sizes @pytest.mark.parametrize("center", (True, False, (True, False))) @pytest.mark.parametrize("fill_value", (np.nan, 0.0)) @pytest.mark.parametrize("dask", (True, False)) def test_ndrolling_construct(self, center, fill_value, dask) -> None: da = DataArray( np.arange(5 * 6 * 7).reshape(5, 6, 7).astype(float), dims=["x", "y", "z"], coords={"x": ["a", "b", "c", "d", "e"], "y": np.arange(6)}, ) ds = xr.Dataset({"da": da}) if dask and has_dask: ds = ds.chunk({"x": 4}) actual = ds.rolling(x=3, z=2, center=center).construct( x="x1", z="z1", fill_value=fill_value ) if not isinstance(center, tuple): center = (center, center) expected = ( ds.rolling(x=3, center=center[0]) .construct(x="x1", fill_value=fill_value) .rolling(z=2, center=center[1]) .construct(z="z1", fill_value=fill_value) ) assert_allclose(actual, expected) @requires_dask @pytest.mark.filterwarnings("error") @pytest.mark.parametrize("ds", (2,), indirect=True) @pytest.mark.parametrize("name", ("mean", "max")) def test_raise_no_warning_dask_rolling_assert_close(self, ds, name) -> None: """ This is a puzzle β€” I can't easily find the source of the warning. It requires `assert_allclose` to be run, for the `ds` param to be 2, and is different for `mean` and `max`. `sum` raises no warning. 
""" ds = ds.chunk({"x": 4}) rolling_obj = ds.rolling(time=4, x=3) actual = getattr(rolling_obj, name)() expected = getattr(getattr(ds.rolling(time=4), name)().rolling(x=3), name)() assert_allclose(actual, expected) @requires_numbagg class TestDatasetRollingExp: @pytest.mark.parametrize( "backend", ["numpy", pytest.param("dask", marks=requires_dask)], indirect=True ) def test_rolling_exp(self, ds) -> None: result = ds.rolling_exp(time=10, window_type="span").mean() assert isinstance(result, Dataset) @pytest.mark.parametrize("backend", ["numpy"], indirect=True) def test_rolling_exp_keep_attrs(self, ds) -> None: attrs_global = {"attrs": "global"} attrs_z1 = {"attr": "z1"} ds.attrs = attrs_global ds.z1.attrs = attrs_z1 # attrs are kept per default result = ds.rolling_exp(time=10).mean() assert result.attrs == attrs_global assert result.z1.attrs == attrs_z1 # discard attrs result = ds.rolling_exp(time=10).mean(keep_attrs=False) assert result.attrs == {} # TODO: from #8114 β€”Β this arguably should be empty, but `apply_ufunc` doesn't do # that at the moment. We should change in `apply_func` rather than # special-case it here. # # assert result.z1.attrs == {} # test discard attrs using global option with set_options(keep_attrs=False): result = ds.rolling_exp(time=10).mean() assert result.attrs == {} # See above # assert result.z1.attrs == {} # keyword takes precedence over global option with set_options(keep_attrs=False): result = ds.rolling_exp(time=10).mean(keep_attrs=True) assert result.attrs == attrs_global assert result.z1.attrs == attrs_z1 with set_options(keep_attrs=True): result = ds.rolling_exp(time=10).mean(keep_attrs=False) assert result.attrs == {} # See above # assert result.z1.attrs == {} with pytest.warns( UserWarning, match="Passing ``keep_attrs`` to ``rolling_exp`` has no effect.", ): ds.rolling_exp(time=10, keep_attrs=True) xarray-2025.09.0/xarray/tests/test_sparse.py000066400000000000000000000711361505620616400207320ustar00rootroot00000000000000from __future__ import annotations import math import pickle from textwrap import dedent import numpy as np import pandas as pd import pytest from packaging.version import Version import xarray as xr import xarray.ufuncs as xu from xarray import DataArray, Variable from xarray.namedarray.pycompat import array_type from xarray.tests import assert_equal, assert_identical, requires_dask filterwarnings = pytest.mark.filterwarnings param = pytest.param xfail = pytest.mark.xfail sparse = pytest.importorskip("sparse") sparse_array_type = array_type("sparse") def assert_sparse_equal(a, b): assert isinstance(a, sparse_array_type) assert isinstance(b, sparse_array_type) np.testing.assert_equal(a.todense(), b.todense()) def make_ndarray(shape): return np.arange(math.prod(shape)).reshape(shape) def make_sparray(shape): return sparse.random(shape, density=0.1, random_state=0) def make_xrvar(dim_lengths): return xr.Variable( tuple(dim_lengths.keys()), make_sparray(shape=tuple(dim_lengths.values())) ) def make_xrarray(dim_lengths, coords=None, name="test"): if coords is None: coords = {d: np.arange(n) for d, n in dim_lengths.items()} return xr.DataArray( make_sparray(shape=tuple(dim_lengths.values())), dims=tuple(coords.keys()), coords=coords, name=name, ) class do: def __init__(self, meth, *args, **kwargs): self.meth = meth self.args = args self.kwargs = kwargs def __call__(self, obj): # cannot pass np.sum when using pytest-xdist kwargs = self.kwargs.copy() if "func" in self.kwargs: kwargs["func"] = getattr(np, kwargs["func"]) return getattr(obj, 
self.meth)(*self.args, **kwargs) def __repr__(self): return f"obj.{self.meth}(*{self.args}, **{self.kwargs})" @pytest.mark.parametrize( "prop", [ "chunks", "data", "dims", "dtype", "encoding", "imag", "nbytes", "ndim", param("values", marks=xfail(reason="Coercion to dense")), ], ) def test_variable_property(prop): var = make_xrvar({"x": 10, "y": 5}) getattr(var, prop) @pytest.mark.parametrize( "func,sparse_output", [ (do("all"), False), (do("any"), False), (do("astype", dtype=int), True), (do("clip", min=0, max=1), True), (do("coarsen", windows={"x": 2}, func="sum"), True), (do("compute"), True), (do("conj"), True), (do("copy"), True), (do("count"), False), (do("get_axis_num", dim="x"), False), (do("isel", x=slice(2, 4)), True), (do("isnull"), True), (do("load"), True), (do("mean"), False), (do("notnull"), True), (do("roll"), True), (do("round"), True), (do("set_dims", dim=("x", "y", "z")), True), (do("stack", dim={"flat": ("x", "y")}), True), (do("to_base_variable"), True), (do("transpose"), True), (do("unstack", dim={"x": {"x1": 5, "x2": 2}}), True), (do("broadcast_equals", make_xrvar({"x": 10, "y": 5})), False), (do("equals", make_xrvar({"x": 10, "y": 5})), False), (do("identical", make_xrvar({"x": 10, "y": 5})), False), param( do("argmax"), True, marks=[ xfail(reason="Missing implementation for np.argmin"), filterwarnings("ignore:Behaviour of argmin/argmax"), ], ), param( do("argmin"), True, marks=[ xfail(reason="Missing implementation for np.argmax"), filterwarnings("ignore:Behaviour of argmin/argmax"), ], ), param( do("argsort"), True, marks=xfail(reason="'COO' object has no attribute 'argsort'"), ), param( do( "concat", variables=[ make_xrvar({"x": 10, "y": 5}), make_xrvar({"x": 10, "y": 5}), ], ), True, ), param( do("conjugate"), True, marks=xfail(reason="'COO' object has no attribute 'conjugate'"), ), param( do("cumprod"), True, marks=xfail(reason="Missing implementation for np.nancumprod"), ), param( do("cumsum"), True, marks=xfail(reason="Missing implementation for np.nancumsum"), ), (do("fillna", 0), True), param( do("item", (1, 1)), False, marks=xfail(reason="'COO' object has no attribute 'item'"), ), param( do("median"), False, marks=xfail(reason="Missing implementation for np.nanmedian"), ), param(do("max"), False), param(do("min"), False), param( do("no_conflicts", other=make_xrvar({"x": 10, "y": 5})), True, marks=xfail(reason="mixed sparse-dense operation"), ), param( do("pad", mode="constant", pad_widths={"x": (1, 1)}, fill_value=5), True, marks=xfail(reason="Missing implementation for np.pad"), ), (do("prod"), False), param( do("quantile", q=0.5), True, marks=xfail(reason="Missing implementation for np.nanpercentile"), ), param( do("rank", dim="x"), False, marks=xfail(reason="Only implemented for NumPy arrays (via bottleneck)"), ), param( do("reduce", func="sum", dim="x"), True, ), param( do("rolling_window", dim="x", window=2, window_dim="x_win"), True, marks=xfail(reason="Missing implementation for np.pad"), ), param( do("shift", x=2), True, marks=xfail(reason="mixed sparse-dense operation") ), param( do("std"), False, marks=xfail(reason="Missing implementation for np.nanstd") ), (do("sum"), False), param( do("var"), False, marks=xfail(reason="Missing implementation for np.nanvar") ), param(do("to_dict"), False), (do("where", cond=make_xrvar({"x": 10, "y": 5}) > 0.5), True), ], ids=repr, ) def test_variable_method(func, sparse_output): var_s = make_xrvar({"x": 10, "y": 5}) var_d = xr.Variable(var_s.dims, var_s.data.todense()) ret_s = func(var_s) ret_d = func(var_d) # 
TODO: figure out how to verify the results of each method if isinstance(ret_d, xr.Variable) and isinstance(ret_d.data, sparse.SparseArray): ret_d = ret_d.copy(data=ret_d.data.todense()) if sparse_output: assert isinstance(ret_s.data, sparse.SparseArray) assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True) elif func.meth != "to_dict": assert np.allclose(ret_s, ret_d) else: # pop the arrays from the dict arr_s, arr_d = ret_s.pop("data"), ret_d.pop("data") assert np.allclose(arr_s, arr_d) assert ret_s == ret_d @pytest.mark.parametrize( "func,sparse_output", [ (do("squeeze"), True), param(do("to_index"), False, marks=xfail(reason="Coercion to dense")), param(do("to_index_variable"), False, marks=xfail(reason="Coercion to dense")), param( do("searchsorted", 0.5), True, marks=xfail(reason="'COO' object has no attribute 'searchsorted'"), ), ], ) def test_1d_variable_method(func, sparse_output): var_s = make_xrvar({"x": 10}) var_d = xr.Variable(var_s.dims, var_s.data.todense()) ret_s = func(var_s) ret_d = func(var_d) if sparse_output: assert isinstance(ret_s.data, sparse.SparseArray) assert np.allclose(ret_s.data.todense(), ret_d.data) else: assert np.allclose(ret_s, ret_d) class TestSparseVariable: @pytest.fixture(autouse=True) def setUp(self): self.data = sparse.random((4, 6), random_state=0, density=0.5) self.var = xr.Variable(("x", "y"), self.data) def test_nbytes(self): assert self.var.nbytes == self.data.nbytes def test_unary_op(self): assert_sparse_equal(-self.var.data, -self.data) assert_sparse_equal(abs(self.var).data, abs(self.data)) assert_sparse_equal(self.var.round().data, self.data.round()) @pytest.mark.filterwarnings("ignore::FutureWarning") def test_univariate_ufunc(self): assert_sparse_equal(np.sin(self.data), np.sin(self.var).data) @pytest.mark.filterwarnings("ignore::FutureWarning") def test_bivariate_ufunc(self): assert_sparse_equal(np.maximum(self.data, 0), np.maximum(self.var, 0).data) assert_sparse_equal(np.maximum(self.data, 0), np.maximum(0, self.var).data) def test_univariate_xufunc(self): assert_sparse_equal(xu.sin(self.var).data, np.sin(self.data)) def test_bivariate_xufunc(self): assert_sparse_equal(xu.multiply(self.var, 0).data, np.multiply(self.data, 0)) assert_sparse_equal(xu.multiply(0, self.var).data, np.multiply(0, self.data)) def test_repr(self): expected = dedent( """\ Size: 288B """ ) assert expected == repr(self.var) def test_pickle(self): v1 = self.var v2 = pickle.loads(pickle.dumps(v1)) assert_sparse_equal(v1.data, v2.data) def test_missing_values(self): a = np.array([0, 1, np.nan, 3]) s = sparse.COO.from_numpy(a) var_s = Variable("x", s) assert np.all(var_s.fillna(2).data.todense() == np.arange(4)) assert np.all(var_s.count() == 3) @pytest.mark.parametrize( "prop", [ "attrs", "chunks", "coords", "data", "dims", "dtype", "encoding", "imag", "indexes", "loc", "name", "nbytes", "ndim", "plot", "real", "shape", "size", "sizes", "str", "variable", ], ) def test_dataarray_property(prop): arr = make_xrarray({"x": 10, "y": 5}) getattr(arr, prop) @pytest.mark.parametrize( "func,sparse_output", [ (do("all"), False), (do("any"), False), (do("assign_attrs", {"foo": "bar"}), True), (do("assign_coords", x=make_xrarray({"x": 10}).x + 1), True), (do("astype", int), True), (do("clip", min=0, max=1), True), (do("compute"), True), (do("conj"), True), (do("copy"), True), (do("count"), False), (do("diff", "x"), True), (do("drop_vars", "x"), True), (do("expand_dims", {"z": 2}, axis=2), True), (do("get_axis_num", "x"), False), (do("get_index", "x"), False), 
(do("identical", make_xrarray({"x": 5, "y": 5})), False), (do("integrate", "x"), True), (do("isel", {"x": slice(0, 3), "y": slice(2, 4)}), True), (do("isnull"), True), (do("load"), True), (do("mean"), False), (do("persist"), True), (do("reindex", {"x": [1, 2, 3]}), True), (do("rename", "foo"), True), (do("reorder_levels"), True), (do("reset_coords", drop=True), True), (do("reset_index", "x"), True), (do("round"), True), (do("sel", x=[0, 1, 2]), True), (do("shift"), True), (do("sortby", "x", ascending=False), True), (do("stack", z=["x", "y"]), True), (do("transpose"), True), # TODO # set_index # swap_dims (do("broadcast_equals", make_xrvar({"x": 10, "y": 5})), False), (do("equals", make_xrvar({"x": 10, "y": 5})), False), param( do("argmax"), True, marks=[ xfail(reason="Missing implementation for np.argmax"), filterwarnings("ignore:Behaviour of argmin/argmax"), ], ), param( do("argmin"), True, marks=[ xfail(reason="Missing implementation for np.argmin"), filterwarnings("ignore:Behaviour of argmin/argmax"), ], ), param( do("argsort"), True, marks=xfail(reason="'COO' object has no attribute 'argsort'"), ), param( do("bfill", dim="x"), False, marks=xfail(reason="Missing implementation for np.flip"), ), (do("combine_first", make_xrarray({"x": 10, "y": 5})), True), param( do("conjugate"), False, marks=xfail(reason="'COO' object has no attribute 'conjugate'"), ), param( do("cumprod"), True, marks=xfail(reason="Missing implementation for np.nancumprod"), ), param( do("cumsum"), True, marks=xfail(reason="Missing implementation for np.nancumsum"), ), param( do("differentiate", "x"), False, marks=xfail(reason="Missing implementation for np.gradient"), ), param( do("dot", make_xrarray({"x": 10, "y": 5})), True, marks=xfail(reason="Missing implementation for np.einsum"), ), param(do("dropna", "x"), False, marks=xfail(reason="Coercion to dense")), param(do("ffill", "x"), False, marks=xfail(reason="Coercion to dense")), (do("fillna", 0), True), param( do("interp", coords={"x": np.arange(10) + 0.5}), True, marks=xfail(reason="Coercion to dense"), ), param( do( "interp_like", make_xrarray( {"x": 10, "y": 5}, coords={"x": np.arange(10) + 0.5, "y": np.arange(5) + 0.5}, ), ), True, marks=xfail(reason="Indexing COO with more than one iterable index"), ), param(do("interpolate_na", "x"), True, marks=xfail(reason="Coercion to dense")), param( do("isin", [1, 2, 3]), False, marks=xfail(reason="Missing implementation for np.isin"), ), param( do("item", (1, 1)), False, marks=xfail(reason="'COO' object has no attribute 'item'"), ), param(do("max"), False), param(do("min"), False), param( do("median"), False, marks=xfail(reason="Missing implementation for np.nanmedian"), ), (do("notnull"), True), (do("pipe", func="sum", axis=1), True), (do("prod"), False), param( do("quantile", q=0.5), False, marks=xfail(reason="Missing implementation for np.nanpercentile"), ), param( do("rank", "x"), False, marks=xfail(reason="Only implemented for NumPy arrays (via bottleneck)"), ), param( do("reduce", func="sum", dim="x"), False, marks=xfail(reason="Coercion to dense"), ), param( do( "reindex_like", make_xrarray( {"x": 10, "y": 5}, coords={"x": np.arange(10) + 0.5, "y": np.arange(5) + 0.5}, ), ), True, marks=xfail(reason="Indexing COO with more than one iterable index"), ), (do("roll", x=2, roll_coords=True), True), param( do("sel", x=[0, 1, 2], y=[2, 3]), True, marks=xfail(reason="Indexing COO with more than one iterable index"), ), param( do("std"), False, marks=xfail(reason="Missing implementation for np.nanstd") ), (do("sum"), 
False), param( do("var"), False, marks=xfail(reason="Missing implementation for np.nanvar") ), param( do("where", make_xrarray({"x": 10, "y": 5}) > 0.5), False, marks=xfail(reason="Conversion of dense to sparse when using sparse mask"), ), ], ids=repr, ) def test_dataarray_method(func, sparse_output): arr_s = make_xrarray( {"x": 10, "y": 5}, coords={"x": np.arange(10), "y": np.arange(5)} ) arr_d = xr.DataArray(arr_s.data.todense(), coords=arr_s.coords, dims=arr_s.dims) ret_s = func(arr_s) ret_d = func(arr_d) if sparse_output: assert isinstance(ret_s.data, sparse.SparseArray) assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True) else: assert np.allclose(ret_s, ret_d, equal_nan=True) @pytest.mark.parametrize( "func,sparse_output", [ (do("squeeze"), True), param( do("searchsorted", [1, 2, 3]), False, marks=xfail(reason="'COO' object has no attribute 'searchsorted'"), ), ], ) def test_datarray_1d_method(func, sparse_output): arr_s = make_xrarray({"x": 10}, coords={"x": np.arange(10)}) arr_d = xr.DataArray(arr_s.data.todense(), coords=arr_s.coords, dims=arr_s.dims) ret_s = func(arr_s) ret_d = func(arr_d) if sparse_output: assert isinstance(ret_s.data, sparse.SparseArray) assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True) else: assert np.allclose(ret_s, ret_d, equal_nan=True) class TestSparseDataArrayAndDataset: @pytest.fixture(autouse=True) def setUp(self): self.sp_ar = sparse.random((4, 6), random_state=0, density=0.5) self.sp_xr = xr.DataArray( self.sp_ar, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) self.ds_ar = self.sp_ar.todense() self.ds_xr = xr.DataArray( self.ds_ar, coords={"x": range(4)}, dims=("x", "y"), name="foo" ) def test_to_dataset_roundtrip(self): x = self.sp_xr assert_equal(x, x.to_dataset("x").to_dataarray("x")) def test_align(self): a1 = xr.DataArray( sparse.COO.from_numpy(np.arange(4)), dims=["x"], coords={"x": ["a", "b", "c", "d"]}, ) b1 = xr.DataArray( sparse.COO.from_numpy(np.arange(4)), dims=["x"], coords={"x": ["a", "b", "d", "e"]}, ) a2, b2 = xr.align(a1, b1, join="inner") assert isinstance(a2.data, sparse.SparseArray) assert isinstance(b2.data, sparse.SparseArray) assert np.all(a2.coords["x"].data == ["a", "b", "d"]) assert np.all(b2.coords["x"].data == ["a", "b", "d"]) @pytest.mark.xfail( reason="COO objects currently do not accept more than one " "iterable index at a time" ) def test_align_2d(self): A1 = xr.DataArray( self.sp_ar, dims=["x", "y"], coords={ "x": np.arange(self.sp_ar.shape[0]), "y": np.arange(self.sp_ar.shape[1]), }, ) A2 = xr.DataArray( self.sp_ar, dims=["x", "y"], coords={ "x": np.arange(1, self.sp_ar.shape[0] + 1), "y": np.arange(1, self.sp_ar.shape[1] + 1), }, ) B1, B2 = xr.align(A1, A2, join="inner") assert np.all(B1.coords["x"] == np.arange(1, self.sp_ar.shape[0])) assert np.all(B1.coords["y"] == np.arange(1, self.sp_ar.shape[0])) assert np.all(B1.coords["x"] == B2.coords["x"]) assert np.all(B1.coords["y"] == B2.coords["y"]) def test_align_outer(self): a1 = xr.DataArray( sparse.COO.from_numpy(np.arange(4)), dims=["x"], coords={"x": ["a", "b", "c", "d"]}, ) b1 = xr.DataArray( sparse.COO.from_numpy(np.arange(4)), dims=["x"], coords={"x": ["a", "b", "d", "e"]}, ) a2, b2 = xr.align(a1, b1, join="outer") assert isinstance(a2.data, sparse.SparseArray) assert isinstance(b2.data, sparse.SparseArray) assert np.all(a2.coords["x"].data == ["a", "b", "c", "d", "e"]) assert np.all(b2.coords["x"].data == ["a", "b", "c", "d", "e"]) def test_concat(self): ds1 = xr.Dataset(data_vars={"d": self.sp_xr}) ds2 = 
xr.Dataset(data_vars={"d": self.sp_xr}) ds3 = xr.Dataset(data_vars={"d": self.sp_xr}) out = xr.concat([ds1, ds2, ds3], dim="x") assert_sparse_equal( out["d"].data, sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=0), ) out = xr.concat([self.sp_xr, self.sp_xr, self.sp_xr], dim="y") assert_sparse_equal( out.data, sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=1) ) def test_stack(self): arr = make_xrarray({"w": 2, "x": 3, "y": 4}) stacked = arr.stack(z=("x", "y")) z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"]) expected = xr.DataArray( arr.data.reshape((2, -1)), {"w": [0, 1], "z": z}, dims=["w", "z"] ) assert_equal(expected, stacked) roundtripped = stacked.unstack() assert_identical(arr, roundtripped) def test_dataarray_repr(self): a = xr.DataArray( sparse.COO.from_numpy(np.ones(4)), dims=["x"], coords={"y": ("x", sparse.COO.from_numpy(np.arange(4, dtype="i8")))}, ) expected = dedent( """\ Size: 64B Coordinates: y (x) int64 48B Dimensions without coordinates: x""" ) assert expected == repr(a) def test_dataset_repr(self): ds = xr.Dataset( data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))}, coords={"y": ("x", sparse.COO.from_numpy(np.arange(4, dtype="i8")))}, ) expected = dedent( """\ Size: 112B Dimensions: (x: 4) Coordinates: y (x) int64 48B Dimensions without coordinates: x Data variables: a (x) float64 64B """ ) assert expected == repr(ds) @requires_dask def test_sparse_dask_dataset_repr(self): ds = xr.Dataset( data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))} ).chunk() if Version(sparse.__version__) >= Version("0.16.0"): meta = "sparse.numba_backend._coo.core.COO" else: meta = "sparse.COO" expected = dedent( f"""\ Size: 32B Dimensions: (x: 4) Dimensions without coordinates: x Data variables: a (x) float64 32B dask.array""" ) assert expected == repr(ds) def test_dataarray_pickle(self): a1 = xr.DataArray( sparse.COO.from_numpy(np.ones(4)), dims=["x"], coords={"y": ("x", sparse.COO.from_numpy(np.arange(4)))}, ) a2 = pickle.loads(pickle.dumps(a1)) assert_identical(a1, a2) def test_dataset_pickle(self): ds1 = xr.Dataset( data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))}, coords={"y": ("x", sparse.COO.from_numpy(np.arange(4)))}, ) ds2 = pickle.loads(pickle.dumps(ds1)) assert_identical(ds1, ds2) def test_coarsen(self): a1 = self.ds_xr a2 = self.sp_xr m1 = a1.coarsen(x=2, boundary="trim").mean() m2 = a2.coarsen(x=2, boundary="trim").mean() assert isinstance(m2.data, sparse.SparseArray) assert np.allclose(m1.data, m2.data.todense()) @pytest.mark.xfail(reason="No implementation of np.pad") def test_rolling(self): a1 = self.ds_xr a2 = self.sp_xr m1 = a1.rolling(x=2, center=True).mean() m2 = a2.rolling(x=2, center=True).mean() assert isinstance(m2.data, sparse.SparseArray) assert np.allclose(m1.data, m2.data.todense()) @pytest.mark.xfail(reason="Coercion to dense") def test_rolling_exp(self): a1 = self.ds_xr a2 = self.sp_xr m1 = a1.rolling_exp(x=2, center=True).mean() m2 = a2.rolling_exp(x=2, center=True).mean() assert isinstance(m2.data, sparse.SparseArray) assert np.allclose(m1.data, m2.data.todense()) @pytest.mark.xfail(reason="No implementation of np.einsum") def test_dot(self): a1 = self.xp_xr.dot(self.xp_xr[0]) a2 = self.sp_ar.dot(self.sp_ar[0]) assert_equal(a1, a2) @pytest.mark.xfail(reason="Groupby reductions produce dense output") def test_groupby(self): x1 = self.ds_xr x2 = self.sp_xr m1 = x1.groupby("x").mean(...) m2 = x2.groupby("x").mean(...) 
assert isinstance(m2.data, sparse.SparseArray) assert np.allclose(m1.data, m2.data.todense()) @pytest.mark.xfail(reason="Groupby reductions produce dense output") def test_groupby_first(self): x = self.sp_xr.copy() x.coords["ab"] = ("x", ["a", "a", "b", "b"]) x.groupby("ab").first() x.groupby("ab").first(skipna=False) @pytest.mark.xfail(reason="Groupby reductions produce dense output") def test_groupby_bins(self): x1 = self.ds_xr x2 = self.sp_xr m1 = x1.groupby_bins("x", bins=[0, 3, 7, 10]).sum(...) m2 = x2.groupby_bins("x", bins=[0, 3, 7, 10]).sum(...) assert isinstance(m2.data, sparse.SparseArray) assert np.allclose(m1.data, m2.data.todense()) @pytest.mark.xfail(reason="Resample produces dense output") def test_resample(self): t1 = xr.DataArray( np.linspace(0, 11, num=12), coords=[ pd.date_range("1999-12-15", periods=12, freq=pd.DateOffset(months=1)) ], dims="time", ) t2 = t1.copy() t2.data = sparse.COO(t2.data) m1 = t1.resample(time="QS-DEC").mean() m2 = t2.resample(time="QS-DEC").mean() assert isinstance(m2.data, sparse.SparseArray) assert np.allclose(m1.data, m2.data.todense()) @pytest.mark.xfail def test_reindex(self): x1 = self.ds_xr x2 = self.sp_xr for kwargs in [ {"x": [2, 3, 4]}, {"x": [1, 100, 2, 101, 3]}, {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]}, ]: m1 = x1.reindex(**kwargs) m2 = x2.reindex(**kwargs) assert np.allclose(m1, m2, equal_nan=True) @pytest.mark.xfail def test_merge(self): x = self.sp_xr y = xr.merge([x, x.rename("bar")]).to_dataarray() assert isinstance(y, sparse.SparseArray) @pytest.mark.xfail def test_where(self): a = np.arange(10) cond = a > 3 xr.DataArray(a).where(cond) s = sparse.COO.from_numpy(a) cond = s > 3 xr.DataArray(s).where(cond) x = xr.DataArray(s) cond = x > 3 x.where(cond) class TestSparseCoords: @pytest.mark.xfail(reason="Coercion of coords to dense") def test_sparse_coords(self): xr.DataArray( sparse.COO.from_numpy(np.arange(4)), dims=["x"], coords={"x": sparse.COO.from_numpy([1, 2, 3, 4])}, ) @requires_dask def test_chunk(): s = sparse.COO.from_numpy(np.array([0, 0, 1, 2])) a = DataArray(s) ac = a.chunk(2) assert ac.chunks == ((2, 2),) assert isinstance(ac.data._meta, sparse.COO) assert_identical(ac, a) ds = a.to_dataset(name="a") dsc = ds.chunk(2) assert dsc.chunks == {"dim_0": (2, 2)} assert_identical(dsc, ds) @requires_dask def test_dask_token(): import dask s = sparse.COO.from_numpy(np.array([0, 0, 1, 2])) a = DataArray(s) t1 = dask.base.tokenize(a) t2 = dask.base.tokenize(a) t3 = dask.base.tokenize(a + 1) assert t1 == t2 assert t3 != t2 assert isinstance(a.data, sparse.COO) ac = a.chunk(2) t4 = dask.base.tokenize(ac) t5 = dask.base.tokenize(ac + 1) assert t4 != t5 assert isinstance(ac.data._meta, sparse.COO) @requires_dask def test_apply_ufunc_check_meta_coherence(): s = sparse.COO.from_numpy(np.array([0, 0, 1, 2])) a = DataArray(s) ac = a.chunk(2) sparse_meta = ac.data._meta result = xr.apply_ufunc(lambda x: x, ac, dask="parallelized").data._meta assert_sparse_equal(result, sparse_meta) xarray-2025.09.0/xarray/tests/test_strategies.py000066400000000000000000000237751505620616400216150ustar00rootroot00000000000000import warnings import numpy as np import numpy.testing as npt import pytest from packaging.version import Version pytest.importorskip("hypothesis") # isort: split import hypothesis.extra.numpy as npst import hypothesis.strategies as st from hypothesis import given from hypothesis.extra.array_api import make_strategies_namespace from xarray.core.options import set_options from xarray.core.variable import Variable from 
xarray.testing.strategies import ( attrs, dimension_names, dimension_sizes, supported_dtypes, unique_subset_of, variables, ) ALLOWED_ATTRS_VALUES_TYPES = (int, bool, str, np.ndarray) class TestDimensionNamesStrategy: @given(dimension_names()) def test_types(self, dims): assert isinstance(dims, list) for d in dims: assert isinstance(d, str) @given(dimension_names()) def test_unique(self, dims): assert len(set(dims)) == len(dims) @given(st.data(), st.tuples(st.integers(0, 10), st.integers(0, 10)).map(sorted)) def test_number_of_dims(self, data, ndims): min_dims, max_dims = ndims dim_names = data.draw(dimension_names(min_dims=min_dims, max_dims=max_dims)) assert isinstance(dim_names, list) assert min_dims <= len(dim_names) <= max_dims class TestDimensionSizesStrategy: @given(dimension_sizes()) def test_types(self, dims): assert isinstance(dims, dict) for d, n in dims.items(): assert isinstance(d, str) assert len(d) >= 1 assert isinstance(n, int) assert n >= 0 @given(st.data(), st.tuples(st.integers(0, 10), st.integers(0, 10)).map(sorted)) def test_number_of_dims(self, data, ndims): min_dims, max_dims = ndims dim_sizes = data.draw(dimension_sizes(min_dims=min_dims, max_dims=max_dims)) assert isinstance(dim_sizes, dict) assert min_dims <= len(dim_sizes) <= max_dims @given(st.data()) def test_restrict_names(self, data): capitalized_names = st.text(st.characters(), min_size=1).map(str.upper) dim_sizes = data.draw(dimension_sizes(dim_names=capitalized_names)) for dim in dim_sizes.keys(): assert dim.upper() == dim def check_dict_values(dictionary: dict, allowed_attrs_values_types) -> bool: """Helper function to assert that all values in recursive dict match one of a set of types.""" for value in dictionary.values(): if isinstance(value, allowed_attrs_values_types) or value is None: continue elif isinstance(value, dict): # If the value is a dictionary, recursively check it if not check_dict_values(value, allowed_attrs_values_types): return False else: # If the value is not an integer or a dictionary, it's not valid return False return True class TestAttrsStrategy: @given(attrs()) def test_type(self, attrs): assert isinstance(attrs, dict) check_dict_values(attrs, ALLOWED_ATTRS_VALUES_TYPES) class TestVariablesStrategy: @given(variables()) def test_given_nothing(self, var): assert isinstance(var, Variable) @given(st.data()) def test_given_incorrect_types(self, data): with pytest.raises(TypeError, match="dims must be provided as a"): data.draw(variables(dims=["x", "y"])) # type: ignore[arg-type] with pytest.raises(TypeError, match="dtype must be provided as a"): data.draw(variables(dtype=np.dtype("int32"))) # type: ignore[arg-type] with pytest.raises(TypeError, match="attrs must be provided as a"): data.draw(variables(attrs=dict())) # type: ignore[arg-type] with pytest.raises(TypeError, match="Callable"): data.draw(variables(array_strategy_fn=np.array([0]))) # type: ignore[arg-type] @given(st.data(), dimension_names()) def test_given_fixed_dim_names(self, data, fixed_dim_names): var = data.draw(variables(dims=st.just(fixed_dim_names))) assert list(var.dims) == fixed_dim_names @given(st.data(), dimension_sizes()) def test_given_fixed_dim_sizes(self, data, dim_sizes): var = data.draw(variables(dims=st.just(dim_sizes))) assert var.dims == tuple(dim_sizes.keys()) assert var.shape == tuple(dim_sizes.values()) @given(st.data(), supported_dtypes()) def test_given_fixed_dtype(self, data, dtype): var = data.draw(variables(dtype=st.just(dtype))) assert var.dtype == dtype @given(st.data(), 
npst.arrays(shape=npst.array_shapes(), dtype=supported_dtypes())) def test_given_fixed_data_dims_and_dtype(self, data, arr): def fixed_array_strategy_fn(*, shape=None, dtype=None): """The fact this ignores shape and dtype is only okay because compatible shape & dtype will be passed separately.""" return st.just(arr) dim_names = data.draw(dimension_names(min_dims=arr.ndim, max_dims=arr.ndim)) dim_sizes = dict(zip(dim_names, arr.shape, strict=True)) var = data.draw( variables( array_strategy_fn=fixed_array_strategy_fn, dims=st.just(dim_sizes), dtype=st.just(arr.dtype), ) ) npt.assert_equal(var.data, arr) assert var.dtype == arr.dtype @given(st.data(), st.integers(0, 3)) def test_given_array_strat_arbitrary_size_and_arbitrary_data(self, data, ndims): dim_names = data.draw(dimension_names(min_dims=ndims, max_dims=ndims)) def array_strategy_fn(*, shape=None, dtype=None): return npst.arrays(shape=shape, dtype=dtype) var = data.draw( variables( array_strategy_fn=array_strategy_fn, dims=st.just(dim_names), dtype=supported_dtypes(), ) ) assert var.ndim == ndims @given(st.data()) def test_catch_unruly_dtype_from_custom_array_strategy_fn(self, data): def dodgy_array_strategy_fn(*, shape=None, dtype=None): """Dodgy function which ignores the dtype it was passed""" return npst.arrays(shape=shape, dtype=npst.floating_dtypes()) with pytest.raises( ValueError, match="returned an array object with a different dtype" ): data.draw( variables( array_strategy_fn=dodgy_array_strategy_fn, dtype=st.just(np.dtype("int32")), ) ) @given(st.data()) def test_catch_unruly_shape_from_custom_array_strategy_fn(self, data): def dodgy_array_strategy_fn(*, shape=None, dtype=None): """Dodgy function which ignores the shape it was passed""" return npst.arrays(shape=(3, 2), dtype=dtype) with pytest.raises( ValueError, match="returned an array object with a different shape" ): data.draw( variables( array_strategy_fn=dodgy_array_strategy_fn, dims=st.just({"a": 2, "b": 1}), dtype=supported_dtypes(), ) ) @given(st.data()) def test_make_strategies_namespace(self, data): """ Test not causing a hypothesis.InvalidArgument by generating a dtype that's not in the array API. We still want to generate dtypes not in the array API by default, but this checks we don't accidentally override the user's choice of dtypes with non-API-compliant ones. """ if Version(np.__version__) >= Version("2.0.0.dev0"): nxp = np else: # requires numpy>=1.26.0, and we expect a UserWarning to be raised with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=UserWarning, message=".+See NEP 47." 
) from numpy import ( # type: ignore[attr-defined,no-redef,unused-ignore] array_api as nxp, ) nxp_st = make_strategies_namespace(nxp) data.draw( variables( array_strategy_fn=nxp_st.arrays, dtype=nxp_st.scalar_dtypes(), ) ) class TestUniqueSubsetOf: @given(st.data()) def test_invalid(self, data): with pytest.raises(TypeError, match="must be an Iterable or a Mapping"): data.draw(unique_subset_of(0)) # type: ignore[call-overload] with pytest.raises(ValueError, match="length-zero object"): data.draw(unique_subset_of({})) @given(st.data(), dimension_sizes(min_dims=1)) def test_mapping(self, data, dim_sizes): subset_of_dim_sizes = data.draw(unique_subset_of(dim_sizes)) for dim, length in subset_of_dim_sizes.items(): assert dim in dim_sizes assert dim_sizes[dim] == length @given(st.data(), dimension_names(min_dims=1)) def test_iterable(self, data, dim_names): subset_of_dim_names = data.draw(unique_subset_of(dim_names)) for dim in subset_of_dim_names: assert dim in dim_names class TestReduction: """ These tests are for checking that the examples given in the docs page on testing actually work. """ @given(st.data(), variables(dims=dimension_names(min_dims=1))) def test_mean(self, data, var): """ Test that given a Variable of at least one dimension, the mean of the Variable is always equal to the mean of the underlying array. """ with set_options(use_numbagg=False): # specify arbitrary reduction along at least one dimension reduction_dims = data.draw(unique_subset_of(var.dims, min_size=1)) # create expected result (using nanmean because arrays with Nans will be generated) reduction_axes = tuple(var.get_axis_num(dim) for dim in reduction_dims) expected = np.nanmean(var.data, axis=reduction_axes) # assert property is always satisfied result = var.mean(dim=reduction_dims).data npt.assert_equal(expected, result) xarray-2025.09.0/xarray/tests/test_treenode.py000066400000000000000000000360311505620616400212350ustar00rootroot00000000000000from __future__ import annotations import re import pytest from xarray.core.treenode import ( InvalidTreeError, NamedNode, NodePath, TreeNode, group_subtrees, zip_subtrees, ) class TestFamilyTree: def test_lonely(self) -> None: root: TreeNode = TreeNode() assert root.parent is None assert root.children == {} def test_parenting(self) -> None: john: TreeNode = TreeNode() mary: TreeNode = TreeNode() mary._set_parent(john, "Mary") assert mary.parent == john assert john.children["Mary"] is mary def test_no_time_traveller_loops(self) -> None: john: TreeNode = TreeNode() with pytest.raises(InvalidTreeError, match="cannot be a parent of itself"): john._set_parent(john, "John") with pytest.raises(InvalidTreeError, match="cannot be a parent of itself"): john.children = {"John": john} mary: TreeNode = TreeNode() rose: TreeNode = TreeNode() mary._set_parent(john, "Mary") rose._set_parent(mary, "Rose") with pytest.raises(InvalidTreeError, match="is already a descendant"): john._set_parent(rose, "John") with pytest.raises(InvalidTreeError, match="is already a descendant"): rose.children = {"John": john} def test_parent_swap(self) -> None: john: TreeNode = TreeNode() mary: TreeNode = TreeNode() mary._set_parent(john, "Mary") steve: TreeNode = TreeNode() mary._set_parent(steve, "Mary") assert mary.parent == steve assert steve.children["Mary"] is mary assert "Mary" not in john.children def test_forbid_setting_parent_directly(self) -> None: john: TreeNode = TreeNode() mary: TreeNode = TreeNode() with pytest.raises( AttributeError, match="Cannot set parent attribute directly" ): mary.parent 
= john def test_dont_modify_children_inplace(self) -> None: # GH issue 9196 child: TreeNode = TreeNode() TreeNode(children={"child": child}) assert child.parent is None def test_multi_child_family(self) -> None: john: TreeNode = TreeNode(children={"Mary": TreeNode(), "Kate": TreeNode()}) assert "Mary" in john.children mary = john.children["Mary"] assert isinstance(mary, TreeNode) assert mary.parent is john assert "Kate" in john.children kate = john.children["Kate"] assert isinstance(kate, TreeNode) assert kate.parent is john def test_disown_child(self) -> None: john: TreeNode = TreeNode(children={"Mary": TreeNode()}) mary = john.children["Mary"] mary.orphan() assert mary.parent is None assert "Mary" not in john.children def test_doppelganger_child(self) -> None: kate: TreeNode = TreeNode() john: TreeNode = TreeNode() with pytest.raises(TypeError): john.children = {"Kate": 666} # type: ignore[dict-item] with pytest.raises(InvalidTreeError, match="Cannot add same node"): john.children = {"Kate": kate, "Evil_Kate": kate} john = TreeNode(children={"Kate": kate}) evil_kate: TreeNode = TreeNode() evil_kate._set_parent(john, "Kate") assert john.children["Kate"] is evil_kate def test_sibling_relationships(self) -> None: john: TreeNode = TreeNode( children={"Mary": TreeNode(), "Kate": TreeNode(), "Ashley": TreeNode()} ) kate = john.children["Kate"] assert list(kate.siblings) == ["Mary", "Ashley"] assert "Kate" not in kate.siblings def test_copy_subtree(self) -> None: tony: TreeNode = TreeNode() michael: TreeNode = TreeNode(children={"Tony": tony}) vito = TreeNode(children={"Michael": michael}) # check that children of assigned children are also copied (i.e. that ._copy_subtree works) copied_tony = vito.children["Michael"].children["Tony"] assert copied_tony is not tony def test_parents(self) -> None: vito: TreeNode = TreeNode( children={"Michael": TreeNode(children={"Tony": TreeNode()})}, ) michael = vito.children["Michael"] tony = michael.children["Tony"] assert tony.root is vito assert tony.parents == (michael, vito) class TestGetNodes: def test_get_child(self) -> None: john: TreeNode = TreeNode( children={ "Mary": TreeNode( children={"Sue": TreeNode(children={"Steven": TreeNode()})} ) } ) mary = john.children["Mary"] sue = mary.children["Sue"] steven = sue.children["Steven"] # get child assert john._get_item("Mary") is mary assert mary._get_item("Sue") is sue # no child exists with pytest.raises(KeyError): john._get_item("Kate") # get grandchild assert john._get_item("Mary/Sue") is sue # get great-grandchild assert john._get_item("Mary/Sue/Steven") is steven # get from middle of tree assert mary._get_item("Sue/Steven") is steven def test_get_upwards(self) -> None: john: TreeNode = TreeNode( children={ "Mary": TreeNode(children={"Sue": TreeNode(), "Kate": TreeNode()}) } ) mary = john.children["Mary"] sue = mary.children["Sue"] kate = mary.children["Kate"] assert sue._get_item("../") is mary assert sue._get_item("../../") is john # relative path assert sue._get_item("../Kate") is kate def test_get_from_root(self) -> None: john: TreeNode = TreeNode( children={"Mary": TreeNode(children={"Sue": TreeNode()})} ) mary = john.children["Mary"] sue = mary.children["Sue"] assert sue._get_item("/Mary") is mary class TestSetNodes: def test_set_child_node(self) -> None: john: TreeNode = TreeNode() mary: TreeNode = TreeNode() john._set_item("Mary", mary) assert john.children["Mary"] is mary assert isinstance(mary, TreeNode) assert mary.children == {} assert mary.parent is john def 
test_child_already_exists(self) -> None: mary: TreeNode = TreeNode() john: TreeNode = TreeNode(children={"Mary": mary}) mary_2: TreeNode = TreeNode() with pytest.raises(KeyError): john._set_item("Mary", mary_2, allow_overwrite=False) def test_set_grandchild(self) -> None: rose: TreeNode = TreeNode() mary: TreeNode = TreeNode() john: TreeNode = TreeNode() john._set_item("Mary", mary) john._set_item("Mary/Rose", rose) assert john.children["Mary"] is mary assert isinstance(mary, TreeNode) assert "Rose" in mary.children assert rose.parent is mary def test_create_intermediate_child(self) -> None: john: TreeNode = TreeNode() rose: TreeNode = TreeNode() # test intermediate children not allowed with pytest.raises(KeyError, match="Could not reach"): john._set_item(path="Mary/Rose", item=rose, new_nodes_along_path=False) # test intermediate children allowed john._set_item("Mary/Rose", rose, new_nodes_along_path=True) assert "Mary" in john.children mary = john.children["Mary"] assert isinstance(mary, TreeNode) assert mary.children == {"Rose": rose} assert rose.parent == mary assert rose.parent == mary def test_overwrite_child(self) -> None: john: TreeNode = TreeNode() mary: TreeNode = TreeNode() john._set_item("Mary", mary) # test overwriting not allowed marys_evil_twin: TreeNode = TreeNode() with pytest.raises(KeyError, match="Already a node object"): john._set_item("Mary", marys_evil_twin, allow_overwrite=False) assert john.children["Mary"] is mary assert marys_evil_twin.parent is None # test overwriting allowed marys_evil_twin = TreeNode() john._set_item("Mary", marys_evil_twin, allow_overwrite=True) assert john.children["Mary"] is marys_evil_twin assert marys_evil_twin.parent is john class TestPruning: def test_del_child(self) -> None: john: TreeNode = TreeNode() mary: TreeNode = TreeNode() john._set_item("Mary", mary) del john["Mary"] assert "Mary" not in john.children assert mary.parent is None with pytest.raises(KeyError): del john["Mary"] def create_test_tree() -> tuple[NamedNode, NamedNode]: # a # β”œβ”€β”€ b # β”‚ β”œβ”€β”€ d # β”‚ └── e # β”‚ β”œβ”€β”€ f # β”‚ └── g # └── c # └── h # └── i a: NamedNode = NamedNode(name="a") b: NamedNode = NamedNode() c: NamedNode = NamedNode() d: NamedNode = NamedNode() e: NamedNode = NamedNode() f: NamedNode = NamedNode() g: NamedNode = NamedNode() h: NamedNode = NamedNode() i: NamedNode = NamedNode() a.children = {"b": b, "c": c} b.children = {"d": d, "e": e} e.children = {"f": f, "g": g} c.children = {"h": h} h.children = {"i": i} return a, f class TestGroupSubtrees: def test_one_tree(self) -> None: root, _ = create_test_tree() expected_names = [ "a", "b", "c", "d", "e", "h", "f", "g", "i", ] expected_paths = [ ".", "b", "c", "b/d", "b/e", "c/h", "b/e/f", "b/e/g", "c/h/i", ] result_paths, result_names = zip( *[(path, node.name) for path, (node,) in group_subtrees(root)], strict=False ) assert list(result_names) == expected_names assert list(result_paths) == expected_paths result_names_ = [node.name for (node,) in zip_subtrees(root)] assert result_names_ == expected_names def test_different_order(self) -> None: first: NamedNode = NamedNode( name="a", children={"b": NamedNode(), "c": NamedNode()} ) second: NamedNode = NamedNode( name="a", children={"c": NamedNode(), "b": NamedNode()} ) assert [node.name for node in first.subtree] == ["a", "b", "c"] assert [node.name for node in second.subtree] == ["a", "c", "b"] assert [(x.name, y.name) for x, y in zip_subtrees(first, second)] == [ ("a", "a"), ("b", "b"), ("c", "c"), ] assert [path for path, _ in 
group_subtrees(first, second)] == [".", "b", "c"] def test_different_structure(self) -> None: first: NamedNode = NamedNode(name="a", children={"b": NamedNode()}) second: NamedNode = NamedNode(name="a", children={"c": NamedNode()}) it = group_subtrees(first, second) path, (node1, node2) = next(it) assert path == "." assert node1.name == node2.name == "a" with pytest.raises( ValueError, match=re.escape(r"children at root node do not match: ['b'] vs ['c']"), ): next(it) class TestAncestry: def test_parents(self) -> None: _, leaf_f = create_test_tree() expected = ["e", "b", "a"] assert [node.name for node in leaf_f.parents] == expected def test_lineage(self) -> None: _, leaf_f = create_test_tree() expected = ["f", "e", "b", "a"] with pytest.warns(DeprecationWarning): assert [node.name for node in leaf_f.lineage] == expected def test_ancestors(self) -> None: _, leaf_f = create_test_tree() with pytest.warns(DeprecationWarning): ancestors = leaf_f.ancestors expected = ["a", "b", "e", "f"] for node, expected_name in zip(ancestors, expected, strict=True): assert node.name == expected_name def test_subtree(self) -> None: root, _ = create_test_tree() expected = [ "a", "b", "c", "d", "e", "h", "f", "g", "i", ] actual = [node.name for node in root.subtree] assert expected == actual def test_subtree_with_keys(self) -> None: root, _ = create_test_tree() expected_names = [ "a", "b", "c", "d", "e", "h", "f", "g", "i", ] expected_paths = [ ".", "b", "c", "b/d", "b/e", "c/h", "b/e/f", "b/e/g", "c/h/i", ] result_paths, result_names = zip( *[(path, node.name) for path, node in root.subtree_with_keys], strict=False ) assert list(result_names) == expected_names assert list(result_paths) == expected_paths def test_descendants(self) -> None: root, _ = create_test_tree() descendants = root.descendants expected = [ "b", "c", "d", "e", "h", "f", "g", "i", ] for node, expected_name in zip(descendants, expected, strict=True): assert node.name == expected_name def test_leaves(self) -> None: tree, _ = create_test_tree() leaves = tree.leaves expected = [ "d", "f", "g", "i", ] for node, expected_name in zip(leaves, expected, strict=True): assert node.name == expected_name def test_levels(self) -> None: a, f = create_test_tree() assert a.level == 0 assert f.level == 3 assert a.depth == 3 assert f.depth == 3 assert a.width == 1 assert f.width == 3 class TestRenderTree: def test_render_nodetree(self) -> None: john: NamedNode = NamedNode( children={ "Mary": NamedNode(children={"Sam": NamedNode(), "Ben": NamedNode()}), "Kate": NamedNode(), } ) mary = john.children["Mary"] expected_nodes = [ "NamedNode()", "\tNamedNode('Mary')", "\t\tNamedNode('Sam')", "\t\tNamedNode('Ben')", "\tNamedNode('Kate')", ] expected_str = "NamedNode('Mary')" john_repr = john.__repr__() mary_str = mary.__str__() assert mary_str == expected_str john_nodes = john_repr.splitlines() assert len(john_nodes) == len(expected_nodes) for expected_node, repr_node in zip(expected_nodes, john_nodes, strict=True): assert expected_node == repr_node def test_nodepath(): path = NodePath("/Mary") assert path.root == "/" assert path.stem == "Mary" xarray-2025.09.0/xarray/tests/test_tutorial.py000066400000000000000000000027551505620616400213010ustar00rootroot00000000000000from __future__ import annotations from xarray import DataArray, DataTree, tutorial from xarray.testing import assert_identical from xarray.tests import network @network class TestLoadDataset: def test_download_from_github(self, tmp_path) -> None: cache_dir = tmp_path / tutorial._default_cache_dir_name ds 
= tutorial.load_dataset("tiny", cache_dir=cache_dir) tiny = DataArray(range(5), name="tiny").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path) -> None: cache_dir = tmp_path / tutorial._default_cache_dir_name ds_nocache = tutorial.load_dataset("tiny", cache=False, cache_dir=cache_dir) ds_cache = tutorial.load_dataset("tiny", cache_dir=cache_dir) assert_identical(ds_cache, ds_nocache) @network class TestLoadDataTree: def test_download_from_github(self, tmp_path) -> None: cache_dir = tmp_path / tutorial._default_cache_dir_name ds = tutorial.load_datatree("tiny", cache_dir=cache_dir) tiny = DataTree.from_dict({"/": DataArray(range(5), name="tiny").to_dataset()}) assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path) -> None: cache_dir = tmp_path / tutorial._default_cache_dir_name ds_nocache = tutorial.load_datatree("tiny", cache=False, cache_dir=cache_dir) ds_cache = tutorial.load_datatree("tiny", cache_dir=cache_dir) assert_identical(ds_cache, ds_nocache) xarray-2025.09.0/xarray/tests/test_typed_ops.py000066400000000000000000000142761505620616400214450ustar00rootroot00000000000000import numpy as np from xarray import DataArray, Dataset, Variable def test_variable_typed_ops() -> None: """Tests for type checking of typed_ops on Variable""" var = Variable(dims=["t"], data=[1, 2, 3]) def _test(var: Variable) -> None: # mypy checks the input type assert isinstance(var, Variable) _int: int = 1 _list = [1, 2, 3] _ndarray = np.array([1, 2, 3]) # __add__ as an example of binary ops _test(var + _int) _test(var + _list) _test(var + _ndarray) _test(var + var) # __radd__ as an example of reflexive binary ops _test(_int + var) _test(_list + var) _test(_ndarray + var) # type: ignore[arg-type] # numpy problem # __eq__ as an example of cmp ops _test(var == _int) _test(var == _list) _test(var == _ndarray) _test(_int == var) # type: ignore[arg-type] # typeshed problem _test(_list == var) # type: ignore[arg-type] # typeshed problem _test(_ndarray == var) # __lt__ as another example of cmp ops _test(var < _int) _test(var < _list) _test(var < _ndarray) _test(_int > var) _test(_list > var) _test(_ndarray > var) # type: ignore[arg-type] # numpy problem # __iadd__ as an example of inplace binary ops var += _int var += _list var += _ndarray # __neg__ as an example of unary ops _test(-var) def test_dataarray_typed_ops() -> None: """Tests for type checking of typed_ops on DataArray""" da = DataArray([1, 2, 3], dims=["t"]) def _test(da: DataArray) -> None: # mypy checks the input type assert isinstance(da, DataArray) _int: int = 1 _list = [1, 2, 3] _ndarray = np.array([1, 2, 3]) _var = Variable(dims=["t"], data=[1, 2, 3]) # __add__ as an example of binary ops _test(da + _int) _test(da + _list) _test(da + _ndarray) _test(da + _var) _test(da + da) # __radd__ as an example of reflexive binary ops _test(_int + da) _test(_list + da) _test(_ndarray + da) # type: ignore[arg-type] # numpy problem _test(_var + da) # __eq__ as an example of cmp ops _test(da == _int) _test(da == _list) _test(da == _ndarray) _test(da == _var) _test(_int == da) # type: ignore[arg-type] # typeshed problem _test(_list == da) # type: ignore[arg-type] # typeshed problem _test(_ndarray == da) _test(_var == da) # __lt__ as another example of cmp ops _test(da < _int) _test(da < _list) _test(da < _ndarray) _test(da < _var) _test(_int > da) _test(_list > da) _test(_ndarray > da) # type: ignore[arg-type] # numpy problem _test(_var > da) # __iadd__ as an example of 
inplace binary ops da += _int da += _list da += _ndarray da += _var # __neg__ as an example of unary ops _test(-da) def test_dataset_typed_ops() -> None: """Tests for type checking of typed_ops on Dataset""" ds = Dataset({"a": ("t", [1, 2, 3])}) def _test(ds: Dataset) -> None: # mypy checks the input type assert isinstance(ds, Dataset) _int: int = 1 _list = [1, 2, 3] _ndarray = np.array([1, 2, 3]) _var = Variable(dims=["t"], data=[1, 2, 3]) _da = DataArray([1, 2, 3], dims=["t"]) # __add__ as an example of binary ops _test(ds + _int) _test(ds + _list) _test(ds + _ndarray) _test(ds + _var) _test(ds + _da) _test(ds + ds) # __radd__ as an example of reflexive binary ops _test(_int + ds) _test(_list + ds) _test(_ndarray + ds) _test(_var + ds) _test(_da + ds) # __eq__ as an example of cmp ops _test(ds == _int) _test(ds == _list) _test(ds == _ndarray) _test(ds == _var) _test(ds == _da) _test(_int == ds) # type: ignore[arg-type] # typeshed problem _test(_list == ds) # type: ignore[arg-type] # typeshed problem _test(_ndarray == ds) _test(_var == ds) _test(_da == ds) # __lt__ as another example of cmp ops _test(ds < _int) _test(ds < _list) _test(ds < _ndarray) _test(ds < _var) _test(ds < _da) _test(_int > ds) _test(_list > ds) _test(_ndarray > ds) # type: ignore[arg-type] # numpy problem _test(_var > ds) _test(_da > ds) # __iadd__ as an example of inplace binary ops ds += _int ds += _list ds += _ndarray ds += _var ds += _da # __neg__ as an example of unary ops _test(-ds) def test_dataarray_groupy_typed_ops() -> None: """Tests for type checking of typed_ops on DataArrayGroupBy""" da = DataArray([1, 2, 3], coords={"x": ("t", [1, 2, 2])}, dims=["t"]) grp = da.groupby("x") def _testda(da: DataArray) -> None: # mypy checks the input type assert isinstance(da, DataArray) def _testds(ds: Dataset) -> None: # mypy checks the input type assert isinstance(ds, Dataset) _da = DataArray([5, 6], coords={"x": [1, 2]}, dims="x") _ds = _da.to_dataset(name="a") # __add__ as an example of binary ops _testda(grp + _da) _testds(grp + _ds) # __radd__ as an example of reflexive binary ops _testda(_da + grp) _testds(_ds + grp) # __eq__ as an example of cmp ops _testda(grp == _da) _testda(_da == grp) _testds(grp == _ds) _testds(_ds == grp) # __lt__ as another example of cmp ops _testda(grp < _da) _testda(_da > grp) _testds(grp < _ds) _testds(_ds > grp) def test_dataset_groupy_typed_ops() -> None: """Tests for type checking of typed_ops on DatasetGroupBy""" ds = Dataset({"a": ("t", [1, 2, 3])}, coords={"x": ("t", [1, 2, 2])}) grp = ds.groupby("x") def _test(ds: Dataset) -> None: # mypy checks the input type assert isinstance(ds, Dataset) _da = DataArray([5, 6], coords={"x": [1, 2]}, dims="x") _ds = _da.to_dataset(name="a") # __add__ as an example of binary ops _test(grp + _da) _test(grp + _ds) # __radd__ as an example of reflexive binary ops _test(_da + grp) _test(_ds + grp) # __eq__ as an example of cmp ops _test(grp == _da) _test(_da == grp) _test(grp == _ds) _test(_ds == grp) # __lt__ as another example of cmp ops _test(grp < _da) _test(_da > grp) _test(grp < _ds) _test(_ds > grp) xarray-2025.09.0/xarray/tests/test_ufuncs.py000066400000000000000000000211431505620616400207310ustar00rootroot00000000000000from __future__ import annotations import pickle from unittest.mock import patch import numpy as np import pytest import xarray as xr import xarray.ufuncs as xu from xarray.tests import assert_allclose, assert_array_equal, mock, requires_dask from xarray.tests import assert_identical as assert_identical_ def 
assert_identical(a, b): assert type(a) is type(b) or float(a) == float(b) if isinstance(a, xr.DataArray | xr.Dataset | xr.Variable): assert_identical_(a, b) else: assert_array_equal(a, b) @pytest.mark.parametrize( "a", [ xr.Variable(["x"], [0, 0]), xr.DataArray([0, 0], dims="x"), xr.Dataset({"y": ("x", [0, 0])}), ], ) def test_unary(a): assert_allclose(a + 1, np.cos(a)) def test_binary(): args = [ 0, np.zeros(2), xr.Variable(["x"], [0, 0]), xr.DataArray([0, 0], dims="x"), xr.Dataset({"y": ("x", [0, 0])}), ] for n, t1 in enumerate(args): for t2 in args[n:]: assert_identical(t2 + 1, np.maximum(t1, t2 + 1)) assert_identical(t2 + 1, np.maximum(t2, t1 + 1)) assert_identical(t2 + 1, np.maximum(t1 + 1, t2)) assert_identical(t2 + 1, np.maximum(t2 + 1, t1)) def test_binary_out(): args = [ 1, np.ones(2), xr.Variable(["x"], [1, 1]), xr.DataArray([1, 1], dims="x"), xr.Dataset({"y": ("x", [1, 1])}), ] for arg in args: actual_mantissa, actual_exponent = np.frexp(arg) assert_identical(actual_mantissa, 0.5 * arg) assert_identical(actual_exponent, arg) def test_binary_coord_attrs(): t = xr.Variable("t", np.arange(2, 4), attrs={"units": "s"}) x = xr.DataArray(t.values**2, coords={"t": t}, attrs={"units": "s^2"}) y = xr.DataArray(t.values**3, coords={"t": t}, attrs={"units": "s^3"}) z1 = xr.apply_ufunc(np.add, x, y, keep_attrs=True) assert z1.coords["t"].attrs == {"units": "s"} z2 = xr.apply_ufunc(np.add, x, y, keep_attrs=False) assert z2.coords["t"].attrs == {} # Check also that input array's coordinate attributes weren't affected assert t.attrs == {"units": "s"} assert x.coords["t"].attrs == {"units": "s"} def test_groupby(): ds = xr.Dataset({"a": ("x", [0, 0, 0])}, {"c": ("x", [0, 0, 1])}) ds_grouped = ds.groupby("c") group_mean = ds_grouped.mean("x") arr_grouped = ds["a"].groupby("c") assert_identical(ds, np.maximum(ds_grouped, group_mean)) assert_identical(ds, np.maximum(group_mean, ds_grouped)) assert_identical(ds, np.maximum(arr_grouped, group_mean)) assert_identical(ds, np.maximum(group_mean, arr_grouped)) assert_identical(ds, np.maximum(ds_grouped, group_mean["a"])) assert_identical(ds, np.maximum(group_mean["a"], ds_grouped)) assert_identical(ds.a, np.maximum(arr_grouped, group_mean.a)) assert_identical(ds.a, np.maximum(group_mean.a, arr_grouped)) with pytest.raises(ValueError, match=r"mismatched lengths for dimension"): np.maximum(ds.a.variable, ds_grouped) def test_alignment(): ds1 = xr.Dataset({"a": ("x", [1, 2])}, {"x": [0, 1]}) ds2 = xr.Dataset({"a": ("x", [2, 3]), "b": 4}, {"x": [1, 2]}) actual = np.add(ds1, ds2) expected = xr.Dataset({"a": ("x", [4])}, {"x": [1]}) assert_identical_(actual, expected) with xr.set_options(arithmetic_join="outer"): actual = np.add(ds1, ds2) expected = xr.Dataset( {"a": ("x", [np.nan, 4, np.nan]), "b": np.nan}, coords={"x": [0, 1, 2]} ) assert_identical_(actual, expected) def test_kwargs(): x = xr.DataArray(0) result = np.add(x, 1, dtype=np.float64) assert result.dtype == np.float64 def test_xarray_defers_to_unrecognized_type(): class Other: def __array_ufunc__(self, *args, **kwargs): return "other" xarray_obj = xr.DataArray([1, 2, 3]) other = Other() assert np.maximum(xarray_obj, other) == "other" assert np.sin(xarray_obj, out=other) == "other" def test_xarray_handles_dask(): da = pytest.importorskip("dask.array") x = xr.DataArray(np.ones((2, 2)), dims=["x", "y"]) y = da.ones((2, 2), chunks=(2, 2)) result = np.add(x, y) assert result.chunks == ((2,), (2,)) assert isinstance(result, xr.DataArray) def test_dask_defers_to_xarray(): da = 
pytest.importorskip("dask.array") x = xr.DataArray(np.ones((2, 2)), dims=["x", "y"]) y = da.ones((2, 2), chunks=(2, 2)) result = np.add(y, x) assert result.chunks == ((2,), (2,)) assert isinstance(result, xr.DataArray) def test_gufunc_methods(): xarray_obj = xr.DataArray([1, 2, 3]) with pytest.raises(NotImplementedError, match=r"reduce method"): np.add.reduce(xarray_obj, 1) def test_out(): xarray_obj = xr.DataArray([1, 2, 3]) # xarray out arguments should raise with pytest.raises(NotImplementedError, match=r"`out` argument"): np.add(xarray_obj, 1, out=xarray_obj) # but non-xarray should be OK other = np.zeros((3,)) np.add(other, xarray_obj, out=other) assert_identical(other, np.array([1, 2, 3])) def test_gufuncs(): xarray_obj = xr.DataArray([1, 2, 3]) fake_gufunc = mock.Mock(signature="(n)->()", autospec=np.sin) with pytest.raises(NotImplementedError, match=r"generalized ufuncs"): xarray_obj.__array_ufunc__(fake_gufunc, "__call__", xarray_obj) class DuckArray(np.ndarray): # Minimal subclassed duck array with its own self-contained namespace, # which implements a few ufuncs def __new__(cls, array): obj = np.asarray(array).view(cls) return obj def __array_namespace__(self): return DuckArray @staticmethod def sin(x): return np.sin(x) @staticmethod def add(x, y): return x + y class DuckArray2(DuckArray): def __array_namespace__(self): return DuckArray2 class TestXarrayUfuncs: @pytest.fixture(autouse=True) def setUp(self): self.x = xr.DataArray([1, 2, 3]) self.xd = xr.DataArray(DuckArray([1, 2, 3])) self.xd2 = xr.DataArray(DuckArray2([1, 2, 3])) self.xt = xr.DataArray(np.datetime64("2021-01-01", "ns")) @pytest.mark.filterwarnings("ignore::RuntimeWarning") @pytest.mark.parametrize("name", xu.__all__) def test_ufuncs(self, name, request): xu_func = getattr(xu, name) np_func = getattr(np, name, None) if np_func is None and np.lib.NumpyVersion(np.__version__) < "2.0.0": pytest.skip(f"Ufunc {name} is not available in numpy {np.__version__}.") if name == "isnat": args = (self.xt,) elif hasattr(np_func, "nin") and np_func.nin == 2: args = (self.x, self.x) else: args = (self.x,) expected = np_func(*args) actual = xu_func(*args) if name in ["angle", "iscomplex"]: np.testing.assert_equal(expected, actual.values) else: assert_identical(actual, expected) def test_ufunc_pickle(self): a = 1.0 cos_pickled = pickle.loads(pickle.dumps(xu.cos)) assert_identical(cos_pickled(a), xu.cos(a)) def test_ufunc_scalar(self): actual = xu.sin(1) assert isinstance(actual, float) def test_ufunc_duck_array_dataarray(self): actual = xu.sin(self.xd) assert isinstance(actual.data, DuckArray) def test_ufunc_duck_array_variable(self): actual = xu.sin(self.xd.variable) assert isinstance(actual.data, DuckArray) def test_ufunc_duck_array_dataset(self): ds = xr.Dataset({"a": self.xd}) actual = xu.sin(ds) assert isinstance(actual.a.data, DuckArray) @requires_dask def test_ufunc_duck_dask(self): import dask.array as da x = xr.DataArray(da.from_array(DuckArray(np.array([1, 2, 3])))) actual = xu.sin(x) assert isinstance(actual.data._meta, DuckArray) @requires_dask @pytest.mark.xfail(reason="dask ufuncs currently dispatch to numpy") def test_ufunc_duck_dask_no_array_ufunc(self): import dask.array as da # dask ufuncs currently only preserve duck arrays that implement __array_ufunc__ with patch.object(DuckArray, "__array_ufunc__", new=None, create=True): x = xr.DataArray(da.from_array(DuckArray(np.array([1, 2, 3])))) actual = xu.sin(x) assert isinstance(actual.data._meta, DuckArray) def test_ufunc_mixed_arrays_compatible(self): actual = 
xu.add(self.xd, self.x) assert isinstance(actual.data, DuckArray) def test_ufunc_mixed_arrays_incompatible(self): with pytest.raises(ValueError, match=r"Mixed array types"): xu.add(self.xd, self.xd2) xarray-2025.09.0/xarray/tests/test_units.py000066400000000000000000005641611505620616400206040ustar00rootroot00000000000000from __future__ import annotations import contextlib import functools import operator import numpy as np import pytest import xarray as xr from xarray.core import dtypes, duck_array_ops from xarray.tests import ( assert_allclose, assert_duckarray_allclose, assert_equal, assert_identical, requires_dask, requires_matplotlib, requires_numbagg, ) from xarray.tests.test_plot import PlotTestCase from xarray.tests.test_variable import _PAD_XR_NP_ARGS with contextlib.suppress(ImportError): import matplotlib.pyplot as plt pint = pytest.importorskip("pint") DimensionalityError = pint.errors.DimensionalityError def create_nan_array(values, dtype): """Create array with NaN values, handling cast warnings for int dtypes.""" import warnings # When casting float arrays with NaN to integer, NumPy raises a warning # This is expected behavior when dtype is int with warnings.catch_warnings(): if np.issubdtype(dtype, np.integer): warnings.filterwarnings("ignore", "invalid value encountered in cast") return np.array(values).astype(dtype) # make sure scalars are converted to 0d arrays so quantities can # always be treated like ndarrays unit_registry = pint.UnitRegistry(force_ndarray_like=True) Quantity = unit_registry.Quantity no_unit_values = ("none", None) pytestmark = [ pytest.mark.filterwarnings("error::pint.UnitStrippedWarning"), ] def is_compatible(unit1, unit2): def dimensionality(obj): if isinstance(obj, unit_registry.Quantity | unit_registry.Unit): unit_like = obj else: unit_like = unit_registry.dimensionless return unit_like.dimensionality return dimensionality(unit1) == dimensionality(unit2) def compatible_mappings(first, second): return { key: is_compatible(unit1, unit2) for key, (unit1, unit2) in zip_mappings(first, second) } def merge_mappings(base, *mappings): result = base.copy() for m in mappings: result.update(m) return result def zip_mappings(*mappings): for key in set(mappings[0]).intersection(*mappings[1:]): yield key, tuple(m[key] for m in mappings) def array_extract_units(obj): if isinstance(obj, xr.Variable | xr.DataArray | xr.Dataset): obj = obj.data try: return obj.units except AttributeError: return None def array_strip_units(array): try: return array.magnitude except AttributeError: return array def array_attach_units(data, unit): if isinstance(data, Quantity) and data.units != unit: raise ValueError(f"cannot attach unit {unit} to quantity {data}") if unit in no_unit_values or (isinstance(unit, int) and unit == 1): return data quantity = unit_registry.Quantity(data, unit) return quantity def extract_units(obj): if isinstance(obj, xr.Dataset): vars_units = { name: array_extract_units(value) for name, value in obj.data_vars.items() } coords_units = { name: array_extract_units(value) for name, value in obj.coords.items() } units = {**vars_units, **coords_units} elif isinstance(obj, xr.DataArray): vars_units = {obj.name: array_extract_units(obj)} coords_units = { name: array_extract_units(value) for name, value in obj.coords.items() } units = {**vars_units, **coords_units} elif isinstance(obj, xr.Variable): vars_units = {None: array_extract_units(obj.data)} units = {**vars_units} elif isinstance(obj, Quantity): vars_units = {None: array_extract_units(obj)} units = 
{**vars_units} else: units = {} return units def strip_units(obj): if isinstance(obj, xr.Dataset): data_vars = { strip_units(name): strip_units(value) for name, value in obj.data_vars.items() } coords = { strip_units(name): strip_units(value) for name, value in obj.coords.items() } new_obj = xr.Dataset(data_vars=data_vars, coords=coords) elif isinstance(obj, xr.DataArray): data = array_strip_units(obj.variable._data) coords = { strip_units(name): ( (value.dims, array_strip_units(value.variable._data)) if isinstance(value.data, Quantity) else value # to preserve multiindexes ) for name, value in obj.coords.items() } new_obj = xr.DataArray( name=strip_units(obj.name), data=data, coords=coords, dims=obj.dims ) elif isinstance(obj, xr.Variable): data = array_strip_units(obj.data) new_obj = obj.copy(data=data) elif isinstance(obj, unit_registry.Quantity): new_obj = obj.magnitude elif isinstance(obj, list | tuple): return type(obj)(strip_units(elem) for elem in obj) else: new_obj = obj return new_obj def attach_units(obj, units): if not isinstance(obj, xr.DataArray | xr.Dataset | xr.Variable): units = units.get("data", None) or units.get(None, None) or 1 return array_attach_units(obj, units) if isinstance(obj, xr.Dataset): data_vars = { name: attach_units(value, units) for name, value in obj.data_vars.items() } coords = { name: attach_units(value, units) for name, value in obj.coords.items() } new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=obj.attrs) elif isinstance(obj, xr.DataArray): # try the array name, "data" and None, then fall back to dimensionless data_units = units.get(obj.name, None) or units.get(None, None) or 1 data = array_attach_units(obj.data, data_units) coords = { name: ( (value.dims, array_attach_units(value.data, units.get(name) or 1)) if name in units else (value.dims, value.data) ) for name, value in obj.coords.items() } dims = obj.dims attrs = obj.attrs new_obj = xr.DataArray( name=obj.name, data=data, coords=coords, attrs=attrs, dims=dims ) else: data_units = units.get("data", None) or units.get(None, None) or 1 data = array_attach_units(obj.data, data_units) new_obj = obj.copy(data=data) return new_obj def convert_units(obj, to): # preprocess to = { key: None if not isinstance(value, unit_registry.Unit) else value for key, value in to.items() } if isinstance(obj, xr.Dataset): data_vars = { name: convert_units(array.variable, {None: to.get(name)}) for name, array in obj.data_vars.items() } coords = { name: convert_units(array.variable, {None: to.get(name)}) for name, array in obj.coords.items() } new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=obj.attrs) elif isinstance(obj, xr.DataArray): name = obj.name new_units = to.get(name) or to.get("data") or to.get(None) or None data = convert_units(obj.variable, {None: new_units}) coords = { name: (array.dims, convert_units(array.variable, {None: to.get(name)})) for name, array in obj.coords.items() if name != obj.name } new_obj = xr.DataArray( name=name, data=data, coords=coords, attrs=obj.attrs, dims=obj.dims ) elif isinstance(obj, xr.Variable): new_data = convert_units(obj.data, to) new_obj = obj.copy(data=new_data) elif isinstance(obj, unit_registry.Quantity): units = to.get(None) new_obj = obj.to(units) if units is not None else obj else: new_obj = obj return new_obj def assert_units_equal(a, b): __tracebackhide__ = True assert extract_units(a) == extract_units(b) @pytest.fixture(params=[np.dtype(float), np.dtype(int)], ids=str) def dtype(request): return request.param def 
merge_args(default_args, new_args): from itertools import zip_longest fill_value = object() return [ second if second is not fill_value else first for first, second in zip_longest(default_args, new_args, fillvalue=fill_value) ] class method: """wrapper class to help with passing methods via parametrize This works similarly to using `partial(Class.method, arg, kwarg)` """ def __init__(self, name, *args, fallback_func=None, **kwargs): self.name = name self.fallback = fallback_func self.args = args self.kwargs = kwargs def __call__(self, obj, *args, **kwargs): from functools import partial all_args = merge_args(self.args, args) all_kwargs = {**self.kwargs, **kwargs} from xarray.core.groupby import GroupBy xarray_classes = ( xr.Variable, xr.DataArray, xr.Dataset, GroupBy, ) if not isinstance(obj, xarray_classes): # remove typical xarray args like "dim" exclude_kwargs = ("dim", "dims") # TODO: figure out a way to replace dim / dims with axis all_kwargs = { key: value for key, value in all_kwargs.items() if key not in exclude_kwargs } if self.fallback is not None: func = partial(self.fallback, obj) else: func = getattr(obj, self.name, None) if func is None or not callable(func): # fall back to module level numpy functions numpy_func = getattr(np, self.name) func = partial(numpy_func, obj) else: func = getattr(obj, self.name) return func(*all_args, **all_kwargs) def __repr__(self): return f"method_{self.name}" class function: """wrapper class for numpy functions Same as method, but the name is used for referencing numpy functions """ def __init__(self, name_or_function, *args, function_label=None, **kwargs): if callable(name_or_function): self.name = ( function_label if function_label is not None else name_or_function.__name__ ) self.func = name_or_function else: self.name = name_or_function if function_label is None else function_label self.func = getattr(np, name_or_function) if self.func is None: raise AttributeError( f"module 'numpy' has no attribute named '{self.name}'" ) self.args = args self.kwargs = kwargs def __call__(self, *args, **kwargs): all_args = merge_args(self.args, args) all_kwargs = {**self.kwargs, **kwargs} return self.func(*all_args, **all_kwargs) def __repr__(self): return f"function_{self.name}" @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_apply_ufunc_dataarray(variant, dtype): variants = { "data": (unit_registry.m, 1, 1), "dims": (1, unit_registry.m, 1), "coords": (1, 1, unit_registry.m), } data_unit, dim_unit, coord_unit = variants.get(variant) func = functools.partial( xr.apply_ufunc, np.mean, input_core_dims=[["x"]], kwargs={"axis": -1} ) array = np.linspace(0, 10, 20).astype(dtype) * data_unit x = np.arange(20) * dim_unit u = np.linspace(-1, 1, 20) * coord_unit data_array = xr.DataArray(data=array, dims="x", coords={"x": x, "u": ("x", u)}) expected = attach_units(func(strip_units(data_array)), extract_units(data_array)) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_apply_ufunc_dataset(variant, dtype): variants = { "data": (unit_registry.m, 1, 1), "dims": (1, unit_registry.m, 1), "coords": (1, 1, unit_registry.s), } data_unit, dim_unit, coord_unit = variants.get(variant) func = functools.partial( xr.apply_ufunc, np.mean, input_core_dims=[["x"]], 
kwargs={"axis": -1} ) array1 = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit array2 = np.linspace(0, 10, 5).astype(dtype) * data_unit x = np.arange(5) * dim_unit y = np.arange(10) * dim_unit u = np.linspace(-1, 1, 10) * coord_unit ds = xr.Dataset( data_vars={"a": (("x", "y"), array1), "b": ("x", array2)}, coords={"x": x, "y": y, "u": ("y", u)}, ) expected = attach_units(func(strip_units(ds)), extract_units(ds)) actual = func(ds) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize("value", (10, dtypes.NA)) def test_align_dataarray(value, variant, unit, error, dtype): if variant == "coords" and ( value != dtypes.NA or isinstance(unit, unit_registry.Unit) ): pytest.xfail( reason=( "fill_value is used for both data variables and coords. " "See https://github.com/pydata/xarray/issues/4165" ) ) fill_value = dtypes.get_fill_value(dtype) if value == dtypes.NA else value original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit1 array2 = np.linspace(0, 8, 2 * 5).reshape(2, 5).astype(dtype) * data_unit2 x = np.arange(2) * dim_unit1 y1 = np.arange(5) * dim_unit1 y2 = np.arange(2, 7) * dim_unit2 u1 = np.array([3, 5, 7, 8, 9]) * coord_unit1 u2 = np.array([7, 8, 9, 11, 13]) * coord_unit2 coords1 = {"x": x, "y": y1} coords2 = {"x": x, "y": y2} if variant == "coords": coords1["y_a"] = ("y", u1) coords2["y_a"] = ("y", u2) data_array1 = xr.DataArray(data=array1, coords=coords1, dims=("x", "y")) data_array2 = xr.DataArray(data=array2, coords=coords2, dims=("x", "y")) fill_value = fill_value * data_unit2 func = function(xr.align, join="outer", fill_value=fill_value) if error is not None and (value != dtypes.NA or isinstance(fill_value, Quantity)): with pytest.raises(error): func(data_array1, data_array2) return stripped_kwargs = { key: strip_units( convert_units(value, {None: data_unit1 if data_unit2 != 1 else None}) ) for key, value in func.kwargs.items() } units_a = extract_units(data_array1) units_b = extract_units(data_array2) expected_a, expected_b = func( strip_units(data_array1), strip_units(convert_units(data_array2, units_a)), **stripped_kwargs, ) expected_a = attach_units(expected_a, units_a) if isinstance(array2, Quantity): expected_b = convert_units(attach_units(expected_b, units_a), units_b) else: expected_b = attach_units(expected_b, units_b) actual_a, actual_b = func(data_array1, data_array2) assert_units_equal(expected_a, actual_a) assert_allclose(expected_a, actual_a) assert_units_equal(expected_b, actual_b) assert_allclose(expected_b, actual_b) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, 
DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize("value", (10, dtypes.NA)) def test_align_dataset(value, unit, variant, error, dtype): if variant == "coords" and ( value != dtypes.NA or isinstance(unit, unit_registry.Unit) ): pytest.xfail( reason=( "fill_value is used for both data variables and coords. " "See https://github.com/pydata/xarray/issues/4165" ) ) fill_value = dtypes.get_fill_value(dtype) if value == dtypes.NA else value original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit1 array2 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit2 x = np.arange(2) * dim_unit1 y1 = np.arange(5) * dim_unit1 y2 = np.arange(2, 7) * dim_unit2 u1 = np.array([3, 5, 7, 8, 9]) * coord_unit1 u2 = np.array([7, 8, 9, 11, 13]) * coord_unit2 coords1 = {"x": x, "y": y1} coords2 = {"x": x, "y": y2} if variant == "coords": coords1["u"] = ("y", u1) coords2["u"] = ("y", u2) ds1 = xr.Dataset(data_vars={"a": (("x", "y"), array1)}, coords=coords1) ds2 = xr.Dataset(data_vars={"a": (("x", "y"), array2)}, coords=coords2) fill_value = fill_value * data_unit2 func = function(xr.align, join="outer", fill_value=fill_value) if error is not None and (value != dtypes.NA or isinstance(fill_value, Quantity)): with pytest.raises(error): func(ds1, ds2) return stripped_kwargs = { key: strip_units( convert_units(value, {None: data_unit1 if data_unit2 != 1 else None}) ) for key, value in func.kwargs.items() } units_a = extract_units(ds1) units_b = extract_units(ds2) expected_a, expected_b = func( strip_units(ds1), strip_units(convert_units(ds2, units_a)), **stripped_kwargs, ) expected_a = attach_units(expected_a, units_a) if isinstance(array2, Quantity): expected_b = convert_units(attach_units(expected_b, units_a), units_b) else: expected_b = attach_units(expected_b, units_b) actual_a, actual_b = func(ds1, ds2) assert_units_equal(expected_a, actual_a) assert_allclose(expected_a, actual_a) assert_units_equal(expected_b, actual_b) assert_allclose(expected_b, actual_b) def test_broadcast_dataarray(dtype): # uses align internally so more thorough tests are not needed array1 = np.linspace(0, 10, 2) * unit_registry.Pa array2 = np.linspace(0, 10, 3) * unit_registry.Pa a = xr.DataArray(data=array1, dims="x") b = xr.DataArray(data=array2, dims="y") units_a = extract_units(a) units_b = extract_units(b) expected_a, expected_b = xr.broadcast(strip_units(a), strip_units(b)) expected_a = attach_units(expected_a, units_a) expected_b = convert_units(attach_units(expected_b, units_a), units_b) actual_a, actual_b = xr.broadcast(a, b) assert_units_equal(expected_a, actual_a) assert_identical(expected_a, actual_a) assert_units_equal(expected_b, actual_b) assert_identical(expected_b, actual_b) def test_broadcast_dataset(dtype): # uses align internally so more thorough tests are not needed array1 = np.linspace(0, 10, 2) * unit_registry.Pa array2 = 
np.linspace(0, 10, 3) * unit_registry.Pa x1 = np.arange(2) y1 = np.arange(3) x2 = np.arange(2, 4) y2 = np.arange(3, 6) ds = xr.Dataset( data_vars={"a": ("x", array1), "b": ("y", array2)}, coords={"x": x1, "y": y1} ) other = xr.Dataset( data_vars={ "a": ("x", array1.to(unit_registry.hPa)), "b": ("y", array2.to(unit_registry.hPa)), }, coords={"x": x2, "y": y2}, ) units_a = extract_units(ds) units_b = extract_units(other) expected_a, expected_b = xr.broadcast(strip_units(ds), strip_units(other)) expected_a = attach_units(expected_a, units_a) expected_b = attach_units(expected_b, units_b) actual_a, actual_b = xr.broadcast(ds, other) assert_units_equal(expected_a, actual_a) assert_identical(expected_a, actual_a) assert_units_equal(expected_b, actual_b) assert_identical(expected_b, actual_b) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.filterwarnings( "ignore:.*the default value for coords will change:FutureWarning" ) def test_combine_by_coords(variant, unit, error, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1 array2 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1 x = np.arange(1, 4) * 10 * dim_unit1 y = np.arange(2) * dim_unit1 u = np.arange(3) * coord_unit1 other_array1 = np.ones_like(array1) * data_unit2 other_array2 = np.ones_like(array2) * data_unit2 other_x = np.arange(1, 4) * 10 * dim_unit2 other_y = np.arange(2, 4) * dim_unit2 other_u = np.arange(3, 6) * coord_unit2 ds = xr.Dataset( data_vars={"a": (("y", "x"), array1), "b": (("y", "x"), array2)}, coords={"x": x, "y": y, "u": ("x", u)}, ) other = xr.Dataset( data_vars={"a": (("y", "x"), other_array1), "b": (("y", "x"), other_array2)}, coords={"x": other_x, "y": other_y, "u": ("x", other_u)}, ) if error is not None: with pytest.raises(error): xr.combine_by_coords([ds, other]) return units = extract_units(ds) expected = attach_units( xr.combine_by_coords( [strip_units(ds), strip_units(convert_units(other, units))] ), units, ) actual = xr.combine_by_coords([ds, other]) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.filterwarnings( "ignore:.*the default value for join will change:FutureWarning" ) @pytest.mark.filterwarnings( "ignore:.*the default value for compat 
will change:FutureWarning" ) def test_combine_nested(variant, unit, error, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1 array2 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1 x = np.arange(1, 4) * 10 * dim_unit1 y = np.arange(2) * dim_unit1 z = np.arange(3) * coord_unit1 ds1 = xr.Dataset( data_vars={"a": (("y", "x"), array1), "b": (("y", "x"), array2)}, coords={"x": x, "y": y, "z": ("x", z)}, ) ds2 = xr.Dataset( data_vars={ "a": (("y", "x"), np.ones_like(array1) * data_unit2), "b": (("y", "x"), np.ones_like(array2) * data_unit2), }, coords={ "x": np.arange(3) * dim_unit2, "y": np.arange(2, 4) * dim_unit2, "z": ("x", np.arange(-3, 0) * coord_unit2), }, ) ds3 = xr.Dataset( data_vars={ "a": (("y", "x"), np.full_like(array1, fill_value=np.nan) * data_unit2), "b": (("y", "x"), np.full_like(array2, fill_value=np.nan) * data_unit2), }, coords={ "x": np.arange(3, 6) * dim_unit2, "y": np.arange(4, 6) * dim_unit2, "z": ("x", np.arange(3, 6) * coord_unit2), }, ) ds4 = xr.Dataset( data_vars={ "a": (("y", "x"), -1 * np.ones_like(array1) * data_unit2), "b": (("y", "x"), -1 * np.ones_like(array2) * data_unit2), }, coords={ "x": np.arange(6, 9) * dim_unit2, "y": np.arange(6, 8) * dim_unit2, "z": ("x", np.arange(6, 9) * coord_unit2), }, ) func = function(xr.combine_nested, concat_dim=["x", "y"]) if error is not None: with pytest.raises(error): func([[ds1, ds2], [ds3, ds4]]) return units = extract_units(ds1) convert_and_strip = lambda ds: strip_units(convert_units(ds, units)) expected = attach_units( func( [ [strip_units(ds1), convert_and_strip(ds2)], [convert_and_strip(ds3), convert_and_strip(ds4)], ] ), units, ) actual = func([[ds1, ds2], [ds3, ds4]]) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_concat_dataarray(variant, unit, error, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.linspace(0, 5, 10).astype(dtype) * data_unit1 array2 = np.linspace(-5, 0, 5).astype(dtype) * data_unit2 x1 = np.arange(5, 15) * dim_unit1 x2 = np.arange(5) * dim_unit2 u1 = np.linspace(1, 2, 10).astype(dtype) * coord_unit1 u2 = np.linspace(0, 1, 5).astype(dtype) * coord_unit2 arr1 = xr.DataArray(data=array1, coords={"x": x1, "u": ("x", u1)}, dims="x") arr2 = xr.DataArray(data=array2, coords={"x": x2, "u": ("x", u2)}, dims="x") if error is not None: with pytest.raises(error): xr.concat([arr1, arr2], dim="x") return units = extract_units(arr1) expected = attach_units( xr.concat( 
[strip_units(arr1), strip_units(convert_units(arr2, units))], dim="x" ), units, ) actual = xr.concat([arr1, arr2], dim="x") assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_concat_dataset(variant, unit, error, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.linspace(0, 5, 10).astype(dtype) * data_unit1 array2 = np.linspace(-5, 0, 5).astype(dtype) * data_unit2 x1 = np.arange(5, 15) * dim_unit1 x2 = np.arange(5) * dim_unit2 u1 = np.linspace(1, 2, 10).astype(dtype) * coord_unit1 u2 = np.linspace(0, 1, 5).astype(dtype) * coord_unit2 ds1 = xr.Dataset(data_vars={"a": ("x", array1)}, coords={"x": x1, "u": ("x", u1)}) ds2 = xr.Dataset(data_vars={"a": ("x", array2)}, coords={"x": x2, "u": ("x", u2)}) if error is not None: with pytest.raises(error): xr.concat([ds1, ds2], dim="x") return units = extract_units(ds1) expected = attach_units( xr.concat([strip_units(ds1), strip_units(convert_units(ds2, units))], dim="x"), units, ) actual = xr.concat([ds1, ds2], dim="x") assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.filterwarnings( "ignore:.*the default value for join will change:FutureWarning" ) @pytest.mark.filterwarnings( "ignore:.*the default value for compat will change:FutureWarning" ) def test_merge_dataarray(variant, unit, error, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * data_unit1 x1 = np.arange(2) * dim_unit1 y1 = np.arange(3) * dim_unit1 u1 = np.linspace(10, 20, 2) * coord_unit1 v1 = np.linspace(10, 20, 3) * coord_unit1 array2 = np.linspace(1, 2, 2 * 4).reshape(2, 4).astype(dtype) * data_unit2 x2 = np.arange(2, 4) * dim_unit2 z2 = np.arange(4) * dim_unit1 u2 = np.linspace(20, 30, 2) * coord_unit2 w2 = np.linspace(10, 20, 4) * coord_unit1 array3 = np.linspace(0, 2, 3 * 4).reshape(3, 4).astype(dtype) * data_unit2 y3 = np.arange(3, 6) * dim_unit2 z3 = np.arange(4, 8) * dim_unit2 v3 
= np.linspace(10, 20, 3) * coord_unit2 w3 = np.linspace(10, 20, 4) * coord_unit2 arr1 = xr.DataArray( name="a", data=array1, coords={"x": x1, "y": y1, "u": ("x", u1), "v": ("y", v1)}, dims=("x", "y"), ) arr2 = xr.DataArray( name="a", data=array2, coords={"x": x2, "z": z2, "u": ("x", u2), "w": ("z", w2)}, dims=("x", "z"), ) arr3 = xr.DataArray( name="a", data=array3, coords={"y": y3, "z": z3, "v": ("y", v3), "w": ("z", w3)}, dims=("y", "z"), ) if error is not None: with pytest.raises(error): xr.merge([arr1, arr2, arr3]) return units = { "a": data_unit1, "u": coord_unit1, "v": coord_unit1, "w": coord_unit1, "x": dim_unit1, "y": dim_unit1, "z": dim_unit1, } convert_and_strip = lambda arr: strip_units(convert_units(arr, units)) expected = attach_units( xr.merge( [convert_and_strip(arr1), convert_and_strip(arr2), convert_and_strip(arr3)] ), units, ) actual = xr.merge([arr1, arr2, arr3]) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.filterwarnings( "ignore:.*the default value for join will change:FutureWarning" ) @pytest.mark.filterwarnings( "ignore:.*the default value for compat will change:FutureWarning" ) def test_merge_dataset(variant, unit, error, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1 array2 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1 x = np.arange(11, 14) * dim_unit1 y = np.arange(2) * dim_unit1 u = np.arange(3) * coord_unit1 ds1 = xr.Dataset( data_vars={"a": (("y", "x"), array1), "b": (("y", "x"), array2)}, coords={"x": x, "y": y, "u": ("x", u)}, ) ds2 = xr.Dataset( data_vars={ "a": (("y", "x"), np.ones_like(array1) * data_unit2), "b": (("y", "x"), np.ones_like(array2) * data_unit2), }, coords={ "x": np.arange(3) * dim_unit2, "y": np.arange(2, 4) * dim_unit2, "u": ("x", np.arange(-3, 0) * coord_unit2), }, ) ds3 = xr.Dataset( data_vars={ "a": (("y", "x"), np.full_like(array1, np.nan) * data_unit2), "b": (("y", "x"), np.full_like(array2, np.nan) * data_unit2), }, coords={ "x": np.arange(3, 6) * dim_unit2, "y": np.arange(4, 6) * dim_unit2, "u": ("x", np.arange(3, 6) * coord_unit2), }, ) func = function(xr.merge) if error is not None: with pytest.raises(error): func([ds1, ds2, ds3]) return units = extract_units(ds1) convert_and_strip = lambda ds: strip_units(convert_units(ds, units)) expected = attach_units( func([convert_and_strip(ds1), convert_and_strip(ds2), convert_and_strip(ds3)]), units, ) actual = func([ds1, ds2, ds3]) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize("func", (xr.zeros_like, 
xr.ones_like)) def test_replication_dataarray(func, variant, dtype): unit = unit_registry.m variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variants.get(variant) array = np.linspace(0, 10, 20).astype(dtype) * data_unit x = np.arange(20) * dim_unit u = np.linspace(0, 1, 20) * coord_unit data_array = xr.DataArray(data=array, dims="x", coords={"x": x, "u": ("x", u)}) units = extract_units(data_array) units.pop(data_array.name) expected = attach_units(func(strip_units(data_array)), units) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize("func", (xr.zeros_like, xr.ones_like)) def test_replication_dataset(func, variant, dtype): unit = unit_registry.m variants = { "data": ((unit_registry.m, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit, 1), "coords": ((1, 1), 1, unit), } (data_unit1, data_unit2), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(0, 10, 20).astype(dtype) * data_unit1 array2 = np.linspace(5, 10, 10).astype(dtype) * data_unit2 x = np.arange(20).astype(dtype) * dim_unit y = np.arange(10).astype(dtype) * dim_unit u = np.linspace(0, 1, 10) * coord_unit ds = xr.Dataset( data_vars={"a": ("x", array1), "b": ("y", array2)}, coords={"x": x, "y": y, "u": ("y", u)}, ) units = { name: unit for name, unit in extract_units(ds).items() if name not in ds.data_vars } expected = attach_units(func(strip_units(ds)), units) actual = func(ds) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), pytest.param( "coords", marks=pytest.mark.xfail(reason="can't copy quantity into non-quantity"), ), ), ) def test_replication_full_like_dataarray(variant, dtype): # since full_like will strip units and then use the units of the # fill value, we don't need to try multiple units unit = unit_registry.m variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variants.get(variant) array = np.linspace(0, 5, 10) * data_unit x = np.arange(10) * dim_unit u = np.linspace(0, 1, 10) * coord_unit data_array = xr.DataArray(data=array, dims="x", coords={"x": x, "u": ("x", u)}) fill_value = -1 * unit_registry.degK units = extract_units(data_array) units[data_array.name] = fill_value.units expected = attach_units( xr.full_like(strip_units(data_array), fill_value=strip_units(fill_value)), units ) actual = xr.full_like(data_array, fill_value=fill_value) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), pytest.param( "coords", marks=pytest.mark.xfail(reason="can't copy quantity into non-quantity"), ), ), ) def test_replication_full_like_dataset(variant, dtype): unit = unit_registry.m variants = { "data": ((unit_registry.s, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit, 1), "coords": ((1, 1), 1, unit), } (data_unit1, data_unit2), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(0, 10, 20).astype(dtype) * data_unit1 array2 = np.linspace(5, 10, 10).astype(dtype) * data_unit2 x = np.arange(20).astype(dtype) * dim_unit y = np.arange(10).astype(dtype) * 
dim_unit u = np.linspace(0, 1, 10) * coord_unit ds = xr.Dataset( data_vars={"a": ("x", array1), "b": ("y", array2)}, coords={"x": x, "y": y, "u": ("y", u)}, ) fill_value = -1 * unit_registry.degK units = { **extract_units(ds), **dict.fromkeys(ds.data_vars, unit_registry.degK), } expected = attach_units( xr.full_like(strip_units(ds), fill_value=strip_units(fill_value)), units ) actual = xr.full_like(ds, fill_value=fill_value) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize("fill_value", (np.nan, 10.2)) def test_where_dataarray(fill_value, unit, error, dtype): array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m x = xr.DataArray(data=array, dims="x") cond = x < 5 * unit_registry.m fill_value = fill_value * unit if error is not None and not ( np.isnan(fill_value) and not isinstance(fill_value, Quantity) ): with pytest.raises(error): xr.where(cond, x, fill_value) return expected = attach_units( xr.where( cond, strip_units(x), strip_units(convert_units(fill_value, {None: unit_registry.m})), ), extract_units(x), ) actual = xr.where(cond, x, fill_value) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ids=repr, ) @pytest.mark.parametrize("fill_value", (np.nan, 10.2)) def test_where_dataset(fill_value, unit, error, dtype): array1 = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m array2 = np.linspace(-5, 0, 10).astype(dtype) * unit_registry.m ds = xr.Dataset(data_vars={"a": ("x", array1), "b": ("x", array2)}) cond = array1 < 2 * unit_registry.m fill_value = fill_value * unit if error is not None and not ( np.isnan(fill_value) and not isinstance(fill_value, Quantity) ): with pytest.raises(error): xr.where(cond, ds, fill_value) return expected = attach_units( xr.where( cond, strip_units(ds), strip_units(convert_units(fill_value, {None: unit_registry.m})), ), extract_units(ds), ) actual = xr.where(cond, ds, fill_value) assert_units_equal(expected, actual) assert_identical(expected, actual) def test_dot_dataarray(dtype): array1 = ( np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m / unit_registry.s ) array2 = ( np.linspace(10, 20, 10 * 20).reshape(10, 20).astype(dtype) * unit_registry.s ) data_array = xr.DataArray(data=array1, dims=("x", "y")) other = xr.DataArray(data=array2, dims=("y", "z")) with xr.set_options(use_opt_einsum=False): expected = attach_units( xr.dot(strip_units(data_array), strip_units(other)), {None: unit_registry.m} ) actual = xr.dot(data_array, other) assert_units_equal(expected, actual) assert_identical(expected, actual) class TestVariable: @pytest.mark.parametrize( "func", ( method("all"), method("any"), method("argmax", dim="x"), method("argmin", dim="x"), method("argsort"), method("cumprod"), method("cumsum"), 
method("max"), method("mean"), method("median"), method("min"), method("prod"), method("std"), method("sum"), method("var"), ), ids=repr, ) def test_aggregation(self, func, dtype): array = np.linspace(0, 1, 10).astype(dtype) * ( unit_registry.m if func.name != "cumprod" else unit_registry.dimensionless ) variable = xr.Variable("x", array) numpy_kwargs = func.kwargs.copy() if "dim" in func.kwargs: numpy_kwargs["axis"] = variable.get_axis_num(numpy_kwargs.pop("dim")) units = extract_units(func(array, **numpy_kwargs)) expected = attach_units(func(strip_units(variable)), units) actual = func(variable) assert_units_equal(expected, actual) assert_allclose(expected, actual) def test_aggregate_complex(self): variable = xr.Variable("x", [1, 2j, np.nan] * unit_registry.m) expected = xr.Variable((), (0.5 + 1j) * unit_registry.m) actual = variable.mean() assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "func", ( method("astype", np.float32), method("conj"), method("conjugate"), method("clip", min=2, max=7), ), ids=repr, ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_numpy_methods(self, func, unit, error, dtype): array = np.linspace(0, 1, 10).astype(dtype) * unit_registry.m variable = xr.Variable("x", array) args = [ item * unit if isinstance(item, int | float | list) else item for item in func.args ] kwargs = { key: value * unit if isinstance(value, int | float | list) else value for key, value in func.kwargs.items() } if error is not None and func.name in ("searchsorted", "clip"): with pytest.raises(error): func(variable, *args, **kwargs) return converted_args = [ strip_units(convert_units(item, {None: unit_registry.m})) for item in args ] converted_kwargs = { key: strip_units(convert_units(value, {None: unit_registry.m})) for key, value in kwargs.items() } units = extract_units(func(array, *args, **kwargs)) expected = attach_units( func(strip_units(variable), *converted_args, **converted_kwargs), units ) actual = func(variable, *args, **kwargs) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "func", (method("item", 5), method("searchsorted", 5)), ids=repr ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_raw_numpy_methods(self, func, unit, error, dtype): array = np.linspace(0, 1, 10).astype(dtype) * unit_registry.m variable = xr.Variable("x", array) args = [ ( item * unit if isinstance(item, int | float | list) and func.name != "item" else item ) for item in func.args ] kwargs = { key: ( value * unit if isinstance(value, int | float | list) and func.name != "item" else value ) for key, value in func.kwargs.items() } if error is not None and func.name != "item": with pytest.raises(error): func(variable, *args, **kwargs) return converted_args = [ ( strip_units(convert_units(item, {None: unit_registry.m})) if func.name != "item" else 
item ) for item in args ] converted_kwargs = { key: ( strip_units(convert_units(value, {None: unit_registry.m})) if func.name != "item" else value ) for key, value in kwargs.items() } units = extract_units(func(array, *args, **kwargs)) expected = attach_units( func(strip_units(variable), *converted_args, **converted_kwargs), units ) actual = func(variable, *args, **kwargs) assert_units_equal(expected, actual) assert_duckarray_allclose(expected, actual) @pytest.mark.parametrize( "func", (method("isnull"), method("notnull"), method("count")), ids=repr ) def test_missing_value_detection(self, func): array = ( np.array( [ [1.4, 2.3, np.nan, 7.2], [np.nan, 9.7, np.nan, np.nan], [2.1, np.nan, np.nan, 4.6], [9.9, np.nan, 7.2, 9.1], ] ) * unit_registry.degK ) variable = xr.Variable(("x", "y"), array) expected = func(strip_units(variable)) actual = func(variable) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_missing_value_fillna(self, unit, error): value = 10 array = ( np.array( [ [1.4, 2.3, np.nan, 7.2], [np.nan, 9.7, np.nan, np.nan], [2.1, np.nan, np.nan, 4.6], [9.9, np.nan, 7.2, 9.1], ] ) * unit_registry.m ) variable = xr.Variable(("x", "y"), array) fill_value = value * unit if error is not None: with pytest.raises(error): variable.fillna(value=fill_value) return expected = attach_units( strip_units(variable).fillna( value=fill_value.to(unit_registry.m).magnitude ), extract_units(variable), ) actual = variable.fillna(value=fill_value) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param( unit_registry.cm, id="compatible_unit", ), pytest.param(unit_registry.m, id="identical_unit"), ), ) @pytest.mark.parametrize( "convert_data", ( pytest.param(False, id="no_conversion"), pytest.param(True, id="with_conversion"), ), ) @pytest.mark.parametrize( "func", ( method("equals"), pytest.param( method("identical"), marks=pytest.mark.skip(reason="behavior of identical is undecided"), ), ), ids=repr, ) def test_comparisons(self, func, unit, convert_data, dtype): array = np.linspace(0, 1, 9).astype(dtype) quantity1 = array * unit_registry.m variable = xr.Variable("x", quantity1) if convert_data and is_compatible(unit_registry.m, unit): quantity2 = convert_units(array * unit_registry.m, {None: unit}) else: quantity2 = array * unit other = xr.Variable("x", quantity2) expected = func( strip_units(variable), strip_units( convert_units(other, extract_units(variable)) if is_compatible(unit_registry.m, unit) else other ), ) if func.name == "identical": expected &= extract_units(variable) == extract_units(other) else: expected &= all( compatible_mappings( extract_units(variable), extract_units(other) ).values() ) actual = func(variable, other) assert expected == actual @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, 
id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) def test_broadcast_equals(self, unit, dtype): base_unit = unit_registry.m left_array = np.ones(shape=(2, 2), dtype=dtype) * base_unit value = ( (1 * base_unit).to(unit).magnitude if is_compatible(unit, base_unit) else 1 ) right_array = np.full(shape=(2,), fill_value=value, dtype=dtype) * unit left = xr.Variable(("x", "y"), left_array) right = xr.Variable("x", right_array) units = { **extract_units(left), **({} if is_compatible(unit, base_unit) else {None: None}), } expected = strip_units(left).broadcast_equals( strip_units(convert_units(right, units)) ) & is_compatible(unit, base_unit) actual = left.broadcast_equals(right) assert expected == actual @pytest.mark.parametrize("dask", [False, pytest.param(True, marks=[requires_dask])]) @pytest.mark.parametrize( ["variable", "indexers"], ( pytest.param( xr.Variable("x", np.linspace(0, 5, 10)), {"x": 4}, id="single value-single indexer", ), pytest.param( xr.Variable("x", np.linspace(0, 5, 10)), {"x": [5, 2, 9, 1]}, id="multiple values-single indexer", ), pytest.param( xr.Variable(("x", "y"), np.linspace(0, 5, 20).reshape(4, 5)), {"x": 1, "y": 4}, id="single value-multiple indexers", ), pytest.param( xr.Variable(("x", "y"), np.linspace(0, 5, 20).reshape(4, 5)), {"x": [0, 1, 2], "y": [0, 2, 4]}, id="multiple values-multiple indexers", ), ), ) def test_isel(self, variable, indexers, dask, dtype): if dask: variable = variable.chunk(dict.fromkeys(variable.dims, 2)) quantified = xr.Variable( variable.dims, variable.data.astype(dtype) * unit_registry.s ) expected = attach_units( strip_units(quantified).isel(indexers), extract_units(quantified) ) actual = quantified.isel(indexers) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "func", ( function(lambda x, *_: +x, function_label="unary_plus"), function(lambda x, *_: -x, function_label="unary_minus"), function(lambda x, *_: abs(x), function_label="absolute"), function(lambda x, y: x + y, function_label="sum"), function(lambda x, y: y + x, function_label="commutative_sum"), function(lambda x, y: x * y, function_label="product"), function(lambda x, y: y * x, function_label="commutative_product"), ), ids=repr, ) def test_1d_math(self, func, unit, error, dtype): base_unit = unit_registry.m array = np.arange(5).astype(dtype) * base_unit variable = xr.Variable("x", array) values = np.ones(5) y = values * unit if error is not None and func.name in ("sum", "commutative_sum"): with pytest.raises(error): func(variable, y) return units = extract_units(func(array, y)) if all(compatible_mappings(units, extract_units(y)).values()): converted_y = convert_units(y, units) else: converted_y = y if all(compatible_mappings(units, extract_units(variable)).values()): converted_variable = convert_units(variable, units) else: converted_variable = variable expected = attach_units( func(strip_units(converted_variable), strip_units(converted_y)), units ) actual = func(variable, y) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, 
id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "func", (method("where"), method("_getitem_with_mask")), ids=repr ) def test_masking(self, func, unit, error, dtype): base_unit = unit_registry.m array = np.linspace(0, 5, 10).astype(dtype) * base_unit variable = xr.Variable("x", array) cond = np.array([True, False] * 5) other = -1 * unit if error is not None: with pytest.raises(error): func(variable, cond, other) return expected = attach_units( func( strip_units(variable), cond, strip_units( convert_units( other, ( {None: base_unit} if is_compatible(base_unit, unit) else {None: None} ), ) ), ), extract_units(variable), ) actual = func(variable, cond, other) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("dim", ("x", "y", "z", "t", "all")) def test_squeeze(self, dim, dtype): shape = (2, 1, 3, 1, 1, 2) names = list("abcdef") dim_lengths = dict(zip(names, shape, strict=True)) array = np.ones(shape=shape) * unit_registry.m variable = xr.Variable(names, array) kwargs = {"dim": dim} if dim != "all" and dim_lengths.get(dim, 0) == 1 else {} expected = attach_units( strip_units(variable).squeeze(**kwargs), extract_units(variable) ) actual = variable.squeeze(**kwargs) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize( "func", ( method("coarsen", windows={"y": 2}, func=np.mean), method("quantile", q=[0.25, 0.75]), pytest.param( method("rank", dim="x"), marks=pytest.mark.skip(reason="rank not implemented for non-ndarray"), ), method("roll", {"x": 2}), pytest.param( method("rolling_window", "x", 3, "window"), marks=pytest.mark.xfail(reason="converts to ndarray"), ), method("reduce", np.std, "x"), method("round", 2), method("shift", {"x": -2}), method("transpose", "y", "x"), ), ids=repr, ) def test_computation(self, func, dtype, compute_backend): base_unit = unit_registry.m array = np.linspace(0, 5, 5 * 10).reshape(5, 10).astype(dtype) * base_unit variable = xr.Variable(("x", "y"), array) expected = attach_units(func(strip_units(variable)), extract_units(variable)) actual = func(variable) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_searchsorted(self, unit, error, dtype): base_unit = unit_registry.m array = np.linspace(0, 5, 10).astype(dtype) * base_unit variable = xr.Variable("x", array) value = 0 * unit if error is not None: with pytest.raises(error): variable.searchsorted(value) return expected = strip_units(variable).searchsorted( strip_units(convert_units(value, {None: base_unit})) ) actual = variable.searchsorted(value) assert_units_equal(expected, actual) np.testing.assert_allclose(expected, actual) def test_stack(self, dtype): array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m variable = xr.Variable(("x", "y"), 
array) expected = attach_units( strip_units(variable).stack(z=("x", "y")), extract_units(variable) ) actual = variable.stack(z=("x", "y")) assert_units_equal(expected, actual) assert_identical(expected, actual) def test_unstack(self, dtype): array = np.linspace(0, 5, 3 * 10).astype(dtype) * unit_registry.m variable = xr.Variable("z", array) expected = attach_units( strip_units(variable).unstack(z={"x": 3, "y": 10}), extract_units(variable) ) actual = variable.unstack(z={"x": 3, "y": 10}) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_concat(self, unit, error, dtype): array1 = ( np.linspace(0, 5, 9 * 10).reshape(3, 6, 5).astype(dtype) * unit_registry.m ) array2 = np.linspace(5, 10, 10 * 3).reshape(3, 2, 5).astype(dtype) * unit variable = xr.Variable(("x", "y", "z"), array1) other = xr.Variable(("x", "y", "z"), array2) if error is not None: with pytest.raises(error): xr.Variable.concat([variable, other], dim="y") return units = extract_units(variable) expected = attach_units( xr.Variable.concat( [strip_units(variable), strip_units(convert_units(other, units))], dim="y", ), units, ) actual = xr.Variable.concat([variable, other], dim="y") assert_units_equal(expected, actual) assert_identical(expected, actual) def test_set_dims(self, dtype): array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m variable = xr.Variable(("x", "y"), array) dims = {"z": 6, "x": 3, "a": 1, "b": 4, "y": 10} expected = attach_units( strip_units(variable).set_dims(dims), extract_units(variable) ) actual = variable.set_dims(dims) assert_units_equal(expected, actual) assert_identical(expected, actual) def test_copy(self, dtype): array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m other = np.arange(10).astype(dtype) * unit_registry.s variable = xr.Variable("x", array) expected = attach_units( strip_units(variable).copy(data=strip_units(other)), extract_units(other) ) actual = variable.copy(data=other) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) def test_no_conflicts(self, unit, dtype): base_unit = unit_registry.m array1 = ( np.array( [ [6.3, 0.3, 0.45], [np.nan, 0.3, 0.3], [3.7, np.nan, 0.2], [9.43, 0.3, 0.7], ] ) * base_unit ) array2 = np.array([np.nan, 0.3, np.nan]) * unit variable = xr.Variable(("x", "y"), array1) other = xr.Variable("y", array2) expected = strip_units(variable).no_conflicts( strip_units( convert_units( other, {None: base_unit if is_compatible(base_unit, unit) else None} ) ) ) & is_compatible(base_unit, unit) actual = variable.no_conflicts(other) assert expected == actual @pytest.mark.parametrize( "mode", [ "constant", "mean", "median", "reflect", "edge", "linear_ramp", "maximum", "minimum", "symmetric", "wrap", ], ) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) def test_pad(self, mode, xr_arg, np_arg): data = np.arange(4 * 3 * 
2).reshape(4, 3, 2) * unit_registry.m v = xr.Variable(["x", "y", "z"], data) expected = attach_units( strip_units(v).pad(mode=mode, **xr_arg), extract_units(v), ) actual = v.pad(mode=mode, **xr_arg) assert_units_equal(expected, actual) assert_equal(actual, expected) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_pad_unit_constant_value(self, unit, error, dtype): array = np.linspace(0, 5, 3 * 10).reshape(3, 10).astype(dtype) * unit_registry.m variable = xr.Variable(("x", "y"), array) fill_value = -100 * unit func = method("pad", mode="constant", x=(2, 3), y=(1, 4)) if error is not None: with pytest.raises(error): func(variable, constant_values=fill_value) return units = extract_units(variable) expected = attach_units( func( strip_units(variable), constant_values=strip_units(convert_units(fill_value, units)), ), units, ) actual = func(variable, constant_values=fill_value) assert_units_equal(expected, actual) assert_identical(expected, actual) class TestDataArray: @pytest.mark.parametrize( "variant", ( pytest.param( "with_dims", marks=pytest.mark.skip(reason="indexes don't support units"), ), "with_coords", "without_coords", ), ) def test_init(self, variant, dtype): array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m x = np.arange(len(array)) * unit_registry.s y = x.to(unit_registry.ms) variants = { "with_dims": {"x": x}, "with_coords": {"y": ("x", y)}, "without_coords": {}, } kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)} data_array = xr.DataArray(**kwargs) assert isinstance(data_array.data, Quantity) assert all( { name: isinstance(coord.data, Quantity) for name, coord in data_array.coords.items() }.values() ) @pytest.mark.parametrize( "func", (pytest.param(str, id="str"), pytest.param(repr, id="repr")) ) @pytest.mark.parametrize( "variant", ( pytest.param( "with_dims", marks=pytest.mark.skip(reason="indexes don't support units"), ), pytest.param("with_coords"), pytest.param("without_coords"), ), ) def test_repr(self, func, variant, dtype): array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m x = np.arange(len(array)) * unit_registry.s y = x.to(unit_registry.ms) variants = { "with_dims": {"x": x}, "with_coords": {"y": ("x", y)}, "without_coords": {}, } kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)} data_array = xr.DataArray(**kwargs) # FIXME: this just checks that the repr does not raise # warnings or errors, but does not check the result func(data_array) @pytest.mark.parametrize( "func", ( function("all"), function("any"), pytest.param( function("argmax"), marks=pytest.mark.skip( reason="calling np.argmax as a function on xarray objects is not " "supported" ), ), pytest.param( function("argmin"), marks=pytest.mark.skip( reason="calling np.argmin as a function on xarray objects is not " "supported" ), ), function("max"), function("mean"), pytest.param( function("median"), marks=pytest.mark.skip( reason="median does not work with dataarrays yet" ), ), function("min"), function("prod"), function("sum"), function("std"), function("var"), function("cumsum"), function("cumprod"), method("all"), method("any"), method("argmax", dim="x"), method("argmin", dim="x"), method("max"), method("mean"), 
method("median"), method("min"), method("prod"), method("sum"), method("std"), method("var"), method("cumsum"), method("cumprod"), ), ids=repr, ) def test_aggregation(self, func, dtype): array = np.arange(10).astype(dtype) * ( unit_registry.m if func.name != "cumprod" else unit_registry.dimensionless ) data_array = xr.DataArray(data=array, dims="x") numpy_kwargs = func.kwargs.copy() if "dim" in numpy_kwargs: numpy_kwargs["axis"] = data_array.get_axis_num(numpy_kwargs.pop("dim")) # units differ based on the applied function, so we need to # first compute the units units = extract_units(func(array)) expected = attach_units(func(strip_units(data_array)), units) actual = func(data_array) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "func", ( pytest.param(operator.neg, id="negate"), pytest.param(abs, id="absolute"), pytest.param(np.round, id="round"), ), ) def test_unary_operations(self, func, dtype): array = np.arange(10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array) units = extract_units(func(array)) expected = attach_units(func(strip_units(data_array)), units) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "func", ( pytest.param(lambda x: 2 * x, id="multiply"), pytest.param(lambda x: x + x, id="add"), pytest.param(lambda x: x[0] + x, id="add scalar"), pytest.param(lambda x: x.T @ x, id="matrix multiply"), ), ) def test_binary_operations(self, func, dtype): array = np.arange(10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array) units = extract_units(func(array)) with xr.set_options(use_opt_einsum=False): expected = attach_units(func(strip_units(data_array)), units) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "comparison", ( pytest.param(operator.lt, id="less_than"), pytest.param(operator.ge, id="greater_equal"), pytest.param(operator.eq, id="equal"), ), ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, ValueError, id="without_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_comparison_operations(self, comparison, unit, error, dtype): array = ( np.array([10.1, 5.2, 6.5, 8.0, 21.3, 7.1, 1.3]).astype(dtype) * unit_registry.m ) data_array = xr.DataArray(data=array) value = 8 to_compare_with = value * unit # incompatible units are all not equal if error is not None and comparison is not operator.eq: with pytest.raises(error): comparison(array, to_compare_with) with pytest.raises(error): comparison(data_array, to_compare_with) return actual = comparison(data_array, to_compare_with) expected_units = {None: unit_registry.m if array.check(unit) else None} expected = array.check(unit) & comparison( strip_units(data_array), strip_units(convert_units(to_compare_with, expected_units)), ) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "units,error", ( pytest.param(unit_registry.dimensionless, None, id="dimensionless"), pytest.param(unit_registry.m, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.degree, None, id="compatible_unit"), ), ) def test_univariate_ufunc(self, units, error, dtype): array = np.arange(10).astype(dtype) 
* units data_array = xr.DataArray(data=array) func = function("sin") if error is not None: with pytest.raises(error): np.sin(data_array) return expected = attach_units( func(strip_units(convert_units(data_array, {None: unit_registry.radians}))), {None: unit_registry.dimensionless}, ) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="without_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param( unit_registry.mm, None, id="compatible_unit", marks=pytest.mark.xfail(reason="pint converts to the wrong units"), ), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_bivariate_ufunc(self, unit, error, dtype): original_unit = unit_registry.m array = np.arange(10).astype(dtype) * original_unit data_array = xr.DataArray(data=array) if error is not None: with pytest.raises(error): np.maximum(data_array, 1 * unit) return expected_units = {None: original_unit} expected = attach_units( np.maximum( strip_units(data_array), strip_units(convert_units(1 * unit, expected_units)), ), expected_units, ) actual = np.maximum(data_array, 1 * unit) assert_units_equal(expected, actual) assert_identical(expected, actual) actual = np.maximum(1 * unit, data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("property", ("T", "imag", "real")) def test_numpy_properties(self, property, dtype): array = ( np.arange(5 * 10).astype(dtype) + 1j * np.linspace(-1, 0, 5 * 10).astype(dtype) ).reshape(5, 10) * unit_registry.s data_array = xr.DataArray(data=array, dims=("x", "y")) expected = attach_units( getattr(strip_units(data_array), property), extract_units(data_array) ) actual = getattr(data_array, property) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "func", (method("conj"), method("argsort"), method("conjugate"), method("round")), ids=repr, ) def test_numpy_methods(self, func, dtype): array = np.arange(10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array, dims="x") units = extract_units(func(array)) expected = attach_units(strip_units(data_array), units) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) def test_item(self, dtype): array = np.arange(10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array) func = method("item", 2) expected = func(strip_units(data_array)) * unit_registry.m actual = func(data_array) assert_duckarray_allclose(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "func", ( method("searchsorted", 5), pytest.param( function("searchsorted", 5), marks=pytest.mark.xfail( reason="xarray does not implement __array_function__" ), ), ), ids=repr, ) def test_searchsorted(self, func, unit, error, dtype): array = np.arange(10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array) scalar_types = (int, float) args = [value * unit for value in func.args] kwargs = { key: 
(value * unit if isinstance(value, scalar_types) else value) for key, value in func.kwargs.items() } if error is not None: with pytest.raises(error): func(data_array, *args, **kwargs) return units = extract_units(data_array) expected_units = extract_units(func(array, *args, **kwargs)) stripped_args = [strip_units(convert_units(value, units)) for value in args] stripped_kwargs = { key: strip_units(convert_units(value, units)) for key, value in kwargs.items() } expected = attach_units( func(strip_units(data_array), *stripped_args, **stripped_kwargs), expected_units, ) actual = func(data_array, *args, **kwargs) assert_units_equal(expected, actual) np.testing.assert_allclose(expected, actual) @pytest.mark.parametrize( "func", ( method("clip", min=3, max=8), pytest.param( function("clip", a_min=3, a_max=8), marks=pytest.mark.xfail( reason="xarray does not implement __array_function__" ), ), ), ids=repr, ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_numpy_methods_with_args(self, func, unit, error, dtype): array = np.arange(10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array) scalar_types = (int, float) args = [value * unit for value in func.args] kwargs = { key: (value * unit if isinstance(value, scalar_types) else value) for key, value in func.kwargs.items() } if error is not None: with pytest.raises(error): func(data_array, *args, **kwargs) return units = extract_units(data_array) expected_units = extract_units(func(array, *args, **kwargs)) stripped_args = [strip_units(convert_units(value, units)) for value in args] stripped_kwargs = { key: strip_units(convert_units(value, units)) for key, value in kwargs.items() } expected = attach_units( func(strip_units(data_array), *stripped_args, **stripped_kwargs), expected_units, ) actual = func(data_array, *args, **kwargs) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "func", (method("isnull"), method("notnull"), method("count")), ids=repr ) def test_missing_value_detection(self, func, dtype): array = ( np.array( [ [1.4, 2.3, np.nan, 7.2], [np.nan, 9.7, np.nan, np.nan], [2.1, np.nan, np.nan, 4.6], [9.9, np.nan, 7.2, 9.1], ] ) * unit_registry.degK ) data_array = xr.DataArray(data=array) expected = func(strip_units(data_array)) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.xfail(reason="ffill and bfill lose units in data") @pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr) def test_missing_value_filling(self, func, dtype): array = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.degK ) x = np.arange(len(array)) data_array = xr.DataArray(data=array, coords={"x": x}, dims="x") expected = attach_units( func(strip_units(data_array), dim="x"), extract_units(data_array) ) actual = func(data_array, dim="x") assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, 
id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "fill_value", ( pytest.param(-1, id="python_scalar"), pytest.param(np.array(-1), id="numpy_scalar"), pytest.param(np.array([-1]), id="numpy_array"), ), ) def test_fillna(self, fill_value, unit, error, dtype): original_unit = unit_registry.m array = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * original_unit ) data_array = xr.DataArray(data=array) func = method("fillna") value = fill_value * unit if error is not None: with pytest.raises(error): func(data_array, value=value) return units = extract_units(data_array) expected = attach_units( func( strip_units(data_array), value=strip_units(convert_units(value, units)) ), units, ) actual = func(data_array, value=value) assert_units_equal(expected, actual) assert_identical(expected, actual) def test_dropna(self, dtype): array = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.m ) x = np.arange(len(array)) data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"]) units = extract_units(data_array) expected = attach_units(strip_units(data_array).dropna(dim="x"), units) actual = data_array.dropna(dim="x") assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) def test_isin(self, unit, dtype): array = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.m ) data_array = xr.DataArray(data=array, dims="x") raw_values = create_nan_array([1.4, np.nan, 2.3], dtype) values = raw_values * unit units = {None: unit_registry.m if array.check(unit) else None} expected = strip_units(data_array).isin( strip_units(convert_units(values, units)) ) & array.check(unit) actual = data_array.isin(values) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ("masking", "replacing_scalar", "replacing_array", "dropping") ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_where(self, variant, unit, error, dtype): original_unit = unit_registry.m array = np.linspace(0, 1, 10).astype(dtype) * original_unit data_array = xr.DataArray(data=array) condition = data_array < 0.5 * original_unit other = np.linspace(-2, -1, 10).astype(dtype) * unit variant_kwargs = { "masking": {"cond": condition}, "replacing_scalar": {"cond": condition, "other": -1 * unit}, "replacing_array": {"cond": condition, "other": other}, "dropping": {"cond": condition, "drop": True}, } kwargs = variant_kwargs.get(variant) kwargs_without_units = { key: strip_units( convert_units( value, {None: original_unit if array.check(unit) else None} ) ) for key, value in kwargs.items() } if variant not in ("masking", "dropping") and error is not None: with pytest.raises(error): data_array.where(**kwargs) return expected = attach_units( 
strip_units(data_array).where(**kwargs_without_units), extract_units(data_array), ) actual = data_array.where(**kwargs) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.xfail(reason="uses numpy.vectorize") def test_interpolate_na(self): array = ( np.array([-1.03, 0.1, 1.4, np.nan, 2.3, np.nan, np.nan, 9.1]) * unit_registry.m ) x = np.arange(len(array)) data_array = xr.DataArray(data=array, coords={"x": x}, dims="x") units = extract_units(data_array) expected = attach_units(strip_units(data_array).interpolate_na(dim="x"), units) actual = data_array.interpolate_na(dim="x") assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param( unit_registry.cm, None, id="compatible_unit", ), pytest.param( unit_registry.m, None, id="identical_unit", ), ), ) def test_combine_first(self, unit, error, dtype): array = np.zeros(shape=(2, 2), dtype=dtype) * unit_registry.m other_array = np.ones_like(array) * unit data_array = xr.DataArray( data=array, coords={"x": ["a", "b"], "y": [-1, 0]}, dims=["x", "y"] ) other = xr.DataArray( data=other_array, coords={"x": ["b", "c"], "y": [0, 1]}, dims=["x", "y"] ) if error is not None: with pytest.raises(error): data_array.combine_first(other) return units = extract_units(data_array) expected = attach_units( strip_units(data_array).combine_first( strip_units(convert_units(other, units)) ), units, ) actual = data_array.combine_first(other) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) @pytest.mark.parametrize( "variation", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("equals"), pytest.param( method("identical"), marks=pytest.mark.skip(reason="the behavior of identical is undecided"), ), ), ids=repr, ) def test_comparisons(self, func, variation, unit, dtype): def is_compatible(a, b): a = a if a is not None else 1 b = b if b is not None else 1 quantity = np.arange(5) * a return a == b or quantity.check(b) data = np.linspace(0, 5, 10).astype(dtype) coord = np.arange(len(data)).astype(dtype) base_unit = unit_registry.m array = data * (base_unit if variation == "data" else 1) x = coord * (base_unit if variation == "dims" else 1) y = coord * (base_unit if variation == "coords" else 1) variations = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variations.get(variation) data_array = xr.DataArray(data=array, coords={"x": x, "y": ("x", y)}, dims="x") other = attach_units( strip_units(data_array), {None: data_unit, "x": dim_unit, "y": coord_unit} ) units = extract_units(data_array) other_units = extract_units(other) equal_arrays = all( is_compatible(units[name], other_units[name]) for name in units.keys() ) and ( strip_units(data_array).equals( strip_units(convert_units(other, extract_units(data_array))) ) ) equal_units = units == other_units expected = equal_arrays and (func.name != 
"identical" or equal_units) actual = func(data_array, other) assert expected == actual @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_broadcast_like(self, variant, unit, dtype): original_unit = unit_registry.m variants = { "data": ((original_unit, unit), (1, 1), (1, 1)), "dims": ((1, 1), (original_unit, unit), (1, 1)), "coords": ((1, 1), (1, 1), (original_unit, unit)), } ( (data_unit1, data_unit2), (dim_unit1, dim_unit2), (coord_unit1, coord_unit2), ) = variants.get(variant) array1 = np.linspace(1, 2, 2 * 1).reshape(2, 1).astype(dtype) * data_unit1 array2 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * data_unit2 x1 = np.arange(2) * dim_unit1 x2 = np.arange(2) * dim_unit2 y1 = np.array([0]) * dim_unit1 y2 = np.arange(3) * dim_unit2 u1 = np.linspace(0, 1, 2) * coord_unit1 u2 = np.linspace(0, 1, 2) * coord_unit2 arr1 = xr.DataArray( data=array1, coords={"x": x1, "y": y1, "u": ("x", u1)}, dims=("x", "y") ) arr2 = xr.DataArray( data=array2, coords={"x": x2, "y": y2, "u": ("x", u2)}, dims=("x", "y") ) expected = attach_units( strip_units(arr1).broadcast_like(strip_units(arr2)), extract_units(arr1) ) actual = arr1.broadcast_like(arr2) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) def test_broadcast_equals(self, unit, dtype): left_array = np.ones(shape=(2, 2), dtype=dtype) * unit_registry.m right_array = np.ones(shape=(2,), dtype=dtype) * unit left = xr.DataArray(data=left_array, dims=("x", "y")) right = xr.DataArray(data=right_array, dims="x") units = { **extract_units(left), **({} if left_array.check(unit) else {None: None}), } expected = strip_units(left).broadcast_equals( strip_units(convert_units(right, units)) ) & left_array.check(unit) actual = left.broadcast_equals(right) assert expected == actual def test_pad(self, dtype): array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m data_array = xr.DataArray(data=array, dims="x") units = extract_units(data_array) expected = attach_units(strip_units(data_array).pad(x=(2, 3)), units) actual = data_array.pad(x=(2, 3)) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("pipe", lambda da: da * 10), method("assign_coords", w=("y", np.arange(10) * unit_registry.mm)), method("assign_attrs", attr1="value"), method("rename", u="v"), pytest.param( method("swap_dims", {"x": "u"}), marks=pytest.mark.skip(reason="indexes don't support units"), ), pytest.param( method( "expand_dims", dim={"z": np.linspace(10, 20, 12) * unit_registry.s}, axis=1, ), marks=pytest.mark.skip(reason="indexes don't support units"), ), method("drop_vars", "x"), method("reset_coords", names="u"), method("copy"), method("astype", np.float32), ), 
ids=repr, ) def test_content_manipulation(self, func, variant, dtype): unit = unit_registry.m variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variants.get(variant) quantity = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit x = np.arange(quantity.shape[0]) * dim_unit y = np.arange(quantity.shape[1]) * dim_unit u = np.linspace(0, 1, quantity.shape[0]) * coord_unit data_array = xr.DataArray( name="a", data=quantity, coords={"x": x, "u": ("x", u), "y": y}, dims=("x", "y"), ) stripped_kwargs = { key: array_strip_units(value) for key, value in func.kwargs.items() } units = extract_units(data_array) units["u"] = getattr(u, "units", None) units["v"] = getattr(u, "units", None) expected = attach_units(func(strip_units(data_array), **stripped_kwargs), units) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.degK, id="with_unit"), ), ) def test_copy(self, unit, dtype): quantity = np.linspace(0, 10, 20, dtype=dtype) * unit_registry.pascal new_data = np.arange(20) data_array = xr.DataArray(data=quantity, dims="x") expected = attach_units( strip_units(data_array).copy(data=new_data), {None: unit} ) actual = data_array.copy(data=new_data * unit) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "indices", ( pytest.param(4, id="single index"), pytest.param([5, 2, 9, 1], id="multiple indices"), ), ) def test_isel(self, indices, dtype): # TODO: maybe test for units in indexes? array = np.arange(10).astype(dtype) * unit_registry.s data_array = xr.DataArray(data=array, dims="x") expected = attach_units( strip_units(data_array).isel(x=indices), extract_units(data_array) ) actual = data_array.isel(x=indices) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "raw_values", ( pytest.param(10, id="single_value"), pytest.param([10, 5, 13], id="list_of_values"), pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"), ), ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, KeyError, id="no_units"), pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"), pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"), pytest.param(unit_registry.dm, KeyError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_sel(self, raw_values, unit, error, dtype): array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m x = np.arange(len(array)) * unit_registry.m data_array = xr.DataArray(data=array, coords={"x": x}, dims="x") values = raw_values * unit if error is not None and not ( isinstance(raw_values, int | float) and x.check(unit) ): with pytest.raises(error): data_array.sel(x=values) return expected = attach_units( strip_units(data_array).sel( x=strip_units(convert_units(values, {None: array.units})) ), extract_units(data_array), ) actual = data_array.sel(x=values) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "raw_values", ( pytest.param(10, id="single_value"), pytest.param([10, 5, 13], id="list_of_values"), pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"), ), ) @pytest.mark.parametrize( 
"unit,error", ( pytest.param(1, KeyError, id="no_units"), pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"), pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"), pytest.param(unit_registry.dm, KeyError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_loc(self, raw_values, unit, error, dtype): array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m x = np.arange(len(array)) * unit_registry.m data_array = xr.DataArray(data=array, coords={"x": x}, dims="x") values = raw_values * unit if error is not None and not ( isinstance(raw_values, int | float) and x.check(unit) ): with pytest.raises(error): data_array.loc[{"x": values}] return expected = attach_units( strip_units(data_array).loc[ {"x": strip_units(convert_units(values, {None: array.units}))} ], extract_units(data_array), ) actual = data_array.loc[{"x": values}] assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "raw_values", ( pytest.param(10, id="single_value"), pytest.param([10, 5, 13], id="list_of_values"), pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"), ), ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, KeyError, id="no_units"), pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"), pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"), pytest.param(unit_registry.dm, KeyError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_drop_sel(self, raw_values, unit, error, dtype): array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m x = np.arange(len(array)) * unit_registry.m data_array = xr.DataArray(data=array, coords={"x": x}, dims="x") values = raw_values * unit if error is not None and not ( isinstance(raw_values, int | float) and x.check(unit) ): with pytest.raises(error): data_array.drop_sel(x=values) return expected = attach_units( strip_units(data_array).drop_sel( x=strip_units(convert_units(values, {None: x.units})) ), extract_units(data_array), ) actual = data_array.drop_sel(x=values) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("dim", ("x", "y", "z", "t", "all")) @pytest.mark.parametrize( "shape", ( pytest.param((10, 20), id="nothing_squeezable"), pytest.param((10, 20, 1), id="last_dimension_squeezable"), pytest.param((10, 1, 20), id="middle_dimension_squeezable"), pytest.param((1, 10, 20), id="first_dimension_squeezable"), pytest.param((1, 10, 1, 20), id="first_and_last_dimension_squeezable"), ), ) def test_squeeze(self, shape, dim, dtype): names = "xyzt" dim_lengths = dict(zip(names, shape, strict=False)) names = "xyzt" array = np.arange(10 * 20).astype(dtype).reshape(shape) * unit_registry.J data_array = xr.DataArray(data=array, dims=tuple(names[: len(shape)])) kwargs = {"dim": dim} if dim != "all" and dim_lengths.get(dim, 0) == 1 else {} expected = attach_units( strip_units(data_array).squeeze(**kwargs), extract_units(data_array) ) actual = data_array.squeeze(**kwargs) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "func", (method("head", x=7, y=3), method("tail", x=7, y=3), method("thin", x=7, y=3)), ids=repr, ) def test_head_tail_thin(self, func, dtype): # TODO: works like isel. Maybe also test units in indexes? 
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK data_array = xr.DataArray(data=array, dims=("x", "y")) expected = attach_units( func(strip_units(data_array)), extract_units(data_array) ) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("variant", ("data", "coords")) @pytest.mark.parametrize( "func", ( pytest.param( method("interp"), marks=pytest.mark.xfail(reason="uses scipy") ), method("reindex"), ), ids=repr, ) def test_interp_reindex(self, variant, func, dtype): variants = { "data": (unit_registry.m, 1), "coords": (1, unit_registry.m), } data_unit, coord_unit = variants.get(variant) array = np.linspace(1, 2, 10).astype(dtype) * data_unit y = np.arange(10) * coord_unit x = np.arange(10) new_x = np.arange(10) + 0.5 data_array = xr.DataArray(array, coords={"x": x, "y": ("x", y)}, dims="x") units = extract_units(data_array) expected = attach_units(func(strip_units(data_array), x=new_x), units) actual = func(data_array, x=new_x) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "func", (method("interp"), method("reindex")), ids=repr, ) def test_interp_reindex_indexing(self, func, unit, error, dtype): array = np.linspace(1, 2, 10).astype(dtype) x = np.arange(10) * unit_registry.m new_x = (np.arange(10) + 0.5) * unit data_array = xr.DataArray(array, coords={"x": x}, dims="x") if error is not None: with pytest.raises(error): func(data_array, x=new_x) return units = extract_units(data_array) expected = attach_units( func( strip_units(data_array), x=strip_units(convert_units(new_x, {None: unit_registry.m})), ), units, ) actual = func(data_array, x=new_x) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("variant", ("data", "coords")) @pytest.mark.parametrize( "func", ( pytest.param( method("interp_like"), marks=pytest.mark.xfail(reason="uses scipy") ), method("reindex_like"), ), ids=repr, ) def test_interp_reindex_like(self, variant, func, dtype): variants = { "data": (unit_registry.m, 1), "coords": (1, unit_registry.m), } data_unit, coord_unit = variants.get(variant) array = np.linspace(1, 2, 10).astype(dtype) * data_unit coord = np.arange(10) * coord_unit x = np.arange(10) new_x = np.arange(-2, 2) + 0.5 data_array = xr.DataArray(array, coords={"x": x, "y": ("x", coord)}, dims="x") other = xr.DataArray(np.empty_like(new_x), coords={"x": new_x}, dims="x") units = extract_units(data_array) expected = attach_units(func(strip_units(data_array), other), units) actual = func(data_array, other) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), 
) @pytest.mark.parametrize( "func", (method("interp_like"), method("reindex_like")), ids=repr, ) def test_interp_reindex_like_indexing(self, func, unit, error, dtype): array = np.linspace(1, 2, 10).astype(dtype) x = np.arange(10) * unit_registry.m new_x = (np.arange(-2, 2) + 0.5) * unit data_array = xr.DataArray(array, coords={"x": x}, dims="x") other = xr.DataArray(np.empty_like(new_x), {"x": new_x}, dims="x") if error is not None: with pytest.raises(error): func(data_array, other) return units = extract_units(data_array) expected = attach_units( func( strip_units(data_array), strip_units(convert_units(other, {None: unit_registry.m})), ), units, ) actual = func(data_array, other) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "func", (method("unstack"), method("reset_index", "z"), method("reorder_levels")), ids=repr, ) def test_stacking_stacked(self, func, dtype): array = ( np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m ) x = np.arange(array.shape[0]) y = np.arange(array.shape[1]) data_array = xr.DataArray( name="data", data=array, coords={"x": x, "y": y}, dims=("x", "y") ) stacked = data_array.stack(z=("x", "y")) expected = attach_units(func(strip_units(stacked)), {"data": unit_registry.m}) actual = func(stacked) assert_units_equal(expected, actual) if func.name == "reset_index": assert_identical(expected, actual, check_default_indexes=False) else: assert_identical(expected, actual) @pytest.mark.skip(reason="indexes don't support units") def test_to_unstacked_dataset(self, dtype): array = ( np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.pascal ) x = np.arange(array.shape[0]) * unit_registry.m y = np.arange(array.shape[1]) * unit_registry.s data_array = xr.DataArray( data=array, coords={"x": x, "y": y}, dims=("x", "y") ).stack(z=("x", "y")) func = method("to_unstacked_dataset", dim="z") expected = attach_units( func(strip_units(data_array)), { "y": y.units, **dict(zip(x.magnitude, [array.units] * len(y), strict=True)), }, ).rename({elem.magnitude: elem for elem in x}) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "func", ( method("transpose", "y", "x", "z"), method("stack", a=("x", "y")), method("set_index", x="x2"), method("shift", x=2), pytest.param( method("rank", dim="x"), marks=pytest.mark.skip(reason="rank not implemented for non-ndarray"), ), method("roll", x=2, roll_coords=False), method("sortby", "x2"), ), ids=repr, ) def test_stacking_reordering(self, func, dtype): array = ( np.linspace(0, 10, 2 * 5 * 10).reshape(2, 5, 10).astype(dtype) * unit_registry.m ) x = np.arange(array.shape[0]) y = np.arange(array.shape[1]) z = np.arange(array.shape[2]) x2 = np.linspace(0, 1, array.shape[0])[::-1] data_array = xr.DataArray( name="data", data=array, coords={"x": x, "y": y, "z": z, "x2": ("x", x2)}, dims=("x", "y", "z"), ) expected = attach_units(func(strip_units(data_array)), {None: unit_registry.m}) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ( pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("differentiate", fallback_func=np.gradient), method("integrate", fallback_func=duck_array_ops.cumulative_trapezoid), method("cumulative_integrate", fallback_func=duck_array_ops.trapz), ), ids=repr, ) def test_differentiate_integrate(self, 
func, variant, dtype): data_unit = unit_registry.m unit = unit_registry.s variants = { "dims": ("x", unit, 1), "coords": ("u", 1, unit), } coord, dim_unit, coord_unit = variants.get(variant) array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit x = np.arange(array.shape[0]) * dim_unit y = np.arange(array.shape[1]) * dim_unit u = np.linspace(0, 1, array.shape[0]) * coord_unit data_array = xr.DataArray( data=array, coords={"x": x, "y": y, "u": ("x", u)}, dims=("x", "y") ) # we want to make sure the output unit is correct units = extract_units(data_array) units.update( extract_units( func( data_array.data, getattr(data_array, coord).data, axis=0, ) ) ) expected = attach_units( func(strip_units(data_array), coord=strip_units(coord)), units, ) actual = func(data_array, coord=coord) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("diff", dim="x"), method("quantile", q=[0.25, 0.75]), method("reduce", func=np.sum, dim="x"), pytest.param(lambda x: x.dot(x), id="method_dot"), ), ids=repr, ) def test_computation(self, func, variant, dtype, compute_backend): unit = unit_registry.m variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variants.get(variant) array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit x = np.arange(array.shape[0]) * dim_unit y = np.arange(array.shape[1]) * dim_unit u = np.linspace(0, 1, array.shape[0]) * coord_unit data_array = xr.DataArray( data=array, coords={"x": x, "y": y, "u": ("x", u)}, dims=("x", "y") ) # we want to make sure the output unit is correct units = extract_units(data_array) if not isinstance(func, function | method): units.update(extract_units(func(array.reshape(-1)))) with xr.set_options(use_opt_einsum=False): expected = attach_units(func(strip_units(data_array)), units) actual = func(data_array) assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("groupby", "x"), method("groupby_bins", "y", bins=4), method("coarsen", y=2), method("rolling", y=3), pytest.param(method("rolling_exp", y=3), marks=requires_numbagg), method("weighted", xr.DataArray(data=np.linspace(0, 1, 10), dims="y")), ), ids=repr, ) def test_computation_objects(self, func, variant, dtype): if variant == "data": if func.name == "rolling_exp": pytest.xfail(reason="numbagg functions are not supported by pint") elif func.name == "rolling": pytest.xfail( reason="numpy.lib.stride_tricks.as_strided converts to ndarray" ) unit = unit_registry.m variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variants.get(variant) array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit x = np.array([0, 0, 1, 2, 2]) * dim_unit y = np.arange(array.shape[1]) * 3 * dim_unit u = np.linspace(0, 1, 5) * coord_unit data_array = xr.DataArray( data=array, coords={"x": x, "y": y, "u": ("x", u)}, dims=("x", "y") ) units = extract_units(data_array) expected = attach_units(func(strip_units(data_array)).mean(), units) actual = 
func(data_array).mean() assert_units_equal(expected, actual) assert_allclose(expected, actual) def test_resample(self, dtype): array = np.linspace(0, 5, 10).astype(dtype) * unit_registry.m time = xr.date_range("10-09-2010", periods=len(array), freq="YE") data_array = xr.DataArray(data=array, coords={"time": time}, dims="time") units = extract_units(data_array) func = method("resample", time="6ME") expected = attach_units(func(strip_units(data_array)).mean(), units) actual = func(data_array).mean() assert_units_equal(expected, actual) assert_identical(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("assign_coords", z=("x", np.arange(5) * unit_registry.s)), method("first"), method("last"), method("quantile", q=[0.25, 0.5, 0.75], dim="x"), ), ids=repr, ) def test_grouped_operations(self, func, variant, dtype, compute_backend): unit = unit_registry.m variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } data_unit, dim_unit, coord_unit = variants.get(variant) array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit x = np.arange(array.shape[0]) * dim_unit y = np.arange(array.shape[1]) * 3 * dim_unit u = np.linspace(0, 1, array.shape[0]) * coord_unit data_array = xr.DataArray( data=array, coords={"x": x, "y": y, "u": ("x", u)}, dims=("x", "y") ) units = {**extract_units(data_array), "z": unit_registry.s, "q": None} stripped_kwargs = { key: ( strip_units(value) if not isinstance(value, tuple) else tuple(strip_units(elem) for elem in value) ) for key, value in func.kwargs.items() } expected = attach_units( func( strip_units(data_array).groupby("y", squeeze=False), **stripped_kwargs ), units, ) actual = func(data_array.groupby("y", squeeze=False)) assert_units_equal(expected, actual) assert_identical(expected, actual) class TestDataset: @pytest.mark.parametrize( "unit,error", ( pytest.param(1, xr.MergeError, id="no_unit"), pytest.param( unit_registry.dimensionless, xr.MergeError, id="dimensionless" ), pytest.param(unit_registry.s, xr.MergeError, id="incompatible_unit"), pytest.param(unit_registry.mm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="same_unit"), ), ) @pytest.mark.parametrize( "shared", ( "nothing", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_init(self, shared, unit, error, dtype): original_unit = unit_registry.m scaled_unit = unit_registry.mm a = np.linspace(0, 1, 10).astype(dtype) * unit_registry.Pa b = np.linspace(-1, 0, 10).astype(dtype) * unit_registry.degK values_a = np.arange(a.shape[0]) dim_a = values_a * original_unit coord_a = dim_a.to(scaled_unit) values_b = np.arange(b.shape[0]) dim_b = values_b * unit coord_b = ( dim_b.to(scaled_unit) if unit_registry.is_compatible_with(dim_b, scaled_unit) and unit != scaled_unit else dim_b * 1000 ) variants = { "nothing": ({}, {}), "dims": ({"x": dim_a}, {"x": dim_b}), "coords": ( {"x": values_a, "y": ("x", coord_a)}, {"x": values_b, "y": ("x", coord_b)}, ), } coords_a, coords_b = variants.get(shared) dims_a, dims_b = ("x", "y") if shared == "nothing" else ("x", "x") a = xr.DataArray(data=a, coords=coords_a, dims=dims_a) b = xr.DataArray(data=b, coords=coords_b, dims=dims_b) if error is not None and shared != "nothing": with pytest.raises(error): 
xr.Dataset(data_vars={"a": a, "b": b}) return actual = xr.Dataset(data_vars={"a": a, "b": b}) units = merge_mappings( extract_units(a.rename("a")), extract_units(b.rename("b")) ) expected = attach_units( xr.Dataset(data_vars={"a": strip_units(a), "b": strip_units(b)}), units ) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", (pytest.param(str, id="str"), pytest.param(repr, id="repr")) ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units"), ), "coords", ), ) def test_repr(self, func, variant, dtype): unit1, unit2 = ( (unit_registry.Pa, unit_registry.degK) if variant == "data" else (1, 1) ) array1 = np.linspace(1, 2, 10, dtype=dtype) * unit1 array2 = np.linspace(0, 1, 10, dtype=dtype) * unit2 x = np.arange(len(array1)) * unit_registry.s y = x.to(unit_registry.ms) variants = { "dims": {"x": x}, "coords": {"y": ("x", y)}, "data": {}, } ds = xr.Dataset( data_vars={"a": ("x", array1), "b": ("x", array2)}, coords=variants.get(variant), ) # FIXME: this just checks that the repr does not raise # warnings or errors, but does not check the result func(ds) @pytest.mark.parametrize( "func", ( method("all"), method("any"), method("argmax", dim="x"), method("argmin", dim="x"), method("max"), method("min"), method("mean"), method("median"), method("sum"), method("prod"), method("std"), method("var"), method("cumsum"), method("cumprod"), ), ids=repr, ) def test_aggregation(self, func, dtype): unit_a, unit_b = ( (unit_registry.Pa, unit_registry.degK) if func.name != "cumprod" else (unit_registry.dimensionless, unit_registry.dimensionless) ) a = np.linspace(0, 1, 10).astype(dtype) * unit_a b = np.linspace(-1, 0, 10).astype(dtype) * unit_b ds = xr.Dataset({"a": ("x", a), "b": ("x", b)}) if "dim" in func.kwargs: numpy_kwargs = func.kwargs.copy() dim = numpy_kwargs.pop("dim") axis_a = ds.a.get_axis_num(dim) axis_b = ds.b.get_axis_num(dim) numpy_kwargs_a = numpy_kwargs.copy() numpy_kwargs_a["axis"] = axis_a numpy_kwargs_b = numpy_kwargs.copy() numpy_kwargs_b["axis"] = axis_b else: numpy_kwargs_a = {} numpy_kwargs_b = {} units_a = array_extract_units(func(a, **numpy_kwargs_a)) units_b = array_extract_units(func(b, **numpy_kwargs_b)) units = {"a": units_a, "b": units_b} actual = func(ds) expected = attach_units(func(strip_units(ds)), units) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize("property", ("imag", "real")) def test_numpy_properties(self, property, dtype): a = np.linspace(0, 1, 10) * unit_registry.Pa b = np.linspace(-1, 0, 15) * unit_registry.degK ds = xr.Dataset({"a": ("x", a), "b": ("y", b)}) units = extract_units(ds) actual = getattr(ds, property) expected = attach_units(getattr(strip_units(ds), property), units) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", ( method("astype", float), method("conj"), method("argsort"), method("conjugate"), method("round"), ), ids=repr, ) def test_numpy_methods(self, func, dtype): a = np.linspace(1, -1, 10) * unit_registry.Pa b = np.linspace(-1, 1, 15) * unit_registry.degK ds = xr.Dataset({"a": ("x", a), "b": ("y", b)}) units_a = array_extract_units(func(a)) units_b = array_extract_units(func(b)) units = {"a": units_a, "b": units_b} actual = func(ds) expected = attach_units(func(strip_units(ds)), units) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize("func", (method("clip", min=3, max=8),), 
ids=repr) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_numpy_methods_with_args(self, func, unit, error, dtype): data_unit = unit_registry.m a = np.linspace(0, 10, 15) * unit_registry.m b = np.linspace(-2, 12, 20) * unit_registry.m ds = xr.Dataset({"a": ("x", a), "b": ("y", b)}) units = extract_units(ds) kwargs = { key: array_attach_units(value, unit) for key, value in func.kwargs.items() } if error is not None: with pytest.raises(error): func(ds, **kwargs) return stripped_kwargs = { key: strip_units(convert_units(value, {None: data_unit})) for key, value in kwargs.items() } actual = func(ds, **kwargs) expected = attach_units(func(strip_units(ds), **stripped_kwargs), units) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", (method("isnull"), method("notnull"), method("count")), ids=repr ) def test_missing_value_detection(self, func, dtype): array1 = ( np.array( [ [1.4, 2.3, np.nan, 7.2], [np.nan, 9.7, np.nan, np.nan], [2.1, np.nan, np.nan, 4.6], [9.9, np.nan, 7.2, 9.1], ] ) * unit_registry.degK ) array2 = ( np.array( [ [np.nan, 5.7, 12.0, 7.2], [np.nan, 12.4, np.nan, 4.2], [9.8, np.nan, 4.6, 1.4], [7.2, np.nan, 6.3, np.nan], [8.4, 3.9, np.nan, np.nan], ] ) * unit_registry.Pa ) ds = xr.Dataset({"a": (("x", "y"), array1), "b": (("z", "x"), array2)}) expected = func(strip_units(ds)) actual = func(ds) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.xfail(reason="ffill and bfill lose the unit") @pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr) def test_missing_value_filling(self, func, dtype): array1 = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.degK ) array2 = ( create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * unit_registry.Pa ) ds = xr.Dataset({"a": ("x", array1), "b": ("y", array2)}) units = extract_units(ds) expected = attach_units(func(strip_units(ds), dim="x"), units) actual = func(ds, dim="x") assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param( unit_registry.cm, None, id="compatible_unit", ), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "fill_value", ( pytest.param(-1, id="python_scalar"), pytest.param(np.array(-1), id="numpy_scalar"), pytest.param(np.array([-1]), id="numpy_array"), ), ) def test_fillna(self, fill_value, unit, error, dtype): array1 = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.m ) array2 = ( create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * unit_registry.m ) ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}) value = fill_value * unit units = extract_units(ds) if error is not None: with pytest.raises(error): ds.fillna(value=value) return actual = ds.fillna(value=value) expected = attach_units( strip_units(ds).fillna( value=strip_units(convert_units(value, {None: unit_registry.m})) ), 
units, ) assert_units_equal(expected, actual) assert_equal(expected, actual) def test_dropna(self, dtype): array1 = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.degK ) array2 = ( create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * unit_registry.Pa ) ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}) units = extract_units(ds) expected = attach_units(strip_units(ds).dropna(dim="x"), units) actual = ds.dropna(dim="x") assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="same_unit"), ), ) def test_isin(self, unit, dtype): array1 = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.m ) array2 = ( create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * unit_registry.m ) ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}) raw_values = create_nan_array([1.4, np.nan, 2.3], dtype) values = raw_values * unit converted_values = ( convert_units(values, {None: unit_registry.m}) if is_compatible(unit, unit_registry.m) else values ) expected = strip_units(ds).isin(strip_units(converted_values)) # TODO: use `unit_registry.is_compatible_with(unit, unit_registry.m)` instead. # Needs `pint>=0.12.1`, though, so we probably should wait until that is released. if not is_compatible(unit, unit_registry.m): expected.a[:] = False expected.b[:] = False actual = ds.isin(values) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "variant", ("masking", "replacing_scalar", "replacing_array", "dropping") ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="same_unit"), ), ) def test_where(self, variant, unit, error, dtype): original_unit = unit_registry.m array1 = np.linspace(0, 1, 10).astype(dtype) * original_unit array2 = np.linspace(-1, 0, 10).astype(dtype) * original_unit ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}) units = extract_units(ds) condition = ds < 0.5 * original_unit other = np.linspace(-2, -1, 10).astype(dtype) * unit variant_kwargs = { "masking": {"cond": condition}, "replacing_scalar": {"cond": condition, "other": -1 * unit}, "replacing_array": {"cond": condition, "other": other}, "dropping": {"cond": condition, "drop": True}, } kwargs = variant_kwargs.get(variant) if variant not in ("masking", "dropping") and error is not None: with pytest.raises(error): ds.where(**kwargs) return kwargs_without_units = { key: strip_units(convert_units(value, {None: original_unit})) for key, value in kwargs.items() } expected = attach_units( strip_units(ds).where(**kwargs_without_units), units, ) actual = ds.where(**kwargs) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.xfail(reason="interpolate_na uses numpy.vectorize") def test_interpolate_na(self, dtype): array1 = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * unit_registry.degK ) array2 = ( create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * unit_registry.Pa ) ds 
= xr.Dataset({"a": ("x", array1), "b": ("x", array2)}) units = extract_units(ds) expected = attach_units( strip_units(ds).interpolate_na(dim="x"), units, ) actual = ds.interpolate_na(dim="x") assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="same_unit"), ), ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units"), ), ), ) def test_combine_first(self, variant, unit, error, dtype): variants = { "data": (unit_registry.m, unit, 1, 1), "dims": (1, 1, unit_registry.m, unit), } data_unit, other_data_unit, dims_unit, other_dims_unit = variants.get(variant) array1 = ( create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * data_unit ) array2 = ( create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * data_unit ) x = np.arange(len(array1)) * dims_unit ds = xr.Dataset( data_vars={"a": ("x", array1), "b": ("x", array2)}, coords={"x": x}, ) units = extract_units(ds) other_array1 = np.ones_like(array1) * other_data_unit other_array2 = np.full_like(array2, fill_value=-1) * other_data_unit other_x = (np.arange(array1.shape[0]) + 5) * other_dims_unit other = xr.Dataset( data_vars={"a": ("x", other_array1), "b": ("x", other_array2)}, coords={"x": other_x}, ) if error is not None: with pytest.raises(error): ds.combine_first(other) return expected = attach_units( strip_units(ds).combine_first(strip_units(convert_units(other, units))), units, ) actual = ds.combine_first(other) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.parametrize( "func", ( method("equals"), pytest.param( method("identical"), marks=pytest.mark.skip("behaviour of identical is unclear"), ), ), ids=repr, ) def test_comparisons(self, func, variant, unit, dtype): array1 = np.linspace(0, 5, 10).astype(dtype) array2 = np.linspace(-5, 0, 10).astype(dtype) coord = np.arange(len(array1)).astype(dtype) variants = { "data": (unit_registry.m, 1, 1), "dims": (1, unit_registry.m, 1), "coords": (1, 1, unit_registry.m), } data_unit, dim_unit, coord_unit = variants.get(variant) a = array1 * data_unit b = array2 * data_unit x = coord * dim_unit y = coord * coord_unit ds = xr.Dataset( data_vars={"a": ("x", a), "b": ("x", b)}, coords={"x": x, "y": ("x", y)}, ) units = extract_units(ds) other_variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } other_data_unit, other_dim_unit, other_coord_unit = other_variants.get(variant) other_units = { "a": other_data_unit, "b": other_data_unit, "x": other_dim_unit, "y": other_coord_unit, } to_convert = { key: unit if is_compatible(unit, reference) else None for key, (unit, reference) in zip_mappings(units, other_units) } # convert units where possible, then attach 
all units to the converted dataset other = attach_units(strip_units(convert_units(ds, to_convert)), other_units) other_units = extract_units(other) # make sure all units are compatible and only then try to # convert and compare values equal_ds = all( is_compatible(unit, other_unit) for _, (unit, other_unit) in zip_mappings(units, other_units) ) and (strip_units(ds).equals(strip_units(convert_units(other, units)))) equal_units = units == other_units expected = equal_ds and (func.name != "identical" or equal_units) actual = func(ds, other) assert expected == actual # TODO: eventually use another decorator / wrapper function that # applies a filter to the parametrize combinations: # we only need a single test for data @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units"), ), ), ) def test_broadcast_like(self, variant, unit, dtype): variants = { "data": ((unit_registry.m, unit), (1, 1)), "dims": ((1, 1), (unit_registry.m, unit)), } (data_unit1, data_unit2), (dim_unit1, dim_unit2) = variants.get(variant) array1 = np.linspace(1, 2, 2 * 1).reshape(2, 1).astype(dtype) * data_unit1 array2 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * data_unit2 x1 = np.arange(2) * dim_unit1 x2 = np.arange(2) * dim_unit2 y1 = np.array([0]) * dim_unit1 y2 = np.arange(3) * dim_unit2 ds1 = xr.Dataset( data_vars={"a": (("x", "y"), array1)}, coords={"x": x1, "y": y1} ) ds2 = xr.Dataset( data_vars={"a": (("x", "y"), array2)}, coords={"x": x2, "y": y2} ) expected = attach_units( strip_units(ds1).broadcast_like(strip_units(ds2)), extract_units(ds1) ) actual = ds1.broadcast_like(ds2) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "unit", ( pytest.param(1, id="no_unit"), pytest.param(unit_registry.dimensionless, id="dimensionless"), pytest.param(unit_registry.s, id="incompatible_unit"), pytest.param(unit_registry.cm, id="compatible_unit"), pytest.param(unit_registry.m, id="identical_unit"), ), ) def test_broadcast_equals(self, unit, dtype): # TODO: does this use indexes? 
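# broadcast_equals is only expected to return True when the right-hand arrays
# are unit-compatible with the left-hand dataset; with incompatible units the
# expected outcome is False rather than an exception, which is what the
# is_compatible(...) factor below encodes.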
left_array1 = np.ones(shape=(2, 3), dtype=dtype) * unit_registry.m left_array2 = np.zeros(shape=(3, 6), dtype=dtype) * unit_registry.m right_array1 = np.ones(shape=(2,)) * unit right_array2 = np.zeros(shape=(3,)) * unit left = xr.Dataset( {"a": (("x", "y"), left_array1), "b": (("y", "z"), left_array2)}, ) right = xr.Dataset({"a": ("x", right_array1), "b": ("y", right_array2)}) units = merge_mappings( extract_units(left), {} if is_compatible(left_array1, unit) else {"a": None, "b": None}, ) expected = is_compatible(left_array1, unit) and strip_units( left ).broadcast_equals(strip_units(convert_units(right, units))) actual = left.broadcast_equals(right) assert expected == actual def test_pad(self, dtype): a = np.linspace(0, 5, 10).astype(dtype) * unit_registry.Pa b = np.linspace(-5, 0, 10).astype(dtype) * unit_registry.degK ds = xr.Dataset({"a": ("x", a), "b": ("x", b)}) units = extract_units(ds) expected = attach_units(strip_units(ds).pad(x=(2, 3)), units) actual = ds.pad(x=(2, 3)) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", (method("unstack"), method("reset_index", "v"), method("reorder_levels")), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units"), ), ), ) def test_stacking_stacked(self, variant, func, dtype): variants = { "data": (unit_registry.m, 1), "dims": (1, unit_registry.m), } data_unit, dim_unit = variants.get(variant) array1 = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit array2 = ( np.linspace(-10, 0, 5 * 10 * 15).reshape(5, 10, 15).astype(dtype) * data_unit ) x = np.arange(array1.shape[0]) * dim_unit y = np.arange(array1.shape[1]) * dim_unit z = np.arange(array2.shape[2]) * dim_unit ds = xr.Dataset( data_vars={"a": (("x", "y"), array1), "b": (("x", "y", "z"), array2)}, coords={"x": x, "y": y, "z": z}, ) units = extract_units(ds) stacked = ds.stack(v=("x", "y")) expected = attach_units(func(strip_units(stacked)), units) actual = func(stacked) assert_units_equal(expected, actual) if func.name == "reset_index": assert_equal(expected, actual, check_default_indexes=False) else: assert_equal(expected, actual) @pytest.mark.xfail( reason="stacked dimension's labels have to be hashable, but is a numpy.array" ) def test_to_stacked_array(self, dtype): labels = range(5) * unit_registry.s arrays = { name: np.linspace(0, 1, 10).astype(dtype) * unit_registry.m for name in labels } ds = xr.Dataset({name: ("x", array) for name, array in arrays.items()}) units = {None: unit_registry.m, "y": unit_registry.s} func = method("to_stacked_array", "z", variable_dim="y", sample_dims=["x"]) actual = func(ds).rename(None) expected = attach_units( func(strip_units(ds)).rename(None), units, ) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", ( method("transpose", "y", "x", "z1", "z2"), method("stack", u=("x", "y")), method("set_index", x="x2"), method("shift", x=2), pytest.param( method("rank", dim="x"), marks=pytest.mark.skip(reason="rank not implemented for non-ndarray"), ), method("roll", x=2, roll_coords=False), method("sortby", "x2"), ), ids=repr, ) def test_stacking_reordering(self, func, dtype): array1 = ( np.linspace(0, 10, 2 * 5 * 10).reshape(2, 5, 10).astype(dtype) * unit_registry.Pa ) array2 = ( np.linspace(0, 10, 2 * 5 * 15).reshape(2, 5, 15).astype(dtype) * unit_registry.degK ) x = np.arange(array1.shape[0]) y = np.arange(array1.shape[1]) z1 = np.arange(array1.shape[2]) z2 = 
np.arange(array2.shape[2]) x2 = np.linspace(0, 1, array1.shape[0])[::-1] ds = xr.Dataset( data_vars={ "a": (("x", "y", "z1"), array1), "b": (("x", "y", "z2"), array2), }, coords={"x": x, "y": y, "z1": z1, "z2": z2, "x2": ("x", x2)}, ) units = extract_units(ds) expected = attach_units(func(strip_units(ds)), units) actual = func(ds) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "indices", ( pytest.param(4, id="single index"), pytest.param([5, 2, 9, 1], id="multiple indices"), ), ) def test_isel(self, indices, dtype): array1 = np.arange(10).astype(dtype) * unit_registry.s array2 = np.linspace(0, 1, 10).astype(dtype) * unit_registry.Pa ds = xr.Dataset(data_vars={"a": ("x", array1), "b": ("x", array2)}) units = extract_units(ds) expected = attach_units(strip_units(ds).isel(x=indices), units) actual = ds.isel(x=indices) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "raw_values", ( pytest.param(10, id="single_value"), pytest.param([10, 5, 13], id="list_of_values"), pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"), ), ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, KeyError, id="no_units"), pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"), pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"), pytest.param(unit_registry.mm, KeyError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_sel(self, raw_values, unit, error, dtype): array1 = np.linspace(5, 10, 20).astype(dtype) * unit_registry.degK array2 = np.linspace(0, 5, 20).astype(dtype) * unit_registry.Pa x = np.arange(len(array1)) * unit_registry.m ds = xr.Dataset( data_vars={ "a": xr.DataArray(data=array1, dims="x"), "b": xr.DataArray(data=array2, dims="x"), }, coords={"x": x}, ) values = raw_values * unit # TODO: if we choose dm as compatible unit, single value keys # can be found. Should we check that? if error is not None: with pytest.raises(error): ds.sel(x=values) return expected = attach_units( strip_units(ds).sel( x=strip_units(convert_units(values, {None: unit_registry.m})) ), extract_units(ds), ) actual = ds.sel(x=values) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "raw_values", ( pytest.param(10, id="single_value"), pytest.param([10, 5, 13], id="list_of_values"), pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"), ), ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, KeyError, id="no_units"), pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"), pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"), pytest.param(unit_registry.mm, KeyError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_drop_sel(self, raw_values, unit, error, dtype): array1 = np.linspace(5, 10, 20).astype(dtype) * unit_registry.degK array2 = np.linspace(0, 5, 20).astype(dtype) * unit_registry.Pa x = np.arange(len(array1)) * unit_registry.m ds = xr.Dataset( data_vars={ "a": xr.DataArray(data=array1, dims="x"), "b": xr.DataArray(data=array2, dims="x"), }, coords={"x": x}, ) values = raw_values * unit # TODO: if we choose dm as compatible unit, single value keys # can be found. Should we check that? 
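# label-based indexing compares the requested labels against the index values,
# so anything other than the identical unit is expected to raise KeyError (see
# the unit/error parametrization above); the expected result is built by
# manually converting the labels to the index units first.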
if error is not None: with pytest.raises(error): ds.drop_sel(x=values) return expected = attach_units( strip_units(ds).drop_sel( x=strip_units(convert_units(values, {None: unit_registry.m})) ), extract_units(ds), ) actual = ds.drop_sel(x=values) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "raw_values", ( pytest.param(10, id="single_value"), pytest.param([10, 5, 13], id="list_of_values"), pytest.param(np.array([9, 3, 7, 12]), id="array_of_values"), ), ) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, KeyError, id="no_units"), pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"), pytest.param(unit_registry.degree, KeyError, id="incompatible_unit"), pytest.param(unit_registry.mm, KeyError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) def test_loc(self, raw_values, unit, error, dtype): array1 = np.linspace(5, 10, 20).astype(dtype) * unit_registry.degK array2 = np.linspace(0, 5, 20).astype(dtype) * unit_registry.Pa x = np.arange(len(array1)) * unit_registry.m ds = xr.Dataset( data_vars={ "a": xr.DataArray(data=array1, dims="x"), "b": xr.DataArray(data=array2, dims="x"), }, coords={"x": x}, ) values = raw_values * unit # TODO: if we choose dm as compatible unit, single value keys # can be found. Should we check that? if error is not None: with pytest.raises(error): ds.loc[{"x": values}] return expected = attach_units( strip_units(ds).loc[ {"x": strip_units(convert_units(values, {None: unit_registry.m}))} ], extract_units(ds), ) actual = ds.loc[{"x": values}] assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", ( method("head", x=7, y=3, z=6), method("tail", x=7, y=3, z=6), method("thin", x=7, y=3, z=6), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_head_tail_thin(self, func, variant, dtype): variants = { "data": ((unit_registry.degK, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit_registry.m, 1), "coords": ((1, 1), 1, unit_registry.m), } (unit_a, unit_b), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_a array2 = np.linspace(1, 2, 10 * 8).reshape(10, 8) * unit_b coords = { "x": np.arange(10) * dim_unit, "y": np.arange(5) * dim_unit, "z": np.arange(8) * dim_unit, "u": ("x", np.linspace(0, 1, 10) * coord_unit), "v": ("y", np.linspace(1, 2, 5) * coord_unit), "w": ("z", np.linspace(-1, 0, 8) * coord_unit), } ds = xr.Dataset( data_vars={ "a": xr.DataArray(data=array1, dims=("x", "y")), "b": xr.DataArray(data=array2, dims=("x", "z")), }, coords=coords, ) expected = attach_units(func(strip_units(ds)), extract_units(ds)) actual = func(ds) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize("dim", ("x", "y", "z", "t", "all")) @pytest.mark.parametrize( "shape", ( pytest.param((10, 20), id="nothing squeezable"), pytest.param((10, 20, 1), id="last dimension squeezable"), pytest.param((10, 1, 20), id="middle dimension squeezable"), pytest.param((1, 10, 20), id="first dimension squeezable"), pytest.param((1, 10, 1, 20), id="first and last dimension squeezable"), ), ) def test_squeeze(self, shape, dim, dtype): names = "xyzt" dim_lengths = dict(zip(names, shape, strict=False)) array1 = ( np.linspace(0, 1, 10 * 20).astype(dtype).reshape(shape) * unit_registry.degK ) array2 = 
( np.linspace(1, 2, 10 * 20).astype(dtype).reshape(shape) * unit_registry.Pa ) ds = xr.Dataset( data_vars={ "a": (tuple(names[: len(shape)]), array1), "b": (tuple(names[: len(shape)]), array2), }, ) units = extract_units(ds) kwargs = {"dim": dim} if dim != "all" and dim_lengths.get(dim, 0) == 1 else {} expected = attach_units(strip_units(ds).squeeze(**kwargs), units) actual = ds.squeeze(**kwargs) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize("variant", ("data", "coords")) @pytest.mark.parametrize( "func", ( pytest.param( method("interp"), marks=pytest.mark.xfail(reason="uses scipy") ), method("reindex"), ), ids=repr, ) def test_interp_reindex(self, func, variant, dtype): variants = { "data": (unit_registry.m, 1), "coords": (1, unit_registry.m), } data_unit, coord_unit = variants.get(variant) array1 = np.linspace(-1, 0, 10).astype(dtype) * data_unit array2 = np.linspace(0, 1, 10).astype(dtype) * data_unit y = np.arange(10) * coord_unit x = np.arange(10) new_x = np.arange(8) + 0.5 ds = xr.Dataset( {"a": ("x", array1), "b": ("x", array2)}, coords={"x": x, "y": ("x", y)} ) units = extract_units(ds) expected = attach_units(func(strip_units(ds), x=new_x), units) actual = func(ds, x=new_x) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize("func", (method("interp"), method("reindex")), ids=repr) def test_interp_reindex_indexing(self, func, unit, error, dtype): array1 = np.linspace(-1, 0, 10).astype(dtype) array2 = np.linspace(0, 1, 10).astype(dtype) x = np.arange(10) * unit_registry.m new_x = (np.arange(8) + 0.5) * unit ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}, coords={"x": x}) units = extract_units(ds) if error is not None: with pytest.raises(error): func(ds, x=new_x) return expected = attach_units(func(strip_units(ds), x=new_x), units) actual = func(ds, x=new_x) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize("variant", ("data", "coords")) @pytest.mark.parametrize( "func", ( pytest.param( method("interp_like"), marks=pytest.mark.xfail(reason="uses scipy") ), method("reindex_like"), ), ids=repr, ) def test_interp_reindex_like(self, func, variant, dtype): variants = { "data": (unit_registry.m, 1), "coords": (1, unit_registry.m), } data_unit, coord_unit = variants.get(variant) array1 = np.linspace(-1, 0, 10).astype(dtype) * data_unit array2 = np.linspace(0, 1, 10).astype(dtype) * data_unit y = np.arange(10) * coord_unit x = np.arange(10) new_x = np.arange(8) + 0.5 ds = xr.Dataset( {"a": ("x", array1), "b": ("x", array2)}, coords={"x": x, "y": ("x", y)} ) units = extract_units(ds) other = xr.Dataset({"a": ("x", np.empty_like(new_x))}, coords={"x": new_x}) expected = attach_units(func(strip_units(ds), other), units) actual = func(ds, other) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.skip(reason="indexes don't support units") @pytest.mark.parametrize( "unit,error", ( pytest.param(1, DimensionalityError, id="no_unit"), pytest.param( unit_registry.dimensionless, DimensionalityError, 
id="dimensionless" ), pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"), pytest.param(unit_registry.cm, None, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "func", (method("interp_like"), method("reindex_like")), ids=repr ) def test_interp_reindex_like_indexing(self, func, unit, error, dtype): array1 = np.linspace(-1, 0, 10).astype(dtype) array2 = np.linspace(0, 1, 10).astype(dtype) x = np.arange(10) * unit_registry.m new_x = (np.arange(8) + 0.5) * unit ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)}, coords={"x": x}) units = extract_units(ds) other = xr.Dataset({"a": ("x", np.empty_like(new_x))}, coords={"x": new_x}) if error is not None: with pytest.raises(error): func(ds, other) return expected = attach_units(func(strip_units(ds), other), units) actual = func(ds, other) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize( "func", ( method("diff", dim="x"), method("differentiate", coord="x"), method("integrate", coord="x"), method("quantile", q=[0.25, 0.75]), method("reduce", func=np.sum, dim="x"), method("map", np.fabs), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_computation(self, func, variant, dtype, compute_backend): variants = { "data": ((unit_registry.degK, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit_registry.m, 1), "coords": ((1, 1), 1, unit_registry.m), } (unit1, unit2), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(-5, 5, 4 * 5).reshape(4, 5).astype(dtype) * unit1 array2 = np.linspace(10, 20, 4 * 3).reshape(4, 3).astype(dtype) * unit2 x = np.arange(4) * dim_unit y = np.arange(5) * dim_unit z = np.arange(3) * dim_unit ds = xr.Dataset( data_vars={ "a": xr.DataArray(data=array1, dims=("x", "y")), "b": xr.DataArray(data=array2, dims=("x", "z")), }, coords={"x": x, "y": y, "z": z, "y2": ("y", np.arange(5) * coord_unit)}, ) units = extract_units(ds) expected = attach_units(func(strip_units(ds)), units) actual = func(ds) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", ( method("groupby", "x"), method("groupby_bins", "x", bins=2), method("coarsen", x=2), pytest.param( method("rolling", x=3), marks=pytest.mark.xfail(reason="strips units") ), pytest.param( method("rolling_exp", x=3), marks=pytest.mark.xfail( reason="numbagg functions are not supported by pint" ), ), method("weighted", xr.DataArray(data=np.linspace(0, 1, 5), dims="y")), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_computation_objects(self, func, variant, dtype): variants = { "data": ((unit_registry.degK, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit_registry.m, 1), "coords": ((1, 1), 1, unit_registry.m), } (unit1, unit2), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(-5, 5, 4 * 5).reshape(4, 5).astype(dtype) * unit1 array2 = np.linspace(10, 20, 4 * 3).reshape(4, 3).astype(dtype) * unit2 x = np.arange(4) * dim_unit y = np.arange(5) * dim_unit z = np.arange(3) * dim_unit ds = xr.Dataset( data_vars={"a": (("x", "y"), array1), "b": (("x", "z"), array2)}, coords={"x": x, "y": y, "z": z, "y2": ("y", np.arange(5) * coord_unit)}, ) units = extract_units(ds) args = [] 
if func.name != "groupby" else ["y"] # Doesn't work with flox because pint doesn't implement # ufunc.reduceat or np.bincount # kwargs = {"engine": "numpy"} if "groupby" in func.name else {} kwargs = {} expected = attach_units(func(strip_units(ds)).mean(*args, **kwargs), units) actual = func(ds).mean(*args, **kwargs) assert_units_equal(expected, actual) assert_allclose(expected, actual) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_resample(self, variant, dtype): # TODO: move this to test_computation_objects variants = { "data": ((unit_registry.degK, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit_registry.m, 1), "coords": ((1, 1), 1, unit_registry.m), } (unit1, unit2), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype) * unit1 array2 = np.linspace(10, 20, 10 * 8).reshape(10, 8).astype(dtype) * unit2 t = xr.date_range("10-09-2010", periods=array1.shape[0], freq="YE") y = np.arange(5) * dim_unit z = np.arange(8) * dim_unit u = np.linspace(-1, 0, 5) * coord_unit ds = xr.Dataset( data_vars={"a": (("time", "y"), array1), "b": (("time", "z"), array2)}, coords={"time": t, "y": y, "z": z, "u": ("y", u)}, ) units = extract_units(ds) func = method("resample", time="6ME") expected = attach_units(func(strip_units(ds)).mean(), units) actual = func(ds).mean() assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize( "func", ( method("assign", c=lambda ds: 10 * ds.b), method("assign_coords", v=("x", np.arange(5) * unit_registry.s)), method("first"), method("last"), method("quantile", q=[0.25, 0.5, 0.75], dim="x"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_grouped_operations(self, func, variant, dtype, compute_backend): variants = { "data": ((unit_registry.degK, unit_registry.Pa), 1, 1), "dims": ((1, 1), unit_registry.m, 1), "coords": ((1, 1), 1, unit_registry.m), } (unit1, unit2), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(-5, 5, 5 * 4).reshape(5, 4).astype(dtype) * unit1 array2 = np.linspace(10, 20, 5 * 4 * 3).reshape(5, 4, 3).astype(dtype) * unit2 x = np.arange(5) * dim_unit y = np.arange(4) * dim_unit z = np.arange(3) * dim_unit u = np.linspace(-1, 0, 4) * coord_unit ds = xr.Dataset( data_vars={"a": (("x", "y"), array1), "b": (("x", "y", "z"), array2)}, coords={"x": x, "y": y, "z": z, "u": ("y", u)}, ) assigned_units = {"c": unit2, "v": unit_registry.s} units = merge_mappings(extract_units(ds), assigned_units) stripped_kwargs = { name: strip_units(value) for name, value in func.kwargs.items() } expected = attach_units( func(strip_units(ds).groupby("y", squeeze=False), **stripped_kwargs), units ) actual = func(ds.groupby("y", squeeze=False)) assert_units_equal(expected, actual) assert_equal(expected, actual) @pytest.mark.parametrize( "func", ( method("pipe", lambda ds: ds * 10), method("assign", d=lambda ds: ds.b * 10), method("assign_coords", y2=("y", np.arange(4) * unit_registry.mm)), method("assign_attrs", attr1="value"), method("rename", x2="x_mm"), method("rename_vars", c="temperature"), method("rename_dims", x="offset_x"), method("swap_dims", {"x": "u"}), pytest.param( method( "expand_dims", v=np.linspace(10, 20, 12) * unit_registry.s, axis=1 ), 
marks=pytest.mark.skip(reason="indexes don't support units"), ), method("drop_vars", "x"), method("drop_dims", "z"), method("set_coords", names="c"), method("reset_coords", names="x2"), method("copy"), ), ids=repr, ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) def test_content_manipulation(self, func, variant, dtype): variants = { "data": ( (unit_registry.m**3, unit_registry.Pa, unit_registry.degK), 1, 1, ), "dims": ((1, 1, 1), unit_registry.m, 1), "coords": ((1, 1, 1), 1, unit_registry.m), } (unit1, unit2, unit3), dim_unit, coord_unit = variants.get(variant) array1 = np.linspace(-5, 5, 5 * 4).reshape(5, 4).astype(dtype) * unit1 array2 = np.linspace(10, 20, 5 * 4 * 3).reshape(5, 4, 3).astype(dtype) * unit2 array3 = np.linspace(0, 10, 5).astype(dtype) * unit3 x = np.arange(5) * dim_unit y = np.arange(4) * dim_unit z = np.arange(3) * dim_unit x2 = np.linspace(-1, 0, 5) * coord_unit ds = xr.Dataset( data_vars={ "a": (("x", "y"), array1), "b": (("x", "y", "z"), array2), "c": ("x", array3), }, coords={"x": x, "y": y, "z": z, "x2": ("x", x2)}, ) new_units = { "y2": unit_registry.mm, "x_mm": coord_unit, "offset_x": unit_registry.m, "d": unit2, "temperature": unit3, } units = merge_mappings(extract_units(ds), new_units) stripped_kwargs = { key: strip_units(value) for key, value in func.kwargs.items() } expected = attach_units(func(strip_units(ds), **stripped_kwargs), units) actual = func(ds) assert_units_equal(expected, actual) if func.name == "rename_dims": assert_equal(expected, actual, check_default_indexes=False) else: assert_equal(expected, actual) @pytest.mark.parametrize( "unit,error", ( pytest.param(1, xr.MergeError, id="no_unit"), pytest.param( unit_registry.dimensionless, xr.MergeError, id="dimensionless" ), pytest.param(unit_registry.s, xr.MergeError, id="incompatible_unit"), pytest.param(unit_registry.cm, xr.MergeError, id="compatible_unit"), pytest.param(unit_registry.m, None, id="identical_unit"), ), ) @pytest.mark.parametrize( "variant", ( "data", pytest.param( "dims", marks=pytest.mark.skip(reason="indexes don't support units") ), "coords", ), ) @pytest.mark.filterwarnings( "ignore:.*the default value for join will change:FutureWarning" ) @pytest.mark.filterwarnings( "ignore:.*the default value for compat will change:FutureWarning" ) def test_merge(self, variant, unit, error, dtype): left_variants = { "data": (unit_registry.m, 1, 1), "dims": (1, unit_registry.m, 1), "coords": (1, 1, unit_registry.m), } left_data_unit, left_dim_unit, left_coord_unit = left_variants.get(variant) right_variants = { "data": (unit, 1, 1), "dims": (1, unit, 1), "coords": (1, 1, unit), } right_data_unit, right_dim_unit, right_coord_unit = right_variants.get(variant) left_array = np.arange(10).astype(dtype) * left_data_unit right_array = np.arange(-5, 5).astype(dtype) * right_data_unit left_dim = np.arange(10, 20) * left_dim_unit right_dim = np.arange(5, 15) * right_dim_unit left_coord = np.arange(-10, 0) * left_coord_unit right_coord = np.arange(-15, -5) * right_coord_unit left = xr.Dataset( data_vars={"a": ("x", left_array)}, coords={"x": left_dim, "y": ("x", left_coord)}, ) right = xr.Dataset( data_vars={"a": ("x", right_array)}, coords={"x": right_dim, "y": ("x", right_coord)}, ) units = extract_units(left) if error is not None: with pytest.raises(error): left.merge(right) return converted = convert_units(right, units) expected = attach_units(strip_units(left).merge(strip_units(converted)), units) 
actual = left.merge(right) assert_units_equal(expected, actual) assert_equal(expected, actual) @requires_dask class TestPintWrappingDask: def test_duck_array_ops(self): import dask.array d = dask.array.array([1, 2, 3]) q = unit_registry.Quantity(d, units="m") da = xr.DataArray(q, dims="x") actual = da.mean().compute() actual.name = None expected = xr.DataArray(unit_registry.Quantity(np.array(2.0), units="m")) assert_units_equal(expected, actual) # Don't use isinstance b/c we don't want to allow subclasses through assert type(expected.data) is type(actual.data) @requires_matplotlib class TestPlots(PlotTestCase): @pytest.mark.parametrize( "coord_unit, coord_attrs", [ (1, {"units": "meter"}), pytest.param( unit_registry.m, {}, marks=pytest.mark.xfail(reason="indexes don't support units"), ), ], ) def test_units_in_line_plot_labels(self, coord_unit, coord_attrs): arr = np.linspace(1, 10, 3) * unit_registry.Pa coord_arr = np.linspace(1, 3, 3) * coord_unit x_coord = xr.DataArray(coord_arr, dims="x", attrs=coord_attrs) da = xr.DataArray(data=arr, dims="x", coords={"x": x_coord}, name="pressure") da.plot.line() ax = plt.gca() assert ax.get_ylabel() == "pressure [pascal]" assert ax.get_xlabel() == "x [meter]" @pytest.mark.parametrize( "coord_unit, coord_attrs", [ (1, {"units": "meter"}), pytest.param( unit_registry.m, {}, marks=pytest.mark.xfail(reason="indexes don't support units"), ), ], ) def test_units_in_slice_line_plot_labels_sel(self, coord_unit, coord_attrs): arr = xr.DataArray( name="var_a", data=np.array([[1, 2], [3, 4]]), coords=dict( a=("a", np.array([5, 6]) * coord_unit, coord_attrs), b=("b", np.array([7, 8]) * coord_unit, coord_attrs), ), dims=("a", "b"), ) arr.sel(a=5).plot(marker="o") assert plt.gca().get_title() == "a = 5 [meter]" @pytest.mark.parametrize( "coord_unit, coord_attrs", [ (1, {"units": "meter"}), pytest.param( unit_registry.m, {}, marks=pytest.mark.xfail(reason="pint.errors.UnitStrippedWarning"), ), ], ) def test_units_in_slice_line_plot_labels_isel(self, coord_unit, coord_attrs): arr = xr.DataArray( name="var_a", data=np.array([[1, 2], [3, 4]]), coords=dict( a=("x", np.array([5, 6]) * coord_unit, coord_attrs), b=("y", np.array([7, 8])), ), dims=("x", "y"), ) arr.isel(x=0).plot(marker="o") assert plt.gca().get_title() == "a = 5 [meter]" def test_units_in_2d_plot_colorbar_label(self): arr = np.ones((2, 3)) * unit_registry.Pa da = xr.DataArray(data=arr, dims=["x", "y"], name="pressure") fig, (ax, cax) = plt.subplots(1, 2) ax = da.plot.contourf(ax=ax, cbar_ax=cax, add_colorbar=True) assert cax.get_ylabel() == "pressure [pascal]" def test_units_facetgrid_plot_labels(self): arr = np.ones((2, 3)) * unit_registry.Pa da = xr.DataArray(data=arr, dims=["x", "y"], name="pressure") fig, (ax, cax) = plt.subplots(1, 2) fgrid = da.plot.line(x="x", col="y") assert fgrid.axs[0, 0].get_ylabel() == "pressure [pascal]" def test_units_facetgrid_2d_imshow_plot_colorbar_labels(self): arr = np.ones((2, 3, 4, 5)) * unit_registry.Pa da = xr.DataArray(data=arr, dims=["x", "y", "z", "w"], name="pressure") da.plot.imshow(x="x", y="y", col="w") # no colorbar to check labels of def test_units_facetgrid_2d_contourf_plot_colorbar_labels(self): arr = np.ones((2, 3, 4)) * unit_registry.Pa da = xr.DataArray(data=arr, dims=["x", "y", "z"], name="pressure") fig, (ax1, ax2, ax3, cax) = plt.subplots(1, 4) fgrid = da.plot.contourf(x="x", y="y", col="z") assert fgrid.cbar.ax.get_ylabel() == "pressure [pascal]" 
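# --- Illustrative sketch, not part of the archived test suite ---------------
# The pint-related tests above rely on xarray wrapping a pint.Quantity as the
# underlying duck array; as a minimal hedged example (assuming only that
# numpy, pint and xarray are installed; the names `ureg` and `pressure` are
# chosen for illustration), unit propagation through arithmetic and
# reductions looks like this:
import numpy as np
import pint
import xarray as xr

ureg = pint.UnitRegistry()
data = np.linspace(1000.0, 900.0, 5) * ureg.Pa      # Quantity-backed ndarray
pressure = xr.DataArray(data, dims="x", name="pressure")

# Arithmetic and reductions act on the wrapped Quantity, so units survive.
assert (pressure * 2.0).data.units == ureg.Pa
assert pressure.mean().data.units == ureg.Pa
# Keeping the Quantity as the wrapped ``.data`` is what lets helpers such as
# ``extract_units``/``strip_units`` in the tests above round-trip the units
# without touching dimension coordinates (which still cannot carry units).
# ---------------------------------------------------------------------------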
xarray-2025.09.0/xarray/tests/test_utils.py000066400000000000000000000300041505620616400205620ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Hashable from types import EllipsisType import numpy as np import pandas as pd import pytest from xarray.core import duck_array_ops, utils from xarray.core.utils import ( attempt_import, either_dict_or_kwargs, infix_dims, iterate_nested, ) from xarray.tests import assert_array_equal, requires_dask class TestAlias: def test(self): def new_method(): pass old_method = utils.alias(new_method, "old_method") assert "deprecated" in old_method.__doc__ # type: ignore[operator] with pytest.warns(Warning, match="deprecated"): old_method() @pytest.mark.parametrize( ["a", "b", "expected"], [ [np.array(["a"]), np.array(["b"]), np.array(["a", "b"])], [np.array([1], dtype="int64"), np.array([2], dtype="int64"), pd.Index([1, 2])], ], ) def test_maybe_coerce_to_str(a, b, expected): index = pd.Index(a).append(pd.Index(b)) actual = utils.maybe_coerce_to_str(index, [a, b]) assert_array_equal(expected, actual) assert expected.dtype == actual.dtype def test_maybe_coerce_to_str_minimal_str_dtype(): a = np.array(["a", "a_long_string"]) index = pd.Index(["a"]) actual = utils.maybe_coerce_to_str(index, [a]) expected = np.array("a") assert_array_equal(expected, actual) assert expected.dtype == actual.dtype class TestArrayEquiv: def test_0d(self): # verify our work around for pd.isnull not working for 0-dimensional # object arrays assert duck_array_ops.array_equiv(0, np.array(0, dtype=object)) assert duck_array_ops.array_equiv(np.nan, np.array(np.nan, dtype=object)) assert not duck_array_ops.array_equiv(0, np.array(1, dtype=object)) class TestDictionaries: @pytest.fixture(autouse=True) def setup(self): self.x = {"a": "A", "b": "B"} self.y = {"c": "C", "b": "B"} self.z = {"a": "Z"} def test_equivalent(self): assert utils.equivalent(0, 0) assert utils.equivalent(np.nan, np.nan) assert utils.equivalent(0, np.array(0.0)) assert utils.equivalent([0], np.array([0])) assert utils.equivalent(np.array([0]), [0]) assert utils.equivalent(np.arange(3), 1.0 * np.arange(3)) assert not utils.equivalent(0, np.zeros(3)) def test_safe(self): # should not raise exception: utils.update_safety_check(self.x, self.y) def test_unsafe(self): with pytest.raises(ValueError): utils.update_safety_check(self.x, self.z) def test_compat_dict_intersection(self): assert {"b": "B"} == utils.compat_dict_intersection(self.x, self.y) assert {} == utils.compat_dict_intersection(self.x, self.z) def test_compat_dict_union(self): assert {"a": "A", "b": "B", "c": "C"} == utils.compat_dict_union(self.x, self.y) with pytest.raises( ValueError, match=r"unsafe to merge dictionaries without " "overriding values; conflicting key", ): utils.compat_dict_union(self.x, self.z) def test_dict_equiv(self): x: dict = {} x["a"] = 3 x["b"] = np.array([1, 2, 3]) y: dict = {} y["b"] = np.array([1.0, 2.0, 3.0]) y["a"] = 3 assert utils.dict_equiv(x, y) # two nparrays are equal y["b"] = [1, 2, 3] # np.array not the same as a list assert utils.dict_equiv(x, y) # nparray == list x["b"] = [1.0, 2.0, 3.0] assert utils.dict_equiv(x, y) # list vs. 
list x["c"] = None assert not utils.dict_equiv(x, y) # new key in x x["c"] = np.nan y["c"] = np.nan assert utils.dict_equiv(x, y) # as intended, nan is nan x["c"] = np.inf y["c"] = np.inf assert utils.dict_equiv(x, y) # inf == inf y = dict(y) assert utils.dict_equiv(x, y) # different dictionary types are fine y["b"] = 3 * np.arange(3) assert not utils.dict_equiv(x, y) # not equal when arrays differ def test_frozen(self): x = utils.Frozen(self.x) with pytest.raises(TypeError): x["foo"] = "bar" # type: ignore[index] with pytest.raises(TypeError): del x["a"] # type: ignore[attr-defined] with pytest.raises(AttributeError): x.update(self.y) # type: ignore[attr-defined] assert x.mapping == self.x assert repr(x) in ( "Frozen({'a': 'A', 'b': 'B'})", "Frozen({'b': 'B', 'a': 'A'})", ) def test_filtered(self): x = utils.FilteredMapping(keys={"a"}, mapping={"a": 1, "b": 2}) assert "a" in x assert "b" not in x assert x["a"] == 1 assert list(x) == ["a"] assert len(x) == 1 assert repr(x) == "FilteredMapping(keys={'a'}, mapping={'a': 1, 'b': 2})" assert dict(x) == {"a": 1} def test_repr_object(): obj = utils.ReprObject("foo") assert repr(obj) == "foo" assert isinstance(obj, Hashable) assert not isinstance(obj, str) def test_repr_object_magic_methods(): o1 = utils.ReprObject("foo") o2 = utils.ReprObject("foo") o3 = utils.ReprObject("bar") o4 = "foo" assert o1 == o2 assert o1 != o3 assert o1 != o4 assert hash(o1) == hash(o2) assert hash(o1) != hash(o3) assert hash(o1) != hash(o4) def test_is_remote_uri(): assert utils.is_remote_uri("http://example.com") assert utils.is_remote_uri("https://example.com") assert not utils.is_remote_uri(" http://example.com") assert not utils.is_remote_uri("example.nc") class Test_is_uniform_and_sorted: def test_sorted_uniform(self): assert utils.is_uniform_spaced(np.arange(5)) def test_sorted_not_uniform(self): assert not utils.is_uniform_spaced([-2, 1, 89]) def test_not_sorted_uniform(self): assert not utils.is_uniform_spaced([1, -1, 3]) def test_not_sorted_not_uniform(self): assert not utils.is_uniform_spaced([4, 1, 89]) def test_two_numbers(self): assert utils.is_uniform_spaced([0, 1.7]) def test_relative_tolerance(self): assert utils.is_uniform_spaced([0, 0.97, 2], rtol=0.1) class Test_hashable: def test_hashable(self): for v in [False, 1, (2,), (3, 4), "four"]: assert utils.hashable(v) for v in [[5, 6], ["seven", "8"], {9: "ten"}]: assert not utils.hashable(v) @requires_dask def test_dask_array_is_scalar(): # regression test for GH1684 import dask.array as da y = da.arange(8, chunks=4) assert not utils.is_scalar(y) def test_hidden_key_dict(): hidden_key = "_hidden_key" data = {"a": 1, "b": 2, hidden_key: 3} data_expected = {"a": 1, "b": 2} hkd = utils.HiddenKeyDict(data, [hidden_key]) assert len(hkd) == 2 assert hidden_key not in hkd for k, v in data_expected.items(): assert hkd[k] == v with pytest.raises(KeyError): hkd[hidden_key] with pytest.raises(KeyError): del hkd[hidden_key] def test_either_dict_or_kwargs(): result = either_dict_or_kwargs(dict(a=1), {}, "foo") expected = dict(a=1) assert result == expected result = either_dict_or_kwargs({}, dict(a=1), "foo") expected = dict(a=1) assert result == expected with pytest.raises(ValueError, match=r"foo"): result = either_dict_or_kwargs(dict(a=1), dict(a=1), "foo") @pytest.mark.parametrize( ["supplied", "all_", "expected"], [ (list("abc"), list("abc"), list("abc")), (["a", ..., "c"], list("abc"), list("abc")), (["a", ...], list("abc"), list("abc")), (["c", ...], list("abc"), list("cab")), ([..., "b"], list("abc"), 
list("acb")), ([...], list("abc"), list("abc")), ], ) def test_infix_dims(supplied, all_, expected): result = list(infix_dims(supplied, all_)) assert result == expected @pytest.mark.parametrize( ["supplied", "all_"], [([..., ...], list("abc")), ([...], list("aac"))] ) def test_infix_dims_errors(supplied, all_): with pytest.raises(ValueError): list(infix_dims(supplied, all_)) @pytest.mark.parametrize( ["dim", "expected"], [ pytest.param("a", ("a",), id="str"), pytest.param(["a", "b"], ("a", "b"), id="list_of_str"), pytest.param(["a", 1], ("a", 1), id="list_mixed"), pytest.param(["a", ...], ("a", ...), id="list_with_ellipsis"), pytest.param(("a", "b"), ("a", "b"), id="tuple_of_str"), pytest.param(["a", ("b", "c")], ("a", ("b", "c")), id="list_with_tuple"), pytest.param((("b", "c"),), (("b", "c"),), id="tuple_of_tuple"), pytest.param({"a", 1}, tuple({"a", 1}), id="non_sequence_collection"), pytest.param((), (), id="empty_tuple"), pytest.param(set(), (), id="empty_collection"), pytest.param(None, None, id="None"), pytest.param(..., ..., id="ellipsis"), ], ) def test_parse_dims_as_tuple(dim, expected) -> None: all_dims = ("a", "b", 1, ("b", "c")) # selection of different Hashables actual = utils.parse_dims_as_tuple(dim, all_dims, replace_none=False) assert actual == expected def test_parse_dims_set() -> None: all_dims = ("a", "b", 1, ("b", "c")) # selection of different Hashables dim = {"a", 1} actual = utils.parse_dims_as_tuple(dim, all_dims) assert set(actual) == dim @pytest.mark.parametrize( "dim", [pytest.param(None, id="None"), pytest.param(..., id="ellipsis")] ) def test_parse_dims_replace_none(dim: EllipsisType | None) -> None: all_dims = ("a", "b", 1, ("b", "c")) # selection of different Hashables actual = utils.parse_dims_as_tuple(dim, all_dims, replace_none=True) assert actual == all_dims @pytest.mark.parametrize( "dim", [ pytest.param("x", id="str_missing"), pytest.param(["a", "x"], id="list_missing_one"), pytest.param(["x", 2], id="list_missing_all"), ], ) def test_parse_dims_raises(dim) -> None: all_dims = ("a", "b", 1, ("b", "c")) # selection of different Hashables with pytest.raises(ValueError, match="'x'"): utils.parse_dims_as_tuple(dim, all_dims, check_exists=True) @pytest.mark.parametrize( ["dim", "expected"], [ pytest.param("a", ("a",), id="str"), pytest.param(["a", "b"], ("a", "b"), id="list"), pytest.param([...], ("a", "b", "c"), id="list_only_ellipsis"), pytest.param(["a", ...], ("a", "b", "c"), id="list_with_ellipsis"), pytest.param(["a", ..., "b"], ("a", "c", "b"), id="list_with_middle_ellipsis"), ], ) def test_parse_ordered_dims(dim, expected) -> None: all_dims = ("a", "b", "c") actual = utils.parse_ordered_dims(dim, all_dims) assert actual == expected def test_parse_ordered_dims_raises() -> None: all_dims = ("a", "b", "c") with pytest.raises(ValueError, match="'x' do not exist"): utils.parse_ordered_dims("x", all_dims, check_exists=True) with pytest.raises(ValueError, match="repeated dims"): utils.parse_ordered_dims(["a", ...], all_dims + ("a",)) with pytest.raises(ValueError, match="More than one ellipsis"): utils.parse_ordered_dims(["a", ..., "b", ...], all_dims) @pytest.mark.parametrize( "nested_list, expected", [ ([], []), ([1], [1]), ([1, 2, 3], [1, 2, 3]), ([[1]], [1]), ([[1, 2], [3, 4]], [1, 2, 3, 4]), ([[[1, 2, 3], [4]], [5, 6]], [1, 2, 3, 4, 5, 6]), ], ) def test_iterate_nested(nested_list, expected): assert list(iterate_nested(nested_list)) == expected def test_find_stack_level(): assert utils.find_stack_level() == 1 assert 
utils.find_stack_level(test_mode=True) == 2 def f(): return utils.find_stack_level(test_mode=True) assert f() == 3 def test_attempt_import() -> None: """Test optional dependency handling.""" np = attempt_import("numpy") assert np.__name__ == "numpy" with pytest.raises(ImportError, match="The foo package is required"): attempt_import(module="foo") with pytest.raises(ImportError, match="The foo package is required"): attempt_import(module="foo.bar") xarray-2025.09.0/xarray/tests/test_variable.py000066400000000000000000003471651505620616400212320ustar00rootroot00000000000000from __future__ import annotations import warnings from abc import ABC from copy import copy, deepcopy from datetime import datetime, timedelta from textwrap import dedent from typing import Generic import numpy as np import pandas as pd import pytest import pytz from xarray import DataArray, Dataset, IndexVariable, Variable, set_options from xarray.core import dtypes, duck_array_ops, indexing from xarray.core.common import full_like, ones_like, zeros_like from xarray.core.extension_array import PandasExtensionArray from xarray.core.indexing import ( BasicIndexer, CopyOnWriteArray, DaskIndexingAdapter, LazilyIndexedArray, MemoryCachedArray, NumpyIndexingAdapter, OuterIndexer, PandasIndexingAdapter, VectorizedIndexer, ) from xarray.core.types import T_DuckArray from xarray.core.utils import NDArrayMixin from xarray.core.variable import as_compatible_data, as_variable from xarray.namedarray.pycompat import array_type from xarray.tests import ( assert_allclose, assert_array_equal, assert_equal, assert_identical, assert_no_warnings, has_dask_ge_2024_11_0, has_pandas_3, raise_if_dask_computes, requires_bottleneck, requires_cupy, requires_dask, requires_pint, requires_sparse, source_ndarray, ) from xarray.tests.test_namedarray import NamedArraySubclassobjects dask_array_type = array_type("dask") _PAD_XR_NP_ARGS = [ [{"x": (2, 1)}, ((2, 1), (0, 0), (0, 0))], [{"x": 1}, ((1, 1), (0, 0), (0, 0))], [{"y": (0, 3)}, ((0, 0), (0, 3), (0, 0))], [{"x": (3, 1), "z": (2, 0)}, ((3, 1), (0, 0), (2, 0))], [{"x": (3, 1), "z": 2}, ((3, 1), (0, 0), (2, 2))], ] @pytest.fixture def var(): return Variable(dims=list("xyz"), data=np.random.rand(3, 4, 5)) @pytest.mark.parametrize( "data", [ np.array(["a", "bc", "def"], dtype=object), np.array(["2019-01-01", "2019-01-02", "2019-01-03"], dtype="datetime64[ns]"), ], ) def test_as_compatible_data_writeable(data): pd.set_option("mode.copy_on_write", True) # GH8843, ensure writeable arrays for data_vars even with # pandas copy-on-write mode assert as_compatible_data(data).flags.writeable pd.reset_option("mode.copy_on_write") class VariableSubclassobjects(NamedArraySubclassobjects, ABC): @pytest.fixture def target(self, data): data = 0.5 * np.arange(10).reshape(2, 5) return Variable(["x", "y"], data) def test_getitem_dict(self): v = self.cls(["x"], np.random.randn(5)) actual = v[{"x": 0}] expected = v[0] assert_identical(expected, actual) def test_getitem_1d(self): data = np.array([0, 1, 2]) v = self.cls(["x"], data) v_new = v[dict(x=[0, 1])] assert v_new.dims == ("x",) assert_array_equal(v_new, data[[0, 1]]) v_new = v[dict(x=slice(None))] assert v_new.dims == ("x",) assert_array_equal(v_new, data) v_new = v[dict(x=Variable("a", [0, 1]))] assert v_new.dims == ("a",) assert_array_equal(v_new, data[[0, 1]]) v_new = v[dict(x=1)] assert v_new.dims == () assert_array_equal(v_new, data[1]) # tuple argument v_new = v[slice(None)] assert v_new.dims == ("x",) assert_array_equal(v_new, data) def 
test_getitem_1d_fancy(self): v = self.cls(["x"], [0, 1, 2]) # 1d-variable should be indexable by multi-dimensional Variable ind = Variable(("a", "b"), [[0, 1], [0, 1]]) v_new = v[ind] assert v_new.dims == ("a", "b") expected = np.array(v._data)[([0, 1], [0, 1]), ...] assert_array_equal(v_new, expected) # boolean indexing ind = Variable(("x",), [True, False, True]) v_new = v[ind] assert_identical(v[[0, 2]], v_new) v_new = v[[True, False, True]] assert_identical(v[[0, 2]], v_new) with pytest.raises(IndexError, match=r"Boolean indexer should"): ind = Variable(("a",), [True, False, True]) v[ind] def test_getitem_with_mask(self): v = self.cls(["x"], [0, 1, 2]) assert_identical(v._getitem_with_mask(-1), Variable((), np.nan)) assert_identical( v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1]) ) assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1])) assert_identical( v._getitem_with_mask([0, -1, 1], fill_value=-99), self.cls(["x"], [0, -99, 1]), ) def test_getitem_with_mask_size_zero(self): v = self.cls(["x"], []) assert_identical(v._getitem_with_mask(-1), Variable((), np.nan)) assert_identical( v._getitem_with_mask([-1, -1, -1]), self.cls(["x"], [np.nan, np.nan, np.nan]), ) def test_getitem_with_mask_nd_indexer(self): v = self.cls(["x"], [0, 1, 2]) indexer = Variable(("x", "y"), [[0, -1], [-1, 2]]) assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer) def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None): """Given a 1-dimensional variable, verify that the variable is indexed like a numpy.ndarray. """ assert variable[0].shape == () assert variable[0].ndim == 0 assert variable[0].size == 1 # test identity assert variable.equals(variable.copy()) assert variable.identical(variable.copy()) # check value is equal for both ndarray and Variable with warnings.catch_warnings(): warnings.filterwarnings("ignore", "In the future, 'NAT == x'") np.testing.assert_equal(variable.values[0], expected_value0) np.testing.assert_equal(variable[0].values, expected_value0) # check type or dtype is consistent for both ndarray and Variable if expected_dtype is None: # check output type instead of array dtype assert type(variable.values[0]) is type(expected_value0) assert type(variable[0].values) is type(expected_value0) elif expected_dtype is not False: assert variable.values[0].dtype == expected_dtype assert variable[0].values.dtype == expected_dtype def test_index_0d_int(self): for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]: x = self.cls(["x"], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_float(self): for value, dtype in [(0.5, float), (np.float32(0.5), np.float32)]: x = self.cls(["x"], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_string(self): value = "foo" dtype = np.dtype("U3") x = self.cls(["x"], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_datetime(self): d = datetime(2000, 1, 1) x = self.cls(["x"], [d]) self._assertIndexedLikeNDArray(x, np.datetime64(d)) x = self.cls(["x"], [np.datetime64(d)]) self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[us]") expected_unit = "us" if has_pandas_3 else "ns" x = self.cls(["x"], pd.DatetimeIndex([d])) self._assertIndexedLikeNDArray( x, np.datetime64(d), f"datetime64[{expected_unit}]" ) def test_index_0d_timedelta64(self): td = timedelta(hours=1) # todo: discussion needed x = self.cls(["x"], [np.timedelta64(td)]) self._assertIndexedLikeNDArray( x, np.timedelta64(td), 
np.dtype("timedelta64[us]") ) x = self.cls(["x"], pd.to_timedelta([td])) self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]") def test_index_0d_not_a_time(self): d = np.datetime64("NaT", "ns") x = self.cls(["x"], [d]) self._assertIndexedLikeNDArray(x, d) def test_index_0d_object(self): class HashableItemWrapper: def __init__(self, item): self.item = item def __eq__(self, other): return self.item == other.item def __hash__(self): return hash(self.item) def __repr__(self): return f"{type(self).__name__}(item={self.item!r})" item = HashableItemWrapper((1, 2, 3)) x = self.cls("x", [item]) self._assertIndexedLikeNDArray(x, item, expected_dtype=False) def test_0d_object_array_with_list(self): listarray = np.empty((1,), dtype=object) listarray[0] = [1, 2, 3] x = self.cls("x", listarray) assert_array_equal(x.data, listarray) assert_array_equal(x[0].data, listarray.squeeze()) assert_array_equal(x.squeeze().data, listarray.squeeze()) def test_index_and_concat_datetime(self): # regression test for #125 date_range = pd.date_range("2011-09-01", periods=10) for dates in [date_range, date_range.values, date_range.to_pydatetime()]: expected = self.cls("t", dates) for times in [ [expected[i] for i in range(10)], [expected[i : (i + 1)] for i in range(10)], [expected[[i]] for i in range(10)], ]: actual = Variable.concat(times, "t") assert expected.dtype == actual.dtype assert_array_equal(expected, actual) def test_0d_time_data(self): # regression test for #105 x = self.cls("time", pd.date_range("2000-01-01", periods=5)) expected = np.datetime64("2000-01-01", "ns") assert x[0].values == expected dt64_data = pd.date_range("1970-01-01", periods=3) @pytest.mark.parametrize( "values, unit", [ (dt64_data, "ns"), (dt64_data.values, "ns"), (dt64_data.values.astype("datetime64[m]"), "s"), (dt64_data.values.astype("datetime64[s]"), "s"), (dt64_data.values.astype("datetime64[ps]"), "ns"), ( dt64_data.to_pydatetime(), "us" if has_pandas_3 else "ns", ), ], ) def test_datetime64_conversion(self, values, unit): v = self.cls(["t"], values) assert v.dtype == np.dtype(f"datetime64[{unit}]") assert_array_equal(v.values, self.dt64_data.values) assert v.values.dtype == np.dtype(f"datetime64[{unit}]") td64_data = pd.timedelta_range(start=0, periods=3) @pytest.mark.parametrize( "values, unit", [ (td64_data, "ns"), (td64_data.values, "ns"), (td64_data.values.astype("timedelta64[m]"), "s"), (td64_data.values.astype("timedelta64[s]"), "s"), (td64_data.values.astype("timedelta64[ps]"), "ns"), (td64_data.to_pytimedelta(), "ns"), ], ) def test_timedelta64_conversion(self, values, unit): v = self.cls(["t"], values) assert v.dtype == np.dtype(f"timedelta64[{unit}]") assert_array_equal(v.values, self.td64_data.values) assert v.values.dtype == np.dtype(f"timedelta64[{unit}]") def test_object_conversion(self): data = np.arange(5).astype(str).astype(object) actual = self.cls("x", data) assert actual.dtype == data.dtype def test_pandas_data(self): v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1])) assert_identical(v, v[[0, 1, 2]]) v = self.cls(["x"], pd.Index([0, 1, 2])) assert v[0].values == v.values[0] def test_pandas_period_index(self): v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="D")) v = v.load() # for dask-based Variable assert v[0] == pd.Period("2000", freq="D") assert "PeriodArray" in repr(v) @pytest.mark.parametrize("dtype", [float, int]) def test_1d_math(self, dtype: np.typing.DTypeLike) -> None: x = np.arange(5, dtype=dtype) y = np.ones(5, dtype=dtype) # should we need 
`.to_base_variable()`? # probably a break that `+v` changes type? v = self.cls(["x"], x) base_v = v.to_base_variable() # unary ops assert_identical(base_v, +v) assert_identical(base_v, abs(v)) assert_array_equal((-v).values, -x) # binary ops with numbers assert_identical(base_v, v + 0) assert_identical(base_v, 0 + v) assert_identical(base_v, v * 1) if dtype is int: assert_identical(base_v, v << 0) assert_array_equal(v << 3, x << 3) assert_array_equal(v >> 2, x >> 2) # binary ops with numpy arrays assert_array_equal((v * x).values, x**2) assert_array_equal((x * v).values, x**2) assert_array_equal(v - y, v - 1) assert_array_equal(y - v, 1 - v) if dtype is int: assert_array_equal(v << x, x << x) assert_array_equal(v >> x, x >> x) # verify attributes are dropped v2 = self.cls(["x"], x, {"units": "meters"}) with set_options(keep_attrs=False): assert_identical(base_v, +v2) # binary ops with all variables assert_array_equal(v + v, 2 * v) w = self.cls(["x"], y, {"foo": "bar"}) assert_identical(v + w, self.cls(["x"], x + y).to_base_variable()) assert_array_equal((v * w).values, x * y) # something complicated assert_array_equal((v**2 * w - 1 + x).values, x**2 * y - 1 + x) # make sure dtype is preserved (for Index objects) assert dtype == (+v).dtype assert dtype == (+v).values.dtype assert dtype == (0 + v).dtype assert dtype == (0 + v).values.dtype # check types of returned data assert isinstance(+v, Variable) assert not isinstance(+v, IndexVariable) assert isinstance(0 + v, Variable) assert not isinstance(0 + v, IndexVariable) def test_1d_reduce(self): x = np.arange(5) v = self.cls(["x"], x) actual = v.sum() expected = Variable((), 10) assert_identical(expected, actual) assert type(actual) is Variable def test_array_interface(self): x = np.arange(5) v = self.cls(["x"], x) assert_array_equal(np.asarray(v), x) # test patched in methods assert_array_equal(v.astype(float), x.astype(float)) # think this is a break, that argsort changes the type assert_identical(v.argsort(), v.to_base_variable()) assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable()) # test ufuncs assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable()) assert isinstance(np.sin(v), Variable) assert not isinstance(np.sin(v), IndexVariable) def example_1d_objects(self): for data in [ range(3), 0.5 * np.arange(3), 0.5 * np.arange(3, dtype=np.float32), pd.date_range("2000-01-01", periods=3), np.array(["a", "b", "c"], dtype=object), ]: yield (self.cls("x", data), data) def test___array__(self): for v, data in self.example_1d_objects(): assert_array_equal(v.values, np.asarray(data)) assert_array_equal(np.asarray(v), np.asarray(data)) assert v[0].values == np.asarray(data)[0] assert np.asarray(v[0]) == np.asarray(data)[0] def test_equals_all_dtypes(self): for v, _ in self.example_1d_objects(): v2 = v.copy() assert v.equals(v2) assert v.identical(v2) assert v.no_conflicts(v2) assert v[0].equals(v2[0]) assert v[0].identical(v2[0]) assert v[0].no_conflicts(v2[0]) assert v[:2].equals(v2[:2]) assert v[:2].identical(v2[:2]) assert v[:2].no_conflicts(v2[:2]) def test_eq_all_dtypes(self): # ensure that we don't choke on comparisons for which numpy returns # scalars expected = Variable("x", 3 * [False]) for v, _ in self.example_1d_objects(): actual = "z" == v assert_identical(expected, actual) actual = ~("z" != v) assert_identical(expected, actual) def test_encoding_preserved(self): expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2}) for actual in [ expected.T, expected[...], expected.squeeze(), 
expected.isel(x=slice(None)), expected.set_dims({"x": 3}), expected.copy(deep=True), expected.copy(deep=False), ]: assert_identical(expected.to_base_variable(), actual.to_base_variable()) assert expected.encoding == actual.encoding def test_drop_encoding(self) -> None: encoding1 = {"scale_factor": 1} # encoding set via cls constructor v1 = self.cls(["a"], [0, 1, 2], encoding=encoding1) assert v1.encoding == encoding1 v2 = v1.drop_encoding() assert v1.encoding == encoding1 assert v2.encoding == {} # encoding set via setter encoding3 = {"scale_factor": 10} v3 = self.cls(["a"], [0, 1, 2], encoding=encoding3) assert v3.encoding == encoding3 v4 = v3.drop_encoding() assert v3.encoding == encoding3 assert v4.encoding == {} def test_concat(self): x = np.arange(5) y = np.arange(5, 10) v = self.cls(["a"], x) w = self.cls(["a"], y) assert_identical( Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b") ) assert_identical( Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b") ) assert_identical( Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b") ) with pytest.raises(ValueError, match=r"Variable has dimensions"): Variable.concat([v, Variable(["c"], y)], "b") # test indexers actual = Variable.concat( [v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a" ) expected = Variable("a", np.array([x, y]).ravel(order="F")) assert_identical(expected, actual) # test concatenating along a dimension v = Variable(["time", "x"], np.random.random((10, 8))) assert_identical(v, Variable.concat([v[:5], v[5:]], "time")) assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time")) assert_identical(v, Variable.concat([v[:1], v[1:]], "time")) # test dimension order assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x")) with pytest.raises(ValueError, match=r"all input arrays must have"): Variable.concat([v[:, 0], v[:, 1:]], "x") def test_concat_attrs(self): # always keep attrs from first variable v = self.cls("a", np.arange(5), {"foo": "bar"}) w = self.cls("a", np.ones(5)) expected = self.cls( "a", np.concatenate([np.arange(5), np.ones(5)]) ).to_base_variable() expected.attrs["foo"] = "bar" assert_identical(expected, Variable.concat([v, w], "a")) def test_concat_fixed_len_str(self): # regression test for #217 for kind in ["S", "U"]: x = self.cls("animal", np.array(["horse"], dtype=kind)) y = self.cls("animal", np.array(["aardvark"], dtype=kind)) actual = Variable.concat([x, y], "animal") expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind)) assert_equal(expected, actual) def test_concat_number_strings(self): # regression test for #305 a = self.cls("x", ["0", "1", "2"]) b = self.cls("x", ["3", "4"]) actual = Variable.concat([a, b], dim="x") expected = Variable("x", np.arange(5).astype(str)) assert_identical(expected, actual) assert actual.dtype.kind == expected.dtype.kind def test_concat_mixed_dtypes(self): a = self.cls("x", [0, 1]) b = self.cls("x", ["two"]) actual = Variable.concat([a, b], dim="x") expected = Variable("x", np.array([0, 1, "two"], dtype=object)) assert_identical(expected, actual) assert actual.dtype == object @pytest.mark.parametrize("deep", [True, False]) @pytest.mark.parametrize("astype", [float, int, str]) def test_copy(self, deep: bool, astype: type[object]) -> None: v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"}) w = v.copy(deep=deep) assert type(v) is type(w) assert_identical(v, w) assert v.dtype == w.dtype if self.cls is Variable: if deep: assert source_ndarray(v.values) is not 
source_ndarray(w.values) else: assert source_ndarray(v.values) is source_ndarray(w.values) assert_identical(v, copy(v)) def test_copy_deep_recursive(self) -> None: # GH:issue:7111 # direct recursion v = self.cls("x", [0, 1]) v.attrs["other"] = v # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError v.copy(deep=True) # indirect recursion v2 = self.cls("y", [2, 3]) v.attrs["other"] = v2 v2.attrs["other"] = v # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError v.copy(deep=True) v2.copy(deep=True) def test_copy_index(self): midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three") ) v = self.cls("x", midx) for deep in [True, False]: w = v.copy(deep=deep) assert isinstance(w._data, PandasIndexingAdapter) assert isinstance(w.to_index(), pd.MultiIndex) assert_array_equal(v._data.array, w._data.array) def test_copy_with_data(self) -> None: orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"}) new_data = np.array([[2.5, 5.0], [7.1, 43]]) actual = orig.copy(data=new_data) expected = orig.copy() expected.data = new_data assert_identical(expected, actual) def test_copy_with_data_errors(self) -> None: orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"}) new_data = [2.5, 5.0] with pytest.raises(ValueError, match=r"must match shape of object"): orig.copy(data=new_data) # type: ignore[arg-type] def test_copy_index_with_data(self) -> None: orig = IndexVariable("x", np.arange(5)) new_data = np.arange(5, 10) actual = orig.copy(data=new_data) expected = IndexVariable("x", np.arange(5, 10)) assert_identical(expected, actual) def test_copy_index_with_data_errors(self) -> None: orig = IndexVariable("x", np.arange(5)) new_data = np.arange(5, 20) with pytest.raises(ValueError, match=r"must match shape of object"): orig.copy(data=new_data) with pytest.raises(ValueError, match=r"Cannot assign to the .data"): orig.data = new_data with pytest.raises(ValueError, match=r"Cannot assign to the .values"): orig.values = new_data def test_replace(self): var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"}) result = var._replace() assert_identical(result, var) new_data = np.arange(4).reshape(2, 2) result = var._replace(data=new_data) assert_array_equal(result.data, new_data) def test_real_and_imag(self): v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"}) expected_re = self.cls("x", np.arange(3), {"foo": "bar"}) assert_identical(v.real, expected_re) expected_im = self.cls("x", -np.arange(3), {"foo": "bar"}) assert_identical(v.imag, expected_im) expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable() assert_allclose(abs(v), expected_abs) def test_aggregate_complex(self): # should skip NaNs v = self.cls("x", [1, 2j, np.nan]) expected = Variable((), 0.5 + 1j) assert_allclose(v.mean(), expected) def test_pandas_categorical_dtype(self): data = pd.Categorical(np.arange(10, dtype="int64")) v = self.cls("x", data) print(v) # should not error assert v.dtype == data.dtype def test_pandas_datetime64_with_tz(self): data = pd.date_range( start="2000-01-01", tz=pytz.timezone("America/New_York"), periods=10, freq="1h", ) v = self.cls("x", data) print(v) # should not error if v.dtype == np.dtype("O"): import dask.array as da assert isinstance(v.data, da.Array) else: assert v.dtype == data.dtype def test_multiindex(self): idx = pd.MultiIndex.from_product([list("abc"), [0, 1]]) v 
= self.cls("x", idx) assert_identical(Variable((), ("a", 0)), v[0]) assert_identical(v, v[:]) def test_load(self): array = self.cls("x", np.arange(5)) orig_data = array._data copied = array.copy(deep=True) if array.chunks is None: array.load() assert type(array._data) is type(orig_data) assert type(copied._data) is type(orig_data) assert_identical(array, copied) def test_getitem_advanced(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) v_data = v.compute().data # orthogonal indexing v_new = v[([0, 1], [1, 0])] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]]) v_new = v[[0, 1]] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v_data[[0, 1]]) # with mixed arguments ind = Variable(["a"], [0, 1]) v_new = v[dict(x=[0, 1], y=ind)] assert v_new.dims == ("x", "a") assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]]) # boolean indexing v_new = v[dict(x=[True, False], y=[False, True, False])] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v_data[0][1]) # with scalar variable ind = Variable((), 2) v_new = v[dict(y=ind)] expected = v[dict(y=2)] assert_array_equal(v_new, expected) # with boolean variable with wrong shape ind = np.array([True, False]) with pytest.raises(IndexError, match=r"Boolean array size 2 is "): v[Variable(("a", "b"), [[0, 1]]), ind] # boolean indexing with different dimension ind = Variable(["a"], [True, False, False]) with pytest.raises(IndexError, match=r"Boolean indexer should be"): v[dict(y=ind)] def test_getitem_uint_1d(self): # regression test for #1405 v = self.cls(["x"], [0, 1, 2]) v_data = v.compute().data v_new = v[np.array([0])] assert_array_equal(v_new, v_data[0]) v_new = v[np.array([0], dtype="uint64")] assert_array_equal(v_new, v_data[0]) def test_getitem_uint(self): # regression test for #1405 v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) v_data = v.compute().data v_new = v[np.array([0])] assert_array_equal(v_new, v_data[[0], :]) v_new = v[np.array([0], dtype="uint64")] assert_array_equal(v_new, v_data[[0], :]) v_new = v[np.uint64(0)] assert_array_equal(v_new, v_data[0, :]) def test_getitem_0d_array(self): # make sure 0d-np.array can be used as an indexer v = self.cls(["x"], [0, 1, 2]) v_data = v.compute().data v_new = v[np.array([0])[0]] assert_array_equal(v_new, v_data[0]) v_new = v[np.array(0)] assert_array_equal(v_new, v_data[0]) v_new = v[Variable((), np.array(0))] assert_array_equal(v_new, v_data[0]) def test_getitem_fancy(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) v_data = v.compute().data ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]]) v_new = v[ind] assert v_new.dims == ("a", "b", "y") assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :]) # It would be ok if indexed with the multi-dimensional array including # the same name ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]]) v_new = v[ind] assert v_new.dims == ("x", "b", "y") assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :]) ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]]) v_new = v[dict(y=ind)] assert v_new.dims == ("x", "a", "b") assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])]) ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=[1, 0], y=ind)] assert v_new.dims == ("x", "a", "b") assert_array_equal(v_new, v_data[[1, 0]][:, ind]) # along diagonal ind = Variable(["a"], [0, 1]) v_new = v[ind, ind] assert v_new.dims == ("a",) assert_array_equal(v_new, v_data[[0, 1], [0, 1]]) # with integer ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=0, y=ind)] assert v_new.dims == ("a", 
"b") assert_array_equal(v_new[0], v_data[0][[0, 0]]) assert_array_equal(v_new[1], v_data[0][[1, 1]]) # with slice ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=slice(None), y=ind)] assert v_new.dims == ("x", "a", "b") assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]]) ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=ind, y=slice(None))] assert v_new.dims == ("a", "b", "y") assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :]) ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=ind, y=slice(None, 1))] assert v_new.dims == ("a", "b", "y") assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)]) # slice matches explicit dimension ind = Variable(["y"], [0, 1]) v_new = v[ind, :2] assert v_new.dims == ("y",) assert_array_equal(v_new, v_data[[0, 1], [0, 1]]) # with multiple slices v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]]) ind = Variable(["a", "b"], [[0]]) v_new = v[ind, :, :] expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...]) assert_identical(v_new, expected) v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]]) ind = Variable(["y"], [0]) v_new = v[ind, :, 1:2, 2] expected = Variable(["y", "x"], [[6]]) assert_identical(v_new, expected) # slice and vector mixed indexing resulting in the same dimension v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5)) ind = Variable(["x"], [0, 1, 2]) v_new = v[:, ind] expected = Variable(("x", "z"), np.zeros((3, 5))) expected[0] = v.data[0, 0] expected[1] = v.data[1, 1] expected[2] = v.data[2, 2] assert_identical(v_new, expected) v_new = v[:, ind.data] assert v_new.shape == (3, 3, 5) def test_getitem_error(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) with pytest.raises(IndexError, match=r"labeled multi-"): v[[[0, 1], [1, 2]]] ind_x = Variable(["a"], [0, 1, 1]) ind_y = Variable(["a"], [0, 1]) with pytest.raises(IndexError, match=r"Dimensions of indexers "): v[ind_x, ind_y] ind = Variable(["a", "b"], [[True, False], [False, True]]) with pytest.raises(IndexError, match=r"2-dimensional boolean"): v[dict(x=ind)] v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5)) ind = Variable(["x"], [0, 1]) with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"): v[:, ind] @pytest.mark.parametrize( "mode", [ "mean", "median", "reflect", "edge", "linear_ramp", "maximum", "minimum", "symmetric", "wrap", ], ) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) @pytest.mark.filterwarnings( r"ignore:dask.array.pad.+? converts integers to floats." 
) def test_pad(self, mode, xr_arg, np_arg): data = np.arange(4 * 3 * 2).reshape(4, 3, 2) v = self.cls(["x", "y", "z"], data) actual = v.pad(mode=mode, **xr_arg) expected = np.pad(data, np_arg, mode=mode) assert_array_equal(actual, expected) assert isinstance(actual._data, type(v._data)) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) def test_pad_constant_values(self, xr_arg, np_arg): data = np.arange(4 * 3 * 2).reshape(4, 3, 2) v = self.cls(["x", "y", "z"], data) actual = v.pad(**xr_arg) expected = np.pad( np.array(duck_array_ops.astype(v.data, float)), np_arg, mode="constant", constant_values=np.nan, ) assert_array_equal(actual, expected) assert isinstance(actual._data, type(v._data)) # for the boolean array, we pad False data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2) v = self.cls(["x", "y", "z"], data) actual = v.pad(mode="constant", constant_values=False, **xr_arg) expected = np.pad( np.array(v.data), np_arg, mode="constant", constant_values=False ) assert_array_equal(actual, expected) @pytest.mark.parametrize( ["keep_attrs", "attrs", "expected"], [ pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"), pytest.param(False, {"a": 1, "b": 2}, {}, id="False"), pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"), ], ) def test_pad_keep_attrs(self, keep_attrs, attrs, expected): data = np.arange(10, dtype=float) v = self.cls(["x"], data, attrs) keep_attrs_ = "default" if keep_attrs is None else keep_attrs with set_options(keep_attrs=keep_attrs_): actual = v.pad({"x": (1, 1)}, mode="constant", constant_values=np.nan) assert actual.attrs == expected actual = v.pad( {"x": (1, 1)}, mode="constant", constant_values=np.nan, keep_attrs=keep_attrs, ) assert actual.attrs == expected @pytest.mark.parametrize("d, w", (("x", 3), ("y", 5))) def test_rolling_window(self, d, w): # Just a working test. 
See test_nputils for the algorithm validation v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2)) v_rolling = v.rolling_window(d, w, d + "_window") assert v_rolling.dims == ("x", "y", "z", d + "_window") assert v_rolling.shape == v.shape + (w,) v_rolling = v.rolling_window(d, w, d + "_window", center=True) assert v_rolling.dims == ("x", "y", "z", d + "_window") assert v_rolling.shape == v.shape + (w,) # dask and numpy result should be the same v_loaded = v.load().rolling_window(d, w, d + "_window", center=True) assert_array_equal(v_rolling, v_loaded) # numpy backend should not be over-written if isinstance(v._data, np.ndarray): with pytest.raises(ValueError): v_loaded[0] = 1.0 def test_rolling_1d(self): x = self.cls("x", np.array([1, 2, 3, 4], dtype=float)) kwargs = dict(dim="x", window=3, window_dim="xw") actual = x.rolling_window(**kwargs, center=True, fill_value=np.nan) expected = Variable( ("x", "xw"), np.array( [[np.nan, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, np.nan]], dtype=float ), ) assert_equal(actual, expected) actual = x.rolling_window(**kwargs, center=False, fill_value=0.0) expected = self.cls( ("x", "xw"), np.array([[0, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]], dtype=float), ) assert_equal(actual, expected) x = self.cls(("y", "x"), np.stack([x, x * 1.1])) actual = x.rolling_window(**kwargs, center=False, fill_value=0.0) expected = self.cls( ("y", "x", "xw"), np.stack([expected.data, expected.data * 1.1], axis=0) ) assert_equal(actual, expected) @pytest.mark.parametrize("center", [[True, True], [False, False]]) @pytest.mark.parametrize("dims", [("x", "y"), ("y", "z"), ("z", "x")]) def test_nd_rolling(self, center, dims): x = self.cls( ("x", "y", "z"), np.arange(7 * 6 * 8).reshape(7, 6, 8).astype(float), ) window = [3, 3] actual = x.rolling_window( dim=dims, window=window, window_dim=[f"{k}w" for k in dims], center=center, fill_value=np.nan, ) expected = x for dim, win, cent in zip(dims, window, center, strict=True): expected = expected.rolling_window( dim=dim, window=win, window_dim=f"{dim}w", center=cent, fill_value=np.nan, ) assert_equal(actual, expected) @pytest.mark.parametrize( ("dim, window, window_dim, center"), [ ("x", [3, 3], "x_w", True), ("x", 3, ("x_w", "x_w"), True), ("x", 3, "x_w", [True, True]), ], ) def test_rolling_window_errors(self, dim, window, window_dim, center): x = self.cls( ("x", "y", "z"), np.arange(7 * 6 * 8).reshape(7, 6, 8).astype(float), ) with pytest.raises(ValueError): x.rolling_window( dim=dim, window=window, window_dim=window_dim, center=center, ) class TestVariable(VariableSubclassobjects): def cls(self, *args, **kwargs) -> Variable: return Variable(*args, **kwargs) @pytest.fixture(autouse=True) def setup(self): self.d = np.random.random((10, 3)).astype(np.float64) def test_values(self): v = Variable(["time", "x"], self.d) assert_array_equal(v.values, self.d) assert source_ndarray(v.values) is self.d with pytest.raises(ValueError): # wrong size v.values = np.random.random(5) d2 = np.random.random((10, 3)) v.values = d2 assert source_ndarray(v.values) is d2 def test_numpy_same_methods(self): v = Variable([], np.float32(0.0)) assert v.item() == 0 assert type(v.item()) is float v = IndexVariable("x", np.arange(5)) assert 2 == v.searchsorted(2) @pytest.mark.parametrize( "values, unit", [ (np.datetime64("2000-01-01"), "s"), ( pd.Timestamp("2000-01-01T00"), "s" if has_pandas_3 else "ns", ), ( datetime(2000, 1, 1), "us" if has_pandas_3 else "ns", ), (np.datetime64("2000-01-01T00:00:00.1234567891"), "ns"), ], ) def 
test_datetime64_conversion_scalar(self, values, unit): v = Variable([], values) assert v.dtype == np.dtype(f"datetime64[{unit}]") assert np.issubdtype(v.values, "datetime64") assert v.values.dtype == np.dtype(f"datetime64[{unit}]") @pytest.mark.parametrize( "values, unit", [ (np.timedelta64(1, "m"), "s"), (np.timedelta64(1, "D"), "s"), (np.timedelta64(1001, "ps"), "ns"), (pd.Timedelta("1 day"), "ns"), (timedelta(days=1), "ns"), ], ) def test_timedelta64_conversion_scalar(self, values, unit): v = Variable([], values) assert v.dtype == np.dtype(f"timedelta64[{unit}]") assert np.issubdtype(v.values, "timedelta64") assert v.values.dtype == np.dtype(f"timedelta64[{unit}]") def test_0d_str(self): v = Variable([], "foo") assert v.dtype == np.dtype("U3") assert v.values == "foo" v = Variable([], np.bytes_("foo")) assert v.dtype == np.dtype("S3") assert v.values == "foo".encode("ascii") def test_0d_datetime(self): v = Variable([], pd.Timestamp("2000-01-01")) expected_unit = "s" if has_pandas_3 else "ns" assert v.dtype == np.dtype(f"datetime64[{expected_unit}]") assert v.values == np.datetime64("2000-01-01", expected_unit) @pytest.mark.parametrize( "values, unit", [(pd.to_timedelta("1s"), "ns"), (np.timedelta64(1, "s"), "s")] ) def test_0d_timedelta(self, values, unit): # todo: check, if this test is OK v = Variable([], values) assert v.dtype == np.dtype(f"timedelta64[{unit}]") assert v.values == np.timedelta64(10**9, "ns") def test_equals_and_identical(self): d = np.random.rand(10, 3) d[0, 0] = np.nan v1 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]}) v2 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]}) assert v1.equals(v2) assert v1.identical(v2) v3 = Variable(("dim1", "dim3"), data=d) assert not v1.equals(v3) v4 = Variable(("dim1", "dim2"), data=d) assert v1.equals(v4) assert not v1.identical(v4) v5 = deepcopy(v1) v5.values[:] = np.random.rand(10, 3) assert not v1.equals(v5) assert not v1.equals(None) assert not v1.equals(d) assert not v1.identical(None) assert not v1.identical(d) def test_broadcast_equals(self): v1 = Variable((), np.nan) v2 = Variable(("x"), [np.nan, np.nan]) assert v1.broadcast_equals(v2) assert not v1.equals(v2) assert not v1.identical(v2) v3 = Variable(("x"), [np.nan]) assert v1.broadcast_equals(v3) assert not v1.equals(v3) assert not v1.identical(v3) assert not v1.broadcast_equals(None) v4 = Variable(("x"), [np.nan] * 3) assert not v2.broadcast_equals(v4) def test_no_conflicts(self): v1 = Variable(("x"), [1, 2, np.nan, np.nan]) v2 = Variable(("x"), [np.nan, 2, 3, np.nan]) assert v1.no_conflicts(v2) assert not v1.equals(v2) assert not v1.broadcast_equals(v2) assert not v1.identical(v2) assert not v1.no_conflicts(None) v3 = Variable(("y"), [np.nan, 2, 3, np.nan]) assert not v3.no_conflicts(v1) d = np.array([1, 2, np.nan, np.nan]) assert not v1.no_conflicts(d) assert not v2.no_conflicts(d) v4 = Variable(("w", "x"), [d]) assert v1.no_conflicts(v4) def test_as_variable(self): data = np.arange(10) expected = Variable("x", data) expected_extra = Variable( "x", data, attrs={"myattr": "val"}, encoding={"scale_factor": 1} ) assert_identical(expected, as_variable(expected)) ds = Dataset({"x": expected}) var = as_variable(ds["x"]).to_base_variable() assert_identical(expected, var) assert not isinstance(ds["x"], Variable) assert isinstance(as_variable(ds["x"]), Variable) xarray_tuple = ( expected_extra.dims, expected_extra.values, expected_extra.attrs, expected_extra.encoding, ) assert_identical(expected_extra, 
as_variable(xarray_tuple)) with pytest.raises(TypeError, match=r"tuple of form"): as_variable(tuple(data)) with pytest.raises(ValueError, match=r"tuple of form"): # GH1016 as_variable(("five", "six", "seven")) with pytest.raises(TypeError, match=r"without an explicit list of dimensions"): as_variable(data) with pytest.warns(FutureWarning, match="IndexVariable"): actual = as_variable(data, name="x") assert_identical(expected.to_index_variable(), actual) actual = as_variable(0) expected = Variable([], 0) assert_identical(expected, actual) data = np.arange(9).reshape((3, 3)) expected = Variable(("x", "y"), data) with pytest.raises(ValueError, match=r"without explicit dimension names"): as_variable(data, name="x") # name of nD variable matches dimension name actual = as_variable(expected, name="x") assert_identical(expected, actual) # test datetime, timedelta conversion dt = np.array([datetime(1999, 1, 1) + timedelta(days=x) for x in range(10)]) with pytest.warns(FutureWarning, match="IndexVariable"): assert as_variable(dt, "time").dtype.kind == "M" td = np.array([timedelta(days=x) for x in range(10)]) with pytest.warns(FutureWarning, match="IndexVariable"): assert as_variable(td, "time").dtype.kind == "m" with pytest.raises(TypeError): as_variable(("x", DataArray([]))) def test_repr(self): v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"}) v = v.astype(np.uint64) expected = dedent( """ Size: 48B array([[1, 2, 3], [4, 5, 6]], dtype=uint64) Attributes: foo: bar """ ).strip() assert expected == repr(v) def test_repr_lazy_data(self): v = Variable("x", LazilyIndexedArray(np.arange(2e5))) assert "200000 values with dtype" in repr(v) assert isinstance(v._data, LazilyIndexedArray) def test_detect_indexer_type(self): """Tests indexer type was correctly detected.""" data = np.random.random((10, 11)) v = Variable(["x", "y"], data) _, ind, _ = v._broadcast_indexes((0, 1)) assert type(ind) is indexing.BasicIndexer _, ind, _ = v._broadcast_indexes((0, slice(0, 8, 2))) assert type(ind) is indexing.BasicIndexer _, ind, _ = v._broadcast_indexes((0, [0, 1])) assert type(ind) is indexing.OuterIndexer _, ind, _ = v._broadcast_indexes(([0, 1], 1)) assert type(ind) is indexing.OuterIndexer _, ind, _ = v._broadcast_indexes(([0, 1], [1, 2])) assert type(ind) is indexing.OuterIndexer _, ind, _ = v._broadcast_indexes(([0, 1], slice(0, 8, 2))) assert type(ind) is indexing.OuterIndexer vind = Variable(("a",), [0, 1]) _, ind, _ = v._broadcast_indexes((vind, slice(0, 8, 2))) assert type(ind) is indexing.OuterIndexer vind = Variable(("y",), [0, 1]) _, ind, _ = v._broadcast_indexes((vind, 3)) assert type(ind) is indexing.OuterIndexer vind = Variable(("a",), [0, 1]) _, ind, _ = v._broadcast_indexes((vind, vind)) assert type(ind) is indexing.VectorizedIndexer vind = Variable(("a", "b"), [[0, 2], [1, 3]]) _, ind, _ = v._broadcast_indexes((vind, 3)) assert type(ind) is indexing.VectorizedIndexer def test_indexer_type(self): # GH:issue:1688. 
Wrong indexer type induces NotImplementedError data = np.random.random((10, 11)) v = Variable(["x", "y"], data) def assert_indexer_type(key, object_type): dims, index_tuple, new_order = v._broadcast_indexes(key) assert isinstance(index_tuple, object_type) # should return BasicIndexer assert_indexer_type((0, 1), BasicIndexer) assert_indexer_type((0, slice(None, None)), BasicIndexer) assert_indexer_type((Variable([], 3), slice(None, None)), BasicIndexer) assert_indexer_type((Variable([], 3), (Variable([], 6))), BasicIndexer) # should return OuterIndexer assert_indexer_type(([0, 1], 1), OuterIndexer) assert_indexer_type(([0, 1], [1, 2]), OuterIndexer) assert_indexer_type((Variable(("x"), [0, 1]), 1), OuterIndexer) assert_indexer_type((Variable(("x"), [0, 1]), slice(None, None)), OuterIndexer) assert_indexer_type( (Variable(("x"), [0, 1]), Variable(("y"), [0, 1])), OuterIndexer ) # should return VectorizedIndexer assert_indexer_type((Variable(("y"), [0, 1]), [0, 1]), VectorizedIndexer) assert_indexer_type( (Variable(("z"), [0, 1]), Variable(("z"), [0, 1])), VectorizedIndexer ) assert_indexer_type( ( Variable(("a", "b"), [[0, 1], [1, 2]]), Variable(("a", "b"), [[0, 1], [1, 2]]), ), VectorizedIndexer, ) def test_items(self): data = np.random.random((10, 11)) v = Variable(["x", "y"], data) # test slicing assert_identical(v, v[:]) assert_identical(v, v[...]) assert_identical(Variable(["y"], data[0]), v[0]) assert_identical(Variable(["x"], data[:, 0]), v[:, 0]) assert_identical(Variable(["x", "y"], data[:3, :2]), v[:3, :2]) # test array indexing x = Variable(["x"], np.arange(10)) y = Variable(["y"], np.arange(11)) assert_identical(v, v[x.values]) assert_identical(v, v[x]) assert_identical(v[:3], v[x < 3]) assert_identical(v[:, 3:], v[:, y >= 3]) assert_identical(v[:3, 3:], v[x < 3, y >= 3]) assert_identical(v[:3, :2], v[x[:3], y[:2]]) assert_identical(v[:3, :2], v[range(3), range(2)]) # test iteration for n, item in enumerate(v): assert_identical(Variable(["y"], data[n]), item) with pytest.raises(TypeError, match=r"iteration over a 0-d"): iter(Variable([], 0)) # test setting v.values[:] = 0 assert np.all(v.values == 0) # test orthogonal setting v[range(10), range(11)] = 1 assert_array_equal(v.values, np.ones((10, 11))) def test_getitem_basic(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) # int argument v_new = v[0] assert v_new.dims == ("y",) assert_array_equal(v_new, v._data[0]) # slice argument v_new = v[:2] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v._data[:2]) # list arguments v_new = v[[0]] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v._data[[0]]) v_new = v[[]] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v._data[[]]) # dict arguments v_new = v[dict(x=0)] assert v_new.dims == ("y",) assert_array_equal(v_new, v._data[0]) v_new = v[dict(x=0, y=slice(None))] assert v_new.dims == ("y",) assert_array_equal(v_new, v._data[0]) v_new = v[dict(x=0, y=1)] assert v_new.dims == () assert_array_equal(v_new, v._data[0, 1]) v_new = v[dict(y=1)] assert v_new.dims == ("x",) assert_array_equal(v_new, v._data[:, 1]) # tuple argument v_new = v[(slice(None), 1)] assert v_new.dims == ("x",) assert_array_equal(v_new, v._data[:, 1]) # test that we obtain a modifiable view when taking a 0d slice v_new = v[0, 0] v_new[...] 
+= 99 assert_array_equal(v_new, v._data[0, 0]) def test_getitem_with_mask_2d_input(self): v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]]) assert_identical( v._getitem_with_mask(([-1, 0], [1, -1])), Variable(("x", "y"), [[np.nan, np.nan], [1, np.nan]]), ) assert_identical(v._getitem_with_mask((slice(2), [0, 1, 2])), v) def test_isel(self): v = Variable(["time", "x"], self.d) assert_identical(v.isel(time=slice(None)), v) assert_identical(v.isel(time=0), v[0]) assert_identical(v.isel(time=slice(0, 3)), v[:3]) assert_identical(v.isel(x=0), v[:, 0]) assert_identical(v.isel(x=[0, 2]), v[:, [0, 2]]) assert_identical(v.isel(time=[]), v[[]]) with pytest.raises( ValueError, match=r"Dimensions {'not_a_dim'} do not exist. Expected one or more of " r"\('time', 'x'\)", ): v.isel(not_a_dim=0) with pytest.warns( UserWarning, match=r"Dimensions {'not_a_dim'} do not exist. Expected one or more of " r"\('time', 'x'\)", ): v.isel(not_a_dim=0, missing_dims="warn") assert_identical(v, v.isel(not_a_dim=0, missing_dims="ignore")) def test_index_0d_numpy_string(self): # regression test to verify our work around for indexing 0d strings v = Variable([], np.bytes_("asdf")) assert_identical(v[()], v) v = Variable([], np.str_("asdf")) assert_identical(v[()], v) def test_indexing_0d_unicode(self): # regression test for GH568 actual = Variable(("x"), ["tmax"])[0][()] expected = Variable((), "tmax") assert_identical(actual, expected) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0]) def test_shift(self, fill_value): v = Variable("x", [1, 2, 3, 4, 5]) assert_identical(v, v.shift(x=0)) assert v is not v.shift(x=0) expected = Variable("x", [np.nan, np.nan, 1, 2, 3]) assert_identical(expected, v.shift(x=2)) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_exp = np.nan else: fill_value_exp = fill_value expected = Variable("x", [fill_value_exp, 1, 2, 3, 4]) assert_identical(expected, v.shift(x=1, fill_value=fill_value)) expected = Variable("x", [2, 3, 4, 5, fill_value_exp]) assert_identical(expected, v.shift(x=-1, fill_value=fill_value)) expected = Variable("x", [fill_value_exp] * 5) assert_identical(expected, v.shift(x=5, fill_value=fill_value)) assert_identical(expected, v.shift(x=6, fill_value=fill_value)) with pytest.raises(ValueError, match=r"dimension"): v.shift(z=0) v = Variable("x", [1, 2, 3, 4, 5], {"foo": "bar"}) assert_identical(v, v.shift(x=0)) expected = Variable("x", [fill_value_exp, 1, 2, 3, 4], {"foo": "bar"}) assert_identical(expected, v.shift(x=1, fill_value=fill_value)) def test_shift2d(self): v = Variable(("x", "y"), [[1, 2], [3, 4]]) expected = Variable(("x", "y"), [[np.nan, np.nan], [np.nan, 1]]) assert_identical(expected, v.shift(x=1, y=1)) def test_roll(self): v = Variable("x", [1, 2, 3, 4, 5]) assert_identical(v, v.roll(x=0)) assert v is not v.roll(x=0) expected = Variable("x", [5, 1, 2, 3, 4]) assert_identical(expected, v.roll(x=1)) assert_identical(expected, v.roll(x=-4)) assert_identical(expected, v.roll(x=6)) expected = Variable("x", [4, 5, 1, 2, 3]) assert_identical(expected, v.roll(x=2)) assert_identical(expected, v.roll(x=-3)) with pytest.raises(ValueError, match=r"dimension"): v.roll(z=0) def test_roll_consistency(self): v = Variable(("x", "y"), np.random.randn(5, 6)) for axis, dim in [(0, "x"), (1, "y")]: for shift in [-3, 0, 1, 7, 11]: expected = np.roll(v.values, shift, axis=axis) actual = v.roll(**{dim: shift}).values assert_array_equal(expected, actual) def test_transpose(self): v = Variable(["time", "x"], self.d) 
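        # The Ellipsis forms exercised further down follow numpy-style semantics:
        # `...` stands for "all remaining dims in their current order".
        # Illustrative sketch (using the names defined below):
        #     w.transpose("d", ...)        # "d" first, then "a", "b", "c"
        #     w.transpose(..., "a")        # keep order, move "a" last
        #     w.transpose("d", ..., "a")   # "d" first, "a" last, "b", "c" kept in order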
v2 = Variable(["x", "time"], self.d.T) assert_identical(v, v2.transpose()) assert_identical(v.transpose(), v.T) x = np.random.randn(2, 3, 4, 5) w = Variable(["a", "b", "c", "d"], x) w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x)) assert w2.shape == (5, 3, 4, 2) assert_identical(w2, w.transpose("d", "b", "c", "a")) assert_identical(w2, w.transpose("d", ..., "a")) assert_identical(w2, w.transpose("d", "b", "c", ...)) assert_identical(w2, w.transpose(..., "b", "c", "a")) assert_identical(w, w2.transpose("a", "b", "c", "d")) w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x)) assert_identical(w, w3.transpose("a", "b", "c", "d")) # test missing dimension, raise error with pytest.raises(ValueError): v.transpose(..., "not_a_dim") # test missing dimension, ignore error actual = v.transpose(..., "not_a_dim", missing_dims="ignore") expected_ell = v.transpose(...) assert_identical(expected_ell, actual) # test missing dimension, raise warning with pytest.warns(UserWarning): v.transpose(..., "not_a_dim", missing_dims="warn") assert_identical(expected_ell, actual) def test_transpose_0d(self): for value in [ 3.5, ("a", 1), np.datetime64("2000-01-01"), np.timedelta64(1, "h"), None, object(), ]: variable = Variable([], value) actual = variable.transpose() assert_identical(actual, variable) def test_pandas_categorical_dtype(self): data = pd.Categorical(np.arange(10, dtype="int64")) v = self.cls("x", data) print(v) # should not error assert isinstance(v.dtype, pd.CategoricalDtype) def test_squeeze(self): v = Variable(["x", "y"], [[1]]) assert_identical(Variable([], 1), v.squeeze()) assert_identical(Variable(["y"], [1]), v.squeeze("x")) assert_identical(Variable(["y"], [1]), v.squeeze(["x"])) assert_identical(Variable(["x"], [1]), v.squeeze("y")) assert_identical(Variable([], 1), v.squeeze(["x", "y"])) v = Variable(["x", "y"], [[1, 2]]) assert_identical(Variable(["y"], [1, 2]), v.squeeze()) assert_identical(Variable(["y"], [1, 2]), v.squeeze("x")) with pytest.raises(ValueError, match=r"cannot select a dimension"): v.squeeze("y") def test_get_axis_num(self) -> None: v = Variable(["x", "y", "z"], np.random.randn(2, 3, 4)) assert v.get_axis_num("x") == 0 assert v.get_axis_num(["x"]) == (0,) assert v.get_axis_num(["x", "y"]) == (0, 1) assert v.get_axis_num(["z", "y", "x"]) == (2, 1, 0) with pytest.raises(ValueError, match=r"not found in array dim"): v.get_axis_num("foobar") # Test the type annotations: mypy will complain if the inferred # type is wrong v.get_axis_num("x") + 0 v.get_axis_num(["x"]) + () v.get_axis_num(("x", "y")) + () def test_set_dims(self): v = Variable(["x"], [0, 1]) actual = v.set_dims(["x", "y"]) expected = Variable(["x", "y"], [[0], [1]]) assert_identical(actual, expected) actual = v.set_dims(["y", "x"]) assert_identical(actual, expected.T) actual = v.set_dims({"x": 2, "y": 2}) expected = Variable(["x", "y"], [[0, 0], [1, 1]]) assert_identical(actual, expected) v = Variable(["foo"], [0, 1]) actual = v.set_dims("foo") expected = v assert_identical(actual, expected) with pytest.raises(ValueError, match=r"must be a superset"): v.set_dims(["z"]) def test_set_dims_object_dtype(self): v = Variable([], ("a", 1)) actual = v.set_dims(("x",), (3,)) exp_values = np.empty((3,), dtype=object) for i in range(3): exp_values[i] = ("a", 1) expected = Variable(["x"], exp_values) assert_identical(actual, expected) def test_set_dims_without_broadcast(self): class ArrayWithoutBroadcastTo(NDArrayMixin, indexing.ExplicitlyIndexed): def __init__(self, array): self.array = array # 
Broadcasting with __getitem__ is "easier" to implement # especially for dims of 1 def __getitem__(self, key): return self.array[key] def __array_function__(self, *args, **kwargs): raise NotImplementedError( "No, we don't want to use broadcast_to here " "https://github.com/pydata/xarray/issues/9462" ) arr = ArrayWithoutBroadcastTo(np.zeros((3, 4))) # We should be able to add a new axis without broadcasting assert arr[np.newaxis, :, :].shape == (1, 3, 4) with pytest.raises(NotImplementedError): np.broadcast_to(arr, (1, 3, 4)) v = Variable(["x", "y"], arr) v_expanded = v.set_dims(["z", "x", "y"]) assert v_expanded.dims == ("z", "x", "y") assert v_expanded.shape == (1, 3, 4) v_expanded = v.set_dims(["x", "z", "y"]) assert v_expanded.dims == ("x", "z", "y") assert v_expanded.shape == (3, 1, 4) v_expanded = v.set_dims(["x", "y", "z"]) assert v_expanded.dims == ("x", "y", "z") assert v_expanded.shape == (3, 4, 1) # Explicitly asking for a shape of 1 triggers a different # codepath in set_dims # https://github.com/pydata/xarray/issues/9462 v_expanded = v.set_dims(["z", "x", "y"], shape=(1, 3, 4)) assert v_expanded.dims == ("z", "x", "y") assert v_expanded.shape == (1, 3, 4) v_expanded = v.set_dims(["x", "z", "y"], shape=(3, 1, 4)) assert v_expanded.dims == ("x", "z", "y") assert v_expanded.shape == (3, 1, 4) v_expanded = v.set_dims(["x", "y", "z"], shape=(3, 4, 1)) assert v_expanded.dims == ("x", "y", "z") assert v_expanded.shape == (3, 4, 1) v_expanded = v.set_dims({"z": 1, "x": 3, "y": 4}) assert v_expanded.dims == ("z", "x", "y") assert v_expanded.shape == (1, 3, 4) v_expanded = v.set_dims({"x": 3, "z": 1, "y": 4}) assert v_expanded.dims == ("x", "z", "y") assert v_expanded.shape == (3, 1, 4) v_expanded = v.set_dims({"x": 3, "y": 4, "z": 1}) assert v_expanded.dims == ("x", "y", "z") assert v_expanded.shape == (3, 4, 1) with pytest.raises(NotImplementedError): v.set_dims({"z": 2, "x": 3, "y": 4}) with pytest.raises(NotImplementedError): v.set_dims(["z", "x", "y"], shape=(2, 3, 4)) def test_stack(self): v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"}) actual = v.stack(z=("x", "y")) expected = Variable("z", [0, 1, 2, 3], v.attrs) assert_identical(actual, expected) actual = v.stack(z=("x",)) expected = Variable(("y", "z"), v.data.T, v.attrs) assert_identical(actual, expected) actual = v.stack(z=()) assert_identical(actual, v) actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y") expected = Variable(("X", "Y"), v.data, v.attrs) assert_identical(actual, expected) def test_stack_errors(self): v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"}) with pytest.raises(ValueError, match=r"invalid existing dim"): v.stack(z=("x1",)) with pytest.raises(ValueError, match=r"cannot create a new dim"): v.stack(x=("x",)) def test_unstack(self): v = Variable("z", [0, 1, 2, 3], {"foo": "bar"}) actual = v.unstack(z={"x": 2, "y": 2}) expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs) assert_identical(actual, expected) actual = v.unstack(z={"x": 4, "y": 1}) expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs) assert_identical(actual, expected) actual = v.unstack(z={"x": 4}) expected = Variable("x", [0, 1, 2, 3], v.attrs) assert_identical(actual, expected) def test_unstack_errors(self): v = Variable("z", [0, 1, 2, 3]) with pytest.raises(ValueError, match=r"invalid existing dim"): v.unstack(foo={"x": 4}) with pytest.raises(ValueError, match=r"cannot create a new dim"): v.stack(z=("z",)) with pytest.raises(ValueError, match=r"the product of the new dim"): v.unstack(z={"x": 5}) def
test_unstack_2d(self): v = Variable(["x", "y"], [[0, 1], [2, 3]]) actual = v.unstack(y={"z": 2}) expected = Variable(["x", "z"], v.data) assert_identical(actual, expected) actual = v.unstack(x={"z": 2}) expected = Variable(["y", "z"], v.data.T) assert_identical(actual, expected) def test_stack_unstack_consistency(self): v = Variable(["x", "y"], [[0, 1], [2, 3]]) actual = v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 2}) assert_identical(actual, v) @pytest.mark.filterwarnings("error::RuntimeWarning") def test_unstack_without_missing(self): v = Variable(["z"], [0, 1, 2, 3]) expected = Variable(["x", "y"], [[0, 1], [2, 3]]) actual = v.unstack(z={"x": 2, "y": 2}) assert_identical(actual, expected) def test_broadcasting_math(self): x = np.random.randn(2, 3) v = Variable(["a", "b"], x) # 1d to 2d broadcasting assert_identical(v * v, Variable(["a", "b"], np.einsum("ab,ab->ab", x, x))) assert_identical(v * v[0], Variable(["a", "b"], np.einsum("ab,b->ab", x, x[0]))) assert_identical(v[0] * v, Variable(["b", "a"], np.einsum("b,ab->ba", x[0], x))) assert_identical( v[0] * v[:, 0], Variable(["b", "a"], np.einsum("b,a->ba", x[0], x[:, 0])) ) # higher dim broadcasting y = np.random.randn(3, 4, 5) w = Variable(["b", "c", "d"], y) assert_identical( v * w, Variable(["a", "b", "c", "d"], np.einsum("ab,bcd->abcd", x, y)) ) assert_identical( w * v, Variable(["b", "c", "d", "a"], np.einsum("bcd,ab->bcda", y, x)) ) assert_identical( v * w[0], Variable(["a", "b", "c", "d"], np.einsum("ab,cd->abcd", x, y[0])) ) @pytest.mark.filterwarnings("ignore:Duplicate dimension names") def test_broadcasting_failures(self): a = Variable(["x"], np.arange(10)) b = Variable(["x"], np.arange(5)) c = Variable(["x", "x"], np.arange(100).reshape(10, 10)) with pytest.raises(ValueError, match=r"mismatched lengths"): a + b with pytest.raises(ValueError, match=r"duplicate dimensions"): a + c def test_inplace_math(self): x = np.arange(5) v = Variable(["x"], x) v2 = v v2 += 1 assert v is v2 # since we provided an ndarray for data, it is also modified in-place assert source_ndarray(v.values) is x assert_array_equal(v.values, np.arange(5) + 1) with pytest.raises(ValueError, match=r"dimensions cannot change"): v += Variable("y", np.arange(5)) def test_inplace_math_error(self): x = np.arange(5) v = IndexVariable(["x"], x) with pytest.raises( TypeError, match=r"Values of an IndexVariable are immutable" ): v += 1 def test_reduce(self): v = Variable(["x", "y"], self.d, {"ignored": "attributes"}) assert_identical(v.reduce(np.std, "x"), Variable(["y"], self.d.std(axis=0))) assert_identical(v.reduce(np.std, axis=0), v.reduce(np.std, dim="x")) assert_identical( v.reduce(np.std, ["y", "x"]), Variable([], self.d.std(axis=(0, 1))) ) assert_identical(v.reduce(np.std), Variable([], self.d.std())) assert_identical( v.reduce(np.mean, "x").reduce(np.std, "y"), Variable([], self.d.mean(axis=0).std()), ) assert_allclose(v.mean("x"), v.reduce(np.mean, "x")) with pytest.raises(ValueError, match=r"cannot supply both"): v.mean(dim="x", axis=0) @requires_bottleneck @pytest.mark.parametrize("compute_backend", ["bottleneck"], indirect=True) def test_reduce_use_bottleneck(self, monkeypatch, compute_backend): def raise_if_called(*args, **kwargs): raise RuntimeError("should not have been called") import bottleneck as bn monkeypatch.setattr(bn, "nanmin", raise_if_called) v = Variable("x", [0.0, np.nan, 1.0]) with pytest.raises(RuntimeError, match="should not have been called"): with set_options(use_bottleneck=True): v.min() with set_options(use_bottleneck=False): 
v.min() @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) @pytest.mark.parametrize( "axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]], strict=True), ) def test_quantile(self, q, axis, dim, skipna): d = self.d.copy() d[0, 0] = np.nan v = Variable(["x", "y"], d) actual = v.quantile(q, dim=dim, skipna=skipna) _percentile_func = np.nanpercentile if skipna in (True, None) else np.percentile expected = _percentile_func(d, np.array(q) * 100, axis=axis) np.testing.assert_allclose(actual.values, expected) @requires_dask @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) @pytest.mark.parametrize("axis, dim", [[1, "y"], [[1], ["y"]]]) def test_quantile_dask(self, q, axis, dim): v = Variable(["x", "y"], self.d).chunk({"x": 2}) actual = v.quantile(q, dim=dim) assert isinstance(actual.data, dask_array_type) expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis) np.testing.assert_allclose(actual.values, expected) @pytest.mark.parametrize("method", ["midpoint", "lower"]) @pytest.mark.parametrize( "use_dask", [pytest.param(True, marks=requires_dask), False] ) def test_quantile_method(self, method, use_dask) -> None: v = Variable(["x", "y"], self.d) if use_dask: v = v.chunk({"x": 2}) q = np.array([0.25, 0.5, 0.75]) actual = v.quantile(q, dim="y", method=method) expected = np.nanquantile(self.d, q, axis=1, method=method) if use_dask: assert isinstance(actual.data, dask_array_type) np.testing.assert_allclose(actual.values, expected) @pytest.mark.filterwarnings( "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning" ) @pytest.mark.parametrize("method", ["midpoint", "lower"]) def test_quantile_interpolation_deprecation(self, method) -> None: v = Variable(["x", "y"], self.d) q = np.array([0.25, 0.5, 0.75]) with pytest.warns( FutureWarning, match="`interpolation` argument to quantile was renamed to `method`", ): actual = v.quantile(q, dim="y", interpolation=method) expected = v.quantile(q, dim="y", method=method) np.testing.assert_allclose(actual.values, expected.values) with warnings.catch_warnings(record=True): with pytest.raises(TypeError, match="interpolation and method keywords"): v.quantile(q, dim="y", interpolation=method, method=method) @requires_dask def test_quantile_chunked_dim_error(self): v = Variable(["x", "y"], self.d).chunk({"x": 2}) if has_dask_ge_2024_11_0: # Dask rechunks np.testing.assert_allclose( v.compute().quantile(0.5, dim="x"), v.quantile(0.5, dim="x") ) else: # this checks for ValueError in dask.array.apply_gufunc with pytest.raises(ValueError, match=r"consists of multiple chunks"): v.quantile(0.5, dim="x") @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True) @pytest.mark.parametrize("q", [-0.1, 1.1, [2], [0.25, 2]]) def test_quantile_out_of_bounds(self, q, compute_backend): v = Variable(["x", "y"], self.d) # escape special characters with pytest.raises( ValueError, match=r"(Q|q)uantiles must be in the range \[0, 1\]", ): v.quantile(q, dim="x") @requires_dask @requires_bottleneck def test_rank_dask(self): # Instead of a single test here, we could parameterize the other tests for both # arrays. But this is sufficient. 
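        # Context (illustrative sketch, mirroring the assertions below): rank() is
        # dispatched through apply_ufunc with dask="parallelized", so the ranked
        # dimension must live in a single chunk. Ranking along the unchunked "y"
        # works, while ranking along the chunked "x" raises; a caller would
        # typically rechunk first, e.g.
        #     v.chunk(x=-1).rank("x")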
v = Variable( ["x", "y"], [[30.0, 1.0, np.nan, 20.0, 4.0], [30.0, 1.0, np.nan, 20.0, 4.0]] ).chunk(x=1) expected = Variable( ["x", "y"], [[4.0, 1.0, np.nan, 3.0, 2.0], [4.0, 1.0, np.nan, 3.0, 2.0]] ) assert_equal(v.rank("y").compute(), expected) with pytest.raises( ValueError, match=r" with dask='parallelized' consists of multiple chunks" ): v.rank("x") def test_rank_use_bottleneck(self): v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]) with set_options(use_bottleneck=False): with pytest.raises(RuntimeError): v.rank("x") @requires_bottleneck def test_rank(self): import bottleneck as bn # floats v = Variable(["x", "y"], [[3, 4, np.nan, 1]]) expect_0 = bn.nanrankdata(v.data, axis=0) expect_1 = bn.nanrankdata(v.data, axis=1) np.testing.assert_allclose(v.rank("x").values, expect_0) np.testing.assert_allclose(v.rank("y").values, expect_1) # int v = Variable(["x"], [3, 2, 1]) expect = bn.rankdata(v.data, axis=0) np.testing.assert_allclose(v.rank("x").values, expect) # str v = Variable(["x"], ["c", "b", "a"]) expect = bn.rankdata(v.data, axis=0) np.testing.assert_allclose(v.rank("x").values, expect) # pct v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]) v_expect = Variable(["x"], [0.75, 0.25, np.nan, 0.5, 1.0]) assert_equal(v.rank("x", pct=True), v_expect) # invalid dim with pytest.raises(ValueError): # apply_ufunc error message isn't great here β€” `ValueError: tuple.index(x): x not in tuple` v.rank("y") def test_big_endian_reduce(self): # regression test for GH489 data = np.ones(5, dtype=">f4") v = Variable(["x"], data) expected = Variable([], 5) assert_identical(expected, v.sum()) def test_reduce_funcs(self): v = Variable("x", np.array([1, np.nan, 2, 3])) assert_identical(v.mean(), Variable([], 2)) assert_identical(v.mean(skipna=True), Variable([], 2)) assert_identical(v.mean(skipna=False), Variable([], np.nan)) assert_identical(np.mean(v), Variable([], 2)) assert_identical(v.prod(), Variable([], 6)) assert_identical(v.cumsum(axis=0), Variable("x", np.array([1, 1, 3, 6]))) assert_identical(v.cumprod(axis=0), Variable("x", np.array([1, 1, 2, 6]))) assert_identical(v.var(), Variable([], 2.0 / 3)) assert_identical(v.median(), Variable([], 2)) v = Variable("x", [True, False, False]) assert_identical(v.any(), Variable([], True)) assert_identical(v.all(dim="x"), Variable([], False)) v = Variable("t", pd.date_range("2000-01-01", periods=3)) assert v.argmax(skipna=True, dim="t") == 2 assert_identical(v.max(), Variable([], pd.Timestamp("2000-01-03"))) def test_reduce_keepdims(self): v = Variable(["x", "y"], self.d) with set_options(use_numbagg=False): assert_identical( v.mean(keepdims=True), Variable(v.dims, np.mean(self.d, keepdims=True)) ) assert_identical( v.mean(dim="x", keepdims=True), Variable(v.dims, np.mean(self.d, axis=0, keepdims=True)), ) assert_identical( v.mean(dim="y", keepdims=True), Variable(v.dims, np.mean(self.d, axis=1, keepdims=True)), ) assert_identical( v.mean(dim=["y", "x"], keepdims=True), Variable(v.dims, np.mean(self.d, axis=(1, 0), keepdims=True)), ) v = Variable([], 1.0) assert_identical( v.mean(keepdims=True), Variable([], np.mean(v.data, keepdims=True)) ) @requires_dask def test_reduce_keepdims_dask(self): import dask.array v = Variable(["x", "y"], self.d).chunk() actual = v.mean(keepdims=True) assert isinstance(actual.data, dask.array.Array) expected = Variable(v.dims, np.mean(self.d, keepdims=True)) assert_identical(actual, expected) actual = v.mean(dim="y", keepdims=True) assert isinstance(actual.data, dask.array.Array) expected = Variable(v.dims, np.mean(self.d, 
axis=1, keepdims=True)) assert_identical(actual, expected) def test_reduce_keep_attrs(self): _attrs = {"units": "test", "long_name": "testing"} v = Variable(["x", "y"], self.d, _attrs) # Test dropped attrs vm = v.mean() assert len(vm.attrs) == 0 assert vm.attrs == {} # Test kept attrs vm = v.mean(keep_attrs=True) assert len(vm.attrs) == len(_attrs) assert vm.attrs == _attrs def test_binary_ops_keep_attrs(self): _attrs = {"units": "test", "long_name": "testing"} a = Variable(["x", "y"], np.random.randn(3, 3), _attrs) b = Variable(["x", "y"], np.random.randn(3, 3), _attrs) # Test dropped attrs d = a - b # just one operation assert d.attrs == {} # Test kept attrs with set_options(keep_attrs=True): d = a - b assert d.attrs == _attrs def test_count(self): expected = Variable([], 3) actual = Variable(["x"], [1, 2, 3, np.nan]).count() assert_identical(expected, actual) v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object)) actual = v.count() assert_identical(expected, actual) actual = Variable(["x"], [True, False, True]).count() assert_identical(expected, actual) assert actual.dtype == int expected = Variable(["x"], [2, 3]) actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y") assert_identical(expected, actual) def test_setitem(self): v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]]) v[0, 1] = 1 assert v[0, 1] == 1 v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]]) v[dict(x=[0, 1])] = 1 assert_array_equal(v[[0, 1]], np.ones_like(v[[0, 1]])) # boolean indexing v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]]) v[dict(x=[True, False])] = 1 assert_array_equal(v[0], np.ones_like(v[0])) v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]]) v[dict(x=[True, False], y=[False, True, False])] = 1 assert v[0, 1] == 1 def test_setitem_fancy(self): # assignment which should work as np.ndarray does def assert_assigned_2d(array, key_x, key_y, values): expected = array.copy() expected[key_x, key_y] = values v = Variable(["x", "y"], array) v[dict(x=key_x, y=key_y)] = values assert_array_equal(expected, v) # 1d vectorized indexing assert_assigned_2d( np.random.randn(4, 3), key_x=Variable(["a"], [0, 1]), key_y=Variable(["a"], [0, 1]), values=0, ) assert_assigned_2d( np.random.randn(4, 3), key_x=Variable(["a"], [0, 1]), key_y=Variable(["a"], [0, 1]), values=Variable((), 0), ) assert_assigned_2d( np.random.randn(4, 3), key_x=Variable(["a"], [0, 1]), key_y=Variable(["a"], [0, 1]), values=Variable(("a"), [3, 2]), ) assert_assigned_2d( np.random.randn(4, 3), key_x=slice(None), key_y=Variable(["a"], [0, 1]), values=Variable(("a"), [3, 2]), ) # 2d-vectorized indexing assert_assigned_2d( np.random.randn(4, 3), key_x=Variable(["a", "b"], [[0, 1]]), key_y=Variable(["a", "b"], [[1, 0]]), values=0, ) assert_assigned_2d( np.random.randn(4, 3), key_x=Variable(["a", "b"], [[0, 1]]), key_y=Variable(["a", "b"], [[1, 0]]), values=[0], ) assert_assigned_2d( np.random.randn(5, 4), key_x=Variable(["a", "b"], [[0, 1], [2, 3]]), key_y=Variable(["a", "b"], [[1, 0], [3, 3]]), values=[2, 3], ) # vindex with slice v = Variable(["x", "y", "z"], np.ones((4, 3, 2))) ind = Variable(["a"], [0, 1]) v[dict(x=ind, z=ind)] = 0 expected = Variable(["x", "y", "z"], np.ones((4, 3, 2))) expected[0, :, 0] = 0 expected[1, :, 1] = 0 assert_identical(expected, v) # dimension broadcast v = Variable(["x", "y"], np.ones((3, 2))) ind = Variable(["a", "b"], [[0, 1]]) v[ind, :] = 0 expected = Variable(["x", "y"], [[0, 0], [0, 0], [1, 1]]) assert_identical(expected, v) with pytest.raises(ValueError, match=r"shape mismatch"): v[ind, ind] = 
np.zeros((1, 2, 1)) v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]]) ind = Variable(["a"], [0, 1]) v[dict(x=ind)] = Variable(["a", "y"], np.ones((2, 3), dtype=int) * 10) assert_array_equal(v[0], np.ones_like(v[0]) * 10) assert_array_equal(v[1], np.ones_like(v[1]) * 10) assert v.dims == ("x", "y") # dimension should not change # increment v = Variable(["x", "y"], np.arange(6).reshape(3, 2)) ind = Variable(["a"], [0, 1]) v[dict(x=ind)] += 1 expected = Variable(["x", "y"], [[1, 2], [3, 4], [4, 5]]) assert_identical(v, expected) ind = Variable(["a"], [0, 0]) v[dict(x=ind)] += 1 expected = Variable(["x", "y"], [[2, 3], [3, 4], [4, 5]]) assert_identical(v, expected) def test_coarsen(self): v = self.cls(["x"], [0, 1, 2, 3, 4]) actual = v.coarsen({"x": 2}, boundary="pad", func="mean") expected = self.cls(["x"], [0.5, 2.5, 4]) assert_identical(actual, expected) actual = v.coarsen({"x": 2}, func="mean", boundary="pad", side="right") expected = self.cls(["x"], [0, 1.5, 3.5]) assert_identical(actual, expected) actual = v.coarsen({"x": 2}, func=np.mean, side="right", boundary="trim") expected = self.cls(["x"], [1.5, 3.5]) assert_identical(actual, expected) # working test v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2)) for windows, func, side, boundary in [ ({"x": 2}, np.mean, "left", "trim"), ({"x": 2}, np.median, {"x": "left"}, "pad"), ({"x": 2, "y": 3}, np.max, "left", {"x": "pad", "y": "trim"}), ]: v.coarsen(windows, func, boundary, side) def test_coarsen_2d(self): # 2d-mean should be the same with the successive 1d-mean v = self.cls(["x", "y"], np.arange(6 * 12).reshape(6, 12)) actual = v.coarsen({"x": 3, "y": 4}, func="mean") expected = v.coarsen({"x": 3}, func="mean").coarsen({"y": 4}, func="mean") assert_equal(actual, expected) v = self.cls(["x", "y"], np.arange(7 * 12).reshape(7, 12)) actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim") expected = v.coarsen({"x": 3}, func="mean", boundary="trim").coarsen( {"y": 4}, func="mean", boundary="trim" ) assert_equal(actual, expected) # if there is nan, the two should be different v = self.cls(["x", "y"], 1.0 * np.arange(6 * 12).reshape(6, 12)) v[2, 4] = np.nan v[3, 5] = np.nan actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim") expected = ( v.coarsen({"x": 3}, func="sum", boundary="trim").coarsen( {"y": 4}, func="sum", boundary="trim" ) / 12 ) assert not actual.equals(expected) # adjusting the nan count expected[0, 1] *= 12 / 11 expected[1, 1] *= 12 / 11 assert_allclose(actual, expected) v = self.cls(("x", "y"), np.arange(4 * 4, dtype=np.float32).reshape(4, 4)) actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact") expected = self.cls(("x", "y"), 4 * np.ones((2, 2))) assert_equal(actual, expected) v[0, 0] = np.nan v[-1, -1] = np.nan expected[0, 0] = 3 expected[-1, -1] = 3 actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact") assert_equal(actual, expected) actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=False) expected = self.cls(("x", "y"), [[np.nan, 18], [42, np.nan]]) assert_equal(actual, expected) actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=True) expected = self.cls(("x", "y"), [[10, 18], [42, 35]]) assert_equal(actual, expected) # perhaps @pytest.mark.parametrize("operation", [f for f in duck_array_ops]) def test_coarsen_keep_attrs(self, operation="mean"): _attrs = {"units": "test", "long_name": "testing"} test_func = getattr(duck_array_ops, operation, None) # Test dropped attrs with set_options(keep_attrs=False): new = 
Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen( windows={"coord": 1}, func=test_func, boundary="exact", side="left" ) assert new.attrs == {} # Test kept attrs with set_options(keep_attrs=True): new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen( windows={"coord": 1}, func=test_func, boundary="exact", side="left", ) assert new.attrs == _attrs @requires_dask class TestVariableWithDask(VariableSubclassobjects): def cls(self, *args, **kwargs) -> Variable: return Variable(*args, **kwargs).chunk() def test_chunk(self): unblocked = Variable(["dim_0", "dim_1"], np.ones((3, 4))) assert unblocked.chunks is None blocked = unblocked.chunk() assert blocked.chunks == ((3,), (4,)) first_dask_name = blocked.data.name blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) assert blocked.chunks == ((2, 1), (2, 2)) assert blocked.data.name != first_dask_name blocked = unblocked.chunk(chunks=(3, 3)) assert blocked.chunks == ((3,), (3, 1)) assert blocked.data.name != first_dask_name # name doesn't change when rechunking by same amount # this fails if ReprObject doesn't have __dask_tokenize__ defined assert unblocked.chunk(2).data.name == unblocked.chunk(2).data.name assert blocked.load().chunks is None # Check that kwargs are passed import dask.array as da blocked = unblocked.chunk(name="testname_") assert isinstance(blocked.data, da.Array) assert "testname_" in blocked.data.name # test kwargs form of chunks blocked = unblocked.chunk(dim_0=3, dim_1=3) assert blocked.chunks == ((3,), (3, 1)) assert blocked.data.name != first_dask_name @pytest.mark.skip def test_0d_object_array_with_list(self): super().test_0d_object_array_with_list() @pytest.mark.skip def test_array_interface(self): # dask array does not have `argsort` super().test_array_interface() @pytest.mark.skip def test_copy_index(self): super().test_copy_index() @pytest.mark.skip @pytest.mark.filterwarnings("ignore:elementwise comparison failed.*:FutureWarning") def test_eq_all_dtypes(self): super().test_eq_all_dtypes() def test_getitem_fancy(self): super().test_getitem_fancy() def test_getitem_1d_fancy(self): super().test_getitem_1d_fancy() def test_getitem_with_mask_nd_indexer(self): import dask.array as da v = Variable(["x"], da.arange(3, chunks=3)) indexer = Variable(("x", "y"), [[0, -1], [-1, 2]]) assert_identical( v._getitem_with_mask(indexer, fill_value=-1), self.cls(("x", "y"), [[0, -1], [-1, 2]]), ) @pytest.mark.parametrize("dim", ["x", "y"]) @pytest.mark.parametrize("window", [3, 8, 11]) @pytest.mark.parametrize("center", [True, False]) def test_dask_rolling(self, dim, window, center): import dask import dask.array as da dask.config.set(scheduler="single-threaded") x = Variable(("x", "y"), np.array(np.random.randn(100, 40), dtype=float)) dx = Variable(("x", "y"), da.from_array(x, chunks=[(6, 30, 30, 20, 14), 8])) expected = x.rolling_window( dim, window, "window", center=center, fill_value=np.nan ) with raise_if_dask_computes(): actual = dx.rolling_window( dim, window, "window", center=center, fill_value=np.nan ) assert isinstance(actual.data, da.Array) assert actual.shape == expected.shape assert_equal(actual, expected) @pytest.mark.xfail(reason="https://github.com/dask/dask/issues/11585") def test_multiindex(self): super().test_multiindex() @pytest.mark.parametrize( "mode", [ "mean", pytest.param( "median", marks=pytest.mark.xfail(reason="median is not implemented by Dask"), ), pytest.param( "reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug") ), "edge", "linear_ramp", "maximum", "minimum", 
"symmetric", "wrap", ], ) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) @pytest.mark.filterwarnings( r"ignore:dask.array.pad.+? converts integers to floats." ) def test_pad(self, mode, xr_arg, np_arg): super().test_pad(mode, xr_arg, np_arg) @pytest.mark.skip(reason="dask doesn't support extension arrays") def test_pandas_period_index(self): super().test_pandas_period_index() @pytest.mark.skip(reason="dask doesn't support extension arrays") def test_pandas_datetime64_with_tz(self): super().test_pandas_datetime64_with_tz() @pytest.mark.skip(reason="dask doesn't support extension arrays") def test_pandas_categorical_dtype(self): super().test_pandas_categorical_dtype() @requires_sparse class TestVariableWithSparse: # TODO inherit VariableSubclassobjects to cover more tests def test_as_sparse(self): data = np.arange(12).reshape(3, 4) var = Variable(("x", "y"), data)._as_sparse(fill_value=-1) actual = var._to_dense() assert_identical(var, actual) class TestIndexVariable(VariableSubclassobjects): def cls(self, *args, **kwargs) -> IndexVariable: return IndexVariable(*args, **kwargs) def test_init(self): with pytest.raises(ValueError, match=r"must be 1-dimensional"): IndexVariable((), 0) def test_to_index(self): data = 0.5 * np.arange(10) v = IndexVariable(["time"], data, {"foo": "bar"}) assert pd.Index(data, name="time").identical(v.to_index()) def test_to_index_multiindex_level(self): midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) with pytest.warns(FutureWarning): ds = Dataset(coords={"x": midx}) assert ds.one.variable.to_index().equals(midx.get_level_values("one")) def test_multiindex_default_level_names(self): midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]]) v = IndexVariable(["x"], midx, {"foo": "bar"}) assert v.to_index().names == ("x_level_0", "x_level_1") def test_data(self): x = IndexVariable("x", np.arange(3.0)) assert isinstance(x._data, PandasIndexingAdapter) assert isinstance(x.data, np.ndarray) assert float == x.dtype assert_array_equal(np.arange(3), x) assert float == x.values.dtype with pytest.raises(TypeError, match=r"cannot be modified"): x[:] = 0 def test_name(self): coord = IndexVariable("x", [10.0]) assert coord.name == "x" with pytest.raises(AttributeError): coord.name = "y" def test_level_names(self): midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=["level_1", "level_2"] ) x = IndexVariable("x", midx) assert x.level_names == midx.names assert IndexVariable("y", [10.0]).level_names is None def test_get_level_variable(self): midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2]], names=["level_1", "level_2"] ) x = IndexVariable("x", midx) level_1 = IndexVariable("x", midx.get_level_values("level_1")) assert_identical(x.get_level_variable("level_1"), level_1) with pytest.raises(ValueError, match=r"has no MultiIndex"): IndexVariable("y", [10.0]).get_level_variable("level") def test_concat_periods(self): periods = pd.period_range("2000-01-01", periods=10) coords = [IndexVariable("t", periods[:5]), IndexVariable("t", periods[5:])] expected = IndexVariable("t", periods) actual = IndexVariable.concat(coords, dim="t") assert_identical(actual, expected) assert isinstance(actual.to_index(), pd.PeriodIndex) positions = [list(range(5)), list(range(5, 10))] actual = IndexVariable.concat(coords, dim="t", positions=positions) assert_identical(actual, expected) assert isinstance(actual.to_index(), pd.PeriodIndex) def test_concat_multiindex(self): idx = pd.MultiIndex.from_product([[0, 1, 2], ["a", "b"]]) coords = 
[IndexVariable("x", idx[:2]), IndexVariable("x", idx[2:])] expected = IndexVariable("x", idx) actual = IndexVariable.concat(coords, dim="x") assert_identical(actual, expected) assert isinstance(actual.to_index(), pd.MultiIndex) @pytest.mark.parametrize("dtype", [str, bytes]) def test_concat_str_dtype(self, dtype): a = IndexVariable("x", np.array(["a"], dtype=dtype)) b = IndexVariable("x", np.array(["b"], dtype=dtype)) expected = IndexVariable("x", np.array(["a", "b"], dtype=dtype)) actual = IndexVariable.concat([a, b]) assert actual.identical(expected) assert np.issubdtype(actual.dtype, dtype) def test_datetime64(self): # GH:1932 Make sure indexing keeps precision t = np.array([1518418799999986560, 1518418799999996560], dtype="datetime64[ns]") v = IndexVariable("t", t) assert v[0].data == t[0] # These tests make use of multi-dimensional variables, which are not valid # IndexVariable objects: @pytest.mark.skip def test_getitem_error(self): super().test_getitem_error() @pytest.mark.skip def test_getitem_advanced(self): super().test_getitem_advanced() @pytest.mark.skip def test_getitem_fancy(self): super().test_getitem_fancy() @pytest.mark.skip def test_getitem_uint(self): super().test_getitem_fancy() @pytest.mark.skip @pytest.mark.parametrize( "mode", [ "mean", "median", "reflect", "edge", "linear_ramp", "maximum", "minimum", "symmetric", "wrap", ], ) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) def test_pad(self, mode, xr_arg, np_arg): super().test_pad(mode, xr_arg, np_arg) @pytest.mark.skip def test_pad_constant_values(self, xr_arg, np_arg): super().test_pad_constant_values(xr_arg, np_arg) @pytest.mark.skip def test_rolling_window(self): super().test_rolling_window() @pytest.mark.skip def test_rolling_1d(self): super().test_rolling_1d() @pytest.mark.skip def test_nd_rolling(self): super().test_nd_rolling() @pytest.mark.skip def test_rolling_window_errors(self): super().test_rolling_window_errors() @pytest.mark.skip def test_coarsen_2d(self): super().test_coarsen_2d() def test_to_index_variable_copy(self) -> None: # to_index_variable should return a copy # https://github.com/pydata/xarray/issues/6931 a = IndexVariable("x", ["a"]) b = a.to_index_variable() assert a is not b b.dims = ("y",) assert a.dims == ("x",) class TestAsCompatibleData(Generic[T_DuckArray]): def test_unchanged_types(self): types = (np.asarray, PandasIndexingAdapter, LazilyIndexedArray) for t in types: for data in [ np.arange(3), pd.date_range("2000-01-01", periods=3), pd.date_range("2000-01-01", periods=3).values, ]: x = t(data) assert source_ndarray(x) is source_ndarray(as_compatible_data(x)) def test_converted_types(self): for input_array in [ [[0, 1, 2]], pd.DataFrame([[0, 1, 2]]), np.float64(1.4), np.str_("abc"), ]: actual = as_compatible_data(input_array) assert_array_equal(np.asarray(input_array), actual) assert np.ndarray is type(actual) assert np.asarray(input_array).dtype == actual.dtype def test_masked_array(self): original = np.ma.MaskedArray(np.arange(5)) expected = np.arange(5) actual = as_compatible_data(original) assert_array_equal(expected, actual) assert np.dtype(int) == actual.dtype original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True]) expected = np.arange(5.0) expected[-1] = np.nan actual = as_compatible_data(original) assert_array_equal(expected, actual) assert np.dtype(float) == actual.dtype original = np.ma.MaskedArray([1.0, 2.0], mask=[True, False]) original.flags.writeable = False expected = [np.nan, 2.0] actual = as_compatible_data(original) 
assert_array_equal(expected, actual) assert np.dtype(float) == actual.dtype # GH2377 actual = Variable(dims=tuple(), data=np.ma.masked) expected = Variable(dims=tuple(), data=np.nan) assert_array_equal(expected, actual) assert actual.dtype == expected.dtype def test_datetime(self): expected = np.datetime64("2000-01-01") actual = as_compatible_data(expected) assert expected == actual assert np.ndarray is type(actual) assert np.dtype("datetime64[s]") == actual.dtype expected = np.array([np.datetime64("2000-01-01")]) actual = as_compatible_data(expected) assert np.asarray(expected) == actual assert np.ndarray is type(actual) assert np.dtype("datetime64[s]") == actual.dtype expected = np.array([np.datetime64("2000-01-01", "ns")]) actual = as_compatible_data(expected) assert np.asarray(expected) == actual assert np.ndarray is type(actual) assert np.dtype("datetime64[ns]") == actual.dtype assert expected is source_ndarray(np.asarray(actual)) expected = np.datetime64( "2000-01-01", "us" if has_pandas_3 else "ns", ) actual = as_compatible_data(datetime(2000, 1, 1)) assert np.asarray(expected) == actual assert np.ndarray is type(actual) assert expected.dtype == actual.dtype def test_tz_datetime(self) -> None: tz = pytz.timezone("America/New_York") times_ns = pd.date_range("2000", periods=1, tz=tz) times_s = times_ns.astype(pd.DatetimeTZDtype("s", tz)) # type: ignore[arg-type] with warnings.catch_warnings(): warnings.simplefilter("ignore") actual: T_DuckArray = as_compatible_data(times_s) assert actual.array == times_s assert actual.array.dtype == pd.DatetimeTZDtype("s", tz) # type: ignore[arg-type] series = pd.Series(times_s) with warnings.catch_warnings(): warnings.simplefilter("ignore") actual2: T_DuckArray = as_compatible_data(series) np.testing.assert_array_equal(actual2, np.asarray(series.values)) assert actual2.dtype == np.dtype("datetime64[s]") def test_full_like(self) -> None: # For more thorough tests, see test_variable.py orig = Variable( dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"} ) expect = orig.copy(deep=True) # see https://github.com/python/mypy/issues/3004 for why we need to ignore type expect.values = [[2.0, 2.0], [2.0, 2.0]] # type: ignore[assignment,unused-ignore] assert_identical(expect, full_like(orig, 2)) # override dtype expect.values = [[True, True], [True, True]] # type: ignore[assignment,unused-ignore] assert expect.dtype == bool assert_identical(expect, full_like(orig, True, dtype=bool)) # raise error on non-scalar fill_value with pytest.raises(ValueError, match=r"must be scalar"): full_like(orig, [1.0, 2.0]) with pytest.raises(ValueError, match="'dtype' cannot be dict-like"): full_like(orig, True, dtype={"x": bool}) @requires_dask def test_full_like_dask(self) -> None: orig = Variable( dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"} ).chunk(dict(x=(1, 1), y=(2,))) def check(actual, expect_dtype, expect_values): assert actual.dtype == expect_dtype assert actual.shape == orig.shape assert actual.dims == orig.dims assert actual.attrs == orig.attrs assert actual.chunks == orig.chunks assert_array_equal(actual.values, expect_values) check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2)) # override dtype check( full_like(orig, True, dtype=bool), bool, np.full_like(orig.values, True, dtype=bool), ) # Check that there's no array stored inside dask # (e.g. we didn't create a numpy array and then we chunked it!) 
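        # Rough idea of the check (sketch): walk the low-level dask graph and make
        # sure no task value is a materialized numpy array, e.g.
        #     graph = dict(full_like(orig, 1).data.dask)
        #     assert not any(isinstance(task, np.ndarray) for task in graph.values())
        # The loop below does the same, additionally unpacking tuple-valued tasks.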
dsk = full_like(orig, 1).data.dask for v in dsk.values(): if isinstance(v, tuple): for vi in v: assert not isinstance(vi, np.ndarray) else: assert not isinstance(v, np.ndarray) def test_zeros_like(self) -> None: orig = Variable( dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"} ) assert_identical(zeros_like(orig), full_like(orig, 0)) assert_identical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int)) def test_ones_like(self) -> None: orig = Variable( dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"} ) assert_identical(ones_like(orig), full_like(orig, 1)) assert_identical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int)) def test_numpy_ndarray_subclass(self): class SubclassedArray(np.ndarray): def __new__(cls, array, foo): obj = np.asarray(array).view(cls) obj.foo = foo return obj data = SubclassedArray([1, 2, 3], foo="bar") actual = as_compatible_data(data) assert isinstance(actual, SubclassedArray) assert actual.foo == "bar" assert_array_equal(data, actual) def test_numpy_matrix(self): with pytest.warns(PendingDeprecationWarning): data = np.matrix([[1, 2], [3, 4]]) actual = as_compatible_data(data) assert isinstance(actual, np.ndarray) assert_array_equal(data, actual) def test_unsupported_type(self): # Non indexable type class CustomArray(NDArrayMixin): def __init__(self, array): self.array = array class CustomIndexable(CustomArray, indexing.ExplicitlyIndexed): pass # Type with data stored in values attribute class CustomWithValuesAttr: def __init__(self, array): self.values = array array = CustomArray(np.arange(3)) orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"}) assert isinstance(orig._data, np.ndarray) # should not be CustomArray array = CustomIndexable(np.arange(3)) orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"}) assert isinstance(orig._data, CustomIndexable) array = CustomWithValuesAttr(np.arange(3)) orig = Variable(dims=(), data=array) assert isinstance(orig._data.item(), CustomWithValuesAttr) def test_raise_no_warning_for_nan_in_binary_ops(): with assert_no_warnings(): _ = Variable("x", [1, 2, np.nan]) > 0 class TestBackendIndexing: """Make sure all the array wrappers can be indexed.""" @pytest.fixture(autouse=True) def setUp(self): self.d = np.random.random((10, 3)).astype(np.float64) self.cat = PandasExtensionArray(pd.Categorical(["a", "b"] * 5)) async def check_orthogonal_indexing(self, v, load_async): expected = self.d[[8, 3]][:, [2, 1]] if load_async: result = await v.isel(x=[8, 3], y=[2, 1]).load_async() else: result = v.isel(x=[8, 3], y=[2, 1]) assert np.allclose(result, expected) async def check_vectorized_indexing(self, v, load_async): ind_x = Variable("z", [0, 2]) ind_y = Variable("z", [2, 1]) expected = self.d[ind_x, ind_y] if load_async: result = await v.isel(x=ind_x, y=ind_y).load_async() else: result = v.isel(x=ind_x, y=ind_y).load() assert np.allclose(result, expected) @pytest.mark.asyncio @pytest.mark.parametrize("load_async", [True, False]) async def test_NumpyIndexingAdapter(self, load_async): v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d)) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) # could not doubly wrapping with pytest.raises(TypeError, match=r"NumpyIndexingAdapter only wraps "): v = Variable( dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d)) ) def test_extension_array_duck_array(self): lazy = LazilyIndexedArray(self.cat) assert (lazy.get_duck_array().array == self.cat).all() def 
test_extension_array_duck_indexed(self): lazy = Variable(dims=("x"), data=LazilyIndexedArray(self.cat)) assert (lazy[[0, 1, 5]] == ["a", "b", "b"]).all() @pytest.mark.asyncio @pytest.mark.parametrize("load_async", [True, False]) async def test_LazilyIndexedArray(self, load_async): v = Variable(dims=("x", "y"), data=LazilyIndexedArray(self.d)) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) # doubly wrapping v = Variable( dims=("x", "y"), data=LazilyIndexedArray(LazilyIndexedArray(self.d)), ) await self.check_orthogonal_indexing(v, load_async) # hierarchical wrapping v = Variable( dims=("x", "y"), data=LazilyIndexedArray(NumpyIndexingAdapter(self.d)) ) await self.check_orthogonal_indexing(v, load_async) @pytest.mark.asyncio @pytest.mark.parametrize("load_async", [True, False]) async def test_CopyOnWriteArray(self, load_async): v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d)) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) # doubly wrapping v = Variable(dims=("x", "y"), data=CopyOnWriteArray(LazilyIndexedArray(self.d))) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) @pytest.mark.asyncio @pytest.mark.parametrize("load_async", [True, False]) async def test_MemoryCachedArray(self, load_async): v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d)) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) # doubly wrapping v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d))) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) @requires_dask @pytest.mark.asyncio @pytest.mark.parametrize("load_async", [True, False]) async def test_DaskIndexingAdapter(self, load_async): import dask.array as da dask_array = da.asarray(self.d) v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(dask_array)) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) # doubly wrapping v = Variable( dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(dask_array)) ) await self.check_orthogonal_indexing(v, load_async) await self.check_vectorized_indexing(v, load_async) def test_clip(var): # Copied from test_dataarray (would there be a way to combine the tests?) result = var.clip(min=0.5) assert result.min(...) >= 0.5 result = var.clip(max=0.5) assert result.max(...) <= 0.5 result = var.clip(min=0.25, max=0.75) assert result.min(...) >= 0.25 assert result.max(...) 
<= 0.75 result = var.clip(min=var.mean("x"), max=var.mean("z")) assert result.dims == var.dims assert_array_equal( result.data, np.clip( var.data, var.mean("x").data[np.newaxis, :, :], var.mean("z").data[:, :, np.newaxis], ), ) @pytest.mark.parametrize("Var", [Variable, IndexVariable]) class TestNumpyCoercion: def test_from_numpy(self, Var): v = Var("x", [1, 2, 3]) assert_identical(v.as_numpy(), v) np.testing.assert_equal(v.to_numpy(), np.array([1, 2, 3])) @requires_dask def test_from_dask(self, Var): v = Var("x", [1, 2, 3]) v_chunked = v.chunk(1) assert_identical(v_chunked.as_numpy(), v.compute()) np.testing.assert_equal(v.to_numpy(), np.array([1, 2, 3])) @requires_pint def test_from_pint(self, Var): import pint arr = np.array([1, 2, 3]) # IndexVariable strips the unit with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pint.UnitStrippedWarning) v = Var("x", pint.Quantity(arr, units="m")) assert_identical(v.as_numpy(), Var("x", arr)) np.testing.assert_equal(v.to_numpy(), arr) @requires_sparse def test_from_sparse(self, Var): if Var is IndexVariable: pytest.skip("Can't have 2D IndexVariables") import sparse arr = np.diagflat([1, 2, 3]) coords = np.array([[0, 1, 2], [0, 1, 2]]) sparr = sparse.COO(coords=coords, data=[1, 2, 3], shape=(3, 3)) v = Variable(["x", "y"], sparr) assert_identical(v.as_numpy(), Variable(["x", "y"], arr)) np.testing.assert_equal(v.to_numpy(), arr) @requires_cupy def test_from_cupy(self, Var): if Var is IndexVariable: pytest.skip("cupy in default indexes is not supported at the moment") import cupy as cp arr = np.array([1, 2, 3]) v = Var("x", cp.array(arr)) assert_identical(v.as_numpy(), Var("x", arr)) np.testing.assert_equal(v.to_numpy(), arr) @requires_dask @requires_pint def test_from_pint_wrapping_dask(self, Var): import dask import pint arr = np.array([1, 2, 3]) d = dask.array.from_array(np.array([1, 2, 3])) # IndexVariable strips the unit with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pint.UnitStrippedWarning) v = Var("x", pint.Quantity(d, units="m")) result = v.as_numpy() assert_identical(result, Var("x", arr)) np.testing.assert_equal(v.to_numpy(), arr) @pytest.mark.parametrize( ("values", "unit"), [ (np.datetime64("2000-01-01", "ns"), "ns"), (np.datetime64("2000-01-01", "s"), "s"), (np.array([np.datetime64("2000-01-01", "ns")]), "ns"), (np.array([np.datetime64("2000-01-01", "s")]), "s"), (pd.date_range("2000", periods=1), "ns"), ( datetime(2000, 1, 1), "us" if has_pandas_3 else "ns", ), ( np.array([datetime(2000, 1, 1)]), "us" if has_pandas_3 else "ns", ), (pd.date_range("2000", periods=1, tz=pytz.timezone("America/New_York")), "ns"), ( pd.Series( pd.date_range("2000", periods=1, tz=pytz.timezone("America/New_York")) ), "ns", ), ], ids=lambda x: f"{x}", ) def test_datetime_conversion(values, unit) -> None: # todo: check for redundancy (suggested per review) dims = ["time"] if isinstance(values, np.ndarray | pd.Index | pd.Series) else [] var = Variable(dims, values) if var.dtype.kind == "M" and isinstance(var.dtype, np.dtype): assert var.dtype == np.dtype(f"datetime64[{unit}]") else: # The only case where a non-datetime64 dtype can occur currently is in # the case that the variable is backed by a timezone-aware # DatetimeIndex, and thus is hidden within the PandasIndexingAdapter class. 
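    # Illustrative sketch of that case (mirrors the assertions below):
    #     idx = pd.date_range("2000", periods=1, tz=pytz.timezone("America/New_York"))
    #     Variable(["time"], idx)._data.array.dtype
    #     # -> DatetimeTZDtype(unit="ns", tz="America/New_York")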
assert isinstance(var._data, PandasIndexingAdapter) assert var._data.array.dtype == pd.DatetimeTZDtype( "ns", pytz.timezone("America/New_York") ) tz_ny = pytz.timezone("America/New_York") @pytest.mark.parametrize( ["data", "dtype"], [ pytest.param(pd.date_range("2000", periods=1), "datetime64[s]", id="index-sec"), pytest.param( pd.Series(pd.date_range("2000", periods=1)), "datetime64[s]", id="series-sec", ), pytest.param( pd.date_range("2000", periods=1, tz=tz_ny), pd.DatetimeTZDtype("s", tz_ny), # type: ignore[arg-type] id="index-timezone", ), pytest.param( pd.Series(pd.date_range("2000", periods=1, tz=tz_ny)), pd.DatetimeTZDtype("s", tz_ny), # type: ignore[arg-type] id="series-timezone", ), ], ) def test_pandas_two_only_datetime_conversion_warnings( data: pd.DatetimeIndex | pd.Series, dtype: str | pd.DatetimeTZDtype ) -> None: # todo: check for redundancy (suggested per review) var = Variable(["time"], data.astype(dtype)) # type: ignore[arg-type] # we internally convert series to numpy representations to avoid too much nastiness with extension arrays # when calling data.array e.g., with NumpyExtensionArrays if isinstance(data, pd.Series): assert var.dtype == np.dtype("datetime64[s]") elif var.dtype.kind == "M": assert var.dtype == dtype else: # The only case where a non-datetime64 dtype can occur currently is in # the case that the variable is backed by a timezone-aware # DatetimeIndex, and thus is hidden within the PandasIndexingAdapter class. assert isinstance(var._data, PandasIndexingAdapter) assert var._data.array.dtype == pd.DatetimeTZDtype("s", tz_ny) @pytest.mark.parametrize( ("values", "unit"), [ (np.timedelta64(10, "ns"), "ns"), (np.timedelta64(10, "s"), "s"), (np.array([np.timedelta64(10, "ns")]), "ns"), (np.array([np.timedelta64(10, "s")]), "s"), (pd.timedelta_range("1", periods=1), "ns"), (timedelta(days=1), "ns"), (np.array([timedelta(days=1)]), "ns"), (pd.timedelta_range("1", periods=1).astype("timedelta64[s]"), "s"), ], ids=lambda x: f"{x}", ) def test_timedelta_conversion(values, unit) -> None: # todo: check for redundancy dims = ["time"] if isinstance(values, np.ndarray | pd.Index) else [] var = Variable(dims, values) assert var.dtype == np.dtype(f"timedelta64[{unit}]") xarray-2025.09.0/xarray/tests/test_weighted.py000066400000000000000000000574011505620616400212340ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Iterable from typing import Any import numpy as np import pytest import xarray as xr from xarray import DataArray, Dataset from xarray.tests import ( assert_allclose, assert_equal, raise_if_dask_computes, requires_cftime, requires_dask, ) @pytest.mark.parametrize("as_dataset", (True, False)) def test_weighted_non_DataArray_weights(as_dataset: bool) -> None: data: DataArray | Dataset = DataArray([1, 2]) if as_dataset: data = data.to_dataset(name="data") with pytest.raises(ValueError, match=r"`weights` must be a DataArray"): data.weighted([1, 2]) # type: ignore[arg-type] @pytest.mark.parametrize("as_dataset", (True, False)) @pytest.mark.parametrize("weights", ([np.nan, 2], [np.nan, np.nan])) def test_weighted_weights_nan_raises(as_dataset: bool, weights: list[float]) -> None: data: DataArray | Dataset = DataArray([1, 2]) if as_dataset: data = data.to_dataset(name="data") with pytest.raises(ValueError, match="`weights` cannot contain missing values."): data.weighted(DataArray(weights)) @requires_dask @pytest.mark.parametrize("as_dataset", (True, False)) @pytest.mark.parametrize("weights", ([np.nan, 2], [np.nan, np.nan])) 
def test_weighted_weights_nan_raises_dask(as_dataset, weights): data = DataArray([1, 2]).chunk({"dim_0": -1}) if as_dataset: data = data.to_dataset(name="data") weights = DataArray(weights).chunk({"dim_0": -1}) with raise_if_dask_computes(): weighted = data.weighted(weights) with pytest.raises(ValueError, match="`weights` cannot contain missing values."): weighted.sum().load() @requires_cftime @requires_dask @pytest.mark.parametrize("time_chunks", (1, 5)) @pytest.mark.parametrize("resample_spec", ("1YS", "5YS", "10YS")) def test_weighted_lazy_resample(time_chunks, resample_spec): # https://github.com/pydata/xarray/issues/4625 # simple customized weighted mean function def mean_func(ds): return ds.weighted(ds.weights).mean("time") # example dataset t = xr.date_range(start="2000", periods=20, freq="1YS", use_cftime=True) weights = xr.DataArray(np.random.rand(len(t)), dims=["time"], coords={"time": t}) data = xr.DataArray( np.random.rand(len(t)), dims=["time"], coords={"time": t, "weights": weights} ) ds = xr.Dataset({"data": data}).chunk({"time": time_chunks}) with raise_if_dask_computes(): ds.resample(time=resample_spec).map(mean_func) @pytest.mark.parametrize( ("weights", "expected"), (([1, 2], 3), ([2, 0], 2), ([0, 0], np.nan), ([-1, 1], np.nan)), ) def test_weighted_sum_of_weights_no_nan(weights, expected): da = DataArray([1, 2]) weights = DataArray(weights) result = da.weighted(weights).sum_of_weights() expected = DataArray(expected) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([1, 2], 2), ([2, 0], np.nan), ([0, 0], np.nan), ([-1, 1], 1)), ) def test_weighted_sum_of_weights_nan(weights, expected): da = DataArray([np.nan, 2]) weights = DataArray(weights) result = da.weighted(weights).sum_of_weights() expected = DataArray(expected) assert_equal(expected, result) def test_weighted_sum_of_weights_bool(): # https://github.com/pydata/xarray/issues/4074 da = DataArray([1, 2]) weights = DataArray([True, True]) result = da.weighted(weights).sum_of_weights() expected = DataArray(2) assert_equal(expected, result) @pytest.mark.parametrize("da", ([1.0, 2], [1, np.nan], [np.nan, np.nan])) @pytest.mark.parametrize("factor", [0, 1, 3.14]) @pytest.mark.parametrize("skipna", (True, False)) def test_weighted_sum_equal_weights(da, factor, skipna): # if all weights are 'f'; weighted sum is f times the ordinary sum da = DataArray(da) weights = xr.full_like(da, factor) expected = da.sum(skipna=skipna) * factor result = da.weighted(weights).sum(skipna=skipna) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([1, 2], 5), ([0, 2], 4), ([0, 0], 0)) ) def test_weighted_sum_no_nan(weights, expected): da = DataArray([1, 2]) weights = DataArray(weights) result = da.weighted(weights).sum() expected = DataArray(expected) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([1, 2], 4), ([0, 2], 4), ([1, 0], 0), ([0, 0], 0)) ) @pytest.mark.parametrize("skipna", (True, False)) def test_weighted_sum_nan(weights, expected, skipna): da = DataArray([np.nan, 2]) weights = DataArray(weights) result = da.weighted(weights).sum(skipna=skipna) if skipna: expected = DataArray(expected) else: expected = DataArray(np.nan) assert_equal(expected, result) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize("da", ([1.0, 2], [1, np.nan], [np.nan, np.nan])) @pytest.mark.parametrize("skipna", (True, False)) @pytest.mark.parametrize("factor", [1, 2, 3.14]) def test_weighted_mean_equal_weights(da, skipna, factor): # if all weights 
are equal (!= 0), should yield the same result as mean da = DataArray(da) # all weights as 1. weights = xr.full_like(da, factor) expected = da.mean(skipna=skipna) result = da.weighted(weights).mean(skipna=skipna) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([4, 6], 1.6), ([1, 0], 1.0), ([0, 0], np.nan)) ) def test_weighted_mean_no_nan(weights, expected): da = DataArray([1, 2]) weights = DataArray(weights) expected = DataArray(expected) result = da.weighted(weights).mean() assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), ( ( [0.25, 0.05, 0.15, 0.25, 0.15, 0.1, 0.05], [1.554595, 2.463784, 3.000000, 3.518378], ), ( [0.05, 0.05, 0.1, 0.15, 0.15, 0.25, 0.25], [2.840000, 3.632973, 4.076216, 4.523243], ), ), ) def test_weighted_quantile_no_nan(weights, expected): # Expected values were calculated by running the reference implementation # proposed in https://aakinshin.net/posts/weighted-quantiles/ da = DataArray([1, 1.9, 2.2, 3, 3.7, 4.1, 5]) q = [0.2, 0.4, 0.6, 0.8] weights = DataArray(weights) expected = DataArray(expected, coords={"quantile": q}) result = da.weighted(weights).quantile(q) assert_allclose(expected, result) def test_weighted_quantile_zero_weights(): da = DataArray([0, 1, 2, 3]) weights = DataArray([1, 0, 1, 0]) q = 0.75 result = da.weighted(weights).quantile(q) expected = DataArray([0, 2]).quantile(0.75) assert_allclose(expected, result) def test_weighted_quantile_simple(): # Check that weighted quantiles return the same value as numpy quantiles da = DataArray([0, 1, 2, 3]) w = DataArray([1, 0, 1, 0]) w_eps = DataArray([1, 0.0001, 1, 0.0001]) q = 0.75 expected = DataArray(np.quantile([0, 2], q), coords={"quantile": q}) # 1.5 assert_equal(expected, da.weighted(w).quantile(q)) assert_allclose(expected, da.weighted(w_eps).quantile(q), rtol=0.001) @pytest.mark.parametrize("skipna", (True, False)) def test_weighted_quantile_nan(skipna): # Check skipna behavior da = DataArray([0, 1, 2, 3, np.nan]) w = DataArray([1, 0, 1, 0, 1]) q = [0.5, 0.75] result = da.weighted(w).quantile(q, skipna=skipna) if skipna: expected = DataArray(np.quantile([0, 2], q), coords={"quantile": q}) else: expected = DataArray(np.full(len(q), np.nan), coords={"quantile": q}) assert_allclose(expected, result) @pytest.mark.parametrize( "da", ( pytest.param([1, 1.9, 2.2, 3, 3.7, 4.1, 5], id="nonan"), pytest.param([1, 1.9, 2.2, 3, 3.7, 4.1, np.nan], id="singlenan"), pytest.param( [np.nan, np.nan, np.nan], id="allnan", marks=pytest.mark.filterwarnings( "ignore:All-NaN slice encountered:RuntimeWarning" ), ), ), ) @pytest.mark.parametrize("q", (0.5, (0.2, 0.8))) @pytest.mark.parametrize("skipna", (True, False)) @pytest.mark.parametrize("factor", [1, 3.14]) def test_weighted_quantile_equal_weights( da: list[float], q: float | tuple[float, ...], skipna: bool, factor: float ) -> None: # if all weights are equal (!= 0), should yield the same result as quantile data = DataArray(da) weights = xr.full_like(data, factor) expected = data.quantile(q, skipna=skipna) result = data.weighted(weights).quantile(q, skipna=skipna) assert_allclose(expected, result) @pytest.mark.skip(reason="`method` argument is not currently exposed") @pytest.mark.parametrize( "da", ( [1, 1.9, 2.2, 3, 3.7, 4.1, 5], [1, 1.9, 2.2, 3, 3.7, 4.1, np.nan], [np.nan, np.nan, np.nan], ), ) @pytest.mark.parametrize("q", (0.5, (0.2, 0.8))) @pytest.mark.parametrize("skipna", (True, False)) @pytest.mark.parametrize( "method", [ "linear", "interpolated_inverted_cdf", "hazen", "weibull", 
"median_unbiased", "normal_unbiased2", ], ) def test_weighted_quantile_equal_weights_all_methods(da, q, skipna, factor, method): # If all weights are equal (!= 0), should yield the same result as numpy quantile da = DataArray(da) weights = xr.full_like(da, 3.14) expected = da.quantile(q, skipna=skipna, method=method) result = da.weighted(weights).quantile(q, skipna=skipna, method=method) assert_allclose(expected, result) def test_weighted_quantile_bool(): # https://github.com/pydata/xarray/issues/4074 da = DataArray([1, 1]) weights = DataArray([True, True]) q = 0.5 expected = DataArray([1], coords={"quantile": [q]}).squeeze() result = da.weighted(weights).quantile(q) assert_equal(expected, result) @pytest.mark.parametrize("q", (-1, 1.1, (0.5, 1.1), ((0.2, 0.4), (0.6, 0.8)))) def test_weighted_quantile_with_invalid_q(q): da = DataArray([1, 1.9, 2.2, 3, 3.7, 4.1, 5]) q = np.asarray(q) weights = xr.ones_like(da) if q.ndim <= 1: with pytest.raises(ValueError, match="q values must be between 0 and 1"): da.weighted(weights).quantile(q) else: with pytest.raises(ValueError, match="q must be a scalar or 1d"): da.weighted(weights).quantile(q) @pytest.mark.parametrize( ("weights", "expected"), (([4, 6], 2.0), ([1, 0], np.nan), ([0, 0], np.nan)) ) @pytest.mark.parametrize("skipna", (True, False)) def test_weighted_mean_nan(weights, expected, skipna): da = DataArray([np.nan, 2]) weights = DataArray(weights) if skipna: expected = DataArray(expected) else: expected = DataArray(np.nan) result = da.weighted(weights).mean(skipna=skipna) assert_equal(expected, result) def test_weighted_mean_bool(): # https://github.com/pydata/xarray/issues/4074 da = DataArray([1, 1]) weights = DataArray([True, True]) expected = DataArray(1) result = da.weighted(weights).mean() assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([1, 2], 2 / 3), ([2, 0], 0), ([0, 0], 0), ([-1, 1], 0)), ) def test_weighted_sum_of_squares_no_nan(weights, expected): da = DataArray([1, 2]) weights = DataArray(weights) result = da.weighted(weights).sum_of_squares() expected = DataArray(expected) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([1, 2], 0), ([2, 0], 0), ([0, 0], 0), ([-1, 1], 0)), ) def test_weighted_sum_of_squares_nan(weights, expected): da = DataArray([np.nan, 2]) weights = DataArray(weights) result = da.weighted(weights).sum_of_squares() expected = DataArray(expected) assert_equal(expected, result) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize("da", ([1.0, 2], [1, np.nan])) @pytest.mark.parametrize("skipna", (True, False)) @pytest.mark.parametrize("factor", [1, 2, 3.14]) def test_weighted_var_equal_weights(da, skipna, factor): # if all weights are equal (!= 0), should yield the same result as var da = DataArray(da) # all weights as 1. 
weights = xr.full_like(da, factor) expected = da.var(skipna=skipna) result = da.weighted(weights).var(skipna=skipna) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([4, 6], 0.24), ([1, 0], 0.0), ([0, 0], np.nan)) ) def test_weighted_var_no_nan(weights, expected): da = DataArray([1, 2]) weights = DataArray(weights) expected = DataArray(expected) result = da.weighted(weights).var() assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([4, 6], 0), ([1, 0], np.nan), ([0, 0], np.nan)) ) def test_weighted_var_nan(weights, expected): da = DataArray([np.nan, 2]) weights = DataArray(weights) expected = DataArray(expected) result = da.weighted(weights).var() assert_equal(expected, result) def test_weighted_var_bool(): # https://github.com/pydata/xarray/issues/4074 da = DataArray([1, 1]) weights = DataArray([True, True]) expected = DataArray(0) result = da.weighted(weights).var() assert_equal(expected, result) @pytest.mark.filterwarnings("error") @pytest.mark.parametrize("da", ([1.0, 2], [1, np.nan])) @pytest.mark.parametrize("skipna", (True, False)) @pytest.mark.parametrize("factor", [1, 2, 3.14]) def test_weighted_std_equal_weights(da, skipna, factor): # if all weights are equal (!= 0), should yield the same result as std da = DataArray(da) # all weights as 1. weights = xr.full_like(da, factor) expected = da.std(skipna=skipna) result = da.weighted(weights).std(skipna=skipna) assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([4, 6], np.sqrt(0.24)), ([1, 0], 0.0), ([0, 0], np.nan)) ) def test_weighted_std_no_nan(weights, expected): da = DataArray([1, 2]) weights = DataArray(weights) expected = DataArray(expected) result = da.weighted(weights).std() assert_equal(expected, result) @pytest.mark.parametrize( ("weights", "expected"), (([4, 6], 0), ([1, 0], np.nan), ([0, 0], np.nan)) ) def test_weighted_std_nan(weights, expected): da = DataArray([np.nan, 2]) weights = DataArray(weights) expected = DataArray(expected) result = da.weighted(weights).std() assert_equal(expected, result) def test_weighted_std_bool(): # https://github.com/pydata/xarray/issues/4074 da = DataArray([1, 1]) weights = DataArray([True, True]) expected = DataArray(0) result = da.weighted(weights).std() assert_equal(expected, result) def expected_weighted(da, weights, dim, skipna, operation): """ Generate expected result using ``*`` and ``sum``. 
This is checked against the result of da.weighted which uses ``dot`` """ weighted_sum = (da * weights).sum(dim=dim, skipna=skipna) if operation == "sum": return weighted_sum masked_weights = weights.where(da.notnull()) sum_of_weights = masked_weights.sum(dim=dim, skipna=True) valid_weights = sum_of_weights != 0 sum_of_weights = sum_of_weights.where(valid_weights) if operation == "sum_of_weights": return sum_of_weights weighted_mean = weighted_sum / sum_of_weights if operation == "mean": return weighted_mean demeaned = da - weighted_mean sum_of_squares = ((demeaned**2) * weights).sum(dim=dim, skipna=skipna) if operation == "sum_of_squares": return sum_of_squares var = sum_of_squares / sum_of_weights if operation == "var": return var if operation == "std": return np.sqrt(var) def check_weighted_operations(data, weights, dim, skipna): # check sum of weights result = data.weighted(weights).sum_of_weights(dim) expected = expected_weighted(data, weights, dim, skipna, "sum_of_weights") assert_allclose(expected, result) # check weighted sum result = data.weighted(weights).sum(dim, skipna=skipna) expected = expected_weighted(data, weights, dim, skipna, "sum") assert_allclose(expected, result) # check weighted mean result = data.weighted(weights).mean(dim, skipna=skipna) expected = expected_weighted(data, weights, dim, skipna, "mean") assert_allclose(expected, result) # check weighted sum of squares result = data.weighted(weights).sum_of_squares(dim, skipna=skipna) expected = expected_weighted(data, weights, dim, skipna, "sum_of_squares") assert_allclose(expected, result) # check weighted var result = data.weighted(weights).var(dim, skipna=skipna) expected = expected_weighted(data, weights, dim, skipna, "var") assert_allclose(expected, result) # check weighted std result = data.weighted(weights).std(dim, skipna=skipna) expected = expected_weighted(data, weights, dim, skipna, "std") assert_allclose(expected, result) @pytest.mark.parametrize("dim", ("a", "b", "c", ("a", "b"), ("a", "b", "c"), None)) @pytest.mark.parametrize("add_nans", (True, False)) @pytest.mark.parametrize("skipna", (None, True, False)) @pytest.mark.filterwarnings("ignore:invalid value encountered in sqrt") def test_weighted_operations_3D(dim, add_nans, skipna): dims = ("a", "b", "c") coords = dict(a=[0, 1, 2, 3], b=[0, 1, 2, 3], c=[0, 1, 2, 3]) weights = DataArray(np.random.randn(4, 4, 4), dims=dims, coords=coords) data = np.random.randn(4, 4, 4) # add approximately 25 % NaNs (https://stackoverflow.com/a/32182680/3010700) if add_nans: c = int(data.size * 0.25) data.ravel()[np.random.choice(data.size, c, replace=False)] = np.nan data = DataArray(data, dims=dims, coords=coords) check_weighted_operations(data, weights, dim, skipna) data = data.to_dataset(name="data") check_weighted_operations(data, weights, dim, skipna) @pytest.mark.parametrize("dim", ("a", "b", "c", ("a", "b"), ("a", "b", "c"), None)) @pytest.mark.parametrize("q", (0.5, (0.1, 0.9), (0.2, 0.4, 0.6, 0.8))) @pytest.mark.parametrize("add_nans", (True, False)) @pytest.mark.parametrize("skipna", (None, True, False)) def test_weighted_quantile_3D(dim, q, add_nans, skipna): dims = ("a", "b", "c") coords = dict(a=[0, 1, 2], b=[0, 1, 2, 3], c=[0, 1, 2, 3, 4]) data = np.arange(60).reshape(3, 4, 5).astype(float) # add approximately 25 % NaNs (https://stackoverflow.com/a/32182680/3010700) if add_nans: c = int(data.size * 0.25) data.ravel()[np.random.choice(data.size, c, replace=False)] = np.nan da = DataArray(data, dims=dims, coords=coords) # Weights are all ones, because we will 
compare against DataArray.quantile (non-weighted) weights = xr.ones_like(da) result = da.weighted(weights).quantile(q, dim=dim, skipna=skipna) expected = da.quantile(q, dim=dim, skipna=skipna) assert_allclose(expected, result) ds = da.to_dataset(name="data") result2 = ds.weighted(weights).quantile(q, dim=dim, skipna=skipna) assert_allclose(expected, result2.data) @pytest.mark.parametrize( "coords_weights, coords_data, expected_value_at_weighted_quantile", [ ([0, 1, 2, 3], [1, 2, 3, 4], 2.5), # no weights for coord a == 4 ([0, 1, 2, 3], [2, 3, 4, 5], 1.8), # no weights for coord a == 4 or 5 ([2, 3, 4, 5], [0, 1, 2, 3], 3.8), # no weights for coord a == 0 or 1 ], ) def test_weighted_operations_nonequal_coords( coords_weights: Iterable[Any], coords_data: Iterable[Any], expected_value_at_weighted_quantile: float, ) -> None: """Check that weighted operations work with unequal coords. Parameters ---------- coords_weights : Iterable[Any] The coords for the weights. coords_data : Iterable[Any] The coords for the data. expected_value_at_weighted_quantile : float The expected value for the quantile of the weighted data. """ da_weights = DataArray( [0.5, 1.0, 1.0, 2.0], dims=("a",), coords=dict(a=coords_weights) ) da_data = DataArray([1, 2, 3, 4], dims=("a",), coords=dict(a=coords_data)) check_weighted_operations(da_data, da_weights, dim="a", skipna=None) quantile = 0.5 da_actual = da_data.weighted(da_weights).quantile(quantile, dim="a") da_expected = DataArray( [expected_value_at_weighted_quantile], coords={"quantile": [quantile]} ).squeeze() assert_allclose(da_actual, da_expected) ds_data = da_data.to_dataset(name="data") check_weighted_operations(ds_data, da_weights, dim="a", skipna=None) ds_actual = ds_data.weighted(da_weights).quantile(quantile, dim="a") assert_allclose(ds_actual, da_expected.to_dataset(name="data")) @pytest.mark.parametrize("shape_data", ((4,), (4, 4), (4, 4, 4))) @pytest.mark.parametrize("shape_weights", ((4,), (4, 4), (4, 4, 4))) @pytest.mark.parametrize("add_nans", (True, False)) @pytest.mark.parametrize("skipna", (None, True, False)) @pytest.mark.filterwarnings("ignore:invalid value encountered in sqrt") def test_weighted_operations_different_shapes( shape_data, shape_weights, add_nans, skipna ): weights = DataArray(np.random.randn(*shape_weights)) data = np.random.randn(*shape_data) # add approximately 25 % NaNs if add_nans: c = int(data.size * 0.25) data.ravel()[np.random.choice(data.size, c, replace=False)] = np.nan data = DataArray(data) check_weighted_operations(data, weights, "dim_0", skipna) check_weighted_operations(data, weights, None, skipna) data = data.to_dataset(name="data") check_weighted_operations(data, weights, "dim_0", skipna) check_weighted_operations(data, weights, None, skipna) @pytest.mark.parametrize( "operation", ("sum_of_weights", "sum", "mean", "sum_of_squares", "var", "std", "quantile"), ) @pytest.mark.parametrize("as_dataset", (True, False)) @pytest.mark.parametrize("keep_attrs", (True, False, None)) def test_weighted_operations_keep_attr(operation, as_dataset, keep_attrs): weights = DataArray(np.random.randn(2, 2), attrs=dict(attr="weights")) data = DataArray(np.random.randn(2, 2)) if as_dataset: data = data.to_dataset(name="data") data.attrs = dict(attr="weights") kwargs = {"keep_attrs": keep_attrs} if operation == "quantile": kwargs["q"] = 0.5 result = getattr(data.weighted(weights), operation)(**kwargs) if operation == "sum_of_weights": assert result.attrs == (weights.attrs if keep_attrs else {}) assert result.attrs == (weights.attrs if 
keep_attrs else {}) else: assert result.attrs == (weights.attrs if keep_attrs else {}) assert result.attrs == (data.attrs if keep_attrs else {}) @pytest.mark.parametrize( "operation", ("sum_of_weights", "sum", "mean", "sum_of_squares", "var", "std", "quantile"), ) def test_weighted_operations_keep_attr_da_in_ds(operation): # GH #3595 weights = DataArray(np.random.randn(2, 2)) data = DataArray(np.random.randn(2, 2), attrs=dict(attr="data")) data = data.to_dataset(name="a") kwargs = {"keep_attrs": True} if operation == "quantile": kwargs["q"] = 0.5 result = getattr(data.weighted(weights), operation)(**kwargs) assert data.a.attrs == result.a.attrs @pytest.mark.parametrize("operation", ("sum_of_weights", "sum", "mean", "quantile")) @pytest.mark.parametrize("as_dataset", (True, False)) def test_weighted_bad_dim(operation, as_dataset): data = DataArray(np.random.randn(2, 2)) weights = xr.ones_like(data) if as_dataset: data = data.to_dataset(name="data") kwargs = {"dim": "bad_dim"} if operation == "quantile": kwargs["q"] = 0.5 with pytest.raises( ValueError, match=( f"Dimensions \\('bad_dim',\\) not found in {data.__class__.__name__}Weighted " # the order of (dim_0, dim_1) varies "dimensions \\(('dim_0', 'dim_1'|'dim_1', 'dim_0')\\)" ), ): getattr(data.weighted(weights), operation)(**kwargs) xarray-2025.09.0/xarray/tutorial.py000066400000000000000000000311071505620616400170710ustar00rootroot00000000000000""" Useful for: * users learning xarray * building tutorials in the documentation. """ from __future__ import annotations import os import pathlib import sys from typing import TYPE_CHECKING import numpy as np from xarray.backends.api import open_dataset as _open_dataset from xarray.backends.api import open_datatree as _open_datatree from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree if TYPE_CHECKING: from xarray.backends.api import T_Engine _default_cache_dir_name = "xarray_tutorial_data" base_url = "https://github.com/pydata/xarray-data" version = "master" def _construct_cache_dir(path): import pooch if isinstance(path, os.PathLike): path = os.fspath(path) elif path is None: path = pooch.os_cache(_default_cache_dir_name) return path external_urls: dict = {} file_formats = { "air_temperature": 3, "air_temperature_gradient": 4, "ASE_ice_velocity": 4, "basin_mask": 4, "ersstv5": 4, "rasm": 3, "ROMS_example": 4, "tiny": 3, "eraint_uvz": 3, } def _check_netcdf_engine_installed(name): version = file_formats.get(name) if version == 3: try: import scipy # noqa: F401 except ImportError: try: import netCDF4 except ImportError as err: raise ImportError( f"opening tutorial dataset {name} requires either scipy or " "netCDF4 to be installed." ) from err if version == 4: try: import h5netcdf # noqa: F401 except ImportError: try: import netCDF4 # noqa: F401 except ImportError as err: raise ImportError( f"opening tutorial dataset {name} requires either h5netcdf " "or netCDF4 to be installed." ) from err # idea borrowed from Seaborn def open_dataset( name: str, cache: bool = True, cache_dir: str | os.PathLike | None = None, *, engine: T_Engine = None, **kws, ) -> Dataset: """ Open a dataset from the online repository (requires internet). If a local copy is found then always use that to avoid network traffic. 
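    A minimal illustrative call (assuming the optional ``pooch`` dependency is
    installed and an internet connection is available; ``"air_temperature"`` is
    one of the dataset names listed below)::

        import xarray as xr

        ds = xr.tutorial.open_dataset("air_temperature")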
Available datasets: * ``"air_temperature"``: NCEP reanalysis subset * ``"air_temperature_gradient"``: NCEP reanalysis subset with approximate x,y gradients * ``"basin_mask"``: Dataset with ocean basins marked using integers * ``"ASE_ice_velocity"``: MEaSUREs InSAR-Based Ice Velocity of the Amundsen Sea Embayment, Antarctica, Version 1 * ``"rasm"``: Output of the Regional Arctic System Model (RASM) * ``"ROMS_example"``: Regional Ocean Model System (ROMS) output * ``"tiny"``: small synthetic dataset with a 1D data variable * ``"era5-2mt-2019-03-uk.grib"``: ERA5 temperature data over the UK * ``"eraint_uvz"``: data from ERA-Interim reanalysis, monthly averages of upper level data * ``"ersstv5"``: NOAA's Extended Reconstructed Sea Surface Temperature monthly averages Parameters ---------- name : str Name of the file containing the dataset. e.g. 'air_temperature' cache_dir : path-like, optional The directory in which to search for and write cached data. cache : bool, optional If True, then cache data locally for use on subsequent calls **kws : dict, optional Passed to xarray.open_dataset See Also -------- tutorial.load_dataset open_dataset load_dataset """ try: import pooch except ImportError as e: raise ImportError( "tutorial.open_dataset depends on pooch to download and manage datasets." " To proceed please install pooch." ) from e logger = pooch.get_logger() logger.setLevel("WARNING") cache_dir = _construct_cache_dir(cache_dir) if name in external_urls: url = external_urls[name] else: path = pathlib.Path(name) if not path.suffix: # process the name default_extension = ".nc" if engine is None: _check_netcdf_engine_installed(name) path = path.with_suffix(default_extension) elif path.suffix == ".grib": if engine is None: engine = "cfgrib" try: import cfgrib # noqa: F401 except ImportError as e: raise ImportError( "Reading this tutorial dataset requires the cfgrib package." ) from e url = f"{base_url}/raw/{version}/{path.name}" headers = {"User-Agent": f"xarray {sys.modules['xarray'].__version__}"} downloader = pooch.HTTPDownloader(headers=headers) # retrieve the file filepath = pooch.retrieve( url=url, known_hash=None, path=cache_dir, downloader=downloader ) ds = _open_dataset(filepath, engine=engine, **kws) if not cache: ds = ds.load() pathlib.Path(filepath).unlink() return ds def load_dataset(*args, **kwargs) -> Dataset: """ Open, load into memory, and close a dataset from the online repository (requires internet). If a local copy is found then always use that to avoid network traffic. Available datasets: * ``"air_temperature"``: NCEP reanalysis subset * ``"air_temperature_gradient"``: NCEP reanalysis subset with approximate x,y gradients * ``"basin_mask"``: Dataset with ocean basins marked using integers * ``"rasm"``: Output of the Regional Arctic System Model (RASM) * ``"ROMS_example"``: Regional Ocean Model System (ROMS) output * ``"tiny"``: small synthetic dataset with a 1D data variable * ``"era5-2mt-2019-03-uk.grib"``: ERA5 temperature data over the UK * ``"eraint_uvz"``: data from ERA-Interim reanalysis, monthly averages of upper level data * ``"ersstv5"``: NOAA's Extended Reconstructed Sea Surface Temperature monthly averages Parameters ---------- name : str Name of the file containing the dataset. e.g. 'air_temperature' cache_dir : path-like, optional The directory in which to search for and write cached data. 
cache : bool, optional If True, then cache data locally for use on subsequent calls **kws : dict, optional Passed to xarray.open_dataset See Also -------- tutorial.open_dataset open_dataset load_dataset """ with open_dataset(*args, **kwargs) as ds: return ds.load() def scatter_example_dataset(*, seed: int | None = None) -> Dataset: """ Create an example dataset. Parameters ---------- seed : int, optional Seed for the random number generation. """ rng = np.random.default_rng(seed) A = DataArray( np.zeros([3, 11, 4, 4]), dims=["x", "y", "z", "w"], coords={ "x": np.arange(3), "y": np.linspace(0, 1, 11), "z": np.arange(4), "w": 0.1 * rng.standard_normal(4), }, ) B = 0.1 * A.x**2 + A.y**2.5 + 0.1 * A.z * A.w A = -0.1 * A.x + A.y / (5 + A.z) + A.w ds = Dataset({"A": A, "B": B}) ds["w"] = ["one", "two", "three", "five"] ds.x.attrs["units"] = "xunits" ds.y.attrs["units"] = "yunits" ds.z.attrs["units"] = "zunits" ds.w.attrs["units"] = "wunits" ds.A.attrs["units"] = "Aunits" ds.B.attrs["units"] = "Bunits" return ds def open_datatree( name: str, cache: bool = True, cache_dir: str | os.PathLike | None = None, *, engine: T_Engine = None, **kws, ) -> DataTree: """ Open a dataset as a `DataTree` from the online repository (requires internet). If a local copy is found then always use that to avoid network traffic. Available datasets: * ``"imerghh_730"``: GPM IMERG Final Precipitation L3 Half Hourly 0.1 degree x 0.1 degree V07 from 2021-08-29T07:30:00.000Z * ``"imerghh_830"``: GPM IMERG Final Precipitation L3 Half Hourly 0.1 degree x 0.1 degree V07 from 2021-08-29T08:30:00.000Z * ``"air_temperature"``: NCEP reanalysis subset * ``"air_temperature_gradient"``: NCEP reanalysis subset with approximate x,y gradients * ``"basin_mask"``: Dataset with ocean basins marked using integers * ``"ASE_ice_velocity"``: MEaSUREs InSAR-Based Ice Velocity of the Amundsen Sea Embayment, Antarctica, Version 1 * ``"rasm"``: Output of the Regional Arctic System Model (RASM) * ``"ROMS_example"``: Regional Ocean Model System (ROMS) output * ``"tiny"``: small synthetic dataset with a 1D data variable * ``"era5-2mt-2019-03-uk.grib"``: ERA5 temperature data over the UK * ``"eraint_uvz"``: data from ERA-Interim reanalysis, monthly averages of upper level data * ``"ersstv5"``: NOAA's Extended Reconstructed Sea Surface Temperature monthly averages Parameters ---------- name : str Name of the file containing the dataset. e.g. 'air_temperature' cache_dir : path-like, optional The directory in which to search for and write cached data. cache : bool, optional If True, then cache data locally for use on subsequent calls **kws : dict, optional Passed to xarray.open_dataset See Also -------- tutorial.load_datatree open_datatree """ try: import pooch except ImportError as e: raise ImportError( "tutorial.open_dataset depends on pooch to download and manage datasets." " To proceed please install pooch." ) from e logger = pooch.get_logger() logger.setLevel("WARNING") cache_dir = _construct_cache_dir(cache_dir) if name in external_urls: url = external_urls[name] else: path = pathlib.Path(name) if not path.suffix: # process the name default_extension = ".nc" if engine is None: _check_netcdf_engine_installed(name) path = path.with_suffix(default_extension) elif path.suffix == ".grib": if engine is None: engine = "cfgrib" try: import cfgrib # noqa: F401 except ImportError as e: raise ImportError( "Reading this tutorial dataset requires the cfgrib package." 
) from e url = f"{base_url}/raw/{version}/{path.name}" headers = {"User-Agent": f"xarray {sys.modules['xarray'].__version__}"} downloader = pooch.HTTPDownloader(headers=headers) # retrieve the file filepath = pooch.retrieve( url=url, known_hash=None, path=cache_dir, downloader=downloader ) ds = _open_datatree(filepath, engine=engine, **kws) if not cache: ds = ds.load() pathlib.Path(filepath).unlink() return ds def load_datatree(*args, **kwargs) -> DataTree: """ Open, load into memory (as a `DataTree`), and close a dataset from the online repository (requires internet). If a local copy is found then always use that to avoid network traffic. Available datasets: * ``"imerghh_730"``: GPM IMERG Final Precipitation L3 Half Hourly 0.1 degree x 0.1 degree V07 from 2021-08-29T07:30:00.000Z * ``"imerghh_830"``: GPM IMERG Final Precipitation L3 Half Hourly 0.1 degree x 0.1 degree V07 from 2021-08-29T08:30:00.000Z * ``"air_temperature"``: NCEP reanalysis subset * ``"air_temperature_gradient"``: NCEP reanalysis subset with approximate x,y gradients * ``"basin_mask"``: Dataset with ocean basins marked using integers * ``"ASE_ice_velocity"``: MEaSUREs InSAR-Based Ice Velocity of the Amundsen Sea Embayment, Antarctica, Version 1 * ``"rasm"``: Output of the Regional Arctic System Model (RASM) * ``"ROMS_example"``: Regional Ocean Model System (ROMS) output * ``"tiny"``: small synthetic dataset with a 1D data variable * ``"era5-2mt-2019-03-uk.grib"``: ERA5 temperature data over the UK * ``"eraint_uvz"``: data from ERA-Interim reanalysis, monthly averages of upper level data * ``"ersstv5"``: NOAA's Extended Reconstructed Sea Surface Temperature monthly averages Parameters ---------- name : str Name of the file containing the dataset. e.g. 'air_temperature' cache_dir : path-like, optional The directory in which to search for and write cached data. cache : bool, optional If True, then cache data locally for use on subsequent calls **kws : dict, optional Passed to xarray.open_datatree See Also -------- tutorial.open_datatree open_datatree """ with open_datatree(*args, **kwargs) as ds: return ds.load() xarray-2025.09.0/xarray/typing.py000066400000000000000000000010521505620616400165340ustar00rootroot00000000000000""" Public typing utilities for use by external libraries. 
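For example, downstream code can import these names purely for static typing.
A minimal sketch (the ``smooth`` function is a hypothetical user function, not
part of xarray)::

    import xarray as xr
    from xarray.typing import DataArrayRolling

    def smooth(roller: DataArrayRolling) -> xr.DataArray:
        return roller.mean()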
""" from xarray.computation.rolling import ( DataArrayCoarsen, DataArrayRolling, DatasetRolling, ) from xarray.computation.weighted import DataArrayWeighted, DatasetWeighted, Weighted from xarray.core.groupby import DataArrayGroupBy from xarray.core.resample import DataArrayResample __all__ = [ "DataArrayCoarsen", "DataArrayGroupBy", "DataArrayResample", "DataArrayRolling", "DataArrayWeighted", "DatasetRolling", "DatasetWeighted", "Weighted", ] xarray-2025.09.0/xarray/ufuncs.py000066400000000000000000000212551505620616400165340ustar00rootroot00000000000000"""xarray specific universal functions.""" import textwrap from abc import ABC, abstractmethod import numpy as np import xarray as xr from xarray.core.groupby import GroupBy def _walk_array_namespaces(obj, namespaces): if isinstance(obj, xr.DataTree): # TODO: DataTree doesn't actually support ufuncs yet for node in obj.subtree: _walk_array_namespaces(node.dataset, namespaces) elif isinstance(obj, xr.Dataset): for name in obj.data_vars: _walk_array_namespaces(obj[name], namespaces) elif isinstance(obj, GroupBy): _walk_array_namespaces(next(iter(obj))[1], namespaces) elif isinstance(obj, xr.DataArray | xr.Variable): _walk_array_namespaces(obj.data, namespaces) else: namespace = getattr(obj, "__array_namespace__", None) if namespace is not None: namespaces.add(namespace()) return namespaces def get_array_namespace(*args): xps = set() for arg in args: _walk_array_namespaces(arg, xps) xps.discard(np) if len(xps) > 1: names = [module.__name__ for module in xps] raise ValueError(f"Mixed array types {names} are not supported.") return next(iter(xps)) if xps else np class _ufunc_wrapper(ABC): def __init__(self, name): self.__name__ = name if hasattr(np, name): self._create_doc() @abstractmethod def __call__(self, *args, **kwargs): raise NotImplementedError def _create_doc(self): doc = getattr(np, self.__name__).__doc__ doc = _remove_unused_reference_labels( _skip_signature(_dedent(doc), self.__name__) ) self.__doc__ = ( f"xarray specific variant of :py:func:`numpy.{self.__name__}`. " "Handles xarray objects by dispatching to the appropriate " "function for the underlying array type.\n\n" f"Documentation from numpy:\n\n{doc}" ) class _unary_ufunc(_ufunc_wrapper): """Wrapper for dispatching unary ufuncs.""" def __call__(self, x, /, **kwargs): xp = get_array_namespace(x) func = getattr(xp, self.__name__) return xr.apply_ufunc(func, x, dask="allowed", **kwargs) class _binary_ufunc(_ufunc_wrapper): """Wrapper for dispatching binary ufuncs.""" def __call__(self, x, y, /, **kwargs): xp = get_array_namespace(x, y) func = getattr(xp, self.__name__) return xr.apply_ufunc(func, x, y, dask="allowed", **kwargs) def _skip_signature(doc, name): if not isinstance(doc, str): return doc # numpy creates some functions as aliases and copies the docstring exactly, # so check the actual name to handle this case np_name = getattr(np, name).__name__ if doc.startswith(np_name): signature_end = doc.find("\n\n") doc = doc[signature_end + 2 :] return doc def _remove_unused_reference_labels(doc): if not isinstance(doc, str): return doc max_references = 5 for num in range(max_references): label = f".. [{num}]" reference = f"[{num}]_" index = f"{num}. 
" if label not in doc or reference in doc: continue doc = doc.replace(label, index) return doc def _dedent(doc): if not isinstance(doc, str): return doc return textwrap.dedent(doc) # These can be auto-generated from the public numpy ufuncs: # {name for name in dir(np) if isinstance(getattr(np, name), np.ufunc)} # Generalized ufuncs that use core dimensions or produce multiple output # arrays are not currently supported, and left commented out below. # UNARY abs = _unary_ufunc("abs") absolute = _unary_ufunc("absolute") acos = _unary_ufunc("acos") acosh = _unary_ufunc("acosh") arccos = _unary_ufunc("arccos") arccosh = _unary_ufunc("arccosh") arcsin = _unary_ufunc("arcsin") arcsinh = _unary_ufunc("arcsinh") arctan = _unary_ufunc("arctan") arctanh = _unary_ufunc("arctanh") asin = _unary_ufunc("asin") asinh = _unary_ufunc("asinh") atan = _unary_ufunc("atan") atanh = _unary_ufunc("atanh") bitwise_count = _unary_ufunc("bitwise_count") bitwise_invert = _unary_ufunc("bitwise_invert") bitwise_not = _unary_ufunc("bitwise_not") cbrt = _unary_ufunc("cbrt") ceil = _unary_ufunc("ceil") conj = _unary_ufunc("conj") conjugate = _unary_ufunc("conjugate") cos = _unary_ufunc("cos") cosh = _unary_ufunc("cosh") deg2rad = _unary_ufunc("deg2rad") degrees = _unary_ufunc("degrees") exp = _unary_ufunc("exp") exp2 = _unary_ufunc("exp2") expm1 = _unary_ufunc("expm1") fabs = _unary_ufunc("fabs") floor = _unary_ufunc("floor") # frexp = _unary_ufunc("frexp") invert = _unary_ufunc("invert") isfinite = _unary_ufunc("isfinite") isinf = _unary_ufunc("isinf") isnan = _unary_ufunc("isnan") isnat = _unary_ufunc("isnat") log = _unary_ufunc("log") log10 = _unary_ufunc("log10") log1p = _unary_ufunc("log1p") log2 = _unary_ufunc("log2") logical_not = _unary_ufunc("logical_not") # modf = _unary_ufunc("modf") negative = _unary_ufunc("negative") positive = _unary_ufunc("positive") rad2deg = _unary_ufunc("rad2deg") radians = _unary_ufunc("radians") reciprocal = _unary_ufunc("reciprocal") rint = _unary_ufunc("rint") sign = _unary_ufunc("sign") signbit = _unary_ufunc("signbit") sin = _unary_ufunc("sin") sinh = _unary_ufunc("sinh") spacing = _unary_ufunc("spacing") sqrt = _unary_ufunc("sqrt") square = _unary_ufunc("square") tan = _unary_ufunc("tan") tanh = _unary_ufunc("tanh") trunc = _unary_ufunc("trunc") # BINARY add = _binary_ufunc("add") arctan2 = _binary_ufunc("arctan2") atan2 = _binary_ufunc("atan2") bitwise_and = _binary_ufunc("bitwise_and") bitwise_left_shift = _binary_ufunc("bitwise_left_shift") bitwise_or = _binary_ufunc("bitwise_or") bitwise_right_shift = _binary_ufunc("bitwise_right_shift") bitwise_xor = _binary_ufunc("bitwise_xor") copysign = _binary_ufunc("copysign") divide = _binary_ufunc("divide") # divmod = _binary_ufunc("divmod") equal = _binary_ufunc("equal") float_power = _binary_ufunc("float_power") floor_divide = _binary_ufunc("floor_divide") fmax = _binary_ufunc("fmax") fmin = _binary_ufunc("fmin") fmod = _binary_ufunc("fmod") gcd = _binary_ufunc("gcd") greater = _binary_ufunc("greater") greater_equal = _binary_ufunc("greater_equal") heaviside = _binary_ufunc("heaviside") hypot = _binary_ufunc("hypot") lcm = _binary_ufunc("lcm") ldexp = _binary_ufunc("ldexp") left_shift = _binary_ufunc("left_shift") less = _binary_ufunc("less") less_equal = _binary_ufunc("less_equal") logaddexp = _binary_ufunc("logaddexp") logaddexp2 = _binary_ufunc("logaddexp2") logical_and = _binary_ufunc("logical_and") logical_or = _binary_ufunc("logical_or") logical_xor = _binary_ufunc("logical_xor") # matmul = _binary_ufunc("matmul") maximum = 
_binary_ufunc("maximum") minimum = _binary_ufunc("minimum") mod = _binary_ufunc("mod") multiply = _binary_ufunc("multiply") nextafter = _binary_ufunc("nextafter") not_equal = _binary_ufunc("not_equal") pow = _binary_ufunc("pow") power = _binary_ufunc("power") remainder = _binary_ufunc("remainder") right_shift = _binary_ufunc("right_shift") subtract = _binary_ufunc("subtract") true_divide = _binary_ufunc("true_divide") # vecdot = _binary_ufunc("vecdot") # elementwise non-ufunc angle = _unary_ufunc("angle") isreal = _unary_ufunc("isreal") iscomplex = _unary_ufunc("iscomplex") __all__ = [ "abs", "absolute", "acos", "acosh", "add", "angle", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", "arctanh", "asin", "asinh", "atan", "atan2", "atanh", "bitwise_and", "bitwise_count", "bitwise_invert", "bitwise_left_shift", "bitwise_not", "bitwise_or", "bitwise_right_shift", "bitwise_xor", "cbrt", "ceil", "conj", "conjugate", "copysign", "cos", "cosh", "deg2rad", "degrees", "divide", "equal", "exp", "exp2", "expm1", "fabs", "float_power", "floor", "floor_divide", "fmax", "fmin", "fmod", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", "iscomplex", "isfinite", "isinf", "isnan", "isnat", "isreal", "lcm", "ldexp", "left_shift", "less", "less_equal", "log", "log1p", "log2", "log10", "logaddexp", "logaddexp2", "logical_and", "logical_not", "logical_or", "logical_xor", "maximum", "minimum", "mod", "multiply", "negative", "nextafter", "not_equal", "positive", "pow", "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", "tanh", "true_divide", "trunc", ] xarray-2025.09.0/xarray/util/000077500000000000000000000000001505620616400156275ustar00rootroot00000000000000xarray-2025.09.0/xarray/util/__init__.py000066400000000000000000000000001505620616400177260ustar00rootroot00000000000000xarray-2025.09.0/xarray/util/deprecation_helpers.py000066400000000000000000000174561505620616400222350ustar00rootroot00000000000000# For reference, here is a copy of the scikit-learn copyright notice: # BSD 3-Clause License # Copyright (c) 2007-2021 The scikit-learn developers. # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE import inspect import warnings from collections.abc import Callable from functools import wraps from typing import Any, Self, TypeVar from xarray.core.options import OPTIONS from xarray.core.utils import emit_user_level_warning T = TypeVar("T", bound=Callable) POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY POSITIONAL_ONLY = inspect.Parameter.POSITIONAL_ONLY EMPTY = inspect.Parameter.empty def _deprecate_positional_args(version) -> Callable[[T], T]: """Decorator for methods that issues warnings for positional arguments Using the keyword-only argument syntax in pep 3102, arguments after the ``*`` will issue a warning when passed as a positional argument. Parameters ---------- version : str version of the library when the positional arguments were deprecated Examples -------- Deprecate passing `b` as positional argument: def func(a, b=1): pass @_deprecate_positional_args("v0.1.0") def func(a, *, b=2): pass func(1, 2) Notes ----- This function is adapted from scikit-learn under the terms of its license. See licences/SCIKIT_LEARN_LICENSE """ def _decorator(func): signature = inspect.signature(func) pos_or_kw_args = [] kwonly_args = [] for name, param in signature.parameters.items(): if param.kind in (POSITIONAL_OR_KEYWORD, POSITIONAL_ONLY): pos_or_kw_args.append(name) elif param.kind == KEYWORD_ONLY: kwonly_args.append(name) if param.default is EMPTY: # IMHO `def f(a, *, b):` does not make sense -> disallow it # if removing this constraint -> need to add these to kwargs as well raise TypeError("Keyword-only param without default disallowed.") @wraps(func) def inner(*args, **kwargs): name = func.__name__ n_extra_args = len(args) - len(pos_or_kw_args) if n_extra_args > 0: extra_args = ", ".join(kwonly_args[:n_extra_args]) warnings.warn( f"Passing '{extra_args}' as positional argument(s) to {name} " f"was deprecated in version {version} and will raise an error two " "releases later. Please pass them as keyword arguments." "", FutureWarning, stacklevel=2, ) zip_args = zip( kwonly_args[:n_extra_args], args[-n_extra_args:], strict=True ) kwargs.update(zip_args) return func(*args[:-n_extra_args], **kwargs) return func(*args, **kwargs) return inner return _decorator def deprecate_dims(func: T, old_name="dims") -> T: """ For functions that previously took `dims` as a kwarg, and have now transitioned to `dim`. This decorator will issue a warning if `dims` is passed while forwarding it to `dim`. """ @wraps(func) def wrapper(*args, **kwargs): if old_name in kwargs: emit_user_level_warning( f"The `{old_name}` argument has been renamed to `dim`, and will be removed " "in the future. This renaming is taking place throughout xarray over the " "next few releases.", # Upgrade to `DeprecationWarning` in the future, when the renaming is complete. 
PendingDeprecationWarning, ) kwargs["dim"] = kwargs.pop(old_name) return func(*args, **kwargs) # We're quite confident we're just returning `T` from this function, so it's fine to ignore typing # within the function. return wrapper # type: ignore[return-value] class CombineKwargDefault: """Object that handles deprecation cycle for kwarg default values. Similar to ReprObject """ _old: str _new: str | None _name: str def __init__(self, *, name: str, old: str, new: str | None): self._name = name self._old = old self._new = new def __repr__(self) -> str: return str(self._value) def __eq__(self, other: Self | Any) -> bool: return ( self._value == other._value if isinstance(other, type(self)) else self._value == other ) @property def _value(self) -> str | None: return self._new if OPTIONS["use_new_combine_kwarg_defaults"] else self._old def __hash__(self) -> int: return hash(self._value) def __dask_tokenize__(self) -> object: from dask.base import normalize_token return normalize_token((type(self), self._value)) def warning_message(self, message: str, recommend_set_options: bool = True) -> str: if recommend_set_options: recommendation = ( " To opt in to new defaults and get rid of these warnings now " "use `set_options(use_new_combine_kwarg_defaults=True) or " f"set {self._name} explicitly." ) else: recommendation = ( f" The recommendation is to set {self._name} explicitly for this case." ) return ( f"In a future version of xarray the default value for {self._name} will " f"change from {self._name}={self._old!r} to {self._name}={self._new!r}. " + message + recommendation ) def error_message(self) -> str: return ( f" Error might be related to new default (`{self._name}={self._new!r}`). " f"Previously the default was `{self._name}={self._old!r}`. " f"The recommendation is to set {self._name!r} explicitly for this case." ) _DATA_VARS_DEFAULT = CombineKwargDefault(name="data_vars", old="all", new=None) _COORDS_DEFAULT = CombineKwargDefault(name="coords", old="different", new="minimal") _COMPAT_CONCAT_DEFAULT = CombineKwargDefault( name="compat", old="equals", new="override" ) _COMPAT_DEFAULT = CombineKwargDefault(name="compat", old="no_conflicts", new="override") _JOIN_DEFAULT = CombineKwargDefault(name="join", old="outer", new="exact") xarray-2025.09.0/xarray/util/generate_aggregations.py000066400000000000000000000544161505620616400225370ustar00rootroot00000000000000"""Generate module and stub file for arithmetic operators of various xarray classes. For internal xarray development use only. Usage: python xarray/util/generate_aggregations.py pytest --doctest-modules xarray/{core,namedarray}/_aggregations.py --accept || true pytest --doctest-modules xarray/{core,namedarray}/_aggregations.py This requires [pytest-accept](https://github.com/max-sixty/pytest-accept). The second run of pytest is deliberate, since the first will return an error while replacing the doctests. """ import textwrap from dataclasses import dataclass, field from typing import NamedTuple MODULE_PREAMBLE = '''\ """Mixin classes with reduction operations.""" # This file was generated using xarray.util.generate_aggregations. Do not edit manually. 
from __future__ import annotations from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, Any from xarray.core import duck_array_ops from xarray.core.options import OPTIONS from xarray.core.types import Dims, Self from xarray.core.utils import contains_only_chunked_or_numpy, module_available if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset flox_available = module_available("flox") ''' NAMED_ARRAY_MODULE_PREAMBLE = '''\ """Mixin classes with reduction operations.""" # This file was generated using xarray.util.generate_aggregations. Do not edit manually. from __future__ import annotations from collections.abc import Callable, Sequence from typing import Any from xarray.core import duck_array_ops from xarray.core.types import Dims, Self ''' AGGREGATIONS_PREAMBLE = """ class {obj}{cls}Aggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError()""" NAMED_ARRAY_AGGREGATIONS_PREAMBLE = """ class {obj}{cls}Aggregations: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError()""" GROUPBY_PREAMBLE = """ class {obj}{cls}Aggregations: _obj: {obj} def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> {obj}: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> {obj}: raise NotImplementedError()""" RESAMPLE_PREAMBLE = """ class {obj}{cls}Aggregations: _obj: {obj} def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> {obj}: raise NotImplementedError() def _flox_reduce( self, dim: Dims, **kwargs: Any, ) -> {obj}: raise NotImplementedError()""" TEMPLATE_REDUCTION_SIGNATURE = ''' def {method}( self, dim: Dims = None,{kw_only}{extra_kwargs}{keep_attrs} **kwargs: Any, ) -> Self: """ Reduce this {obj}'s data by applying ``{method}`` along some dimension(s). Parameters ----------''' TEMPLATE_REDUCTION_SIGNATURE_GROUPBY = ''' def {method}( self, dim: Dims = None, *,{extra_kwargs} keep_attrs: bool | None = None, **kwargs: Any, ) -> {obj}: """ Reduce this {obj}'s data by applying ``{method}`` along some dimension(s). Parameters ----------''' TEMPLATE_RETURNS = """ Returns ------- reduced : {obj} New {obj} with ``{method}`` applied to its data and the indicated dimension(s) removed""" TEMPLATE_SEE_ALSO = """ See Also -------- {see_also_methods} :ref:`{docref}` User guide on {docref_description}.""" TEMPLATE_NOTES = """ Notes ----- {notes}""" _DIM_DOCSTRING = """dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``{method}``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions.""" _DIM_DOCSTRING_GROUPBY = """dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``{method}``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If None, will reduce over the {cls} dimensions. 
If "...", will reduce over all dimensions.""" _SKIPNA_DOCSTRING = """skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64).""" _MINCOUNT_DOCSTRING = """min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array.""" _DDOF_DOCSTRING = """ddof : int, default: 0 β€œDelta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements.""" _KEEP_ATTRS_DOCSTRING = """keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes.""" _KWARGS_DOCSTRING = """**kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``{method}`` on this object's data. These could include dask-specific kwargs like ``split_every``.""" _NUMERIC_ONLY_NOTES = "Non-numeric variables will be removed prior to reducing." _FLOX_NOTES_TEMPLATE = """Use the ``flox`` package to significantly speed up {kind} computations, especially with dask arrays. Xarray will use flox by default if installed. Pass flox-specific keyword arguments in ``**kwargs``. See the `flox documentation `_ for more.""" _FLOX_GROUPBY_NOTES = _FLOX_NOTES_TEMPLATE.format(kind="groupby") _FLOX_RESAMPLE_NOTES = _FLOX_NOTES_TEMPLATE.format(kind="resampling") _CUM_NOTES = """Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future.""" class ExtraKwarg(NamedTuple): docs: str kwarg: str call: str example: str skipna = ExtraKwarg( docs=_SKIPNA_DOCSTRING, kwarg="skipna: bool | None = None,", call="skipna=skipna,", example="""\n Use ``skipna`` to control whether NaNs are ignored. >>> {calculation}(skipna=False)""", ) min_count = ExtraKwarg( docs=_MINCOUNT_DOCSTRING, kwarg="min_count: int | None = None,", call="min_count=min_count,", example="""\n Specify ``min_count`` for finer control over when NaNs are ignored. >>> {calculation}(skipna=True, min_count=2)""", ) ddof = ExtraKwarg( docs=_DDOF_DOCSTRING, kwarg="ddof: int = 0,", call="ddof=ddof,", example="""\n Specify ``ddof=1`` for an unbiased estimate. >>> {calculation}(skipna=True, ddof=1)""", ) @dataclass class DataStructure: name: str create_example: str example_var_name: str numeric_only: bool = False see_also_modules: tuple[str, ...] 
= tuple class Method: def __init__( self, name, bool_reduce=False, extra_kwargs=tuple(), numeric_only=False, see_also_modules=("numpy", "dask.array"), see_also_methods=(), min_flox_version=None, additional_notes="", ): self.name = name self.extra_kwargs = extra_kwargs self.numeric_only = numeric_only self.see_also_modules = see_also_modules self.see_also_methods = see_also_methods self.min_flox_version = min_flox_version self.additional_notes = additional_notes if bool_reduce: self.array_method = f"array_{name}" self.np_example_array = ( """np.array([True, True, True, True, True, False], dtype=bool)""" ) else: self.array_method = name self.np_example_array = """np.array([1, 2, 3, 0, 2, np.nan])""" @dataclass class AggregationGenerator: _dim_docstring = _DIM_DOCSTRING _template_signature = TEMPLATE_REDUCTION_SIGNATURE cls: str datastructure: DataStructure methods: tuple[Method, ...] docref: str docref_description: str example_call_preamble: str definition_preamble: str has_keep_attrs: bool = True notes: str = "" preamble: str = field(init=False) def __post_init__(self): self.preamble = self.definition_preamble.format( obj=self.datastructure.name, cls=self.cls ) def generate_methods(self): yield [self.preamble] for method in self.methods: yield self.generate_method(method) def generate_method(self, method: Method): has_kw_only = method.extra_kwargs or self.has_keep_attrs template_kwargs = dict( obj=self.datastructure.name, method=method.name, keep_attrs=( "\n keep_attrs: bool | None = None," if self.has_keep_attrs else "" ), kw_only="\n *," if has_kw_only else "", ) if method.extra_kwargs: extra_kwargs = "\n " + "\n ".join( [kwarg.kwarg for kwarg in method.extra_kwargs if kwarg.kwarg] ) else: extra_kwargs = "" yield self._template_signature.format( **template_kwargs, extra_kwargs=extra_kwargs, ) for text in [ self._dim_docstring.format(method=method.name, cls=self.cls), *(kwarg.docs for kwarg in method.extra_kwargs if kwarg.docs), _KEEP_ATTRS_DOCSTRING if self.has_keep_attrs else None, _KWARGS_DOCSTRING.format(method=method.name), ]: if text: yield textwrap.indent(text, 8 * " ") yield TEMPLATE_RETURNS.format(**template_kwargs) # we want Dataset.count to refer to DataArray.count # but we also want DatasetGroupBy.count to refer to Dataset.count # The generic aggregations have self.cls == '' others = ( self.datastructure.see_also_modules if self.cls == "" else (self.datastructure.name,) ) see_also_methods_from_modules = ( " " * 8 + f"{mod}.{method.name}" for mod in (method.see_also_modules + others) ) see_also_methods_from_methods = ( " " * 8 + f"{self.datastructure.name}.{method}" for method in method.see_also_methods ) see_also_methods = "\n".join( [*see_also_methods_from_modules, *see_also_methods_from_methods] ) # Fixes broken links mentioned in #8055 yield TEMPLATE_SEE_ALSO.format( **template_kwargs, docref=self.docref, docref_description=self.docref_description, see_also_methods=see_also_methods, ) notes = self.notes if method.numeric_only: if notes != "": notes += "\n\n" notes += _NUMERIC_ONLY_NOTES if method.additional_notes: if notes != "": notes += "\n\n" notes += method.additional_notes if notes != "": yield TEMPLATE_NOTES.format(notes=textwrap.indent(notes, 8 * " ")) yield textwrap.indent(self.generate_example(method=method), "") yield ' """' yield self.generate_code(method, self.has_keep_attrs) def generate_example(self, method): created = self.datastructure.create_example.format( example_array=method.np_example_array ) calculation = 
f"{self.datastructure.example_var_name}{self.example_call_preamble}.{method.name}" if method.extra_kwargs: extra_examples = "".join( kwarg.example for kwarg in method.extra_kwargs if kwarg.example ).format(calculation=calculation, method=method.name) else: extra_examples = "" return f""" Examples --------{created} >>> {self.datastructure.example_var_name} >>> {calculation}(){extra_examples}""" class GroupByAggregationGenerator(AggregationGenerator): _dim_docstring = _DIM_DOCSTRING_GROUPBY _template_signature = TEMPLATE_REDUCTION_SIGNATURE_GROUPBY def generate_code(self, method, has_keep_attrs): extra_kwargs = [kwarg.call for kwarg in method.extra_kwargs if kwarg.call] if self.datastructure.numeric_only: extra_kwargs.append(f"numeric_only={method.numeric_only},") # median isn't enabled yet, because it would break if a single group was present in multiple # chunks. The non-flox code path will just rechunk every group to a single chunk and execute the median method_is_not_flox_supported = method.name in ("median", "cumsum", "cumprod") if method_is_not_flox_supported: indent = 12 else: indent = 16 if extra_kwargs: extra_kwargs = textwrap.indent("\n" + "\n".join(extra_kwargs), indent * " ") else: extra_kwargs = "" if method_is_not_flox_supported: return f"""\ return self.reduce( duck_array_ops.{method.array_method}, dim=dim,{extra_kwargs} keep_attrs=keep_attrs, **kwargs, )""" min_version_check = f""" and module_available("flox", minversion="{method.min_flox_version}")""" return ( """\ if ( flox_available and OPTIONS["use_flox"]""" + (min_version_check if method.min_flox_version is not None else "") + f""" and contains_only_chunked_or_numpy(self._obj) ): return self._flox_reduce( func="{method.name}", dim=dim,{extra_kwargs} # fill_value=fill_value, keep_attrs=keep_attrs, **kwargs, ) else: return self.reduce( duck_array_ops.{method.array_method}, dim=dim,{extra_kwargs} keep_attrs=keep_attrs, **kwargs, )""" ) class GenericAggregationGenerator(AggregationGenerator): def generate_code(self, method, has_keep_attrs): extra_kwargs = [kwarg.call for kwarg in method.extra_kwargs if kwarg.call] if self.datastructure.numeric_only: extra_kwargs.append(f"numeric_only={method.numeric_only},") if extra_kwargs: extra_kwargs = textwrap.indent("\n" + "\n".join(extra_kwargs), 12 * " ") else: extra_kwargs = "" keep_attrs = ( "\n" + 12 * " " + "keep_attrs=keep_attrs," if has_keep_attrs else "" ) return f"""\ return self.reduce( duck_array_ops.{method.array_method}, dim=dim,{extra_kwargs}{keep_attrs} **kwargs, )""" AGGREGATION_METHODS = ( # Reductions: Method("count", see_also_modules=("pandas.DataFrame", "dask.dataframe.DataFrame")), Method("all", bool_reduce=True), Method("any", bool_reduce=True), Method("max", extra_kwargs=(skipna,)), Method("min", extra_kwargs=(skipna,)), Method("mean", extra_kwargs=(skipna,), numeric_only=True), Method("prod", extra_kwargs=(skipna, min_count), numeric_only=True), Method("sum", extra_kwargs=(skipna, min_count), numeric_only=True), Method("std", extra_kwargs=(skipna, ddof), numeric_only=True), Method("var", extra_kwargs=(skipna, ddof), numeric_only=True), Method( "median", extra_kwargs=(skipna,), numeric_only=True, min_flox_version="0.9.2" ), # Cumulatives: Method( "cumsum", extra_kwargs=(skipna,), numeric_only=True, see_also_methods=("cumulative",), additional_notes=_CUM_NOTES, ), Method( "cumprod", extra_kwargs=(skipna,), numeric_only=True, see_also_methods=("cumulative",), additional_notes=_CUM_NOTES, ), ) DATATREE_OBJECT = DataStructure( name="DataTree", create_example=""" >>> 
dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", {example_array})), ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... )""", example_var_name="dt", numeric_only=True, see_also_modules=("Dataset", "DataArray"), ) DATASET_OBJECT = DataStructure( name="Dataset", create_example=""" >>> da = xr.DataArray( ... {example_array}, ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ) >>> ds = xr.Dataset(dict(da=da))""", example_var_name="ds", numeric_only=True, see_also_modules=("DataArray",), ) DATAARRAY_OBJECT = DataStructure( name="DataArray", create_example=""" >>> da = xr.DataArray( ... {example_array}, ... dims="time", ... coords=dict( ... time=("time", pd.date_range("2001-01-01", freq="ME", periods=6)), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... )""", example_var_name="da", numeric_only=False, see_also_modules=("Dataset",), ) DATATREE_GENERATOR = GenericAggregationGenerator( cls="", datastructure=DATATREE_OBJECT, methods=AGGREGATION_METHODS, docref="agg", docref_description="reduction or aggregation operations", example_call_preamble="", definition_preamble=AGGREGATIONS_PREAMBLE, ) DATASET_GENERATOR = GenericAggregationGenerator( cls="", datastructure=DATASET_OBJECT, methods=AGGREGATION_METHODS, docref="agg", docref_description="reduction or aggregation operations", example_call_preamble="", definition_preamble=AGGREGATIONS_PREAMBLE, ) DATAARRAY_GENERATOR = GenericAggregationGenerator( cls="", datastructure=DATAARRAY_OBJECT, methods=AGGREGATION_METHODS, docref="agg", docref_description="reduction or aggregation operations", example_call_preamble="", definition_preamble=AGGREGATIONS_PREAMBLE, ) DATAARRAY_GROUPBY_GENERATOR = GroupByAggregationGenerator( cls="GroupBy", datastructure=DATAARRAY_OBJECT, methods=AGGREGATION_METHODS, docref="groupby", docref_description="groupby operations", example_call_preamble='.groupby("labels")', definition_preamble=GROUPBY_PREAMBLE, notes=_FLOX_GROUPBY_NOTES, ) DATAARRAY_RESAMPLE_GENERATOR = GroupByAggregationGenerator( cls="Resample", datastructure=DATAARRAY_OBJECT, methods=AGGREGATION_METHODS, docref="resampling", docref_description="resampling operations", example_call_preamble='.resample(time="3ME")', definition_preamble=RESAMPLE_PREAMBLE, notes=_FLOX_RESAMPLE_NOTES, ) DATASET_GROUPBY_GENERATOR = GroupByAggregationGenerator( cls="GroupBy", datastructure=DATASET_OBJECT, methods=AGGREGATION_METHODS, docref="groupby", docref_description="groupby operations", example_call_preamble='.groupby("labels")', definition_preamble=GROUPBY_PREAMBLE, notes=_FLOX_GROUPBY_NOTES, ) DATASET_RESAMPLE_GENERATOR = GroupByAggregationGenerator( cls="Resample", datastructure=DATASET_OBJECT, methods=AGGREGATION_METHODS, docref="resampling", docref_description="resampling operations", example_call_preamble='.resample(time="3ME")', definition_preamble=RESAMPLE_PREAMBLE, notes=_FLOX_RESAMPLE_NOTES, ) NAMED_ARRAY_OBJECT = DataStructure( name="NamedArray", create_example=""" >>> from xarray.namedarray.core import NamedArray >>> na = NamedArray( ... "x", {example_array} ... 
)""", example_var_name="na", numeric_only=False, see_also_modules=("Dataset", "DataArray"), ) NAMED_ARRAY_GENERATOR = GenericAggregationGenerator( cls="", datastructure=NAMED_ARRAY_OBJECT, methods=AGGREGATION_METHODS, docref="agg", docref_description="reduction or aggregation operations", example_call_preamble="", definition_preamble=NAMED_ARRAY_AGGREGATIONS_PREAMBLE, has_keep_attrs=False, ) def write_methods(filepath, generators, preamble): with open(filepath, mode="w", encoding="utf-8") as f: f.write(preamble) for gen in generators: for lines in gen.generate_methods(): f.writelines(line + "\n" for line in lines) if __name__ == "__main__": import os from pathlib import Path p = Path(os.getcwd()) write_methods( filepath=p.parent / "xarray" / "xarray" / "core" / "_aggregations.py", generators=[ DATATREE_GENERATOR, DATASET_GENERATOR, DATAARRAY_GENERATOR, DATASET_GROUPBY_GENERATOR, DATASET_RESAMPLE_GENERATOR, DATAARRAY_GROUPBY_GENERATOR, DATAARRAY_RESAMPLE_GENERATOR, ], preamble=MODULE_PREAMBLE, ) write_methods( filepath=p.parent / "xarray" / "xarray" / "namedarray" / "_aggregations.py", generators=[NAMED_ARRAY_GENERATOR], preamble=NAMED_ARRAY_MODULE_PREAMBLE, ) # filepath = p.parent / "core" / "_aggregations.py" # Run from script location xarray-2025.09.0/xarray/util/generate_ops.py000066400000000000000000000243541505620616400206640ustar00rootroot00000000000000"""Generate module and stub file for arithmetic operators of various xarray classes. For internal xarray development use only. Requires that jinja2 is installed. Usage: python -m pip install jinja2 python xarray/util/generate_ops.py > xarray/core/_typed_ops.py """ # Note: the comments in https://github.com/pydata/xarray/pull/4904 provide some # background to some of the design choices made here. 
from __future__ import annotations from collections.abc import Iterator, Sequence from typing import Any import jinja2 BINOPS_EQNE = (("__eq__", "nputils.array_eq"), ("__ne__", "nputils.array_ne")) BINOPS_CMP = ( ("__lt__", "operator.lt"), ("__le__", "operator.le"), ("__gt__", "operator.gt"), ("__ge__", "operator.ge"), ) BINOPS_NUM = ( ("__add__", "operator.add"), ("__sub__", "operator.sub"), ("__mul__", "operator.mul"), ("__pow__", "operator.pow"), ("__truediv__", "operator.truediv"), ("__floordiv__", "operator.floordiv"), ("__mod__", "operator.mod"), ("__and__", "operator.and_"), ("__xor__", "operator.xor"), ("__or__", "operator.or_"), ("__lshift__", "operator.lshift"), ("__rshift__", "operator.rshift"), ) BINOPS_REFLEXIVE = ( ("__radd__", "operator.add"), ("__rsub__", "operator.sub"), ("__rmul__", "operator.mul"), ("__rpow__", "operator.pow"), ("__rtruediv__", "operator.truediv"), ("__rfloordiv__", "operator.floordiv"), ("__rmod__", "operator.mod"), ("__rand__", "operator.and_"), ("__rxor__", "operator.xor"), ("__ror__", "operator.or_"), ) BINOPS_INPLACE = ( ("__iadd__", "operator.iadd"), ("__isub__", "operator.isub"), ("__imul__", "operator.imul"), ("__ipow__", "operator.ipow"), ("__itruediv__", "operator.itruediv"), ("__ifloordiv__", "operator.ifloordiv"), ("__imod__", "operator.imod"), ("__iand__", "operator.iand"), ("__ixor__", "operator.ixor"), ("__ior__", "operator.ior"), ("__ilshift__", "operator.ilshift"), ("__irshift__", "operator.irshift"), ) UNARY_OPS = ( ("__neg__", "operator.neg"), ("__pos__", "operator.pos"), ("__abs__", "operator.abs"), ("__invert__", "operator.invert"), ) # round method and numpy/pandas unary methods which don't modify the data shape, # so the result should still be wrapped in an Variable/DataArray/Dataset OTHER_UNARY_METHODS = ( ("round", "ops.round_"), ("argsort", "ops.argsort"), ("conj", "ops.conj"), ("conjugate", "ops.conjugate"), ) required_method_binary = """ def _binary_op( self, other: {{ other_type }}, f: Callable, reflexive: bool = False ) -> {{ return_type }}: raise NotImplementedError""" template_binop = """ def {{ method }}(self, other: {{ other_type }}) -> {{ return_type }}:{{ type_ignore }} return self._binary_op(other, {{ func }})""" template_binop_overload = """ {%- for overload_type in overload_types %} @overload{{ overload_type_ignore if overload_type == overload_types[0] else "" }} def {{ method }}(self, other: {{ overload_type }}) -> {{ overload_type }}: ... {% endfor %} @overload def {{method}}(self, other: {{ other_type }}) -> {{ return_type }}: ... 
def {{ method }}(self, other: {{ other_type }}) -> {{ return_type }} | {{ ' | '.join(overload_types) }}:{{ type_ignore }} return self._binary_op(other, {{ func }})""" template_reflexive = """ def {{ method }}(self, other: {{ other_type }}) -> {{ return_type }}: return self._binary_op(other, {{ func }}, reflexive=True)""" required_method_inplace = """ def _inplace_binary_op(self, other: {{ other_type }}, f: Callable) -> Self: raise NotImplementedError""" template_inplace = """ def {{ method }}(self, other: {{ other_type }}) -> Self:{{type_ignore}} return self._inplace_binary_op(other, {{ func }})""" required_method_unary = """ def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self: raise NotImplementedError""" template_unary = """ def {{ method }}(self) -> Self: return self._unary_op({{ func }})""" template_other_unary = """ def {{ method }}(self, *args: Any, **kwargs: Any) -> Self: return self._unary_op({{ func }}, *args, **kwargs)""" unhashable = """ # When __eq__ is defined but __hash__ is not, then an object is unhashable, # and it should be declared as follows: __hash__: None # type:ignore[assignment]""" # For some methods we override return type `bool` defined by base class `object`. # We need to add "# type: ignore[override]" # Keep an eye out for: # https://discuss.python.org/t/make-type-hints-for-eq-of-primitives-less-strict/34240 # The type ignores might not be necessary anymore at some point. # # We require a "hack" to tell type checkers that e.g. Variable + DataArray = DataArray # In reality this returns NotImplemented, but this is not a valid type in python 3.9. # Therefore, we return DataArray. In reality this would call DataArray.__add__(Variable) # TODO: change once python 3.11 is the minimum. # # Mypy seems to require that __iadd__ and __add__ have the same signature. 
# This requires some extra type: ignores[misc] in the inplace methods :/ def _type_ignore(ignore: str) -> str: return f" # type:ignore[{ignore}]" if ignore else "" FuncType = Sequence[tuple[str | None, str | None]] OpsType = tuple[FuncType, str, dict[str, Any]] def binops( other_type: str, return_type: str = "Self", type_ignore_eq: str = "override" ) -> list[OpsType]: extras = {"other_type": other_type, "return_type": return_type} return [ ([(None, None)], required_method_binary, extras), (BINOPS_NUM + BINOPS_CMP, template_binop, extras | {"type_ignore": ""}), ( BINOPS_EQNE, template_binop, extras | {"type_ignore": _type_ignore(type_ignore_eq)}, ), ([(None, None)], unhashable, extras), (BINOPS_REFLEXIVE, template_reflexive, extras), ] def binops_overload( other_type: str, overload_types: list[str], return_type: str = "Self", type_ignore_eq: str = "override", ) -> list[OpsType]: extras = {"other_type": other_type, "return_type": return_type} return [ ([(None, None)], required_method_binary, extras), ( BINOPS_NUM + BINOPS_CMP, template_binop_overload, extras | { "overload_types": overload_types, "type_ignore": "", "overload_type_ignore": "", }, ), ( BINOPS_EQNE, template_binop_overload, extras | { "overload_types": overload_types, "type_ignore": "", "overload_type_ignore": _type_ignore(type_ignore_eq), }, ), ([(None, None)], unhashable, extras), (BINOPS_REFLEXIVE, template_reflexive, extras), ] def inplace(other_type: str, type_ignore: str = "") -> list[OpsType]: extras = {"other_type": other_type} return [ ([(None, None)], required_method_inplace, extras), ( BINOPS_INPLACE, template_inplace, extras | {"type_ignore": _type_ignore(type_ignore)}, ), ] def unops() -> list[OpsType]: return [ ([(None, None)], required_method_unary, {}), (UNARY_OPS, template_unary, {}), (OTHER_UNARY_METHODS, template_other_unary, {}), ] # We use short names T_DA and T_DS to keep below 88 lines so # ruff does not reformat everything. When reformatting, the # type-ignores end up in the wrong line :/ ops_info = { # TODO add inplace ops for DataTree? "DataTreeOpsMixin": binops(other_type="DtCompatible") + unops(), "DatasetOpsMixin": ( binops_overload(other_type="DsCompatible", overload_types=["DataTree"]) + inplace(other_type="DsCompatible", type_ignore="misc") + unops() ), "DataArrayOpsMixin": ( binops_overload( other_type="DaCompatible", overload_types=["Dataset", "DataTree"] ) + inplace(other_type="DaCompatible", type_ignore="misc") + unops() ), "VariableOpsMixin": ( binops_overload( other_type="VarCompatible", overload_types=["T_DA", "Dataset", "DataTree"] ) + inplace(other_type="VarCompatible", type_ignore="misc") + unops() ), "DatasetGroupByOpsMixin": binops( other_type="Dataset | DataArray", return_type="Dataset" ), "DataArrayGroupByOpsMixin": binops(other_type="T_Xarray", return_type="T_Xarray"), } MODULE_PREAMBLE = '''\ """Mixin classes with arithmetic operators.""" # This file was generated using xarray.util.generate_ops. Do not edit manually. 
from __future__ import annotations import operator from collections.abc import Callable from typing import TYPE_CHECKING, Any, overload from xarray.core import nputils from xarray.computation import ops from xarray.core.types import ( DaCompatible, DsCompatible, DtCompatible, Self, T_Xarray, VarCompatible, ) if TYPE_CHECKING: from xarray.core.dataarray import DataArray from xarray.core.dataset import Dataset from xarray.core.datatree import DataTree from xarray.core.types import T_DataArray as T_DA''' CLASS_PREAMBLE = """{newline} class {cls_name}: __slots__ = ()""" COPY_DOCSTRING = """\ {method}.__doc__ = {func}.__doc__""" def render(ops_info: dict[str, list[OpsType]]) -> Iterator[str]: """Render the module or stub file.""" yield MODULE_PREAMBLE for cls_name, method_blocks in ops_info.items(): yield CLASS_PREAMBLE.format(cls_name=cls_name, newline="\n") yield from _render_classbody(method_blocks) def _render_classbody(method_blocks: list[OpsType]) -> Iterator[str]: environment = jinja2.Environment() for method_func_pairs, template, extra in method_blocks: if template: for method, func in method_func_pairs: yield environment.from_string(template).render( method=method, func=func, **extra ) yield "" for method_func_pairs, *_ in method_blocks: for method, func in method_func_pairs: if method and func: yield COPY_DOCSTRING.format(method=method, func=func) if __name__ == "__main__": for line in render(ops_info): print(line) xarray-2025.09.0/xarray/util/print_versions.py000066400000000000000000000117711505620616400212740ustar00rootroot00000000000000"""Utility functions for printing version information.""" import contextlib import importlib import locale import os import platform import struct import subprocess import sys def get_sys_info(): """Returns system information as a dict""" blob = [] # get full commit hash commit = None if os.path.isdir(".git") and os.path.isdir("xarray"): try: pipe = subprocess.Popen( ("git", "log", '--format="%H"', "-n", "1"), stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) so, _ = pipe.communicate() except Exception: pass else: if pipe.returncode == 0: commit = so with contextlib.suppress(ValueError): commit = so.decode("utf-8") commit = commit.strip().strip('"') blob.append(("commit", commit)) try: (sysname, _nodename, release, _version, machine, processor) = platform.uname() blob.extend( [ ("python", sys.version), ("python-bits", struct.calcsize("P") * 8), ("OS", f"{sysname}"), ("OS-release", f"{release}"), # ("Version", f"{version}"), ("machine", f"{machine}"), ("processor", f"{processor}"), ("byteorder", f"{sys.byteorder}"), ("LC_ALL", f"{os.environ.get('LC_ALL', 'None')}"), ("LANG", f"{os.environ.get('LANG', 'None')}"), ("LOCALE", f"{locale.getlocale()}"), ] ) except Exception: pass return blob def netcdf_and_hdf5_versions(): libhdf5_version = None libnetcdf_version = None try: import netCDF4 libhdf5_version = netCDF4.__hdf5libversion__ libnetcdf_version = netCDF4.__netcdf4libversion__ except ImportError: try: import h5py libhdf5_version = h5py.version.hdf5_version except ImportError: pass return [("libhdf5", libhdf5_version), ("libnetcdf", libnetcdf_version)] def show_versions(file=sys.stdout): """print the versions of xarray and its dependencies Parameters ---------- file : file-like, optional print to the given file-like object. Defaults to sys.stdout. 
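
    Examples
    --------
    A minimal illustrative call; the printed report depends on the local
    environment, so the output is skipped here:

    >>> import xarray as xr
    >>> xr.show_versions()  # doctest: +SKIP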
""" sys_info = get_sys_info() try: sys_info.extend(netcdf_and_hdf5_versions()) except Exception as e: print(f"Error collecting netcdf / hdf5 version: {e}") deps = [ # (MODULE_NAME, f(mod) -> mod version) ("xarray", lambda mod: mod.__version__), ("pandas", lambda mod: mod.__version__), ("numpy", lambda mod: mod.__version__), ("scipy", lambda mod: mod.__version__), # xarray optionals ("netCDF4", lambda mod: mod.__version__), ("pydap", lambda mod: mod.__version__), ("h5netcdf", lambda mod: mod.__version__), ("h5py", lambda mod: mod.__version__), ("zarr", lambda mod: mod.__version__), ("cftime", lambda mod: mod.__version__), ("nc_time_axis", lambda mod: mod.__version__), ("iris", lambda mod: mod.__version__), ("bottleneck", lambda mod: mod.__version__), ("dask", lambda mod: mod.__version__), ("distributed", lambda mod: mod.__version__), ("matplotlib", lambda mod: mod.__version__), ("cartopy", lambda mod: mod.__version__), ("seaborn", lambda mod: mod.__version__), ("numbagg", lambda mod: mod.__version__), ("fsspec", lambda mod: mod.__version__), ("cupy", lambda mod: mod.__version__), ("pint", lambda mod: mod.__version__), ("sparse", lambda mod: mod.__version__), ("flox", lambda mod: mod.__version__), ("numpy_groupies", lambda mod: mod.__version__), # xarray setup/test ("setuptools", lambda mod: mod.__version__), ("pip", lambda mod: mod.__version__), ("conda", lambda mod: mod.__version__), ("pytest", lambda mod: mod.__version__), ("mypy", lambda mod: importlib.metadata.version(mod.__name__)), # Misc. ("IPython", lambda mod: mod.__version__), ("sphinx", lambda mod: mod.__version__), ] deps_blob = [] for modname, ver_f in deps: try: if modname in sys.modules: mod = sys.modules[modname] else: mod = importlib.import_module(modname) except Exception: deps_blob.append((modname, None)) else: try: ver = ver_f(mod) deps_blob.append((modname, ver)) except Exception: deps_blob.append((modname, "installed")) print("\nINSTALLED VERSIONS", file=file) print("------------------", file=file) for k, stat in sys_info: print(f"{k}: {stat}", file=file) print("", file=file) for k, stat in deps_blob: print(f"{k}: {stat}", file=file) if __name__ == "__main__": show_versions()